Diffstat (limited to 'lib/kernel')
-rw-r--r--  lib/kernel/doc/src/.gitignore | 1
-rw-r--r--  lib/kernel/doc/src/Makefile | 48
-rw-r--r--  lib/kernel/doc/src/application.xml | 121
-rw-r--r--  lib/kernel/doc/src/auth.xml | 12
-rw-r--r--  lib/kernel/doc/src/book.xml | 5
-rw-r--r--  lib/kernel/doc/src/code.xml | 104
-rw-r--r--  lib/kernel/doc/src/config.xml | 15
-rw-r--r--  lib/kernel/doc/src/disk_log.xml | 74
-rw-r--r--  lib/kernel/doc/src/erl_boot_server.xml | 12
-rw-r--r--  lib/kernel/doc/src/erl_ddll.xml | 32
-rw-r--r--  lib/kernel/doc/src/erl_epmd.xml | 104
-rw-r--r--  lib/kernel/doc/src/error_handler.xml | 8
-rw-r--r--  lib/kernel/doc/src/error_logger.xml | 371
-rw-r--r--  lib/kernel/doc/src/file.xml | 399
-rw-r--r--  lib/kernel/doc/src/gen_sctp.xml | 111
-rw-r--r--  lib/kernel/doc/src/gen_tcp.xml | 52
-rw-r--r--  lib/kernel/doc/src/gen_udp.xml | 51
-rw-r--r--  lib/kernel/doc/src/global.xml | 42
-rw-r--r--  lib/kernel/doc/src/global_group.xml | 22
-rw-r--r--  lib/kernel/doc/src/heart.xml | 25
-rw-r--r--  lib/kernel/doc/src/inet.xml | 478
-rw-r--r--  lib/kernel/doc/src/inet_res.xml | 46
-rw-r--r--  lib/kernel/doc/src/introduction_chapter.xml | 63
-rw-r--r--  lib/kernel/doc/src/kernel_app.xml | 230
-rw-r--r--  lib/kernel/doc/src/logger.xml | 1355
-rw-r--r--  lib/kernel/doc/src/logger_arch.dia | bin 0 -> 2527 bytes
-rw-r--r--  lib/kernel/doc/src/logger_arch.png | bin 0 -> 117205 bytes
-rw-r--r--  lib/kernel/doc/src/logger_chapter.xml | 1399
-rw-r--r--  lib/kernel/doc/src/logger_disk_log_h.xml | 168
-rw-r--r--  lib/kernel/doc/src/logger_filters.xml | 254
-rw-r--r--  lib/kernel/doc/src/logger_formatter.xml | 354
-rw-r--r--  lib/kernel/doc/src/logger_std_h.xml | 217
-rw-r--r--  lib/kernel/doc/src/net_adm.xml | 22
-rw-r--r--  lib/kernel/doc/src/net_kernel.xml | 45
-rw-r--r--  lib/kernel/doc/src/notes.xml | 773
-rw-r--r--  lib/kernel/doc/src/os.xml | 130
-rw-r--r--  lib/kernel/doc/src/part.xml | 37
-rw-r--r--  lib/kernel/doc/src/pg2.xml | 22
-rw-r--r--  lib/kernel/doc/src/ref_man.xml | 12
-rw-r--r--  lib/kernel/doc/src/rpc.xml | 67
-rw-r--r--  lib/kernel/doc/src/seq_trace.xml | 31
-rw-r--r--  lib/kernel/doc/src/specs.xml | 6
-rw-r--r--  lib/kernel/doc/src/wrap_log_reader.xml | 12
-rw-r--r--  lib/kernel/examples/Makefile | 2
-rw-r--r--  lib/kernel/examples/gen_tcp_dist/Makefile | 20
-rw-r--r--  lib/kernel/examples/gen_tcp_dist/ebin/.gitignore | 0
-rw-r--r--  lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl | 781
-rw-r--r--  lib/kernel/include/dist.hrl | 7
-rw-r--r--  lib/kernel/include/dist_util.hrl | 14
-rw-r--r--  lib/kernel/include/logger.hrl | 53
-rw-r--r--  lib/kernel/src/Makefile | 56
-rw-r--r--  lib/kernel/src/application.erl | 22
-rw-r--r--  lib/kernel/src/application_controller.erl | 185
-rw-r--r--  lib/kernel/src/application_master.erl | 6
-rw-r--r--  lib/kernel/src/auth.erl | 8
-rw-r--r--  lib/kernel/src/code.erl | 11
-rw-r--r--  lib/kernel/src/code_server.erl | 22
-rw-r--r--  lib/kernel/src/disk_log_1.erl | 4
-rw-r--r--  lib/kernel/src/dist_util.erl | 474
-rw-r--r--  lib/kernel/src/erl_boot_server.erl | 4
-rw-r--r--  lib/kernel/src/erl_epmd.erl | 74
-rw-r--r--  lib/kernel/src/erl_reply.erl | 4
-rw-r--r--  lib/kernel/src/erl_signal_handler.erl | 13
-rw-r--r--  lib/kernel/src/error_handler.erl | 6
-rw-r--r--  lib/kernel/src/error_logger.erl | 615
-rw-r--r--  lib/kernel/src/erts_debug.erl | 162
-rw-r--r--  lib/kernel/src/file.erl | 96
-rw-r--r--  lib/kernel/src/file_int.hrl | 33
-rw-r--r--  lib/kernel/src/file_io_server.erl | 73
-rw-r--r--  lib/kernel/src/file_server.erl | 143
-rw-r--r--  lib/kernel/src/gen_sctp.erl | 22
-rw-r--r--  lib/kernel/src/gen_tcp.erl | 22
-rw-r--r--  lib/kernel/src/gen_udp.erl | 23
-rw-r--r--  lib/kernel/src/group.erl | 301
-rw-r--r--  lib/kernel/src/hipe_unified_loader.erl | 3
-rw-r--r--  lib/kernel/src/inet.erl | 136
-rw-r--r--  lib/kernel/src/inet6_tcp.erl | 8
-rw-r--r--  lib/kernel/src/inet_config.erl | 4
-rw-r--r--  lib/kernel/src/inet_db.erl | 7
-rw-r--r--  lib/kernel/src/inet_dns.erl | 6
-rw-r--r--  lib/kernel/src/inet_hosts.erl | 5
-rw-r--r--  lib/kernel/src/inet_int.hrl | 16
-rw-r--r--  lib/kernel/src/inet_parse.erl | 4
-rw-r--r--  lib/kernel/src/inet_res.erl | 40
-rw-r--r--  lib/kernel/src/inet_tcp.erl | 8
-rw-r--r--  lib/kernel/src/inet_tcp_dist.erl | 150
-rw-r--r--  lib/kernel/src/kernel.app.src | 34
-rw-r--r--  lib/kernel/src/kernel.appup.src | 55
-rw-r--r--  lib/kernel/src/kernel.erl | 53
-rw-r--r--  lib/kernel/src/kernel_config.erl | 9
-rw-r--r--  lib/kernel/src/kernel_refc.erl | 139
-rw-r--r--  lib/kernel/src/logger.erl | 1146
-rw-r--r--  lib/kernel/src/logger_backend.erl | 133
-rw-r--r--  lib/kernel/src/logger_config.erl | 160
-rw-r--r--  lib/kernel/src/logger_disk_log_h.erl | 252
-rw-r--r--  lib/kernel/src/logger_filters.erl | 127
-rw-r--r--  lib/kernel/src/logger_formatter.erl | 563
-rw-r--r--  lib/kernel/src/logger_h_common.erl | 463
-rw-r--r--  lib/kernel/src/logger_h_common.hrl | 127
-rw-r--r--  lib/kernel/src/logger_handler_watcher.erl | 113
-rw-r--r--  lib/kernel/src/logger_internal.hrl | 104
-rw-r--r--  lib/kernel/src/logger_olp.erl | 627
-rw-r--r--  lib/kernel/src/logger_olp.hrl | 185
-rw-r--r--  lib/kernel/src/logger_proxy.erl | 165
-rw-r--r--  lib/kernel/src/logger_server.erl | 617
-rw-r--r--  lib/kernel/src/logger_simple_h.erl | 215
-rw-r--r--  lib/kernel/src/logger_std_h.erl | 643
-rw-r--r--  lib/kernel/src/logger_sup.erl | 59
-rw-r--r--  lib/kernel/src/net_kernel.erl | 356
-rw-r--r--  lib/kernel/src/os.erl | 125
-rw-r--r--  lib/kernel/src/raw_file_io.erl | 75
-rw-r--r--  lib/kernel/src/raw_file_io_compressed.erl | 134
-rw-r--r--  lib/kernel/src/raw_file_io_deflate.erl | 159
-rw-r--r--  lib/kernel/src/raw_file_io_delayed.erl | 320
-rw-r--r--  lib/kernel/src/raw_file_io_inflate.erl | 261
-rw-r--r--  lib/kernel/src/raw_file_io_list.erl | 128
-rw-r--r--  lib/kernel/src/raw_file_io_raw.erl | 25
-rw-r--r--  lib/kernel/src/rpc.erl | 15
-rw-r--r--  lib/kernel/src/seq_trace.erl | 10
-rw-r--r--  lib/kernel/src/standard_error.erl | 3
-rw-r--r--  lib/kernel/src/user.erl | 3
-rw-r--r--  lib/kernel/src/user_drv.erl | 9
-rw-r--r--  lib/kernel/test/Makefile | 22
-rw-r--r--  lib/kernel/test/application_SUITE.erl | 155
-rw-r--r--  lib/kernel/test/code_SUITE.erl | 23
-rw-r--r--  lib/kernel/test/disk_log_SUITE.erl | 218
-rw-r--r--  lib/kernel/test/erl_distribution_SUITE.erl | 30
-rw-r--r--  lib/kernel/test/erl_distribution_wb_SUITE.erl | 16
-rw-r--r--  lib/kernel/test/erl_prim_loader_SUITE.erl | 5
-rw-r--r--  lib/kernel/test/error_logger_SUITE.erl | 64
-rw-r--r--  lib/kernel/test/error_logger_warn_SUITE.erl | 16
-rw-r--r--  lib/kernel/test/file_SUITE.erl | 302
-rw-r--r--  lib/kernel/test/file_name_SUITE.erl | 28
-rw-r--r--  lib/kernel/test/gen_sctp_SUITE.erl | 8
-rw-r--r--  lib/kernel/test/gen_tcp_api_SUITE.erl | 6
-rw-r--r--  lib/kernel/test/gen_tcp_misc_SUITE.erl | 472
-rw-r--r--  lib/kernel/test/gen_udp_SUITE.erl | 302
-rw-r--r--  lib/kernel/test/global_SUITE.erl | 30
-rw-r--r--  lib/kernel/test/heart_SUITE.erl | 23
-rw-r--r--  lib/kernel/test/inet_SUITE.erl | 228
-rw-r--r--  lib/kernel/test/inet_res_SUITE.erl | 6
-rw-r--r--  lib/kernel/test/inet_sockopt_SUITE.erl | 9
-rw-r--r--  lib/kernel/test/init_SUITE.erl | 88
-rw-r--r--  lib/kernel/test/kernel.spec | 1
-rw-r--r--  lib/kernel/test/kernel_SUITE.erl | 71
-rw-r--r--  lib/kernel/test/kernel_bench.spec | 2
-rw-r--r--  lib/kernel/test/kernel_config_SUITE.erl | 4
-rw-r--r--  lib/kernel/test/logger.cover | 17
-rw-r--r--  lib/kernel/test/logger.spec | 13
-rw-r--r--  lib/kernel/test/logger_SUITE.erl | 1390
-rw-r--r--  lib/kernel/test/logger_disk_log_h_SUITE.erl | 1668
-rw-r--r--  lib/kernel/test/logger_env_var_SUITE.erl | 697
-rw-r--r--  lib/kernel/test/logger_filters_SUITE.erl | 227
-rw-r--r--  lib/kernel/test/logger_formatter_SUITE.erl | 886
-rw-r--r--  lib/kernel/test/logger_legacy_SUITE.erl | 288
-rw-r--r--  lib/kernel/test/logger_olp_SUITE.erl | 90
-rw-r--r--  lib/kernel/test/logger_proxy_SUITE.erl | 274
-rw-r--r--  lib/kernel/test/logger_simple_h_SUITE.erl | 209
-rw-r--r--  lib/kernel/test/logger_std_h_SUITE.erl | 2127
-rw-r--r--  lib/kernel/test/logger_stress_SUITE.erl | 550
-rw-r--r--  lib/kernel/test/logger_test_lib.erl | 88
-rw-r--r--  lib/kernel/test/os_SUITE.erl | 22
-rw-r--r--  lib/kernel/test/pdict_SUITE.erl | 34
-rw-r--r--  lib/kernel/test/prim_file_SUITE.erl | 742
-rw-r--r--  lib/kernel/test/sendfile_SUITE.erl | 164
-rw-r--r--  lib/kernel/test/seq_trace_SUITE.erl | 164
-rw-r--r--  lib/kernel/test/wrap_log_reader_SUITE.erl | 19
-rw-r--r--  lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl | 26
-rw-r--r--  lib/kernel/test/zlib_SUITE.erl | 49
-rw-r--r--  lib/kernel/test/zzz_SUITE.erl | 37
-rw-r--r--  lib/kernel/vsn.mk | 2
171 files changed, 27075 insertions, 3422 deletions
diff --git a/lib/kernel/doc/src/.gitignore b/lib/kernel/doc/src/.gitignore
new file mode 100644
index 0000000000..c2813ac866
--- /dev/null
+++ b/lib/kernel/doc/src/.gitignore
@@ -0,0 +1 @@
+*.eps
\ No newline at end of file
diff --git a/lib/kernel/doc/src/Makefile b/lib/kernel/doc/src/Makefile
index 0759f362d4..f8867ccf25 100644
--- a/lib/kernel/doc/src/Makefile
+++ b/lib/kernel/doc/src/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2016. All Rights Reserved.
+# Copyright Ericsson AB 1997-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -42,6 +42,7 @@ XML_REF3_FILES = application.xml \
disk_log.xml \
erl_boot_server.xml \
erl_ddll.xml \
+ erl_epmd.xml \
erl_prim_loader_stub.xml \
erlang_stub.xml \
error_handler.xml \
@@ -56,6 +57,11 @@ XML_REF3_FILES = application.xml \
inet.xml \
inet_res.xml \
init_stub.xml \
+ logger.xml \
+ logger_std_h.xml \
+ logger_disk_log_h.xml \
+ logger_filters.xml \
+ logger_formatter.xml \
net_adm.xml \
net_kernel.xml \
os.xml \
@@ -70,11 +76,18 @@ XML_REF4_FILES = app.xml config.xml
XML_REF6_FILES = kernel_app.xml
-XML_PART_FILES =
-XML_CHAPTER_FILES = notes.xml
+XML_PART_FILES = part.xml
+XML_CHAPTER_FILES = \
+ notes.xml \
+ introduction_chapter.xml \
+ logger_chapter.xml
BOOK_FILES = book.xml
+# The .png file is generated from a .dia file with target 'update_png'
+IMAGE_FILES = \
+ logger_arch.png
+
XML_FILES = \
$(BOOK_FILES) $(XML_CHAPTER_FILES) \
$(XML_PART_FILES) $(XML_REF3_FILES) $(XML_REF4_FILES)\
@@ -100,6 +113,17 @@ SPECS_FILES = $(XML_REF3_FILES:%.xml=$(SPECDIR)/specs_%.xml)
TOP_SPECS_FILE = specs.xml
# ----------------------------------------------------
+# FIGURES
+# ----------------------------------------------------
+# In order to update the figures you have to have both dia
+# and imagemagick installed.
+# The generated .png file must be committed.
+
+update_png:
+ dia --export=logger_arch.eps logger_arch.dia
+ convert logger_arch.eps -resize 65% logger_arch.png
+
+# ----------------------------------------------------
# FLAGS
# ----------------------------------------------------
XML_FLAGS +=
@@ -111,7 +135,7 @@ SPECS_FLAGS = -I../../include
# ----------------------------------------------------
# Targets
# ----------------------------------------------------
-$(HTMLDIR)/%.gif: %.gif
+$(HTMLDIR)/%: %
$(INSTALL_DATA) $< $@
docs: man pdf html
@@ -120,33 +144,35 @@ $(TOP_PDF_FILE): $(XML_FILES)
pdf: $(TOP_PDF_FILE)
-html: gifs $(HTML_REF_MAN_FILE)
+html: images $(HTML_REF_MAN_FILE)
man: $(MAN3_FILES) $(MAN4_FILES) $(MAN6_FILES)
-gifs: $(GIF_FILES:%=$(HTMLDIR)/%)
+images: $(IMAGE_FILES:%=$(HTMLDIR)/%)
+
debug opt:
clean clean_docs:
rm -rf $(HTMLDIR)/*
+ rm -rf $(XMLDIR)
rm -f $(MAN3DIR)/*
rm -f $(MAN4DIR)/*
rm -f $(MAN6DIR)/*
rm -f $(TOP_PDF_FILE) $(TOP_PDF_FILE:%.pdf=%.fo)
rm -f $(SPECDIR)/*
- rm -f errs core *~
+ rm -f errs core *~ *.eps
$(SPECDIR)/specs_erl_prim_loader_stub.xml:
- escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
+ $(gen_verbose)escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
-o$(dir $@) -module erl_prim_loader_stub
$(SPECDIR)/specs_erlang_stub.xml:
- escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
+ $(gen_verbose)escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
-o$(dir $@) -module erlang_stub
$(SPECDIR)/specs_init_stub.xml:
- escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
+ $(gen_verbose)escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
-o$(dir $@) -module init_stub
$(SPECDIR)/specs_zlib_stub.xml:
- escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
+ $(gen_verbose)escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \
-o$(dir $@) -module zlib_stub
# ----------------------------------------------------
diff --git a/lib/kernel/doc/src/application.xml b/lib/kernel/doc/src/application.xml
index 886286b76d..83a83ebad2 100644
--- a/lib/kernel/doc/src/application.xml
+++ b/lib/kernel/doc/src/application.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2016</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -28,7 +28,7 @@
<date></date>
<rev></rev>
</header>
- <module>application</module>
+ <module since="">application</module>
<modulesummary>Generic OTP application functions</modulesummary>
<description>
<p>In OTP, <em>application</em> denotes a component implementing
@@ -67,8 +67,8 @@
</datatypes>
<funcs>
<func>
- <name name="ensure_all_started" arity="1"/>
- <name name="ensure_all_started" arity="2"/>
+ <name name="ensure_all_started" arity="1" since="OTP R16B02"/>
+ <name name="ensure_all_started" arity="2" since="OTP R16B02"/>
<fsummary>Load and start an application and its dependencies, recursively.</fsummary>
<desc>
<p>Equivalent to calling
@@ -85,8 +85,8 @@
</desc>
</func>
<func>
- <name name="ensure_started" arity="1"/>
- <name name="ensure_started" arity="2"/>
+ <name name="ensure_started" arity="1" since="OTP R16B01"/>
+ <name name="ensure_started" arity="2" since="OTP R16B01"/>
<fsummary>Load and start an application.</fsummary>
<desc>
<p>Equivalent to
@@ -95,8 +95,8 @@
</desc>
</func>
<func>
- <name name="get_all_env" arity="0"/>
- <name name="get_all_env" arity="1"/>
+ <name name="get_all_env" arity="0" since=""/>
+ <name name="get_all_env" arity="1" since=""/>
<fsummary>Get the configuration parameters for an application.</fsummary>
<desc>
<p>Returns the configuration parameters and their values for
@@ -108,8 +108,8 @@
</desc>
</func>
<func>
- <name name="get_all_key" arity="0"/>
- <name name="get_all_key" arity="1"/>
+ <name name="get_all_key" arity="0" since=""/>
+ <name name="get_all_key" arity="1" since=""/>
<fsummary>Get the application specification keys.</fsummary>
<desc>
<p>Returns the application specification keys and their values
@@ -122,8 +122,8 @@
</desc>
</func>
<func>
- <name name="get_application" arity="0"/>
- <name name="get_application" arity="1"/>
+ <name name="get_application" arity="0" since=""/>
+ <name name="get_application" arity="1" since=""/>
<fsummary>Get the name of an application containing a certain process or module.</fsummary>
<desc>
<p>Returns the name of the application to which the process
@@ -136,8 +136,8 @@
</desc>
</func>
<func>
- <name name="get_env" arity="1"/>
- <name name="get_env" arity="2"/>
+ <name name="get_env" arity="1" since=""/>
+ <name name="get_env" arity="2" since=""/>
<fsummary>Get the value of a configuration parameter.</fsummary>
<desc>
<p>Returns the value of configuration parameter <c><anno>Par</anno></c>
@@ -153,7 +153,7 @@
</desc>
</func>
<func>
- <name name="get_env" arity="3"/>
+ <name name="get_env" arity="3" since="OTP R16B"/>
<fsummary>Get the value of a configuration parameter using a default.</fsummary>
<desc>
<p>Works like <seealso marker="#get_env/2"><c>get_env/2</c></seealso> but returns
@@ -162,8 +162,8 @@
</desc>
</func>
<func>
- <name name="get_key" arity="1"/>
- <name name="get_key" arity="2"/>
+ <name name="get_key" arity="1" since=""/>
+ <name name="get_key" arity="2" since=""/>
<fsummary>Get the value of an application specification key.</fsummary>
<desc>
<p>Returns the value of the application specification key
@@ -180,8 +180,8 @@
</desc>
</func>
<func>
- <name name="load" arity="1"/>
- <name name="load" arity="2"/>
+ <name name="load" arity="1" since=""/>
+ <name name="load" arity="2" since=""/>
<fsummary>Load an application.</fsummary>
<type name="application_spec"/>
<type name="application_opt"/>
@@ -226,7 +226,7 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</desc>
</func>
<func>
- <name name="loaded_applications" arity="0"/>
+ <name name="loaded_applications" arity="0" since=""/>
<fsummary>Get the currently loaded applications.</fsummary>
<desc>
<p>Returns a list with information about the applications, and included
@@ -238,7 +238,42 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</desc>
</func>
<func>
- <name name="permit" arity="2"/>
+ <name name="set_env" arity="1" since="OTP 21.3"/>
+ <name name="set_env" arity="2" since="OTP 21.3"/>
+ <fsummary>Sets the configuration parameters of multiple applications.</fsummary>
+ <desc>
+ <p>Sets the configuration <c><anno>Config</anno></c> for multiple
+ applications. It is equivalent to calling <c>set_env/4</c> on
+ each application individually, except it is more efficient.
+ The given <c><anno>Config</anno></c> is validated before the
+ configuration is set.</p>
+ <p><c>set_env/2</c> uses the standard <c>gen_server</c> time-out
+ value (5000 ms). Option <c>timeout</c> can be specified
+ if another time-out value is useful, for example, in situations
+ where the application controller is heavily loaded.</p>
+ <p>Option <c>persistent</c> can be set to <c>true</c>
+ to guarantee that parameters set with <c>set_env/2</c>
+ are not overridden by those defined in the application resource
+ file on load. This means that persistent values will stick after the application
+ is loaded and also on application reload.</p>
+ <p>If an application is given more than once or if an application
+ has the same key given more than once, the behaviour is undefined
+ and a warning message will be logged. In future releases, an error
+ will be raised.</p>
+ <p><c>set_env/1</c> is equivalent to <c>set_env(Config, [])</c>.</p>
+ <warning>
+ <p>Use this function only if you know what you are doing,
+ that is, on your own applications. It is very
+ application-dependent and
+ configuration parameter-dependent when and how often
+ the value is read by the application. Careless use
+ of this function can put the application in a
+ weird, inconsistent, and malfunctioning state.</p>
+ </warning>
+ </desc>
+ </func>
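Example (not from the patch): a minimal sketch of the set_env/2 call documented above. The application names and parameter values are hypothetical; the options shown are the timeout and persistent options from the text.

    1> application:set_env([{kernel, [{logger_level, warning}]},
                            {my_app, [{pool_size, 10}]}],
                           [{timeout, 10000}, {persistent, true}]).
    ok
    2> application:get_env(my_app, pool_size).
    {ok,10}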
+ <func>
+ <name name="permit" arity="2" since=""/>
<fsummary>Change the permission for an application to run at a node.</fsummary>
<desc>
<p>Changes the permission for <c><anno>Application</anno></c> to run at
@@ -271,8 +306,8 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</desc>
</func>
<func>
- <name name="set_env" arity="3"/>
- <name name="set_env" arity="4"/>
+ <name name="set_env" arity="3" since=""/>
+ <name name="set_env" arity="4" since=""/>
<fsummary>Set the value of a configuration parameter.</fsummary>
<desc>
<p>Sets the value of configuration parameter <c><anno>Par</anno></c> for
@@ -302,8 +337,8 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</desc>
</func>
<func>
- <name name="start" arity="1"/>
- <name name="start" arity="2"/>
+ <name name="start" arity="1" since=""/>
+ <name name="start" arity="2" since=""/>
<fsummary>Load and start an application.</fsummary>
<desc>
<p>Starts <c><anno>Application</anno></c>. If it is not loaded,
@@ -318,8 +353,13 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
<c>{error,{not_started,App}}</c> is returned, where <c>App</c>
is the name of the missing application.</p>
<p>The application controller then creates an <em>application master</em>
- for the application. The application master is
- the group leader of all the processes in the application.
+ for the application. The application master becomes the
+ group leader of all the processes in the application. I/O is
+ forwarded to the previous group leader, though, this is just
+ a way to identify processes that belong to the application.
+ Used for example to find itself from any process, or,
+ reciprocally, to kill them all when it terminates.</p>
+ <p>
The application master starts the application by calling
the application callback function <c>Module:start/2</c> as
defined by the application specification key <c>mod</c>.</p>
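Example (not from the patch): the group-leader relationship described above can be observed from any process in an application's supervision tree; a rough sketch, with the printed shapes assumed rather than taken from the patch.

    %% Run inside a process that belongs to some application:
    {ok, App} = application:get_application(self()), %% resolved via the group leader
    Master = erlang:group_leader(),                  %% the application master
    io:format("~p belongs to ~p (application master ~p)~n", [self(), App, Master]).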
@@ -348,7 +388,7 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</desc>
</func>
<func>
- <name name="start_type" arity="0"/>
+ <name name="start_type" arity="0" since=""/>
<fsummary>Get the start type of an ongoing application startup.</fsummary>
<desc>
<p>This function is intended to be called by a process belonging
@@ -365,7 +405,7 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</desc>
</func>
<func>
- <name name="stop" arity="1"/>
+ <name name="stop" arity="1" since=""/>
<fsummary>Stop an application.</fsummary>
<desc>
<p>Stops <c><anno>Application</anno></c>. The application master calls
@@ -394,7 +434,7 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</desc>
</func>
<func>
- <name name="takeover" arity="2"/>
+ <name name="takeover" arity="2" since=""/>
<fsummary>Take over a distributed application.</fsummary>
<desc>
<p>Takes over the distributed application
@@ -419,7 +459,7 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</desc>
</func>
<func>
- <name name="unload" arity="1"/>
+ <name name="unload" arity="1" since=""/>
<fsummary>Unload an application.</fsummary>
<desc>
<p>Unloads the application specification for <c><anno>Application</anno></c>
@@ -430,8 +470,8 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</desc>
</func>
<func>
- <name name="unset_env" arity="2"/>
- <name name="unset_env" arity="3"/>
+ <name name="unset_env" arity="2" since=""/>
+ <name name="unset_env" arity="3" since=""/>
<fsummary>Unset the value of a configuration parameter.</fsummary>
<desc>
<p>Removes the configuration parameter <c><anno>Par</anno></c> and its value
@@ -454,8 +494,8 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</desc>
</func>
<func>
- <name name="which_applications" arity="0"/>
- <name name="which_applications" arity="1"/>
+ <name name="which_applications" arity="0" since=""/>
+ <name name="which_applications" arity="1" since=""/>
<fsummary>Get the currently running applications.</fsummary>
<desc>
<p>Returns a list with information about the applications that
@@ -479,7 +519,7 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</section>
<funcs>
<func>
- <name>Module:start(StartType, StartArgs) -> {ok, Pid} | {ok, Pid, State} | {error, Reason}</name>
+ <name since="">Module:start(StartType, StartArgs) -> {ok, Pid} | {ok, Pid, State} | {error, Reason}</name>
<fsummary>Start an application.</fsummary>
<type>
<v>StartType = <seealso marker="#type-start_type"><c>start_type()</c></seealso></v>
@@ -521,7 +561,7 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</desc>
</func>
<func>
- <name>Module:start_phase(Phase, StartType, PhaseArgs) -> ok | {error, Reason}</name>
+ <name since="">Module:start_phase(Phase, StartType, PhaseArgs) -> ok | {error, Reason}</name>
<fsummary>Extended start of an application.</fsummary>
<type>
<v>Phase = atom()</v>
@@ -546,7 +586,7 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</desc>
</func>
<func>
- <name>Module:prep_stop(State) -> NewState</name>
+ <name since="">Module:prep_stop(State) -> NewState</name>
<fsummary>Prepare an application for termination.</fsummary>
<type>
<v>State = NewState = term()</v>
@@ -564,7 +604,7 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</desc>
</func>
<func>
- <name>Module:stop(State)</name>
+ <name since="">Module:stop(State)</name>
<fsummary>Clean up after termination of an application.</fsummary>
<type>
<v>State = term()</v>
@@ -580,7 +620,7 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</desc>
</func>
<func>
- <name>Module:config_change(Changed, New, Removed) -> ok</name>
+ <name since="">Module:config_change(Changed, New, Removed) -> ok</name>
<fsummary>Update the configuration parameters for an application.</fsummary>
<type>
<v>Changed = [{Par,Val}]</v>
@@ -608,4 +648,3 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
<seealso marker="app">app(4)</seealso></p>
</section>
</erlref>
-
diff --git a/lib/kernel/doc/src/auth.xml b/lib/kernel/doc/src/auth.xml
index 5901446960..a57da18de9 100644
--- a/lib/kernel/doc/src/auth.xml
+++ b/lib/kernel/doc/src/auth.xml
@@ -28,7 +28,7 @@
<date></date>
<rev></rev>
</header>
- <module>auth</module>
+ <module since="">auth</module>
<modulesummary>Erlang network authentication server.</modulesummary>
<description>
<p>This module is deprecated. For a description of the Magic
@@ -42,7 +42,7 @@
</datatypes>
<funcs>
<func>
- <name name="cookie" arity="0"/>
+ <name name="cookie" arity="0" since=""/>
<fsummary>Magic cookie for local node (deprecated).</fsummary>
<desc>
<p>Use
@@ -51,7 +51,7 @@
</desc>
</func>
<func>
- <name name="cookie" arity="1"/>
+ <name name="cookie" arity="1" since=""/>
<fsummary>Set the magic for the local node (deprecated).</fsummary>
<type_desc variable="TheCookie">
The cookie can also be specified as a list with a single atom element.
@@ -63,7 +63,7 @@
</desc>
</func>
<func>
- <name name="is_auth" arity="1"/>
+ <name name="is_auth" arity="1" since=""/>
<fsummary>Status of communication authorization (deprecated).</fsummary>
<desc>
<p>Returns <c>yes</c> if communication with <c><anno>Node</anno></c> is
@@ -76,7 +76,7 @@
</desc>
</func>
<func>
- <name>node_cookie([Node, Cookie]) -> yes | no</name>
+ <name since="">node_cookie([Node, Cookie]) -> yes | no</name>
<fsummary>Set the magic cookie for a node and verify authorization (deprecated).</fsummary>
<type>
<v>Node = node()</v>
@@ -88,7 +88,7 @@
</desc>
</func>
<func>
- <name name="node_cookie" arity="2"/>
+ <name name="node_cookie" arity="2" since=""/>
<fsummary>Set the magic cookie for a node and verify authorization (deprecated).</fsummary>
<desc>
<p>Sets the magic cookie of <c><anno>Node</anno></c> to
diff --git a/lib/kernel/doc/src/book.xml b/lib/kernel/doc/src/book.xml
index 81a87d126d..4b3573b9fe 100644
--- a/lib/kernel/doc/src/book.xml
+++ b/lib/kernel/doc/src/book.xml
@@ -4,7 +4,7 @@
<book xmlns:xi="http://www.w3.org/2001/XInclude">
<header titlestyle="normal">
<copyright>
- <year>1997</year><year>2016</year>
+ <year>1997</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -34,6 +34,9 @@
<preamble>
<contents level="2"></contents>
</preamble>
+ <parts lift="yes">
+ <xi:include href="part.xml"/>
+ </parts>
<applications>
<xi:include href="ref_man.xml"/>
</applications>
diff --git a/lib/kernel/doc/src/code.xml b/lib/kernel/doc/src/code.xml
index c94f612c01..85178da930 100644
--- a/lib/kernel/doc/src/code.xml
+++ b/lib/kernel/doc/src/code.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2017</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -28,32 +28,34 @@
<date></date>
<rev></rev>
</header>
- <module>code</module>
+ <module since="">code</module>
<modulesummary>Erlang code server.</modulesummary>
<description>
<p>This module contains the interface to the Erlang
<em>code server</em>, which deals with the loading of compiled
code into a running Erlang runtime system.</p>
- <p>The runtime system can be started in <em>embedded</em> or
- <em>interactive</em> mode. Which one is decided by command-line
+ <p>The runtime system can be started in <em>interactive</em> or
+ <em>embedded</em> mode. Which one is decided by the command-line
flag <c>-mode</c>:</p>
<pre>
% <input>erl -mode interactive</input></pre>
<p>The modes are as follows:</p>
<list type="bulleted">
<item>
- <p>In embedded mode, all code is loaded during system startup
- according to the boot script. (Code can also be loaded later
- by explicitly ordering the code server to do so).</p>
- </item>
- <item>
<p>In interactive mode, which is default, only some code is loaded
- during system startup, basically the modules needed by the runtime
+ during system startup, basically the modules needed by the runtime
system. Other code is dynamically loaded when first
referenced. When a call to a function in a certain module is
made, and the module is not loaded, the code server searches
for and tries to load the module.</p>
</item>
+ <item>
+ <p>In embedded mode, modules are not auto-loaded. Trying to use
+ a module that has not been loaded results in an error. This mode is
+ recommended when the boot script loads all modules, as is
+ typically done in OTP releases. (Code can still be loaded later
+ by explicitly ordering the code server to do so).</p>
+ </item>
</list>
<p>To prevent accidentally reloading of modules affecting the Erlang
runtime system, directories <c>kernel</c>, <c>stdlib</c>,
@@ -320,7 +322,7 @@ zip:create("mnesia-4.4.7.ez",
<funcs>
<func>
- <name name="set_path" arity="1"/>
+ <name name="set_path" arity="1" since=""/>
<fsummary>Set the code server search path.</fsummary>
<desc>
<p>Sets the code path to the list of directories <c><anno>Path</anno></c>.</p>
@@ -334,15 +336,15 @@ zip:create("mnesia-4.4.7.ez",
</desc>
</func>
<func>
- <name name="get_path" arity="0"/>
+ <name name="get_path" arity="0" since=""/>
<fsummary>Return the code server search path.</fsummary>
<desc>
<p>Returns the code path.</p>
</desc>
</func>
<func>
- <name name="add_path" arity="1"/>
- <name name="add_pathz" arity="1"/>
+ <name name="add_path" arity="1" since=""/>
+ <name name="add_pathz" arity="1" since=""/>
<fsummary>Add a directory to the end of the code path.</fsummary>
<type name="add_path_ret"/>
<desc>
@@ -355,7 +357,7 @@ zip:create("mnesia-4.4.7.ez",
</desc>
</func>
<func>
- <name name="add_patha" arity="1"/>
+ <name name="add_patha" arity="1" since=""/>
<fsummary>Add a directory to the beginning of the code path.</fsummary>
<type name="add_path_ret"/>
<desc>
@@ -368,8 +370,8 @@ zip:create("mnesia-4.4.7.ez",
</desc>
</func>
<func>
- <name name="add_paths" arity="1"/>
- <name name="add_pathsz" arity="1"/>
+ <name name="add_paths" arity="1" since=""/>
+ <name name="add_pathsz" arity="1" since=""/>
<fsummary>Add directories to the end of the code path.</fsummary>
<desc>
<p>Adds the directories in <c><anno>Dirs</anno></c> to the end of the code
@@ -379,7 +381,7 @@ zip:create("mnesia-4.4.7.ez",
</desc>
</func>
<func>
- <name name="add_pathsa" arity="1"/>
+ <name name="add_pathsa" arity="1" since=""/>
<fsummary>Add directories to the beginning of the code path.</fsummary>
<desc>
<p>Traverses <c><anno>Dirs</anno></c> and adds
@@ -395,7 +397,7 @@ zip:create("mnesia-4.4.7.ez",
</desc>
</func>
<func>
- <name name="del_path" arity="1"/>
+ <name name="del_path" arity="1" since=""/>
<fsummary>Delete a directory from the code path.</fsummary>
<desc>
<p>Deletes a directory from the code path. The argument can be
@@ -415,7 +417,7 @@ zip:create("mnesia-4.4.7.ez",
</desc>
</func>
<func>
- <name name="replace_path" arity="2"/>
+ <name name="replace_path" arity="2" since=""/>
<fsummary>Replace a directory with another in the code path.</fsummary>
<desc>
<p>Replaces an old occurrence of a directory
@@ -439,7 +441,7 @@ zip:create("mnesia-4.4.7.ez",
</desc>
</func>
<func>
- <name name="load_file" arity="1"/>
+ <name name="load_file" arity="1" since=""/>
<fsummary>Load a module.</fsummary>
<type name="load_ret"/>
<desc>
@@ -458,7 +460,7 @@ zip:create("mnesia-4.4.7.ez",
</desc>
</func>
<func>
- <name name="load_abs" arity="1"/>
+ <name name="load_abs" arity="1" since=""/>
<fsummary>Load a module, residing in a specified file.</fsummary>
<type name="load_ret"/>
<type name="loaded_filename"/>
@@ -475,7 +477,7 @@ zip:create("mnesia-4.4.7.ez",
</desc>
</func>
<func>
- <name name="ensure_loaded" arity="1"/>
+ <name name="ensure_loaded" arity="1" since=""/>
<fsummary>Ensure that a module is loaded.</fsummary>
<desc>
<p>Tries to load a module in the same way as
@@ -487,7 +489,7 @@ zip:create("mnesia-4.4.7.ez",
</desc>
</func>
<func>
- <name name="load_binary" arity="3"/>
+ <name name="load_binary" arity="3" since=""/>
<fsummary>Load object code for a module.</fsummary>
<type name="loaded_filename"/>
<type name="loaded_ret_atoms"/>
@@ -505,7 +507,7 @@ zip:create("mnesia-4.4.7.ez",
</desc>
</func>
<func>
- <name name="atomic_load" arity="1"/>
+ <name name="atomic_load" arity="1" since="OTP 19.0"/>
<fsummary>Load a list of modules atomically</fsummary>
<desc>
<p>Tries to load all of the modules in the list
@@ -564,7 +566,7 @@ ok = code:finish_loading(Prepared),
</desc>
</func>
<func>
- <name name="prepare_loading" arity="1"/>
+ <name name="prepare_loading" arity="1" since="OTP 19.0"/>
<fsummary>Prepare a list of modules atomically</fsummary>
<desc>
<p>Prepares to load the modules in the list
@@ -596,7 +598,7 @@ ok = code:finish_loading(Prepared),
</desc>
</func>
<func>
- <name name="finish_loading" arity="1"/>
+ <name name="finish_loading" arity="1" since="OTP 19.0"/>
<fsummary>Finish loading a list of prepared modules atomically</fsummary>
<desc>
<p>Tries to load code for all modules that have been previously
@@ -625,7 +627,7 @@ ok = code:finish_loading(Prepared),
</desc>
</func>
<func>
- <name name="ensure_modules_loaded" arity="1"/>
+ <name name="ensure_modules_loaded" arity="1" since="OTP 19.0"/>
<fsummary>Ensure that a list of modules is loaded</fsummary>
<desc>
<p>Tries to load any modules not already loaded in the list
@@ -637,7 +639,7 @@ ok = code:finish_loading(Prepared),
</desc>
</func>
<func>
- <name name="delete" arity="1"/>
+ <name name="delete" arity="1" since=""/>
<fsummary>Remove current code for a module.</fsummary>
<desc>
<p>Removes the current code for <c><anno>Module</anno></c>, that is,
@@ -650,7 +652,7 @@ ok = code:finish_loading(Prepared),
</desc>
</func>
<func>
- <name name="purge" arity="1"/>
+ <name name="purge" arity="1" since=""/>
<fsummary>Remove old code for a module.</fsummary>
<desc>
<p>Purges the code for <c><anno>Module</anno></c>, that is, removes code
@@ -666,7 +668,7 @@ ok = code:finish_loading(Prepared),
</desc>
</func>
<func>
- <name name="soft_purge" arity="1"/>
+ <name name="soft_purge" arity="1" since=""/>
<fsummary>Remove old code for a module, unless no process uses it.</fsummary>
<desc>
<p>Purges the code for <c><anno>Module</anno></c>, that is, removes code
@@ -681,7 +683,7 @@ ok = code:finish_loading(Prepared),
</desc>
</func>
<func>
- <name name="is_loaded" arity="1"/>
+ <name name="is_loaded" arity="1" since=""/>
<fsummary>Check if a module is loaded.</fsummary>
<type name="loaded_filename"/>
<type name="loaded_ret_atoms"/>
@@ -700,7 +702,7 @@ ok = code:finish_loading(Prepared),
</desc>
</func>
<func>
- <name name="all_loaded" arity="0"/>
+ <name name="all_loaded" arity="0" since=""/>
<fsummary>Get all loaded modules.</fsummary>
<type name="loaded_filename"/>
<type name="loaded_ret_atoms"/>
@@ -714,7 +716,7 @@ ok = code:finish_loading(Prepared),
</desc>
</func>
<func>
- <name name="which" arity="1"/>
+ <name name="which" arity="1" since=""/>
<fsummary>The object code file of a module.</fsummary>
<type name="loaded_ret_atoms"/>
<desc>
@@ -729,7 +731,7 @@ ok = code:finish_loading(Prepared),
</desc>
</func>
<func>
- <name name="get_object_code" arity="1"/>
+ <name name="get_object_code" arity="1" since=""/>
<fsummary>Gets the object code for a module.</fsummary>
<desc>
<p>Searches the code path for the object code of module
@@ -748,7 +750,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</desc>
</func>
<func>
- <name name="root_dir" arity="0"/>
+ <name name="root_dir" arity="0" since=""/>
<fsummary>Root directory of Erlang/OTP.</fsummary>
<desc>
<p>Returns the root directory of Erlang/OTP, which is
@@ -760,7 +762,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</desc>
</func>
<func>
- <name name="lib_dir" arity="0"/>
+ <name name="lib_dir" arity="0" since=""/>
<fsummary>Library directory of Erlang/OTP.</fsummary>
<desc>
<p>Returns the library directory, <c>$OTPROOT/lib</c>, where
@@ -772,7 +774,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</desc>
</func>
<func>
- <name name="lib_dir" arity="1"/>
+ <name name="lib_dir" arity="1" since=""/>
<fsummary>Library directory for an application.</fsummary>
<desc>
<p>Returns the path
@@ -805,7 +807,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</desc>
</func>
<func>
- <name name="lib_dir" arity="2"/>
+ <name name="lib_dir" arity="2" since=""/>
<fsummary>Subdirectory for an application.</fsummary>
<desc>
<p>Returns the path to a subdirectory directly under the top
@@ -825,7 +827,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</desc>
</func>
<func>
- <name name="compiler_dir" arity="0"/>
+ <name name="compiler_dir" arity="0" since=""/>
<fsummary>Library directory for the compiler.</fsummary>
<desc>
<p>Returns the compiler library directory. Equivalent to
@@ -833,7 +835,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</desc>
</func>
<func>
- <name name="priv_dir" arity="1"/>
+ <name name="priv_dir" arity="1" since=""/>
<fsummary>Priv directory for an application.</fsummary>
<desc>
<p>Returns the path to the <c>priv</c> directory in an
@@ -844,7 +846,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</desc>
</func>
<func>
- <name name="objfile_extension" arity="0"/>
+ <name name="objfile_extension" arity="0" since=""/>
<fsummary>Object code file extension.</fsummary>
<desc>
<p>Returns the object code file extension corresponding to
@@ -852,7 +854,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</desc>
</func>
<func>
- <name name="stick_dir" arity="1"/>
+ <name name="stick_dir" arity="1" since=""/>
<fsummary>Mark a directory as sticky.</fsummary>
<desc>
<p>Marks <c><anno>Dir</anno></c> as sticky.</p>
@@ -860,7 +862,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</desc>
</func>
<func>
- <name name="unstick_dir" arity="1"/>
+ <name name="unstick_dir" arity="1" since=""/>
<fsummary>Remove a sticky directory mark.</fsummary>
<desc>
<p>Unsticks a directory that is marked as
@@ -869,7 +871,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</desc>
</func>
<func>
- <name name="is_sticky" arity="1"/>
+ <name name="is_sticky" arity="1" since=""/>
<fsummary>Test if a module is sticky.</fsummary>
<desc>
<p>Returns <c>true</c> if <c><anno>Module</anno></c> is the
@@ -880,7 +882,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</desc>
</func>
<func>
- <name name="where_is_file" arity="1"/>
+ <name name="where_is_file" arity="1" since=""/>
<fsummary>Full name of a file located in the code path.</fsummary>
<desc>
<p>Searches the code path for <c><anno>Filename</anno></c>, a file of
@@ -891,7 +893,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</desc>
</func>
<func>
- <name name="clash" arity="0"/>
+ <name name="clash" arity="0" since=""/>
<fsummary>Search for modules with identical names.</fsummary>
<desc>
<p>Searches all directories in the code path for module names with
@@ -899,7 +901,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</desc>
</func>
<func>
- <name name="module_status" arity="1"/>
+ <name name="module_status" arity="1" since="OTP 20.0"/>
<fsummary>Return the status of the module in relation to object file on disk.</fsummary>
<desc>
<p>Returns:</p>
@@ -932,7 +934,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</desc>
</func>
<func>
- <name name="modified_modules" arity="0"/>
+ <name name="modified_modules" arity="0" since="OTP 20.0"/>
<fsummary>Return a list of all modules modified on disk.</fsummary>
<desc>
<p>Returns the list of all currently loaded modules for which
@@ -941,7 +943,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</desc>
</func>
<func>
- <name name="is_module_native" arity="1"/>
+ <name name="is_module_native" arity="1" since=""/>
<fsummary>Test if a module has native code.</fsummary>
<desc>
<p>Returns:</p>
@@ -959,7 +961,7 @@ rpc:call(Node, code, load_binary, [Module, Filename, Binary]),
</func>
<func>
- <name name="get_mode" arity="0"/>
+ <name name="get_mode" arity="0" since="OTP R16B"/>
<fsummary>The mode of the code server.</fsummary>
<desc>
<p>Returns an atom describing the mode of the code server:
diff --git a/lib/kernel/doc/src/config.xml b/lib/kernel/doc/src/config.xml
index fdb2d29f63..714af93f4d 100644
--- a/lib/kernel/doc/src/config.xml
+++ b/lib/kernel/doc/src/config.xml
@@ -4,7 +4,7 @@
<fileref>
<header>
<copyright>
- <year>1997</year><year>2017</year>
+ <year>1997</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -37,10 +37,10 @@
data in the system configuration file <c>Name.config</c>.</p>
<p>Configuration parameter values in the configuration file
override the values in the application resource files (see
- <seealso marker="app"><c>app(4)</c></seealso>.
+ <seealso marker="app"><c>app(4)</c></seealso>).
The values in the configuration file can be
overridden by command-line flags (see
- <seealso marker="erts:erl"><c>erts:erl(1)</c></seealso>.</p>
+ <seealso marker="erts:erl"><c>erts:erl(1)</c></seealso>).</p>
<p>The value of a configuration parameter is retrieved by calling
<c>application:get_env/1,2</c>.</p>
</description>
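Example (not from the patch): a sketch of the override order described above, using a hypothetical application my_app and parameter pool_size.

    %% my_app.app:   {env, [{pool_size, 5}]}
    %% sys.config:   [{my_app, [{pool_size, 10}]}].
    %% command line: erl -config sys -my_app pool_size 20
    1> application:get_env(my_app, pool_size).   %% once my_app is loaded
    {ok,20}                                      %% the command-line flag wins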
@@ -86,8 +86,13 @@
<tag><c>File = string()</c></tag>
<item>Name of another <c>.config</c> file.
Extension <c>.config</c> can be omitted. It is
- recommended to use absolute paths. A relative path is
- relative the current working directory of the emulator.</item>
+ recommended to use absolute paths. If a relative path is used,
+ <c>File</c> is searched for, first relative to the directory of <c>sys.config</c>, then relative
+ to the current working directory of the emulator, for backward compatibility.
+ This allows a <c>sys.config</c> to point out other <c>.config</c> files in a release,
+ or in a node started manually using <c>-config ...</c>, with the same result regardless of
+ the current working directory.
+ </item>
</taglist>
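Example (not from the patch): a sketch of a sys.config that points out another configuration file by a relative name, which with this change is resolved against the directory of sys.config first. The file names are illustrative.

    %% sys.config
    [{kernel, [{logger_level, info}]},
     "networks/prod.config"].   %% contents are read and merged; ".config" may be omitted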
<p>When traversing the contents of <c>sys.config</c> and a filename
is encountered, its contents are read and merged with the result
diff --git a/lib/kernel/doc/src/disk_log.xml b/lib/kernel/doc/src/disk_log.xml
index 884cb32c0c..e308b06f3c 100644
--- a/lib/kernel/doc/src/disk_log.xml
+++ b/lib/kernel/doc/src/disk_log.xml
@@ -34,7 +34,7 @@
<rev>D</rev>
<file>disk_log.sgml</file>
</header>
- <module>disk_log</module>
+ <module since="">disk_log</module>
<modulesummary>A disk-based term logging facility.</modulesummary>
<description>
<p><c>disk_log</c> is a disk-based term logger that enables
@@ -238,7 +238,7 @@
</datatypes>
<funcs>
<func>
- <name name="accessible_logs" arity="0"/>
+ <name name="accessible_logs" arity="0" since=""/>
<fsummary>Return the accessible disk logs on the current node.</fsummary>
<desc>
<p>Returns the names of the disk logs accessible on the current node.
@@ -248,8 +248,8 @@
</desc>
</func>
<func>
- <name name="alog" arity="2"/>
- <name name="balog" arity="2"/>
+ <name name="alog" arity="2" since=""/>
+ <name name="balog" arity="2" since=""/>
<fsummary>Asynchronously log an item on to a disk log.</fsummary>
<type variable="Log"/>
<type variable="Term" name_i="1"/>
@@ -275,8 +275,8 @@
</desc>
</func>
<func>
- <name name="alog_terms" arity="2"/>
- <name name="balog_terms" arity="2"/>
+ <name name="alog_terms" arity="2" since=""/>
+ <name name="balog_terms" arity="2" since=""/>
<fsummary>Asynchronously log many items on to a disk log.</fsummary>
<type variable="Log"/>
<type variable="TermList" name_i="1"/>
@@ -303,8 +303,8 @@
</desc>
</func>
<func>
- <name name="block" arity="1"/>
- <name name="block" arity="2"/>
+ <name name="block" arity="1" since=""/>
+ <name name="block" arity="2" since=""/>
<fsummary>Block a disk log.</fsummary>
<type name="block_error_rsn"/>
<desc>
@@ -330,21 +330,21 @@
</desc>
</func>
<func>
- <name name="change_header" arity="2"/>
+ <name name="change_header" arity="2" since=""/>
<fsummary>Change option head or head_func for an owner of a disk log.</fsummary>
<desc>
<p>Changes the value of option <c>head</c> or <c>head_func</c> for an owner of a disk log.</p>
</desc>
</func>
<func>
- <name name="change_notify" arity="3"/>
+ <name name="change_notify" arity="3" since=""/>
<fsummary>Change option notify for an owner of a disk log.</fsummary>
<desc>
<p>Changes the value of option <c>notify</c> for an owner of a disk log. </p>
</desc>
</func>
<func>
- <name name="change_size" arity="2"/>
+ <name name="change_size" arity="2" since=""/>
<fsummary>Change the size of an open disk log.</fsummary>
<desc>
<p>Changes the size of an open log.
@@ -384,10 +384,10 @@
</desc>
</func>
<func>
- <name name="chunk" arity="2"/>
- <name name="chunk" arity="3"/>
- <name name="bchunk" arity="2"/>
- <name name="bchunk" arity="3"/>
+ <name name="chunk" arity="2" since=""/>
+ <name name="chunk" arity="3" since=""/>
+ <name name="bchunk" arity="2" since=""/>
+ <name name="bchunk" arity="3" since=""/>
<fsummary>Read a chunk of items written to a disk log.</fsummary>
<type variable="Log"/>
<type variable="Continuation"/>
@@ -447,7 +447,7 @@
</desc>
</func>
<func>
- <name name="chunk_info" arity="1"/>
+ <name name="chunk_info" arity="1" since=""/>
<fsummary>Return information about a chunk continuation of a disk log.</fsummary>
<desc>
<p>Returns the pair <c>{node, <anno>Node</anno>}</c>,
@@ -457,7 +457,7 @@
</desc>
</func>
<func>
- <name name="chunk_step" arity="3"/>
+ <name name="chunk_step" arity="3" since=""/>
<fsummary>Step forward or backward among the wrap log files of a disk log.</fsummary>
<desc>
<p>Can be used with <c>chunk/2,3</c> and <c>bchunk/2,3</c>
@@ -480,7 +480,7 @@
</desc>
</func>
<func>
- <name name="close" arity="1"/>
+ <name name="close" arity="1" since=""/>
<fsummary>Close a disk log.</fsummary>
<type name="close_error_rsn"/>
<desc>
@@ -505,7 +505,7 @@
</desc>
</func>
<func>
- <name name="format_error" arity="1"/>
+ <name name="format_error" arity="1" since=""/>
<fsummary>Return an English description of a disk log error reply.</fsummary>
<desc>
<p>Given the error returned by any function in this module,
@@ -517,7 +517,7 @@
</desc>
</func>
<func>
- <name name="inc_wrap_file" arity="1"/>
+ <name name="inc_wrap_file" arity="1" since=""/>
<fsummary>Change to the next wrap log file of a disk log.</fsummary>
<type name="inc_wrap_error_rsn"/>
<type name="invalid_header"/>
@@ -534,7 +534,7 @@
</desc>
</func>
<func>
- <name name="info" arity="1"/>
+ <name name="info" arity="1" since=""/>
<fsummary>Return information about a disk log.</fsummary>
<type name="dlog_info"/>
<desc>
@@ -685,8 +685,8 @@
</desc>
</func>
<func>
- <name name="lclose" arity="1"/>
- <name name="lclose" arity="2"/>
+ <name name="lclose" arity="1" since=""/>
+ <name name="lclose" arity="2" since=""/>
<fsummary>Close a disk log on one node.</fsummary>
<type name="lclose_error_rsn"/>
<desc>
@@ -704,8 +704,8 @@
</desc>
</func>
<func>
- <name name="log" arity="2"/>
- <name name="blog" arity="2"/>
+ <name name="log" arity="2" since=""/>
+ <name name="blog" arity="2" since=""/>
<fsummary>Log an item onto a disk log.</fsummary>
<type variable="Log"/>
<type variable="Term" name_i="1"/>
@@ -739,8 +739,8 @@
</desc>
</func>
<func>
- <name name="log_terms" arity="2"/>
- <name name="blog_terms" arity="2"/>
+ <name name="log_terms" arity="2" since=""/>
+ <name name="blog_terms" arity="2" since=""/>
<fsummary>Log many items onto a disk log.</fsummary>
<type variable="Log"/>
<type variable="TermList" name_i="1"/>
@@ -768,7 +768,7 @@
</desc>
</func>
<func>
- <name name="open" arity="1"/>
+ <name name="open" arity="1" since=""/>
<fsummary>Open a disk log file.</fsummary>
<type name="dlog_options"/>
<type name="dlog_option"/>
@@ -1041,7 +1041,7 @@
</desc>
</func>
<func>
- <name name="pid2name" arity="1"/>
+ <name name="pid2name" arity="1" since=""/>
<fsummary>Return the name of the disk log handled by a pid.</fsummary>
<desc>
<p>Returns the log name
@@ -1053,9 +1053,9 @@
</desc>
</func>
<func>
- <name name="reopen" arity="2"/>
- <name name="reopen" arity="3"/>
- <name name="breopen" arity="3"/>
+ <name name="reopen" arity="2" since=""/>
+ <name name="reopen" arity="3" since=""/>
+ <name name="breopen" arity="3" since=""/>
<fsummary>Reopen a disk log and save the old log.</fsummary>
<type variable="Log"/>
<type variable="File" name_i="1"/>
@@ -1087,7 +1087,7 @@
</desc>
</func>
<func>
- <name name="sync" arity="1"/>
+ <name name="sync" arity="1" since=""/>
<fsummary>Flush the contents of a disk log to the disk.</fsummary>
<type name="sync_error_rsn"/>
<desc>
@@ -1097,9 +1097,9 @@
</desc>
</func>
<func>
- <name name="truncate" arity="1"/>
- <name name="truncate" arity="2"/>
- <name name="btruncate" arity="2"/>
+ <name name="truncate" arity="1" since=""/>
+ <name name="truncate" arity="2" since=""/>
+ <name name="btruncate" arity="2" since=""/>
<fsummary>Truncate a disk log.</fsummary>
<type variable="Log"/>
<type variable="Head" name_i="2"/>
@@ -1129,7 +1129,7 @@
</desc>
</func>
<func>
- <name name="unblock" arity="1"/>
+ <name name="unblock" arity="1" since=""/>
<fsummary>Unblock a disk log.</fsummary>
<type name="unblock_error_rsn"/>
<desc>
diff --git a/lib/kernel/doc/src/erl_boot_server.xml b/lib/kernel/doc/src/erl_boot_server.xml
index 4109251387..89f9855c49 100644
--- a/lib/kernel/doc/src/erl_boot_server.xml
+++ b/lib/kernel/doc/src/erl_boot_server.xml
@@ -28,7 +28,7 @@
<date></date>
<rev></rev>
</header>
- <module>erl_boot_server</module>
+ <module since="">erl_boot_server</module>
<modulesummary>Boot server for other Erlang machines.</modulesummary>
<description>
<p>This server is used to assist diskless Erlang nodes that fetch
@@ -52,14 +52,14 @@
</description>
<funcs>
<func>
- <name name="add_slave" arity="1"/>
+ <name name="add_slave" arity="1" since=""/>
<fsummary>Add a slave to the list of allowed slaves.</fsummary>
<desc>
<p>Adds a <c><anno>Slave</anno></c> node to the list of allowed slave hosts.</p>
</desc>
</func>
<func>
- <name name="delete_slave" arity="1"/>
+ <name name="delete_slave" arity="1" since=""/>
<fsummary>Delete a slave from the list of allowed slaves.</fsummary>
<desc>
<p>Deletes a <c><anno>Slave</anno></c> node from the list of allowed slave
@@ -67,7 +67,7 @@
</desc>
</func>
<func>
- <name name="start" arity="1"/>
+ <name name="start" arity="1" since=""/>
<fsummary>Start the boot server.</fsummary>
<desc>
<p>Starts the boot server. <c><anno>Slaves</anno></c> is a list of
@@ -76,7 +76,7 @@
</desc>
</func>
<func>
- <name name="start_link" arity="1"/>
+ <name name="start_link" arity="1" since=""/>
<fsummary>Start the boot server and link to the the caller.</fsummary>
<desc>
<p>Starts the boot server and links to the caller. This function
@@ -85,7 +85,7 @@
</desc>
</func>
<func>
- <name name="which_slaves" arity="0"/>
+ <name name="which_slaves" arity="0" since=""/>
<fsummary>Return the current list of allowed slave hosts.</fsummary>
<desc>
<p>Returns the current list of allowed slave hosts.</p>
diff --git a/lib/kernel/doc/src/erl_ddll.xml b/lib/kernel/doc/src/erl_ddll.xml
index 75114e015c..f2d5e1b397 100644
--- a/lib/kernel/doc/src/erl_ddll.xml
+++ b/lib/kernel/doc/src/erl_ddll.xml
@@ -28,7 +28,7 @@
<date></date>
<rev></rev>
</header>
- <module>erl_ddll</module>
+ <module since="">erl_ddll</module>
<modulesummary>Dynamic driver loader and linker.</modulesummary>
<description>
<p>This module provides an interface for loading
@@ -196,7 +196,7 @@
</datatypes>
<funcs>
<func>
- <name name="demonitor" arity="1"/>
+ <name name="demonitor" arity="1" since=""/>
<fsummary>Remove a monitor for a driver.</fsummary>
<desc>
<p>Removes a driver monitor in much the same way as
@@ -212,7 +212,7 @@
</desc>
</func>
<func>
- <name name="format_error" arity="1"/>
+ <name name="format_error" arity="1" since=""/>
<fsummary>Format an error descriptor.</fsummary>
<desc>
<p>Takes an <c><anno>ErrorDesc</anno></c> returned by load, unload, or
@@ -229,7 +229,7 @@
</desc>
</func>
<func>
- <name name="info" arity="0"/>
+ <name name="info" arity="0" since=""/>
<fsummary>Retrieve information about all drivers.</fsummary>
<desc>
<p>Returns a list of tuples <c>{<anno>DriverName</anno>, <anno>InfoList</anno>}</c>,
@@ -240,7 +240,7 @@
</desc>
</func>
<func>
- <name name="info" arity="1"/>
+ <name name="info" arity="1" since=""/>
<fsummary>Retrieve information about one driver.</fsummary>
<desc>
<p>Returns a list of tuples <c>{<anno>Tag</anno>, <anno>Value</anno>}</c>,
@@ -266,7 +266,7 @@
</desc>
</func>
<func>
- <name name="info" arity="2"/>
+ <name name="info" arity="2" since=""/>
<fsummary>Retrieve specific information about one driver.</fsummary>
<desc>
<p>Returns specific information about one aspect of a driver.
@@ -328,7 +328,7 @@
</desc>
</func>
<func>
- <name name="load" arity="2"/>
+ <name name="load" arity="2" since=""/>
<fsummary>Load a driver.</fsummary>
<desc>
<p>Loads and links the dynamic driver <c><anno>Name</anno></c>.
@@ -390,7 +390,7 @@
</desc>
</func>
<func>
- <name name="load_driver" arity="2"/>
+ <name name="load_driver" arity="2" since=""/>
<fsummary>Load a driver.</fsummary>
<desc>
<p>Works essentially as <c>load/2</c>, but loads the driver
@@ -413,7 +413,7 @@
</desc>
</func>
<func>
- <name name="loaded_drivers" arity="0"/>
+ <name name="loaded_drivers" arity="0" since=""/>
<fsummary>List loaded drivers.</fsummary>
<desc>
<p>Returns a list of all the available drivers, both
@@ -425,7 +425,7 @@
</desc>
</func>
<func>
- <name name="monitor" arity="2"/>
+ <name name="monitor" arity="2" since=""/>
<fsummary>Create a monitor for a driver.</fsummary>
<desc>
<p>Creates a driver monitor and works in many
@@ -588,7 +588,7 @@
</desc>
</func>
<func>
- <name name="reload" arity="2"/>
+ <name name="reload" arity="2" since=""/>
<fsummary>Replace a driver.</fsummary>
<desc>
<p>Reloads the driver named <c><anno>Name</anno></c> from a possibly
@@ -626,7 +626,7 @@
</desc>
</func>
<func>
- <name name="reload_driver" arity="2"/>
+ <name name="reload_driver" arity="2" since=""/>
<fsummary>Replace a driver.</fsummary>
<desc>
<p>Works exactly as <seealso marker="#reload/2"><c>reload/2</c></seealso>,
@@ -644,7 +644,7 @@
</desc>
</func>
<func>
- <name name="try_load" arity="3"/>
+ <name name="try_load" arity="3" since=""/>
<fsummary>Load a driver.</fsummary>
<desc>
<p>Provides more control than the
@@ -931,7 +931,7 @@
</desc>
</func>
<func>
- <name name="try_unload" arity="2"/>
+ <name name="try_unload" arity="2" since=""/>
<fsummary>Unload a driver.</fsummary>
<desc>
<p>This is the low-level function to unload (or decrement
@@ -1116,7 +1116,7 @@
</desc>
</func>
<func>
- <name name="unload" arity="1"/>
+ <name name="unload" arity="1" since=""/>
<fsummary>Unload a driver.</fsummary>
<desc>
<p>Unloads, or at least dereferences the driver named
@@ -1143,7 +1143,7 @@
</desc>
</func>
<func>
- <name name="unload_driver" arity="1"/>
+ <name name="unload_driver" arity="1" since=""/>
<fsummary>Unload a driver.</fsummary>
<desc>
<p>Unloads, or at least dereferences the driver named
diff --git a/lib/kernel/doc/src/erl_epmd.xml b/lib/kernel/doc/src/erl_epmd.xml
new file mode 100644
index 0000000000..2adbf11a28
--- /dev/null
+++ b/lib/kernel/doc/src/erl_epmd.xml
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2018</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>erl_epmd</title>
+ <prepared>Timmo Verlaan</prepared>
+ <docno>1</docno>
+ <date>2018-02-19</date>
+ <rev>A</rev>
+ </header>
+ <module since="OTP R14B">erl_epmd</module>
+ <modulesummary>
+ Erlang interface towards epmd
+ </modulesummary>
+ <description>
+ <p>This module communicates with the EPMD daemon, see <seealso
+ marker="erts:epmd">epmd</seealso>. To implement your own epmd module, please
+ see <seealso marker="erts:alt_disco">ERTS User's Guide: How to Implement an
+ Alternative Service Discovery for Erlang Distribution</seealso>.</p>
+ </description>
+
+ <funcs>
+ <func>
+ <name name="start_link" arity="0" since="OTP 21.0"/>
+ <fsummary>Callback for erl_distribution supervisor.</fsummary>
+ <desc>
+ <p>This function is invoked as this module is added as a child of the
+ <c>erl_distribution</c> supervisor.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="register_node" arity="2" since="OTP 21.0"/>
+ <name name="register_node" arity="3" since="OTP 21.0"/>
+ <fsummary>Registers the node with <c>epmd</c>.</fsummary>
+ <desc>
+ <p>Registers the node with <c>epmd</c> and tells epmd what port will be
+ used for the current node. It returns a creation number. This number is
+ incremented on each register to help with identifying if a node is
+ reconnecting to epmd.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="port_please" arity="2" since="OTP 21.0"/>
+ <name name="port_please" arity="3" since="OTP 21.0"/>
+ <fsummary>Returns the port number for a given node.</fsummary>
+ <desc>
+ <p>Requests the distribution port for the given node of an EPMD
+ instance. Together with the port it returns a distribution protocol
+ version which has been 5 since Erlang/OTP R6.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="address_please" arity="3" since="OTP 21.0"/>
+ <fsummary>Returns address and port.</fsummary>
+ <desc>
+ <p>Called by the distribution module. Resolves the <c>Host</c> to an IP
+ address.</p>
+      <p>An alternative epmd module may also return the port and the
+      distribution protocol version.</p>
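+      <p>A usage sketch (the result assumes that <c>"localhost"</c>
+        resolves to the IPv4 loopback address):</p>
+      <pre>
+1> <input>erl_epmd:address_please("arne", "localhost", inet).</input>
+{ok,{127,0,0,1}}</pre>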
+ </desc>
+ </func>
+
+ <func>
+ <name name="names" arity="1" since="OTP 21.0"/>
+ <fsummary>Names of Erlang nodes at a host.</fsummary>
+ <desc>
+ <p>Called by <seealso marker="net_adm"><c>net_adm:names/0</c></seealso>.
+ <c>Host</c> defaults to the localhost. Returns the names and associated
+ port numbers of the Erlang nodes that <c>epmd</c> registered at the
+ specified host. Returns <c>{error, address}</c> if <c>epmd</c> is not
+ operational.</p>
+ <p><em>Example:</em></p>
+ <pre>
+(arne@dunn)1> <input>erl_epmd:names(localhost).</input>
+{ok,[{"arne",40262}]}</pre>
+ </desc>
+ </func>
+ </funcs>
+
+</erlref>
+
diff --git a/lib/kernel/doc/src/error_handler.xml b/lib/kernel/doc/src/error_handler.xml
index e5639487dc..eb01e87aee 100644
--- a/lib/kernel/doc/src/error_handler.xml
+++ b/lib/kernel/doc/src/error_handler.xml
@@ -30,7 +30,7 @@
<date></date>
<rev></rev>
</header>
- <module>error_handler</module>
+ <module since="">error_handler</module>
<modulesummary>Default system error handler.</modulesummary>
<description>
<p>This module defines what happens when certain types
@@ -38,7 +38,7 @@
</description>
<funcs>
<func>
- <name name="raise_undef_exception" arity="3"/>
+ <name name="raise_undef_exception" arity="3" since="OTP R16B"/>
<fsummary>Raise an undef exception.</fsummary>
<type_desc variable="Args">
A (possibly empty) list of arguments <c>Arg1,..,ArgN</c>
@@ -51,7 +51,7 @@
</desc>
</func>
<func>
- <name name="undefined_function" arity="3"/>
+ <name name="undefined_function" arity="3" since=""/>
<fsummary>Called when an undefined function is encountered.</fsummary>
<type_desc variable="Args">
A (possibly empty) list of arguments <c>Arg1,..,ArgN</c>
@@ -93,7 +93,7 @@
</desc>
</func>
<func>
- <name name="undefined_lambda" arity="3"/>
+ <name name="undefined_lambda" arity="3" since=""/>
<fsummary>Called when an undefined lambda (fun) is encountered.</fsummary>
<type_desc variable="Args">
A (possibly empty) list of arguments <c>Arg1,..,ArgN</c>
diff --git a/lib/kernel/doc/src/error_logger.xml b/lib/kernel/doc/src/error_logger.xml
index 91bf57cb91..c170b4fa34 100644
--- a/lib/kernel/doc/src/error_logger.xml
+++ b/lib/kernel/doc/src/error_logger.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2017</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -28,39 +28,40 @@
<date></date>
<rev></rev>
</header>
- <module>error_logger</module>
+ <module since="">error_logger</module>
<modulesummary>Erlang error logger.</modulesummary>
<description>
+
+ <note>
+ <p>In Erlang/OTP 21.0, a new API for logging was added. The
+ old <c>error_logger</c> module can still be used by legacy
+ code, but log events are redirected to the new Logger API. New
+ code should use the Logger API directly.</p>
+ <p><c>error_logger</c> is no longer started by default, but is
+ automatically started when an event handler is added
+ with <c>error_logger:add_report_handler/1,2</c>. The <c>error_logger</c>
+ module is then also added as a handler to the new logger.</p>
+ <p>See <seealso marker="logger"><c>logger(3)</c></seealso> and
+ the <seealso marker="logger_chapter">Logging</seealso> chapter
+ in the User's Guide for more information.</p>
+ </note>
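+    <p>For example, where legacy code calls
+    <c>error_logger:error_msg/2</c>, new code can log through the Logger
+    API directly. A minimal sketch (<c>report_error/1</c> is an arbitrary
+    example function):</p>
+    <pre>
+-include_lib("kernel/include/logger.hrl").
+
+report_error(Module) ->
+    ?LOG_ERROR("An error occurred in ~p", [Module]).</pre>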
+
<p>The Erlang <em>error logger</em> is an event manager (see
<seealso marker="doc/design_principles:des_princ">OTP Design Principles</seealso> and
<seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>),
- registered as <c>error_logger</c>. Errors, warnings, and info events
- are sent to the error logger from the Erlang runtime system and
- the different Erlang/OTP applications. The events are, by default,
- logged to the terminal. Notice that an event from a process <c>P</c> is
- logged at the node of the group leader of <c>P</c>. This means
- that log output is directed to the node from which a process was
- created, which not necessarily is the same node as where it is
- executing.</p>
- <p>Initially, <c>error_logger</c> has only a primitive event
- handler, which buffers and prints the raw event messages. During
- system startup, the Kernel application replaces this with a
- <em>standard event handler</em>, by default one that writes
- nicely formatted output to the terminal. Kernel can also be
- configured so that events are logged to a file instead, or not logged at all,
- see <seealso marker="kernel_app"><c>kernel(6)</c></seealso>.</p>
- <p>Also the SASL application, if started, adds its own event
- handler, which by default writes supervisor, crash, and progress
- reports to the terminal. See
- <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso>.</p>
- <p>It is recommended that user-defined applications report
- errors through the error logger to get uniform reports.
- User-defined event handlers can be added to handle application-specific
- events, see
- <seealso marker="#add_report_handler/1"><c>add_report_handler/1,2</c></seealso>.
- Also, a useful event handler is provided in STDLIB for multi-file
- logging of events, see
- <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso>.</p>
+ registered as <c>error_logger</c>.</p>
+ <p>Error logger is no longer started by default, but is
+ automatically started when an event handler is added
+ with <seealso marker="#add_report_handler/1">
+ <c>add_report_handler/1,2</c></seealso>. The <c>error_logger</c>
+ module is then also added as a handler to the new logger,
+ causing log events to be forwarded from logger to error logger,
+ and consequently to all installed error logger event
+ handlers.</p>
+ <p>User-defined event handlers can be added to handle application-specific
+ events.</p>
+ <p>Existing event handlers provided by STDLIB and SASL are still
+ available, but are no longer used by OTP.</p>
<p>Warning events were introduced in Erlang/OTP R9C and are enabled
by default as from Erlang/OTP 18.0. To retain backwards compatibility
with existing user-defined event handlers, the warning events can be
@@ -75,8 +76,8 @@
</datatypes>
<funcs>
<func>
- <name name="add_report_handler" arity="1"/>
- <name name="add_report_handler" arity="2"/>
+ <name name="add_report_handler" arity="1" since=""/>
+ <name name="add_report_handler" arity="2" since=""/>
<fsummary>Add an event handler to the error logger.</fsummary>
<desc>
<p>Adds a new event handler to the error logger. The event
@@ -89,167 +90,213 @@
The function returns <c>ok</c> if successful.</p>
<p>The event handler must be able to handle the events in this module, see
section <seealso marker="#events">Events</seealso>.</p>
+ <p>The first time this function is called,
+ <c>error_logger</c> is added as a Logger handler, and
+ the <c>error_logger</c> process is started.</p>
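+        <p>A minimal usage sketch (<c>my_report_handler</c> is an
+          arbitrary example of a module implementing the
+          <c>gen_event</c> behaviour):</p>
+        <pre>
+ok = error_logger:add_report_handler(my_report_handler).</pre>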
</desc>
</func>
<func>
- <name name="delete_report_handler" arity="1"/>
+ <name name="delete_report_handler" arity="1" since=""/>
<fsummary>Delete an event handler from the error logger.</fsummary>
<desc>
<p>Deletes an event handler from the error logger by calling
<c>gen_event:delete_handler(error_logger, <anno>Handler</anno>, [])</c>,
see <seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>.</p>
+ <p>If no more event handlers exist after the deletion,
+ <c>error_logger</c> is removed as a Logger handler, and
+ the <c>error_logger</c> process is stopped.</p>
</desc>
</func>
<func>
- <name name="error_msg" arity="1"/>
- <name name="error_msg" arity="2"/>
- <name name="format" arity="2"/>
- <fsummary>Send a standard error event to the error logger.</fsummary>
+ <name name="error_msg" arity="1" since=""/>
+ <name name="error_msg" arity="2" since=""/>
+ <name name="format" arity="2" since=""/>
+ <fsummary>Log a standard error event.</fsummary>
<desc>
- <p>Sends a standard error event to the error logger.
- The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments
- are the same as the arguments of
+ <p>Log a standard error event. The <c><anno>Format</anno></c>
+ and <c><anno>Data</anno></c> arguments are the same as the
+ arguments of
<seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso>
- in STDLIB.
- The event is handled by the standard event handler.</p>
+ in STDLIB.</p>
+ <p>Error logger forwards the event to Logger, including
+ metadata that allows backwards compatibility with legacy
+ error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler.</p>
+ <p>These functions are kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_ERROR</c></seealso> macro or
+ <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso>
+ instead.</p>
<p><em>Example:</em></p>
<pre>
-1> <input>error_logger:error_msg("An error occurred in ~p~n", [a_module]).</input>
-
-=ERROR REPORT==== 11-Aug-2005::14:03:19 ===
+1> <input>error_logger:error_msg("An error occurred in ~p", [a_module]).</input>
+=ERROR REPORT==== 22-May-2018::11:18:43.376917 ===
An error occurred in a_module
ok</pre>
<warning>
- <p>If called with bad arguments, this function can crash
- the standard event handler, meaning no further events are
- logged. When in doubt, use
- <seealso marker="#error_report/1"><c>error_report/1</c></seealso>
- instead.</p>
- </warning>
- <warning>
<p>If the Unicode translation modifier (<c>t</c>) is used in
- the format string, all error handlers must ensure that the
+ the format string, all event handlers must ensure that the
formatted output is correctly encoded for the I/O
device.</p>
</warning>
</desc>
</func>
<func>
- <name name="error_report" arity="1"/>
- <fsummary>Send a standard error report event to the error logger.</fsummary>
+ <name name="error_report" arity="1" since=""/>
+ <fsummary>Log a standard error event.</fsummary>
<desc>
- <p>Sends a standard error report event to the error logger.
- The event is handled by the standard event handler.</p>
+ <p>Log a standard error event. Error logger forwards the event
+ to Logger, including metadata that allows backwards
+ compatibility with legacy error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler.</p>
+      <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_ERROR</c></seealso> macro or
+ <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso>
+ instead.</p>
<p><em>Example:</em></p>
<pre>
2> <input>error_logger:error_report([{tag1,data1},a_term,{tag2,data}]).</input>
-
-=ERROR REPORT==== 11-Aug-2005::13:45:41 ===
+=ERROR REPORT==== 22-May-2018::11:24:23.699306 ===
tag1: data1
a_term
tag2: data
ok
3> <input>error_logger:error_report("Serious error in my module").</input>
-
-=ERROR REPORT==== 11-Aug-2005::13:45:49 ===
+=ERROR REPORT==== 22-May-2018::11:24:45.972445 ===
Serious error in my module
ok</pre>
</desc>
</func>
<func>
- <name name="error_report" arity="2"/>
- <fsummary>Send a user-defined error report event to the error logger.</fsummary>
+ <name name="error_report" arity="2" since=""/>
+ <fsummary>Log a user-defined error event.</fsummary>
<desc>
- <p>Sends a user-defined error report event to the error logger.
- An event handler to handle the event is supposed to have been
- added. The event is ignored by the standard event handler.</p>
+ <p>Log a user-defined error event. Error logger forwards the
+ event to Logger, including metadata that allows backwards
+ compatibility with legacy error logger event handlers.</p>
+ <p>Error logger also adds a <c>domain</c> field with
+ value <c>[<anno>Type</anno>]</c> to this event's metadata,
+ causing the filters of the default Logger handler to discard
+ the event. A different Logger handler, or an error logger
+ event handler, must be added to handle this event.</p>
<p>It is recommended that <c><anno>Report</anno></c> follows the same
structure as for
<seealso marker="#error_report/1"><c>error_report/1</c></seealso>.</p>
+      <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_ERROR</c></seealso> macro or
+ <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso>
+ instead.</p>
</desc>
</func>
<func>
- <name name="get_format_depth" arity="0"/>
+ <name name="get_format_depth" arity="0" since="OTP 20.0"/>
<fsummary>Get the value of the Kernel application variable
<c>error_logger_format_depth</c>.</fsummary>
<desc>
<p>Returns <c>max(10, Depth)</c>, where <c>Depth</c> is the
- value of
- <seealso marker="kernel:kernel_app#error_logger_format_depth">
- error_logger_format_depth</seealso>
+ value of <c>error_logger_format_depth</c>
in the Kernel application, if Depth is an integer. Otherwise,
<c>unlimited</c> is returned.</p>
+ <note>
+ <p>The <c>error_logger_format_depth</c> variable
+ is <seealso marker="kernel_app#deprecated-configuration-parameters">
+ deprecated</seealso> since
+ the <seealso marker="logger">Logger API</seealso> was
+ introduced in Erlang/OTP 21.0. The variable, and this
+ function, are kept for backwards compatibility since they
+ still might be used by legacy report handlers.</p>
+ </note>
</desc>
</func>
<func>
- <name name="info_msg" arity="1"/>
- <name name="info_msg" arity="2"/>
- <fsummary>Send a standard information event to the error logger.</fsummary>
+ <name name="info_msg" arity="1" since=""/>
+ <name name="info_msg" arity="2" since=""/>
+ <fsummary>Log a standard information event.</fsummary>
<desc>
- <p>Sends a standard information event to the error logger.
- The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments
- are the same as the arguments of
+ <p>Log a standard information event. The <c><anno>Format</anno></c>
+ and <c><anno>Data</anno></c> arguments are the same as the
+ arguments of
<seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso>
- in STDLIB. The event is handled by the standard event handler.</p>
+ in STDLIB.</p>
+ <p>Error logger forwards the event to Logger, including
+ metadata that allows backwards compatibility with legacy
+ error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler.</p>
+ <p>These functions are kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_INFO</c></seealso> macro or
+ <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso>
+ instead.</p>
<p><em>Example:</em></p>
<pre>
-1> <input>error_logger:info_msg("Something happened in ~p~n", [a_module]).</input>
-
-=INFO REPORT==== 11-Aug-2005::14:06:15 ===
+1> <input>error_logger:info_msg("Something happened in ~p", [a_module]).</input>
+=INFO REPORT==== 22-May-2018::12:03:32.612462 ===
Something happened in a_module
ok</pre>
<warning>
- <p>If called with bad arguments, this function can crash
- the standard event handler, meaning no further events are
- logged. When in doubt, use <c>info_report/1</c> instead.</p>
- </warning>
- <warning>
<p>If the Unicode translation modifier (<c>t</c>) is used in
- the format string, all error handlers must ensure that the
+ the format string, all event handlers must ensure that the
formatted output is correctly encoded for the I/O
device.</p>
</warning>
</desc>
</func>
<func>
- <name name="info_report" arity="1"/>
- <fsummary>Send a standard information report event to the error logger.</fsummary>
+ <name name="info_report" arity="1" since=""/>
+ <fsummary>Log a standard information event.</fsummary>
<desc>
- <p>Sends a standard information report event to the error
- logger. The event is handled by the standard event handler.</p>
+ <p>Log a standard information event. Error logger forwards the
+ event to Logger, including metadata that allows backwards
+ compatibility with legacy error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler.</p>
+      <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_INFO</c></seealso> macro or
+ <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso>
+ instead.</p>
<p><em>Example:</em></p>
<pre>
2> <input>error_logger:info_report([{tag1,data1},a_term,{tag2,data}]).</input>
-
-=INFO REPORT==== 11-Aug-2005::13:55:09 ===
+=INFO REPORT==== 22-May-2018::12:06:35.994440 ===
tag1: data1
a_term
tag2: data
ok
3> <input>error_logger:info_report("Something strange happened").</input>
-
-=INFO REPORT==== 11-Aug-2005::13:55:36 ===
+=INFO REPORT==== 22-May-2018::12:06:49.066872 ===
Something strange happened
ok</pre>
</desc>
</func>
<func>
- <name name="info_report" arity="2"/>
- <fsummary>Send a user-defined information report event to the error logger.</fsummary>
+ <name name="info_report" arity="2" since=""/>
+ <fsummary>Log a user-defined information event.</fsummary>
<desc>
- <p>Sends a user-defined information report event to the error
- logger. An event handler to handle the event is supposed to
- have been added. The event is ignored by the standard event
- handler.</p>
+ <p>Log a user-defined information event. Error logger forwards
+ the event to Logger, including metadata that allows
+ backwards compatibility with legacy error logger event
+ handlers.</p>
+ <p>Error logger also adds a <c>domain</c> field with
+ value <c>[<anno>Type</anno>]</c> to this event's metadata,
+ causing the filters of the default Logger handler to discard
+ the event. A different Logger handler, or an error logger
+ event handler, must be added to handle this event.</p>
<p>It is recommended that <c><anno>Report</anno></c> follows the same
structure as for
<seealso marker="#info_report/1"><c>info_report/1</c></seealso>.</p>
+      <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_INFO</c></seealso> macro or
+ <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso>
+ instead.</p>
</desc>
</func>
<func>
- <name name="logfile" arity="1" clause_i="1"/>
- <name name="logfile" arity="1" clause_i="2"/>
- <name name="logfile" arity="1" clause_i="3"/>
+ <name name="logfile" arity="1" clause_i="1" since=""/>
+ <name name="logfile" arity="1" clause_i="2" since=""/>
+ <name name="logfile" arity="1" clause_i="3" since=""/>
<fsummary>Enable or disable error printouts to a file.</fsummary>
<type variable="Filename"/>
<type variable="OpenReason" name_i="1"/>
@@ -258,14 +305,22 @@ ok</pre>
<type name="open_error"/>
<desc>
<p>Enables or disables printout of standard events to a file.</p>
- <p>This is done by adding or deleting the standard event handler
- for output to file. Thus, calling this function overrides
- the value of the Kernel <c>error_logger</c> configuration
- parameter.</p>
- <p>Enabling file logging can be used together with calling
- <c>tty(false)</c>, to have a silent system where
- all standard events are logged to a file only.
- Only one log file can be active at a time.</p>
+ <p>This is done by adding or deleting
+ the <c>error_logger_file_h</c> event handler, and thus
+ indirectly adding <c>error_logger</c> as a Logger
+ handler.</p>
+ <p>Notice that this function does not manipulate the Logger
+ configuration directly, meaning that if the default Logger
+ handler is already logging to a file, this function can
+ potentially cause logging to a second file.</p>
+ <p>This function is useful as a shortcut during development
+ and testing, but must not be used in a production
+ system. See
+ section <seealso marker="logger_chapter">Logging</seealso>
+ in the Kernel User's Guide, and
+ the <seealso marker="logger"><c>logger(3)</c></seealso>
+ manual page for information about how to configure Logger
+ for live systems.</p>
<p><c>Request</c> is one of the following:</p>
<taglist>
<tag><c>{open, <anno>Filename</anno>}</c></tag>
@@ -291,19 +346,24 @@ ok</pre>
</desc>
</func>
<func>
- <name name="tty" arity="1"/>
+ <name name="tty" arity="1" since=""/>
<fsummary>Enable or disable printouts to the terminal.</fsummary>
<desc>
<p>Enables (<c><anno>Flag</anno> == true</c>) or disables
(<c><anno>Flag</anno> == false</c>) printout of standard events
to the terminal.</p>
- <p>This is done by adding or deleting the standard event handler
- for output to the terminal. Thus, calling this function overrides
- the value of the Kernel <c>error_logger</c> configuration parameter.</p>
+ <p>This is done by manipulating the Logger configuration. The
+ function is useful as a shortcut during development and
+ testing, but must not be used in a production system. See
+ section <seealso marker="logger_chapter">Logging</seealso>
+ in the Kernel User's Guide, and
+ the <seealso marker="logger"><c>logger(3)</c></seealso>
+ manual page for information about how to configure Logger
+ for live systems.</p>
</desc>
</func>
<func>
- <name name="warning_map" arity="0"/>
+ <name name="warning_map" arity="0" since=""/>
<fsummary>Return the current mapping for warning events.</fsummary>
<desc>
<p>Returns the current mapping for warning events. Events sent
@@ -340,51 +400,73 @@ ok</pre>
</desc>
</func>
<func>
- <name name="warning_msg" arity="1"/>
- <name name="warning_msg" arity="2"/>
- <fsummary>Send a standard warning event to the error logger.</fsummary>
+ <name name="warning_msg" arity="1" since=""/>
+ <name name="warning_msg" arity="2" since=""/>
+ <fsummary>Log a standard warning event.</fsummary>
<desc>
- <p>Sends a standard warning event to the error logger.
- The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments
- are the same as the arguments of
+ <p>Log a standard warning event. The <c><anno>Format</anno></c>
+ and <c><anno>Data</anno></c> arguments are the same as the
+ arguments of
<seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso>
- in STDLIB.
- The event is handled by the standard event handler. It is tagged
- as an error, warning, or info, see
+ in STDLIB.</p>
+ <p>Error logger forwards the event to Logger, including
+ metadata that allows backwards compatibility with legacy
+ error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler. The log
+ level can be changed to error or info, see
<seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p>
- <warning>
- <p>If called with bad arguments, this function can crash
- the standard event handler, meaning no further events are
- logged. When in doubt, use <c>warning_report/1</c> instead.</p>
- </warning>
+ <p>These functions are kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_WARNING</c></seealso> macro or
+ <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso>
+ instead.</p>
<warning>
<p>If the Unicode translation modifier (<c>t</c>) is used in
- the format string, all error handlers must ensure that the
+ the format string, all event handlers must ensure that the
formatted output is correctly encoded for the I/O
device.</p>
</warning>
</desc>
</func>
<func>
- <name name="warning_report" arity="1"/>
- <fsummary>Send a standard warning report event to the error logger.</fsummary>
+ <name name="warning_report" arity="1" since=""/>
+ <fsummary>Log a standard warning event.</fsummary>
<desc>
- <p>Sends a standard warning report event to the error logger.
- The event is handled by the standard event handler. It is
- tagged as an error, warning, or info, see
+ <p>Log a standard warning event. Error logger forwards the event
+ to Logger, including metadata that allows backwards
+ compatibility with legacy error logger event handlers.</p>
+ <p>The event is handled by the default Logger handler. The log
+ level can be changed to error or info, see
<seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p>
+      <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_WARNING</c></seealso> macro or
+ <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso>
+ instead.</p>
</desc>
</func>
<func>
- <name name="warning_report" arity="2"/>
- <fsummary>Send a user-defined warning report event to the error logger.</fsummary>
+ <name name="warning_report" arity="2" since=""/>
+ <fsummary>Log a user-defined warning event.</fsummary>
<desc>
- <p>Sends a user-defined warning report event to the error
- logger. An event handler to handle the event is supposed to
- have been added. The event is ignored by the standard event
- handler. It is tagged as an error, warning, or info,
- depending on the value of
+ <p>Log a user-defined warning event. Error logger forwards the
+ event to Logger, including metadata that allows backwards
+ compatibility with legacy error logger event handlers.</p>
+ <p>Error logger also adds a <c>domain</c> field with
+ value <c>[<anno>Type</anno>]</c> to this event's metadata,
+ causing the filters of the default Logger handler to discard
+ the event. A different Logger handler, or an error logger
+ event handler, must be added to handle this event.</p>
+ <p>The log level can be changed to error or info, see
<seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p>
+ <p>It is recommended that <c><anno>Report</anno></c> follows the same
+ structure as for
+ <seealso marker="#warning_report/1"><c>warning_report/1</c></seealso>.</p>
+      <p>This function is kept for backwards compatibility and
+ must not be used by new code. Use the <seealso marker="logger#macros">
+ <c>?LOG_WARNING</c></seealso> macro or
+ <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso>
+ instead.</p>
</desc>
</func>
</funcs>
@@ -448,8 +530,9 @@ ok</pre>
<section>
<title>See Also</title>
<p><seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>,
- <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso>
- <seealso marker="kernel_app"><c>kernel(6)</c></seealso>
+ <seealso marker="kernel:logger"><c>logger(3)</c></seealso>,
+ <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso>,
+ <seealso marker="kernel_app"><c>kernel(6)</c></seealso>,
<seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso></p>
</section>
</erlref>
diff --git a/lib/kernel/doc/src/file.xml b/lib/kernel/doc/src/file.xml
index b674b3ca93..fc25e83d40 100644
--- a/lib/kernel/doc/src/file.xml
+++ b/lib/kernel/doc/src/file.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2016</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -28,20 +28,23 @@
<date></date>
<rev></rev>
</header>
- <module>file</module>
+ <module since="">file</module>
<modulesummary>File interface module.</modulesummary>
<description>
<p>This module provides an interface to the file system.</p>
- <p>On operating systems with thread support,
- file operations can be performed in threads of their own, allowing
- other Erlang processes to continue executing in parallel with
- the file operations. See command-line flag
- <c>+A</c> in <seealso marker="erts:erl"><c>erl(1)</c></seealso>.</p>
+ <warning>
+ <p>File operations are only guaranteed to appear atomic when going
+ through the same file server. A NIF or other OS process may observe
+ intermediate steps on certain operations on some operating systems,
+      for example, renaming an existing file on Windows, or
+ <seealso marker="#write_file_info/2"><c>write_file_info/2</c>
+ </seealso> on any OS at the time of writing.</p>
+ </warning>
<p>Regarding filename encoding, the Erlang VM can operate in
two modes. The current mode can be queried using function
- <seealso marker="#native_name_encoding"><c>native_name_encoding/0</c></seealso>.
+ <seealso marker="#native_name_encoding/0"><c>native_name_encoding/0</c></seealso>.
It returns <c>latin1</c> or <c>utf8</c>.</p>
<p>In <c>latin1</c> mode, the Erlang VM does not change the
@@ -59,7 +62,7 @@
terminal supports UTF-8, otherwise <c>latin1</c>. The default can
be overridden using <c>+fnl</c> (to force <c>latin1</c> mode)
or <c>+fnu</c> (to force <c>utf8</c> mode) when starting
- <seealso marker="erts:erl"><c>erts:erl</c></seealso>.</p>
+ <seealso marker="erts:erl"><c>erl</c></seealso>.</p>
<p>On operating systems with transparent naming, files can be
inconsistently named, for example, some files are encoded in UTF-8 while
@@ -81,6 +84,16 @@
<p>See also section <seealso marker="stdlib:unicode_usage#notes-about-raw-filenames">Notes About Raw Filenames</seealso> in the STDLIB User's Guide.</p>
+ <note><p>
+ File operations used to accept filenames containing
+ null characters (integer value zero). This caused
+    the name to be truncated and, in some cases, arguments
+    to primitive operations to be mixed up. Filenames
+    containing null characters anywhere in the name
+    are now <em>rejected</em> and cause primitive
+    file operations to fail.
+ </p></note>
+
</description>
<datatypes>
@@ -96,9 +109,21 @@
</datatype>
<datatype>
<name name="filename"/>
+ <desc>
+ <p>
+ See also the documentation of the
+ <seealso marker="#type-name_all"><c>name_all()</c></seealso> type.
+ </p>
+ </desc>
</datatype>
<datatype>
<name name="filename_all"/>
+ <desc>
+ <p>
+ See also the documentation of the
+ <seealso marker="#type-name_all"><c>name_all()</c></seealso> type.
+ </p>
+ </desc>
</datatype>
<datatype>
<name name="io_device"/>
@@ -112,21 +137,23 @@
<name name="name"/>
<desc>
<p>If VM is in Unicode filename mode, <c>string()</c> and <c>char()</c>
- are allowed to be &gt; 255.
+ are allowed to be &gt; 255. See also the documentation of the
+ <seealso marker="#type-name_all"><c>name_all()</c></seealso> type.
</p>
</desc>
</datatype>
<datatype>
<name name="name_all"/>
<desc>
- <p>If VM is in Unicode filename mode, <c>string()</c> and <c>char()</c>
+ <p>If VM is in Unicode filename mode, characters
are allowed to be &gt; 255.
<c><anno>RawFilename</anno></c> is a filename not subject to
Unicode translation,
meaning that it can contain characters not conforming to
the Unicode encoding expected from the file system
(that is, non-UTF-8 characters although the VM is started
- in Unicode filename mode).
+ in Unicode filename mode). Null characters (integer value zero)
+ are <em>not</em> allowed in filenames (not even at the end).
</p>
</desc>
</datatype>
@@ -159,7 +186,7 @@
<funcs>
<func>
- <name name="advise" arity="4"/>
+ <name name="advise" arity="4" since="OTP R14B"/>
<fsummary>Predeclare an access pattern for file data.</fsummary>
<type name="posix_file_advise"/>
<desc>
@@ -170,7 +197,7 @@
</desc>
</func>
<func>
- <name name="allocate" arity="3"/>
+ <name name="allocate" arity="3" since="OTP R16B"/>
<fsummary>Allocate file space.</fsummary>
<desc>
<p><c>allocate/3</c> can be used to preallocate space for a file.</p>
@@ -182,7 +209,7 @@
</desc>
</func>
<func>
- <name name="change_group" arity="2"/>
+ <name name="change_group" arity="2" since=""/>
<fsummary>Change group of a file.</fsummary>
<desc>
<p>Changes group of a file. See
@@ -190,7 +217,7 @@
</desc>
</func>
<func>
- <name name="change_mode" arity="2"/>
+ <name name="change_mode" arity="2" since="OTP R14B"/>
<fsummary>Change permissions of a file.</fsummary>
<desc>
<p>Changes permissions of a file. See
@@ -198,7 +225,7 @@
</desc>
</func>
<func>
- <name name="change_owner" arity="2"/>
+ <name name="change_owner" arity="2" since=""/>
<fsummary>Change owner of a file.</fsummary>
<desc>
<p>Changes owner of a file. See
@@ -206,7 +233,7 @@
</desc>
</func>
<func>
- <name name="change_owner" arity="3"/>
+ <name name="change_owner" arity="3" since=""/>
<fsummary>Change owner and group of a file.</fsummary>
<desc>
<p>Changes owner and group of a file. See
@@ -214,7 +241,7 @@
</desc>
</func>
<func>
- <name name="change_time" arity="2"/>
+ <name name="change_time" arity="2" since=""/>
<fsummary>Change the modification time of a file.</fsummary>
<desc>
<p>Changes the modification and access times of a file. See
@@ -222,7 +249,7 @@
</desc>
</func>
<func>
- <name name="change_time" arity="3"/>
+ <name name="change_time" arity="3" since=""/>
<fsummary>Change the modification and last access time of a file.</fsummary>
<desc>
<p>Changes the modification and last access times of a file. See
@@ -230,7 +257,7 @@
</desc>
</func>
<func>
- <name name="close" arity="1"/>
+ <name name="close" arity="1" since=""/>
<fsummary>Close a file.</fsummary>
<desc>
<p>Closes the file referenced by <c><anno>IoDevice</anno></c>. It mostly
@@ -243,7 +270,7 @@
</desc>
</func>
<func>
- <name name="consult" arity="1"/>
+ <name name="consult" arity="1" since=""/>
<fsummary>Read Erlang terms from a file.</fsummary>
<desc>
<p>Reads Erlang terms, separated by '.', from
@@ -281,8 +308,8 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="copy" arity="2"/>
- <name name="copy" arity="3"/>
+ <name name="copy" arity="2" since=""/>
+ <name name="copy" arity="3" since=""/>
<fsummary>Copy file contents.</fsummary>
<desc>
<p>Copies <c><anno>ByteCount</anno></c> bytes from
@@ -319,7 +346,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="datasync" arity="1"/>
+ <name name="datasync" arity="1" since="OTP R14B"/>
<fsummary>Synchronize the in-memory data of a file, ignoring most of its metadata, with that on the physical medium.</fsummary>
<desc>
<p>Ensures that any buffers kept by the operating system
@@ -342,7 +369,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="del_dir" arity="1"/>
+ <name name="del_dir" arity="1" since=""/>
<fsummary>Delete a directory.</fsummary>
<desc>
<p>Tries to delete directory <c><anno>Dir</anno></c>.
@@ -378,7 +405,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="delete" arity="1"/>
+ <name name="delete" arity="1" since=""/>
<fsummary>Delete a file.</fsummary>
<desc>
<p>Tries to delete file <c><anno>Filename</anno></c>.
@@ -415,7 +442,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="eval" arity="1"/>
+ <name name="eval" arity="1" since=""/>
<fsummary>Evaluate Erlang expressions in a file.</fsummary>
<desc>
<p>Reads and evaluates Erlang expressions, separated by '.' (or
@@ -449,7 +476,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="eval" arity="2"/>
+ <name name="eval" arity="2" since=""/>
<fsummary>Evaluate Erlang expressions in a file.</fsummary>
<desc>
<p>The same as <c>eval/1</c>, but the variable bindings
@@ -459,7 +486,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="format_error" arity="1"/>
+ <name name="format_error" arity="1" since=""/>
<fsummary>Return a descriptive string for an error reason.</fsummary>
<desc>
<p>Given the error reason returned by any function in this
@@ -467,7 +494,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="get_cwd" arity="0"/>
+ <name name="get_cwd" arity="0" since=""/>
<fsummary>Get the current working directory.</fsummary>
<desc>
<p>Returns <c>{ok, <anno>Dir</anno>}</c>, where <c><anno>Dir</anno></c>
@@ -489,7 +516,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="get_cwd" arity="1"/>
+ <name name="get_cwd" arity="1" since=""/>
<fsummary>Get the current working directory for the specified drive.</fsummary>
<desc>
<p>Returns <c>{ok, <anno>Dir</anno>}</c> or
@@ -520,7 +547,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="list_dir" arity="1"/>
+ <name name="list_dir" arity="1" since=""/>
<fsummary>List files in a directory.</fsummary>
<desc>
<p>Lists all files in a directory, <em>except</em> files
@@ -551,7 +578,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="list_dir_all" arity="1"/>
+ <name name="list_dir_all" arity="1" since="OTP R16B"/>
<fsummary>List all files in a directory.</fsummary>
<desc>
<p><marker id="list_dir_all"/>Lists all the files in a directory,
@@ -576,7 +603,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="make_dir" arity="1"/>
+ <name name="make_dir" arity="1" since=""/>
<fsummary>Make a directory.</fsummary>
<desc>
<p>Tries to create directory <c><anno>Dir</anno></c>. Missing parent
@@ -610,7 +637,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="make_link" arity="2"/>
+ <name name="make_link" arity="2" since=""/>
<fsummary>Make a hard link to a file.</fsummary>
<desc>
<p>Makes a hard link from <c><anno>Existing</anno></c> to
@@ -639,7 +666,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="make_symlink" arity="2"/>
+ <name name="make_symlink" arity="2" since=""/>
<fsummary>Make a symbolic link to a file or directory.</fsummary>
<desc>
<p>Creates a symbolic link <c><anno>New</anno></c> to
@@ -675,7 +702,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="native_name_encoding" arity="0"/>
+ <name name="native_name_encoding" arity="0" since="OTP R14B01"/>
<fsummary>Return the configured filename encoding of the VM.</fsummary>
<desc>
<p><marker id="native_name_encoding"/>Returns
@@ -687,7 +714,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="open" arity="2"/>
+ <name name="open" arity="2" since=""/>
<fsummary>Open a file.</fsummary>
<desc>
<p>Opens file <c><anno>File</anno></c> in the mode determined
@@ -954,8 +981,7 @@ f.txt: {person, "kalle", 25}.
</item>
<tag><c>eisdir</c></tag>
<item>
- <p>The named file is not a regular file. It can be a
- directory, a FIFO, or a device.</p>
+ <p>The named file is a directory.</p>
</item>
<tag><c>enotdir</c></tag>
<item>
@@ -971,7 +997,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="path_consult" arity="2"/>
+ <name name="path_consult" arity="2" since=""/>
<fsummary>Read Erlang terms from a file.</fsummary>
<desc>
<p>Searches the path <c><anno>Path</anno></c> (a list of directory
@@ -1013,7 +1039,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="path_eval" arity="2"/>
+ <name name="path_eval" arity="2" since=""/>
<fsummary>Evaluate Erlang expressions in a file.</fsummary>
<desc>
<p>Searches the path <c><anno>Path</anno></c> (a list of directory
@@ -1059,7 +1085,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="path_open" arity="3"/>
+ <name name="path_open" arity="3" since=""/>
<fsummary>Open a file.</fsummary>
<desc>
<p>Searches the path <c><anno>Path</anno></c> (a list of directory
@@ -1088,7 +1114,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="path_script" arity="2"/>
+ <name name="path_script" arity="2" since=""/>
<fsummary>Evaluate and return the value of Erlang expressions in a file.</fsummary>
<desc>
<p>Searches the path <c><anno>Path</anno></c> (a list of directory
@@ -1132,7 +1158,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="path_script" arity="3"/>
+ <name name="path_script" arity="3" since=""/>
<fsummary>Evaluate and return the value of Erlang expressions in a file.</fsummary>
<desc>
<p>The same as <c>path_script/2</c> but the variable bindings
@@ -1142,7 +1168,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="pid2name" arity="1"/>
+ <name name="pid2name" arity="1" since=""/>
<fsummary>Return the name of the file handled by a pid.</fsummary>
<desc>
<p>If <c><anno>Pid</anno></c> is an I/O device, that is, a pid returned from
@@ -1167,7 +1193,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="position" arity="2"/>
+ <name name="position" arity="2" since=""/>
<fsummary>Set position in a file.</fsummary>
<desc>
<p>Sets the position of the file referenced by <c><anno>IoDevice</anno></c>
@@ -1219,7 +1245,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="pread" arity="2"/>
+ <name name="pread" arity="2" since=""/>
<fsummary>Read from a file at certain positions.</fsummary>
<desc>
<p>Performs a sequence of <c>pread/3</c> in one operation,
@@ -1237,7 +1263,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="pread" arity="3"/>
+ <name name="pread" arity="3" since=""/>
<fsummary>Read from a file at a certain position.</fsummary>
<desc>
<p>Combines <c>position/2</c> and <c>read/2</c> in one
@@ -1257,7 +1283,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="pwrite" arity="2"/>
+ <name name="pwrite" arity="2" since=""/>
<fsummary>Write to a file at certain positions.</fsummary>
<desc>
<p>Performs a sequence of <c>pwrite/3</c> in one operation,
@@ -1272,7 +1298,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="pwrite" arity="3"/>
+ <name name="pwrite" arity="3" since=""/>
<fsummary>Write to a file at a certain position.</fsummary>
<desc>
<p>Combines <c>position/2</c> and <c>write/2</c> in one
@@ -1291,7 +1317,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="read" arity="2"/>
+ <name name="read" arity="2" since=""/>
<fsummary>Read from a file.</fsummary>
<desc>
<p>Reads <c><anno>Number</anno></c> bytes/characters from the file
@@ -1345,7 +1371,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="read_file" arity="1"/>
+ <name name="read_file" arity="1" since=""/>
<fsummary>Read a file.</fsummary>
<desc>
<p>Returns <c>{ok, <anno>Binary</anno>}</c>, where
@@ -1381,8 +1407,8 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="read_file_info" arity="1"/>
- <name name="read_file_info" arity="2"/>
+ <name name="read_file_info" arity="1" since=""/>
+ <name name="read_file_info" arity="2" since="OTP R15B"/>
<fsummary>Retrieve information about a file.</fsummary>
<desc>
<p>Retrieves information about a file. Returns
@@ -1407,8 +1433,12 @@ f.txt: {person, "kalle", 25}.
which is 1970-01-01 00:00 UTC.</p></item>
</taglist>
<p>Default is <c>{time, local}</c>.</p>
- <p>If the option <c>raw</c> is set, the file server is not called
- and only information about local files is returned.</p>
+ <p>If the option <c>raw</c> is set, the file server is not called and
+ only information about local files is returned. Note that this will
+ break this module's atomicity guarantees as it can race with a
+ concurrent call to
+ <seealso marker="#write_file_info/2"><c>write_file_info/1,2</c>
+      </seealso>.</p>
<note>
<p>As file times are stored in POSIX time on most OS, it is faster to
query file information with option <c>posix</c>.</p>
@@ -1532,7 +1562,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="read_line" arity="1"/>
+ <name name="read_line" arity="1" since=""/>
<fsummary>Read a line from a file.</fsummary>
<desc>
<p>Reads a line of bytes/characters from the file referenced by
@@ -1589,7 +1619,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="read_link" arity="1"/>
+ <name name="read_link" arity="1" since=""/>
<fsummary>See what a link is pointing to.</fsummary>
<desc>
<p><marker id="read_link_all"/>Returns
@@ -1619,7 +1649,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="read_link_all" arity="1"/>
+ <name name="read_link_all" arity="1" since="OTP R16B"/>
<fsummary>See what a link is pointing to.</fsummary>
<desc>
<p>Returns <c>{ok, <anno>Filename</anno>}</c> if
@@ -1647,8 +1677,8 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="read_link_info" arity="1"/>
- <name name="read_link_info" arity="2"/>
+ <name name="read_link_info" arity="1" since=""/>
+ <name name="read_link_info" arity="2" since="OTP R15B"/>
<fsummary>Retrieve information about a link or file.</fsummary>
<desc>
<p>Works like
@@ -1656,8 +1686,12 @@ f.txt: {person, "kalle", 25}.
except that if <c><anno>Name</anno></c> is a symbolic link, information
about the link is returned in the <c>file_info</c> record and
the <c>type</c> field of the record is set to <c>symlink</c>.</p>
- <p>If the option <c>raw</c> is set, the file server is not called
- and only information about local files is returned.</p>
+ <p>If the option <c>raw</c> is set, the file server is not called and
+ only information about local files is returned. Note that this will
+ break this module's atomicity guarantees as it can race with a
+ concurrent call to
+ <seealso marker="#write_file_info/2"><c>write_file_info/1,2</c>
+      </seealso>.</p>
<p>If <c><anno>Name</anno></c> is not a symbolic link, this function returns
the same result as <c>read_file_info/1</c>.
On platforms that do not support symbolic links, this function
@@ -1665,7 +1699,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="rename" arity="2"/>
+ <name name="rename" arity="2" since=""/>
<fsummary>Rename a file.</fsummary>
<desc>
<p>Tries to rename the file <c><anno>Source</anno></c> to
@@ -1728,7 +1762,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="script" arity="1"/>
+ <name name="script" arity="1" since=""/>
<fsummary>Evaluate and return the value of Erlang expressions in a file.</fsummary>
<desc>
<p>Reads and evaluates Erlang expressions, separated by '.' (or
@@ -1763,7 +1797,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="script" arity="2"/>
+ <name name="script" arity="2" since=""/>
<fsummary>Evaluate and return the value of Erlang expressions in a file.</fsummary>
<desc>
<p>The same as <c>script/1</c> but the variable bindings
@@ -1773,7 +1807,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="sendfile" arity="2"/>
+ <name name="sendfile" arity="2" since="OTP R15B"/>
<fsummary>Send a file to a socket.</fsummary>
<desc>
<p>Sends the file <c>Filename</c> to <c>Socket</c>.
@@ -1782,7 +1816,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="sendfile" arity="5"/>
+ <name name="sendfile" arity="5" since="OTP R15B"/>
<fsummary>Send a file to a socket.</fsummary>
<type name="sendfile_option"/>
<desc>
@@ -1795,29 +1829,21 @@ f.txt: {person, "kalle", 25}.
<p>The file used must be opened using the <c>raw</c> flag, and the process
calling <c>sendfile</c> must be the controlling process of the socket.
See <seealso marker="gen_tcp#controlling_process-2"><c>gen_tcp:controlling_process/2</c></seealso>.</p>
- <p>If the OS used does not support <c>sendfile</c>, an Erlang fallback
- using
- <seealso marker="#read/2"><c>read/2</c></seealso> and
- <seealso marker="gen_tcp#send/2"><c>gen_tcp:send/2</c></seealso> is used.</p>
+ <p>If the OS used does not support non-blocking <c>sendfile</c>, an
+ Erlang fallback using <seealso marker="#read/2"><c>read/2</c></seealso>
+ and <seealso marker="gen_tcp#send/2"><c>gen_tcp:send/2</c></seealso> is
+ used.</p>
<p>The option list can contain the following options:</p>
<taglist>
<tag><c>chunk_size</c></tag>
<item><p>The chunk size used by the Erlang fallback to send
data. If using the fallback, set this to a value
that comfortably fits in the systems memory. Default is 20 MB.</p></item>
- <tag><c>use_threads</c></tag>
- <item><p>Instructs the emulator to use the <c>async</c> thread pool for the
- <c>sendfile</c> system call. This can be useful if the OS you are running
- on does not properly support non-blocking <c>sendfile</c> calls. Notice that
- using <c>async</c> threads potentially makes your system vulnerable to slow
- client attacks. If set to <c>true</c> and no <c>async</c> threads are available,
- the <c>sendfile</c> call returns <c>{error,einval}</c>.
- Introduced in Erlang/OTP 17.0. Default is <c>false</c>.</p></item>
</taglist>
</desc>
</func>
<func>
- <name name="set_cwd" arity="1"/>
+ <name name="set_cwd" arity="1" since=""/>
<fsummary>Set the current working directory.</fsummary>
<desc>
<p>Sets the current working directory of the file server to
@@ -1825,7 +1851,7 @@ f.txt: {person, "kalle", 25}.
<p>The functions in the module <c>file</c> usually treat binaries
as raw filenames, that is, they are passed "as is" even when the
encoding of the binary does not agree with
- <seealso marker="#native_name_encoding"><c>native_name_encoding()</c></seealso>.
+ <seealso marker="#native_name_encoding/0"><c>native_name_encoding()</c></seealso>.
However, this function expects binaries to be encoded according to the
value returned by <c>native_name_encoding()</c>.</p>
<p>Typical error reasons are:</p>
@@ -1864,7 +1890,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="sync" arity="1"/>
+ <name name="sync" arity="1" since=""/>
<fsummary>Synchronize the in-memory state of a file with that on the physical medium.</fsummary>
<desc>
<p>Ensures that any buffers kept by the operating system
@@ -1880,7 +1906,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="truncate" arity="1"/>
+ <name name="truncate" arity="1" since=""/>
<fsummary>Truncate a file.</fsummary>
<desc>
<p>Truncates the file referenced by <c><anno>IoDevice</anno></c> at
@@ -1889,7 +1915,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="write" arity="2"/>
+ <name name="write" arity="2" since=""/>
<fsummary>Write to a file.</fsummary>
<desc>
<p>Writes <c><anno>Bytes</anno></c> to the file referenced by
@@ -1915,7 +1941,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="write_file" arity="2"/>
+ <name name="write_file" arity="2" since=""/>
<fsummary>Write a file.</fsummary>
<desc>
<p>Writes the contents of the <c>iodata</c> term <c><anno>Bytes</anno></c>
@@ -1952,7 +1978,7 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="write_file" arity="3"/>
+ <name name="write_file" arity="3" since=""/>
<fsummary>Write a file.</fsummary>
<desc>
<p>Same as <c>write_file/2</c>, but takes a third argument
@@ -1963,8 +1989,8 @@ f.txt: {person, "kalle", 25}.
</desc>
</func>
<func>
- <name name="write_file_info" arity="2"/>
- <name name="write_file_info" arity="3"/>
+ <name name="write_file_info" arity="2" since=""/>
+ <name name="write_file_info" arity="3" since="OTP R15B"/>
<fsummary>Change file information.</fsummary>
<desc>
<p>Changes file information. Returns <c>ok</c> if successful,
@@ -2117,144 +2143,77 @@ f.txt: {person, "kalle", 25}.
<section>
<title>Performance</title>
- <p>Some operating system file operations, for example, a
- <c>sync/1</c> or <c>close/1</c> on a huge file, can block their
- calling thread for seconds. If this affects the emulator main
- thread, the response time is no longer in the order of
- milliseconds, depending on the definition of "soft" in soft
- real-time system.</p>
- <p>If the device driver thread pool is active, file operations are
- done through those threads instead, so the emulator can go on
- executing Erlang processes. Unfortunately, the time for serving a
- file operation increases because of the extra scheduling required
- from the operating system.</p>
- <p>If the device driver thread pool is disabled or of size 0, large
- file reads and writes are segmented into many smaller, which
- enable the emulator to serve other processes during the file
- operation. This has the same effect as when using the thread
- pool, but with larger overhead. Other file operations, for
- example, <c>sync/1</c> or <c>close/1</c> on a huge file, still are
- a problem.</p>
- <p>For increased performance, raw files are recommended. Raw files
- use the file system of the host machine of the node.</p>
+ <p>For increased performance, raw files are recommended.</p>
+    <p>A normal file is really a process, so it can be used as an I/O
+      device (see <seealso marker="stdlib:io"><c>io</c></seealso>).
+      Therefore, when data is written to a normal file, sending the
+      data to the file process copies all data that are not binaries. Opening
+      the file in binary mode and writing binaries is therefore recommended.
+      If the file is opened on another node, or if the file server runs as
+      slave to the file server of another node, binaries are copied as well.</p>
<note>
- <p>
- For normal files (non-raw), the file server is used to find the files,
- and if the node is running its file server as slave to the file server
- of another node, and the other node runs on some other host machine,
- they can have different file systems.
- However, this is seldom a problem.</p>
+ <p>Raw files use the file system of the host machine of the node.
+ For normal files (non-raw), the file server is used to find the files,
+ and if the node is running its file server as slave to the file server
+ of another node, and the other node runs on some other host machine,
+ they can have different file systems.
+ However, this is seldom a problem.</p>
</note>
- <p>A normal file is really a process so it can be used as an I/O
- device (see
- <seealso marker="stdlib:io"><c>io</c></seealso>).
- Therefore, when data is written to a
- normal file, the sending of the data to the file process, copies
- all data that are not binaries. Opening the file in binary mode
- and writing binaries is therefore recommended. If the file is
- opened on another node, or if the file server runs as slave to
- the file server of another node, also binaries are copied.</p>
- <p>Caching data to reduce the number of file operations, or rather
- the number of calls to the file driver, generally increases
- performance. The following function writes 4 MBytes in 23
- seconds when tested:</p>
+ <p><seealso marker="#open/2"><c>open/2</c></seealso> can be given the
+ options <c>delayed_write</c> and <c>read_ahead</c> to turn on caching,
+ which will reduce the number of operating system calls and greatly
+      improve performance for small reads and writes. However, the overhead
+      does not disappear completely, so it is best to keep the number of file
+      operations to a minimum. As a contrived example, the following function
+      writes 4 MB in 2.5 seconds when tested:</p>
+
<code type="none"><![CDATA[
-create_file_slow(Name, N) when integer(N), N >= 0 ->
- {ok, FD} = file:open(Name, [raw, write, delayed_write, binary]),
- ok = create_file_slow(FD, 0, N),
- ok = ?FILE_MODULE:close(FD),
- ok.
-
-create_file_slow(FD, M, M) ->
+create_file_slow(Name) ->
+ {ok, Fd} = file:open(Name, [raw, write, delayed_write, binary]),
+ create_file_slow_1(Fd, 4 bsl 20),
+ file:close(Fd).
+
+create_file_slow_1(_Fd, 0) ->
ok;
-create_file_slow(FD, M, N) ->
- ok = file:write(FD, <<M:32/unsigned>>),
- create_file_slow(FD, M+1, N).]]></code>
+create_file_slow_1(Fd, M) ->
+ ok = file:write(Fd, <<0>>),
+ create_file_slow_1(Fd, M - 1).]]></code>
+
+ <p>The following functionally equivalent code writes 128 bytes per call
+ to <seealso marker="#write/2"><c>write/2</c></seealso> and so does the
+ same work in 0.08 seconds, which is roughly 30 times faster:</p>
- <p>The following, functionally equivalent, function collects 1024
- entries into a list of 128 32-byte binaries before each call to
- <seealso marker="#write/2"><c>write/2</c></seealso> and so
- does the same work in 0.52 seconds,
- which is 44 times faster:</p>
<code type="none"><![CDATA[
-create_file(Name, N) when integer(N), N >= 0 ->
- {ok, FD} = file:open(Name, [raw, write, delayed_write, binary]),
- ok = create_file(FD, 0, N),
- ok = ?FILE_MODULE:close(FD),
+create_file(Name) ->
+ {ok, Fd} = file:open(Name, [raw, write, delayed_write, binary]),
+ create_file_1(Fd, 4 bsl 20),
+ file:close(Fd),
ok.
-
-create_file(FD, M, M) ->
+
+create_file_1(_Fd, 0) ->
ok;
-create_file(FD, M, N) when M + 1024 =&lt; N ->
- create_file(FD, M, M + 1024, []),
- create_file(FD, M + 1024, N);
-create_file(FD, M, N) ->
- create_file(FD, M, N, []).
-
-create_file(FD, M, M, R) ->
- ok = file:write(FD, R);
-create_file(FD, M, N0, R) when M + 8 =&lt; N0 ->
- N1 = N0-1, N2 = N0-2, N3 = N0-3, N4 = N0-4,
- N5 = N0-5, N6 = N0-6, N7 = N0-7, N8 = N0-8,
- create_file(FD, M, N8,
- [<<N8:32/unsigned, N7:32/unsigned,
- N6:32/unsigned, N5:32/unsigned,
- N4:32/unsigned, N3:32/unsigned,
- N2:32/unsigned, N1:32/unsigned>> | R]);
-create_file(FD, M, N0, R) ->
- N1 = N0-1,
- create_file(FD, M, N1, [<<N1:32/unsigned>> | R]).]]></code>
+create_file_1(Fd, M) when M >= 128 ->
+ ok = file:write(Fd, <<0:(128)/unit:8>>),
+ create_file_1(Fd, M - 128);
+create_file_1(Fd, M) ->
+    ok = file:write(Fd, <<0:(M)/unit:8>>),
+    create_file_1(Fd, 0).]]></code>
- <note>
- <p>Trust only your own benchmarks. If the list length in
- <c>create_file/2</c> above is increased, it runs slightly
- faster, but consumes more memory and causes more memory
- fragmentation. How much this affects your application is
- something that this simple benchmark cannot predict.</p>
- <p>If the size of each binary is increased to 64 bytes, it
- also runs slightly faster, but the code is then twice as clumsy.
- In the current implementation, binaries larger than 64 bytes are
- stored in memory common to all processes and not copied when
- sent between processes, while these smaller binaries are stored
- on the process heap and copied when sent like any other term.</p>
- <p>So, with a binary size of 68 bytes, <c>create_file/2</c> runs
- 30 percent slower than with 64 bytes, and causes much more
- memory fragmentation. Notice that if the binaries were to be sent
- between processes (for example, a non-raw file), the results
- would probably be completely different.</p>
- </note>
- <p>A raw file is really a port. When writing data to a port, it is
- efficient to write a list of binaries. It is not needed to
- flatten a deep list before writing. On Unix hosts, scatter output,
- which writes a set of buffers in one operation, is used when
- possible. In this way <c>write(FD, [Bin1, Bin2 | Bin3])</c>
- writes the contents of the binaries without copying the data
- at all, except for perhaps deep down in the operating system
- kernel.</p>
- <p>For raw files, <c>pwrite/2</c> and <c>pread/2</c> are
- efficiently implemented. The file driver is called only once for
- the whole operation, and the list iteration is done in the file
- driver.</p>
- <p>The options <c>delayed_write</c> and <c>read_ahead</c> to
- <seealso marker="#open/2"><c>open/2</c></seealso>
- make the file driver cache data to reduce
- the number of operating system calls. The function
- <c>create_file/2</c> in the recent example takes 60 seconds
- without option <c>delayed_write</c>, which is 2.6
- times slower.</p>
- <p>As a bad example, <c>create_file_slow/2</c>
- without options <c>raw</c>, <c>binary</c>, and <c>delayed_write</c>,
- meaning it calls <c>open(Name, [write])</c>, needs
- 1 min 20 seconds for the job, which is 3.5 times slower than
- the first example, and 150 times slower than the optimized
- <c>create_file/2</c>.</p>
- <warning>
- <p>If an error occurs when accessing an open file with module
- <seealso marker="stdlib:io"><c>io</c></seealso>,
- the process handling the file exits. The dead
- file process can hang if a process tries to access it later.
- This will be fixed in a future release.</p>
- </warning>
+ <p>When writing data, it is generally more efficient to write a list of
+ binaries rather than a list of integers. There is no need to
+ flatten a deep list before writing. On Unix hosts, scatter output,
+ which writes a set of buffers in one operation, is used when
+ possible. In this way <c>write(FD, [Bin1, Bin2 | Bin3])</c>
+ writes the contents of the binaries without copying the data
+ at all, except for perhaps deep down in the operating system
+ kernel.</p>
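+ <p>As an illustration only (a sketch, not part of the module), the
+ following function writes a header, a body, and a trailer with a single
+ call to <c>write/2</c>, passing the binaries as a deep list:</p>
+ <code type="none"><![CDATA[
+write_framed(Name, Header, Body, Trailer)
+  when is_binary(Header), is_binary(Body), is_binary(Trailer) ->
+    {ok, Fd} = file:open(Name, [raw, write, delayed_write, binary]),
+    %% The deep list is written as is; no flattening is needed.
+    ok = file:write(Fd, [Header, Body | Trailer]),
+    file:close(Fd).]]></code>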
+ <warning>
+ <p>If an error occurs when accessing an open file with module
+ <seealso marker="stdlib:io"><c>io</c></seealso>, the process
+ handling the file exits. The dead file process can hang if a process
+ tries to access it later. This will be fixed in a future release.
+ </p>
+ </warning>
</section>
<section>
diff --git a/lib/kernel/doc/src/gen_sctp.xml b/lib/kernel/doc/src/gen_sctp.xml
index 737800c6b1..f70d6c24db 100644
--- a/lib/kernel/doc/src/gen_sctp.xml
+++ b/lib/kernel/doc/src/gen_sctp.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>2007</year><year>2016</year>
+ <year>2007</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -32,7 +32,7 @@
<rev>A</rev>
<file>gen_sctp.xml</file>
</header>
- <module>gen_sctp</module>
+ <module since="">gen_sctp</module>
<modulesummary>Functions for communicating with sockets using the SCTP
protocol.</modulesummary>
<description>
@@ -100,7 +100,7 @@
<funcs>
<func>
- <name name="abort" arity="2"/>
+ <name name="abort" arity="2" since=""/>
<fsummary>Abnormally terminate the association specified by
<c>Assoc</c>, without flushing of unsent data.</fsummary>
<desc>
@@ -113,7 +113,7 @@
</func>
<func>
- <name name="close" arity="1"/>
+ <name name="close" arity="1" since=""/>
<fsummary>Close the socket and all associations on it.</fsummary>
<desc>
<p>Closes the socket and all associations on it. The unsent
@@ -128,7 +128,7 @@
</func>
<func>
- <name name="connect" arity="4"/>
+ <name name="connect" arity="4" since=""/>
<fsummary>Same as <c>connect(Socket, Addr, Port, Opts, infinity)</c>.</fsummary>
<desc>
<p>Same as <c>connect(<anno>Socket</anno>, <anno>Addr</anno>,
@@ -137,7 +137,7 @@
</func>
<func>
- <name name="connect" arity="5"/>
+ <name name="connect" arity="5" since=""/>
<fsummary>Establish a new association for socket <c>Socket</c>, with a
peer (SCTP server socket).</fsummary>
<desc>
@@ -213,7 +213,7 @@ connect(Socket, Ip, Port>,
</func>
<func>
- <name name="connect_init" arity="4"/>
+ <name name="connect_init" arity="4" since="OTP R13B04"/>
<fsummary>Same as <c>connect_init(Socket, Addr, Port, Opts, infinity)</c>..</fsummary>
<desc>
<p>Same as <c>connect_init(<anno>Socket</anno>, <anno>Addr</anno>,
@@ -222,7 +222,7 @@ connect(Socket, Ip, Port>,
</func>
<func>
- <name name="connect_init" arity="5"/>
+ <name name="connect_init" arity="5" since="OTP R13B04"/>
<fsummary>Initiate a new association for socket <c>Socket</c>, with a
peer (SCTP server socket).</fsummary>
<desc>
@@ -248,7 +248,7 @@ connect(Socket, Ip, Port>,
</func>
<func>
- <name name="controlling_process" arity="2"/>
+ <name name="controlling_process" arity="2" since=""/>
<fsummary>Assign a new controlling process pid to the socket.</fsummary>
<desc>
<p>Assigns a new controlling process <c><anno>Pid</anno></c> to
@@ -259,7 +259,7 @@ connect(Socket, Ip, Port>,
</func>
<func>
- <name name="eof" arity="2"/>
+ <name name="eof" arity="2" since=""/>
<fsummary>Gracefully terminate the association specified by <c>Assoc</c>,
with flushing of all unsent data.</fsummary>
<desc>
@@ -272,7 +272,7 @@ connect(Socket, Ip, Port>,
</func>
<func>
- <name name="error_string" arity="1"/>
+ <name name="error_string" arity="1" since=""/>
<fsummary>Translate an SCTP error number into a string.</fsummary>
<desc>
<p>Translates an SCTP error number from, for example,
@@ -283,8 +283,8 @@ connect(Socket, Ip, Port>,
</func>
<func>
- <name name="listen" arity="2" clause_i="1"/>
- <name name="listen" arity="2" clause_i="2"/>
+ <name name="listen" arity="2" clause_i="1" since=""/>
+ <name name="listen" arity="2" clause_i="2" since="OTP R15B"/>
<fsummary>Set up a socket to listen.</fsummary>
<desc>
<p>Sets up a socket to listen on the IP address and port number
@@ -300,10 +300,10 @@ connect(Socket, Ip, Port>,
</func>
<func>
- <name name="open" arity="0"/>
- <name name="open" arity="1" clause_i="1"/>
- <name name="open" arity="1" clause_i="2"/>
- <name name="open" arity="2"/>
+ <name name="open" arity="0" since=""/>
+ <name name="open" arity="1" clause_i="1" since=""/>
+ <name name="open" arity="1" clause_i="2" since=""/>
+ <name name="open" arity="2" since=""/>
<fsummary>Create an SCTP socket and binds it to local addresses.</fsummary>
<desc>
<p>Creates an SCTP socket and binds it to the local addresses
@@ -331,11 +331,42 @@ connect(Socket, Ip, Port>,
with <anno>SockType</anno> <c>seqpacket</c>, and with reasonably large
<seealso marker="inet#option-sndbuf">kernel</seealso> and driver
<seealso marker="inet#option-buffer">buffers</seealso>.</p>
+ <p>
+ If the socket is in
+ <seealso marker="#option-active">passive</seealso>
+ mode, data can be received through the
+ <seealso marker="#recv/1"><c>recv/1,2</c></seealso>
+ calls.
+ </p>
+ <p>
+ If the socket is in
+ <seealso marker="#option-active">active</seealso>
+ mode, received data is delivered to the controlling process
+ as messages:
+ </p>
+ <code type="none">
+{sctp, <anno>Socket</anno>, FromIP, FromPort, {AncData, Data}}
+ </code>
+ <p>
+ See
+ <seealso marker="#recv/1"><c>recv/1,2</c></seealso>
+ for a description of the message fields.
+ </p>
+ <note>
+ <p>
+ This message format unfortunately differs slightly from the
+ <seealso marker="gen_udp#open/1"><c>gen_udp</c></seealso>
+ message format with ancillary data,
+ and from the
+ <seealso marker="#recv/1"><c>recv/1,2</c></seealso>
+ return tuple format.
+ </p>
+ </note>
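+ <p>
+ As an illustration only (a sketch), a controlling process in active
+ mode can match these messages as follows, where <c>handle_data/4</c>
+ is a hypothetical callback:
+ </p>
+ <code type="none"><![CDATA[
+loop(Socket) ->
+    receive
+        {sctp, Socket, FromIP, FromPort, {AncData, Data}} ->
+            %% Data is a binary, a list of bytes, or an SCTP event.
+            handle_data(FromIP, FromPort, AncData, Data),
+            loop(Socket)
+    end.]]></code>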
</desc>
</func>
<func>
- <name name="peeloff" arity="2"/>
+ <name name="peeloff" arity="2" since="OTP R15B"/>
<fsummary>Peel off a type <c>stream</c> socket from a type
<c>seqpacket</c> one.</fsummary>
<desc>
@@ -356,8 +387,8 @@ connect(Socket, Ip, Port>,
</func>
<func>
- <name name="recv" arity="1"/>
- <name name="recv" arity="2"/>
+ <name name="recv" arity="1" since=""/>
+ <name name="recv" arity="2" since=""/>
<fsummary>Receive a message from a socket.</fsummary>
<desc>
<p>Receives the <c><anno>Data</anno></c> message from any association
@@ -380,6 +411,19 @@ connect(Socket, Ip, Port>,
socket option
<seealso marker="#option-sctp_get_peer_addr_info"><c>sctp_get_peer_addr_info</c></seealso>,
but this does still not produce the stream number).</p>
+ <p>
+ <c><anno>AncData</anno></c> may also contain
+ <seealso marker="inet#type-ancillary_data">
+ ancillary data
+ </seealso>
+ from the socket
+ <seealso marker="#type-option">options</seealso>
+ <seealso marker="inet#option-recvtos"><c>recvtos</c></seealso>,
+ <seealso marker="inet#option-recvtclass"><c>recvtclass</c></seealso>
+ or
+ <seealso marker="inet#option-recvttl"><c>recvttl</c></seealso>,
+ if that is supported by the platform for the socket.
+ </p>
<p>The <c><anno>Data</anno></c> received can be a <c>binary()</c>
or a <c>list()</c> of bytes (integers in the range 0 through 255)
depending on the socket mode, or an SCTP event.</p>
@@ -488,7 +532,7 @@ connect(Socket, Ip, Port>,
</func>
<func>
- <name name="send" arity="3"/>
+ <name name="send" arity="3" since=""/>
<fsummary>Send a message using an <c>#sctp_sndrcvinfo{}</c>record.</fsummary>
<desc>
<p>Sends the <c><anno>Data</anno></c> message with all sending
@@ -503,7 +547,7 @@ connect(Socket, Ip, Port>,
</func>
<func>
- <name name="send" arity="4"/>
+ <name name="send" arity="4" since=""/>
<fsummary>Send a message over an existing association and specified
stream.</fsummary>
<desc>
@@ -544,12 +588,25 @@ connect(Socket, Ip, Port>,
<seealso marker="#recv/1"><c>recv</c></seealso> call
to retrieve the available data from the socket.</p>
</item>
+ <item>
+ <p>
+ If <c>true|once|N</c> (active modes),
+ received data or events are sent to the owning process.
+ See <seealso marker="#open/0"><c>open/0..2</c></seealso>
+ for the message format.
+ </p>
+ </item>
<item>
- <p>If <c>true</c> (full active mode), the pending data or events are
- sent to the owning process.</p>
- <p>Notice that this can cause the message queue to overflow,
- as there is no way to throttle the sender in this case
- (no flow control).</p>
+ <p>
+ If <c>true</c> (full active mode), there is no flow control.
+ </p>
+ <note>
+ <p>
+ Note that this can cause the message queue to overflow,
+ causing, for example, the virtual machine
+ to run out of memory and crash.
+ </p>
+ </note>
</item>
<item>
<p>If <c>once</c>, only one message is automatically placed
diff --git a/lib/kernel/doc/src/gen_tcp.xml b/lib/kernel/doc/src/gen_tcp.xml
index e6104b0c76..fc16473393 100644
--- a/lib/kernel/doc/src/gen_tcp.xml
+++ b/lib/kernel/doc/src/gen_tcp.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1997</year><year>2017</year>
+ <year>1997</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -27,7 +27,7 @@
<date>1997-10-24</date>
<rev>A</rev>
</header>
- <module>gen_tcp</module>
+ <module since="">gen_tcp</module>
<modulesummary>Interface to TCP/IP sockets.</modulesummary>
<description>
<p>This module provides functions for communicating
@@ -70,6 +70,32 @@ do_recv(Sock, Bs) ->
<name name="option"/>
</datatype>
<datatype>
+ <name name="pktoptions_value"/>
+ <desc>
+ <p>
+ If the platform implements the IPv4 option
+ <c>IP_PKTOPTIONS</c>, or the IPv6 option
+ <c>IPV6_PKTOPTIONS</c> or <c>IPV6_2292PKTOPTIONS</c> for the socket,
+ this value is returned from
+ <seealso marker="inet#getopts/2"><c>inet:getopts/2</c></seealso>
+ when called with the option name
+ <seealso marker="#type-option_name"><c>pktoptions</c></seealso>.
+ </p>
+ <note>
+ <p>
+ This option appears to be very Linux specific,
+ and its continued existence in future Linux kernel versions
+ is uncertain, since the option is part of RFC 2292,
+ which was obsoleted back in 2003 by RFC 3542
+ that <em>explicitly</em> removes this possibility to get
+ packet information from a stream socket.
+ For comparison: it has existed in FreeBSD but is now removed,
+ at least as of FreeBSD 10.
+ </p>
+ </note>
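+ <p>
+ For illustration only, a sketch of how the value might be read from a
+ connected socket on a platform that supports it:
+ </p>
+ <code type="none"><![CDATA[
+get_pktoptions(Sock) ->
+    %% Expected to return {ok, [{pktoptions, AncData}]}, where AncData
+    %% is an ancillary data list such as [{tos, 0}].
+    inet:getopts(Sock, [pktoptions]).]]></code>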
+ </desc>
+ </datatype>
+ <datatype>
<name name="option_name"/>
</datatype>
<datatype>
@@ -90,8 +116,8 @@ do_recv(Sock, Bs) ->
<funcs>
<func>
- <name name="accept" arity="1"/>
- <name name="accept" arity="2"/>
+ <name name="accept" arity="1" since=""/>
+ <name name="accept" arity="2" since=""/>
<fsummary>Accept an incoming connection request on a listening socket.</fsummary>
<type_desc variable="ListenSocket">Returned by
<seealso marker="#listen/2"><c>listen/2</c></seealso>.
@@ -137,7 +163,7 @@ do_recv(Sock, Bs) ->
</func>
<func>
- <name name="close" arity="1"/>
+ <name name="close" arity="1" since=""/>
<fsummary>Close a TCP socket.</fsummary>
<desc>
<p>Closes a TCP socket.</p>
@@ -162,8 +188,8 @@ do_recv(Sock, Bs) ->
</func>
<func>
- <name name="connect" arity="3"/>
- <name name="connect" arity="4"/>
+ <name name="connect" arity="3" since=""/>
+ <name name="connect" arity="4" since=""/>
<fsummary>Connect to a TCP port.</fsummary>
<desc>
<p>Connects to a server on TCP port <c><anno>Port</anno></c> on the host
@@ -242,7 +268,7 @@ do_recv(Sock, Bs) ->
</func>
<func>
- <name name="controlling_process" arity="2"/>
+ <name name="controlling_process" arity="2" since=""/>
<fsummary>Change controlling process of a socket.</fsummary>
<desc>
<p>Assigns a new controlling process <c><anno>Pid</anno></c> to
@@ -266,7 +292,7 @@ do_recv(Sock, Bs) ->
</func>
<func>
- <name name="listen" arity="2"/>
+ <name name="listen" arity="2" since=""/>
<fsummary>Set up a socket to listen on a port.</fsummary>
<desc>
<p>Sets up a socket to listen on port <c><anno>Port</anno></c> on
@@ -323,8 +349,8 @@ do_recv(Sock, Bs) ->
</func>
<func>
- <name name="recv" arity="2"/>
- <name name="recv" arity="3"/>
+ <name name="recv" arity="2" since=""/>
+ <name name="recv" arity="3" since=""/>
<fsummary>Receive a packet from a passive socket.</fsummary>
<type_desc variable="HttpPacket">See the description of
<c>HttpPacket</c> in
@@ -349,7 +375,7 @@ do_recv(Sock, Bs) ->
</func>
<func>
- <name name="send" arity="2"/>
+ <name name="send" arity="2" since=""/>
<fsummary>Send a packet.</fsummary>
<desc>
<p>Sends a packet on a socket.</p>
@@ -360,7 +386,7 @@ do_recv(Sock, Bs) ->
</func>
<func>
- <name name="shutdown" arity="2"/>
+ <name name="shutdown" arity="2" since=""/>
<fsummary>Asynchronously close a socket.</fsummary>
<desc>
<p>Closes a socket in one or two directions.</p>
diff --git a/lib/kernel/doc/src/gen_udp.xml b/lib/kernel/doc/src/gen_udp.xml
index f79566ef71..d20fc1fdfd 100644
--- a/lib/kernel/doc/src/gen_udp.xml
+++ b/lib/kernel/doc/src/gen_udp.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1997</year><year>2016</year>
+ <year>1997</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -28,7 +28,7 @@
<date>1997-12-03</date>
<rev>A</rev>
</header>
- <module>gen_udp</module>
+ <module since="">gen_udp</module>
<modulesummary>Interface to UDP sockets.</modulesummary>
<description>
<p>This module provides functions for communicating
@@ -53,7 +53,7 @@
<funcs>
<func>
- <name name="close" arity="1"/>
+ <name name="close" arity="1" since=""/>
<fsummary>Close a UDP socket.</fsummary>
<desc>
<p>Closes a UDP socket.</p>
@@ -61,7 +61,7 @@
</func>
<func>
- <name name="controlling_process" arity="2"/>
+ <name name="controlling_process" arity="2" since=""/>
<fsummary>Change controlling process of a socket.</fsummary>
<desc>
<p>Assigns a new controlling process <c><anno>Pid</anno></c> to
@@ -77,8 +77,8 @@
</func>
<func>
- <name name="open" arity="1"/>
- <name name="open" arity="2"/>
+ <name name="open" arity="1" since=""/>
+ <name name="open" arity="2" since=""/>
<fsummary>Associate a UDP port number with the process calling it.</fsummary>
<desc>
<p>Associates a UDP port number (<c><anno>Port</anno></c>) with the
@@ -147,7 +147,21 @@
at the opened port, if the socket is in an active mode, the packets
are delivered as messages to the controlling process:</p>
<code type="none">
-{udp, Socket, IP, InPortNo, Packet}</code>
+{udp, Socket, IP, InPortNo, Packet} % Without ancillary data
+{udp, Socket, IP, InPortNo, AncData, Packet} % With ancillary data
+ </code>
+ <p>
+ The message contains an <c>AncData</c> field
+ if any of the socket
+ <seealso marker="#type-option">options</seealso>
+ <seealso marker="inet#option-recvtos"><c>recvtos</c></seealso>,
+ <seealso marker="inet#option-recvtclass"><c>recvtclass</c></seealso>
+ or
+ <seealso marker="inet#option-recvttl"><c>recvttl</c></seealso>
+ are active, otherwise it does not.
+ </p>
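+ <p>
+ As an illustration only (a sketch), a controlling process can match
+ both message forms, where <c>handle_packet/4</c> is a hypothetical
+ helper:
+ </p>
+ <code type="none"><![CDATA[
+loop(Socket) ->
+    receive
+        %% Without ancillary data:
+        {udp, Socket, IP, InPortNo, Packet} ->
+            handle_packet(IP, InPortNo, [], Packet),
+            loop(Socket);
+        %% With ancillary data (recvtos, recvtclass or recvttl active):
+        {udp, Socket, IP, InPortNo, AncData, Packet} ->
+            handle_packet(IP, InPortNo, AncData, Packet),
+            loop(Socket)
+    end.]]></code>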
+ <p>
+ </p>
<p>If the socket is not in an active mode, data can be
retrieved through the
<seealso marker="#recv/2"><c>recv/2,3</c></seealso> calls.
@@ -175,18 +189,31 @@
</func>
<func>
- <name name="recv" arity="2"/>
- <name name="recv" arity="3"/>
+ <name name="recv" arity="2" since=""/>
+ <name name="recv" arity="3" since=""/>
<fsummary>Receive a packet from a passive socket.</fsummary>
<desc>
- <p>Receives a packet from a socket in passive mode. Optional parameter
+ <p>
+ Receives a packet from a socket in passive mode. Optional parameter
<c><anno>Timeout</anno></c> specifies a time-out in milliseconds.
- Defaults to <c>infinity</c>.</p>
+ Defaults to <c>infinity</c>.
+ </p>
+ <p>
+ If any of the socket
+ <seealso marker="#type-option">options</seealso>
+ <seealso marker="inet#option-recvtos"><c>recvtos</c></seealso>,
+ <seealso marker="inet#option-recvtclass"><c>recvtclass</c></seealso>
+ or
+ <seealso marker="inet#option-recvttl"><c>recvttl</c></seealso>
+ are active, the <c><anno>RecvData</anno></c> tuple contains an
+ <c><anno>AncData</anno></c> field,
+ otherwise it does not.
+ </p>
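+ <p>
+ For illustration only, a sketch receiving one datagram from a passive
+ socket, handling both return tuple forms:
+ </p>
+ <code type="none"><![CDATA[
+recv_one(Socket, Timeout) ->
+    case gen_udp:recv(Socket, 0, Timeout) of
+        {ok, {Address, Port, AncData, Packet}} ->
+            {Address, Port, AncData, Packet};
+        {ok, {Address, Port, Packet}} ->
+            {Address, Port, [], Packet};
+        {error, Reason} ->
+            {error, Reason}
+    end.]]></code>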
</desc>
</func>
<func>
- <name name="send" arity="4"/>
+ <name name="send" arity="4" since=""/>
<fsummary>Send a packet.</fsummary>
<desc>
<p>
diff --git a/lib/kernel/doc/src/global.xml b/lib/kernel/doc/src/global.xml
index 4442741f54..dfe71de5ce 100644
--- a/lib/kernel/doc/src/global.xml
+++ b/lib/kernel/doc/src/global.xml
@@ -28,7 +28,7 @@
<date>1997-11-17</date>
<rev></rev>
</header>
- <module>global</module>
+ <module since="">global</module>
<modulesummary>A global name registration facility.</modulesummary>
<description>
<p>This module consists of the following services:</p>
@@ -100,8 +100,8 @@
<funcs>
<func>
- <name name="del_lock" arity="1"/>
- <name name="del_lock" arity="2"/>
+ <name name="del_lock" arity="1" since=""/>
+ <name name="del_lock" arity="2" since=""/>
<fsummary>Delete a lock.</fsummary>
<desc>
<p>Deletes the lock <c><anno>Id</anno></c> synchronously.</p>
@@ -109,7 +109,7 @@
</func>
<func>
- <name name="notify_all_name" arity="3"/>
+ <name name="notify_all_name" arity="3" since=""/>
<fsummary>Name resolving function that notifies both pids.</fsummary>
<desc>
<p>Can be used as a name resolving function for
@@ -123,7 +123,7 @@
</func>
<func>
- <name name="random_exit_name" arity="3"/>
+ <name name="random_exit_name" arity="3" since=""/>
<fsummary>Name resolving function that kills one pid.</fsummary>
<desc>
<p>Can be used as a name resolving function for
@@ -136,7 +136,7 @@
</func>
<func>
- <name name="random_notify_name" arity="3"/>
+ <name name="random_notify_name" arity="3" since=""/>
<fsummary>Name resolving function that notifies one pid.</fsummary>
<desc>
<p>Can be used as a name resolving function for
@@ -150,8 +150,8 @@
</func>
<func>
- <name name="re_register_name" arity="2"/>
- <name name="re_register_name" arity="3"/>
+ <name name="re_register_name" arity="2" since=""/>
+ <name name="re_register_name" arity="3" since=""/>
<fsummary>Atomically re-register a name.</fsummary>
<type name="method"/>
<type_desc name="method">{<c>Module</c>, <c>Function</c>}
@@ -167,8 +167,8 @@
</func>
<func>
- <name name="register_name" arity="2"/>
- <name name="register_name" arity="3"/>
+ <name name="register_name" arity="2" since=""/>
+ <name name="register_name" arity="3" since=""/>
<fsummary>Globally register a name for a pid.</fsummary>
<type name="method"/>
<type_desc name="method">{<c>Module</c>, <c>Function</c>} is also
@@ -221,7 +221,7 @@
</func>
<func>
- <name name="registered_names" arity="0"/>
+ <name name="registered_names" arity="0" since=""/>
<fsummary>All globally registered names.</fsummary>
<desc>
<p>Returns a list of all globally registered names.</p>
@@ -229,7 +229,7 @@
</func>
<func>
- <name name="send" arity="2"/>
+ <name name="send" arity="2" since=""/>
<fsummary>Send a message to a globally registered pid.</fsummary>
<desc>
<p>Sends message <c><anno>Msg</anno></c> to the pid globally registered
@@ -241,9 +241,9 @@
</func>
<func>
- <name name="set_lock" arity="1"/>
- <name name="set_lock" arity="2"/>
- <name name="set_lock" arity="3"/>
+ <name name="set_lock" arity="1" since=""/>
+ <name name="set_lock" arity="2" since=""/>
+ <name name="set_lock" arity="3" since=""/>
<fsummary>Set a lock on the specified nodes.</fsummary>
<type name="id"/>
<type name="retries"/>
@@ -287,7 +287,7 @@
</func>
<func>
- <name name="sync" arity="0"/>
+ <name name="sync" arity="0" since=""/>
<fsummary>Synchronize the global name server.</fsummary>
<desc>
<p>Synchronizes the global name server with all nodes known to
@@ -302,9 +302,9 @@
</func>
<func>
- <name name="trans" arity="2"/>
- <name name="trans" arity="3"/>
- <name name="trans" arity="4"/>
+ <name name="trans" arity="2" since=""/>
+ <name name="trans" arity="3" since=""/>
+ <name name="trans" arity="4" since=""/>
<fsummary>Micro transaction facility.</fsummary>
<type name="retries"/>
<type name="trans_fun"/>
@@ -322,7 +322,7 @@
</func>
<func>
- <name name="unregister_name" arity="1"/>
+ <name name="unregister_name" arity="1" since=""/>
<fsummary>Remove a globally registered name for a pid.</fsummary>
<desc>
<p>Removes the globally registered name <c><anno>Name</anno></c> from
@@ -331,7 +331,7 @@
</func>
<func>
- <name name="whereis_name" arity="1"/>
+ <name name="whereis_name" arity="1" since=""/>
<fsummary>Get the pid with a specified globally registered name.</fsummary>
<desc>
<p>Returns the pid with the globally registered name
diff --git a/lib/kernel/doc/src/global_group.xml b/lib/kernel/doc/src/global_group.xml
index 8f947b9adf..74d15cd476 100644
--- a/lib/kernel/doc/src/global_group.xml
+++ b/lib/kernel/doc/src/global_group.xml
@@ -28,7 +28,7 @@
<date>1998-12-18</date>
<rev>B</rev>
</header>
- <module>global_group</module>
+ <module since="">global_group</module>
<modulesummary>Grouping nodes to global name registration groups.</modulesummary>
<description>
<p>This module makes it possible to partition the nodes of a
@@ -105,7 +105,7 @@
<funcs>
<func>
- <name name="global_groups" arity="0"/>
+ <name name="global_groups" arity="0" since=""/>
<fsummary>Return the global group names.</fsummary>
<desc>
<p>Returns a tuple containing the name of the global group that
@@ -116,7 +116,7 @@
</func>
<func>
- <name name="info" arity="0"/>
+ <name name="info" arity="0" since=""/>
<fsummary>Information about global groups.</fsummary>
<type name="info_item"/>
<type name="sync_state"/>
@@ -173,7 +173,7 @@
</func>
<func>
- <name name="monitor_nodes" arity="1"/>
+ <name name="monitor_nodes" arity="1" since=""/>
<fsummary>Subscribe to node status changes.</fsummary>
<desc>
<p>Depending on <c><anno>Flag</anno></c>, the calling process
@@ -187,7 +187,7 @@
</func>
<func>
- <name name="own_nodes" arity="0"/>
+ <name name="own_nodes" arity="0" since=""/>
<fsummary>Return the group nodes.</fsummary>
<desc>
<p>Returns the names of all group nodes, regardless of their
@@ -196,7 +196,7 @@
</func>
<func>
- <name name="registered_names" arity="1"/>
+ <name name="registered_names" arity="1" since=""/>
<fsummary>Return globally registered names.</fsummary>
<desc>
<p>Returns a list of all names that are globally registered
@@ -205,8 +205,8 @@
</func>
<func>
- <name name="send" arity="2"/>
- <name name="send" arity="3"/>
+ <name name="send" arity="2" since=""/>
+ <name name="send" arity="3" since=""/>
<fsummary>Send a message to a globally registered pid.</fsummary>
<desc>
<p>Searches for <c><anno>Name</anno></c>, globally registered on
@@ -224,7 +224,7 @@
</func>
<func>
- <name name="sync" arity="0"/>
+ <name name="sync" arity="0" since=""/>
<fsummary>Synchronize the group nodes.</fsummary>
<desc>
<p>Synchronizes the group nodes, that is, the global name
@@ -242,8 +242,8 @@
</func>
<func>
- <name name="whereis_name" arity="1"/>
- <name name="whereis_name" arity="2"/>
+ <name name="whereis_name" arity="1" since=""/>
+ <name name="whereis_name" arity="2" since=""/>
<fsummary>Get the pid with a specified globally registered name.</fsummary>
<desc>
<p>Searches for <c><anno>Name</anno></c>, globally registered on
diff --git a/lib/kernel/doc/src/heart.xml b/lib/kernel/doc/src/heart.xml
index 5b5b71e521..4243b1ffe8 100644
--- a/lib/kernel/doc/src/heart.xml
+++ b/lib/kernel/doc/src/heart.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2016</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -28,7 +28,7 @@
<date>1998-01-28</date>
<rev>A</rev>
</header>
- <module>heart</module>
+ <module since="">heart</module>
<modulesummary>Heartbeat monitoring of an Erlang runtime system.</modulesummary>
<description>
<p>This modules contains the interface to the <c>heart</c> process.
@@ -59,8 +59,9 @@
<pre>
% <input>erl -heart -env HEART_BEAT_TIMEOUT 30 ...</input></pre>
<p>The value (in seconds) must be in the range 10 &lt; X &lt;= 65535.</p>
- <p>Notice that if the system clock is adjusted with
- more than <c>HEART_BEAT_TIMEOUT</c> seconds, <c>heart</c>
+ <p>When running on OSs lacking support for monotonic time,
+ <c>heart</c> is susceptible to system clock adjustments of more than
+ <c>HEART_BEAT_TIMEOUT</c> seconds. When this happens, <c>heart</c>
times out and tries to reboot the system. This can occur, for
example, if the system clock is adjusted automatically by use of the
Network Time Protocol (NTP).</p>
@@ -118,7 +119,7 @@
<funcs>
<func>
- <name name="set_cmd" arity="1"/>
+ <name name="set_cmd" arity="1" since=""/>
<fsummary>Set a temporary reboot command.</fsummary>
<desc>
<p>Sets a temporary reboot command. This command is used if
@@ -135,7 +136,7 @@
</func>
<func>
- <name name="clear_cmd" arity="0"/>
+ <name name="clear_cmd" arity="0" since=""/>
<fsummary>Clear the temporary boot command.</fsummary>
<desc>
<p>Clears the temporary boot command. If the system terminates,
@@ -144,7 +145,7 @@
</func>
<func>
- <name name="get_cmd" arity="0"/>
+ <name name="get_cmd" arity="0" since=""/>
<fsummary>Get the temporary reboot command.</fsummary>
<desc>
<p>Gets the temporary reboot command. If the command is cleared,
@@ -153,7 +154,7 @@
</func>
<func>
- <name name="set_callback" arity="2"/>
+ <name name="set_callback" arity="2" since="OTP 18.3"/>
<fsummary>Set a validation callback</fsummary>
<desc>
<p> This validation callback will be executed before any
@@ -165,14 +166,14 @@
</desc>
</func>
<func>
- <name name="clear_callback" arity="0"/>
+ <name name="clear_callback" arity="0" since="OTP 18.3"/>
<fsummary>Clear the validation callback</fsummary>
<desc>
<p>Removes the validation callback call before heartbeats.</p>
</desc>
</func>
<func>
- <name name="get_callback" arity="0"/>
+ <name name="get_callback" arity="0" since="OTP 18.3"/>
<fsummary>Get the validation callback</fsummary>
<desc>
<p>Get the validation callback. If the callback is cleared, <c>none</c> will be returned.</p>
@@ -180,7 +181,7 @@
</func>
<func>
- <name name="set_options" arity="1"/>
+ <name name="set_options" arity="1" since="OTP 18.3"/>
<fsummary>Set a list of options</fsummary>
<desc>
<p> Valid options <c>set_options</c> are: </p>
@@ -198,7 +199,7 @@
</desc>
</func>
<func>
- <name name="get_options" arity="0"/>
+ <name name="get_options" arity="0" since="OTP 18.3"/>
<fsummary>Get the temporary reboot command</fsummary>
<desc>
<p>Returns <c>{ok, Options}</c> where <c>Options</c> is a list of current options enabled for heart.
diff --git a/lib/kernel/doc/src/inet.xml b/lib/kernel/doc/src/inet.xml
index 169a76463b..5e33bbc3ff 100644
--- a/lib/kernel/doc/src/inet.xml
+++ b/lib/kernel/doc/src/inet.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1997</year><year>2017</year>
+ <year>1997</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -28,7 +28,7 @@
<date>1998-02-04</date>
<rev>A</rev>
</header>
- <module>inet</module>
+ <module since="">inet</module>
<modulesummary>Access to TCP/IP protocols.</modulesummary>
<description>
<p>This module provides access to TCP/IP protocols.</p>
@@ -177,6 +177,100 @@ fe80::204:acff:fe17:bf38
</desc>
</datatype>
<datatype>
+ <name name="ancillary_data"/>
+ <desc>
+ <p>
+ Ancillary data received with the data packet
+ or read with the socket option
+ <seealso marker="gen_tcp#type-pktoptions_value">
+ <c>pktoptions</c>
+ </seealso>
+ from a TCP socket.
+ </p>
+ <p>
+ The value(s) correspond to the currently active socket
+ <seealso marker="#type-socket_setopt">options</seealso>
+ <seealso marker="inet#option-recvtos"><c>recvtos</c></seealso>,
+ <seealso marker="inet#option-recvtclass"><c>recvtclass</c></seealso>
+ and
+ <seealso marker="inet#option-recvttl"><c>recvttl</c></seealso>.
+ </p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="getifaddrs_ifopts"/>
+ <desc>
+ <p>
+ Interface address description list returned from
+ <seealso marker="#getifaddrs/0"><c>getifaddrs/0,1</c></seealso>
+ for a named interface, translated from the returned
+ data of the POSIX API function <c>getifaddrs()</c>.
+ </p>
+ <p>
+ <c><anno>Hwaddr</anno></c> is hardware dependent,
+ for example, on Ethernet interfaces it is
+ the 6-byte Ethernet address (MAC address (EUI-48 address)).
+ </p>
+ <p>
+ The tuples <c>{addr,<anno>Addr</anno>}</c>,
+ <c>{netmask,<anno>Netmask</anno>}</c>, and possibly
+ <c>{broadaddr,<anno>Broadaddr</anno>}</c> or
+ <c>{dstaddr,<anno>Dstaddr</anno>}</c>
+ are repeated in the list
+ if the interface has multiple addresses.
+ An interface may have multiple <c>{flag,_}</c> tuples,
+ for example if it has different flags
+ for different address families.
+ It is hard to say anything definite about multiple
+ <c>{hwaddr,<anno>Hwaddr</anno>}</c> tuples, though.
+ The tuple <c>{flag,<anno>Flags</anno>}</c> is mandatory;
+ all others are optional.
+ </p>
+ <p>
+ Do not rely too much on the order
+ of <c><anno>Flags</anno></c> atoms
+ or the <c><anno>Ifopt</anno></c> tuples.
+ There are, however, some rules:
+ </p>
+ <list type="bulleted">
+ <item><p>
+ A <c>{flag,_}</c> tuple applies to all other tuples that follow.
+ </p></item>
+ <item><p>
+ Immediately after <c>{addr,_}</c> follows <c>{netmask,_}</c>.
+ </p></item>
+ <item><p>
+ Immediately thereafter, <c>{broadaddr,_}</c> may follow
+ if <c>broadcast</c> is a member of <c><anno>Flags</anno></c>,
+ or <c>{dstaddr,_}</c> if <c>pointtopoint</c>
+ is a member of <c><anno>Flags</anno></c>.
+ <c>{dstaddr,_}</c> and <c>{broadaddr,_}</c>
+ do not both occur for the same <c>{addr,_}</c>.
+ </p></item>
+ <item><p>
+ Any <c>{netmask,_}</c>, <c>{broadaddr,_}</c>, or
+ <c>{dstaddr,_}</c> tuples that follow an
+ <c>{addr,<anno>Addr</anno>}</c>
+ tuple concern the address <c><anno>Addr</anno></c>.
+ </p></item>
+ </list>
+ <p>
+ The tuple <c>{hwaddr,_}</c> is not returned on Solaris, as the
+ hardware address historically belongs to the link layer
+ and it is not returned by the Solaris API function
+ <c>getifaddrs()</c>.
+ </p>
+ <warning>
+ <p>
+ On Windows, the data is fetched from different
+ OS API functions, so the <c><anno>Netmask</anno></c>
+ and <c><anno>Broadaddr</anno></c> values may be calculated,
+ as may some <c><anno>Flags</anno></c> values.
+ </p>
+ </warning>
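+ <p>
+ For illustration only, a sketch (not part of the module) that
+ collects the addresses reported for each interface:
+ </p>
+ <code type="none"><![CDATA[
+ifaddrs_by_name() ->
+    {ok, IfList} = inet:getifaddrs(),
+    [{IfName, [Addr || {addr, Addr} <- Ifopts]}
+     || {IfName, Ifopts} <- IfList].]]></code>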
+ </desc>
+ </datatype>
+ <datatype>
<name name="posix"/>
<desc>
<p>An atom that is named from the POSIX error codes used in Unix,
@@ -197,11 +291,17 @@ fe80::204:acff:fe17:bf38
<datatype>
<name name="address_family"/>
</datatype>
+ <datatype>
+ <name name="socket_protocol"/>
+ </datatype>
+ <datatype>
+ <name name="stat_option"/>
+ </datatype>
</datatypes>
<funcs>
<func>
- <name name="close" arity="1"/>
+ <name name="close" arity="1" since=""/>
<fsummary>Close a socket of any type.</fsummary>
<desc>
<p>Closes a socket of any type.</p>
@@ -209,7 +309,7 @@ fe80::204:acff:fe17:bf38
</func>
<func>
- <name name="format_error" arity="1"/>
+ <name name="format_error" arity="1" since=""/>
<fsummary>Return a descriptive string for an error reason.</fsummary>
<desc>
<p>Returns a diagnostic error string. For possible POSIX values and
@@ -219,7 +319,7 @@ fe80::204:acff:fe17:bf38
</func>
<func>
- <name name="get_rc" arity="0"/>
+ <name name="get_rc" arity="0" since=""/>
<fsummary>Return a list of IP configuration parameters.</fsummary>
<desc>
<p>
@@ -238,7 +338,7 @@ fe80::204:acff:fe17:bf38
</func>
<func>
- <name name="getaddr" arity="2"/>
+ <name name="getaddr" arity="2" since=""/>
<fsummary>Return the IP address for a host.</fsummary>
<desc>
<p>Returns the IP address for <c><anno>Host</anno></c> as a tuple of
@@ -248,7 +348,7 @@ fe80::204:acff:fe17:bf38
</func>
<func>
- <name name="getaddrs" arity="2"/>
+ <name name="getaddrs" arity="2" since=""/>
<fsummary>Return the IP addresses for a host.</fsummary>
<desc>
<p>Returns a list of all IP addresses for <c><anno>Host</anno></c>.
@@ -258,7 +358,7 @@ fe80::204:acff:fe17:bf38
</func>
<func>
- <name name="gethostbyaddr" arity="1"/>
+ <name name="gethostbyaddr" arity="1" since=""/>
<fsummary>Return a hostent record for the host with the specified
address.</fsummary>
<desc>
@@ -267,21 +367,19 @@ fe80::204:acff:fe17:bf38
</func>
<func>
- <name name="gethostbyname" arity="1"/>
+ <name name="gethostbyname" arity="1" since=""/>
<fsummary>Return a hostent record for the host with the specified name.
</fsummary>
<desc>
<p>Returns a <c>hostent</c> record for the host with the specified
hostname.</p>
<p>If resolver option <c>inet6</c> is <c>true</c>,
- an IPv6 address is looked up. If that fails,
- the IPv4 address is looked up and returned on
- IPv6-mapped IPv4 format.</p>
+ an IPv6 address is looked up.</p>
</desc>
</func>
<func>
- <name name="gethostbyname" arity="2"/>
+ <name name="gethostbyname" arity="2" since=""/>
<fsummary>Return a hostent record for the host with the specified name.
</fsummary>
<desc>
@@ -291,7 +389,7 @@ fe80::204:acff:fe17:bf38
</func>
<func>
- <name name="gethostname" arity="0"/>
+ <name name="gethostname" arity="0" since=""/>
<fsummary>Return the local hostname.</fsummary>
<desc>
<p>Returns the local hostname. Never fails.</p>
@@ -299,51 +397,81 @@ fe80::204:acff:fe17:bf38
</func>
<func>
- <name name="getifaddrs" arity="0"/>
+ <name name="getifaddrs" arity="0" since="OTP R14B01"/>
<fsummary>Return a list of interfaces and their addresses.</fsummary>
<desc>
- <p>Returns a list of 2-tuples containing interface names and the
- interface addresses. <c><anno>Ifname</anno></c> is a Unicode string.
- <c><anno>Hwaddr</anno></c> is hardware dependent, for example, on
- Ethernet interfaces
- it is the 6-byte Ethernet address (MAC address (EUI-48 address)).</p>
- <p>The tuples <c>{addr,<anno>Addr</anno>}</c>, <c>{netmask,_}</c>, and
- <c>{broadaddr,_}</c> are repeated in the result list if the interface
- has multiple addresses. If you come across an interface with
- multiple <c>{flag,_}</c> or <c>{hwaddr,_}</c> tuples, you have
- a strange interface or possibly a bug in this function. The tuple
- <c>{flag,_}</c> is mandatory, all others are optional.</p>
- <p>Do not rely too much on the order of <c><anno>Flag</anno></c> atoms
- or <c><anno>Ifopt</anno></c> tuples. There are however some rules:</p>
- <list type="bulleted">
- <item><p>Immediately after
- <c>{addr,_}</c> follows <c>{netmask,_}</c>.</p></item>
- <item><p>Immediately thereafter follows <c>{broadaddr,_}</c> if flag
- <c>broadcast</c> is <em>not</em> set and flag
- <c>pointtopoint</c> <em>is</em> set.</p></item>
- <item><p>Any <c>{netmask,_}</c>, <c>{broadaddr,_}</c>, or
- <c>{dstaddr,_}</c> tuples that follow an <c>{addr,_}</c>
- tuple concerns that address.</p></item>
- </list>
- <p>The tuple <c>{hwaddr,_}</c> is not returned on Solaris, as the
- hardware address historically belongs to the link layer and only
- the superuser can read such addresses.</p>
- <warning>
- <p>On Windows, the data is fetched from different OS API functions,
- so the <c><anno>Netmask</anno></c> and <c><anno>Broadaddr</anno></c>
- values can be calculated, just as some <c><anno>Flag</anno></c>
- values. Report flagrant bugs.</p>
- </warning>
+ <p>
+ Returns a list of 2-tuples containing interface names and
+ the interfaces' addresses. <c><anno>Ifname</anno></c>
+ is a Unicode string and
+ <c><anno>Ifopts</anno></c> is a list of
+ interface address description tuples.
+ </p>
+ <p>
+ The interface address description tuples
+ are documented under the type of the
+ <seealso marker="#type-getifaddrs_ifopts">
+ <c><anno>Ifopts</anno></c>
+ </seealso>
+ value.
+ </p>
+ </desc>
+ </func>
+
+ <func>
+ <name since="OTP 21.2">getifaddrs(Opts) ->
+ {ok, [{Ifname, Ifopts}]} | {error, Posix}
+ </name>
+ <fsummary>Return a list of interfaces and their addresses.</fsummary>
+ <type>
+ <v>
+ Opts = [{netns, Namespace}]
+ </v>
+ <v>
+ Namespace =
+ <seealso marker="file#type-filename_all">
+ file:filename_all()
+ </seealso>
+ </v>
+ <v>Ifname = string()</v>
+ <v>
+ Ifopts =
+ <seealso marker="#type-getifaddrs_ifopts">
+ getifaddrs_ifopts()
+ </seealso>
+ </v>
+ <v>Posix = <seealso marker="#type-posix">posix()</seealso></v>
+ </type>
+ <desc>
+ <p>
+ The same as
+ <seealso marker="#getifaddrs/0"><c>getifaddrs/0</c></seealso>
+ but the option
+ <c>{netns, Namespace}</c> sets a network namespace
+ for the OS call, on platforms that support that feature.
+ </p>
+ <p>
+ See the socket option
+ <seealso marker="#option-netns">
+ <c>{netns, Namespace}</c>
+ </seealso>
+ under
+ <seealso marker="#setopts/2"><c>setopts/2</c></seealso>.
+ </p>
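+ <p>
+ For illustration only, a sketch listing the interface names seen in a
+ named network namespace (assuming a namespace file created as
+ described for the socket option):
+ </p>
+ <code type="none"><![CDATA[
+netns_ifnames() ->
+    {ok, IfList} = inet:getifaddrs([{netns, "/var/run/netns/example"}]),
+    [IfName || {IfName, _Ifopts} <- IfList].]]></code>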
</desc>
</func>
<func>
- <name name="getopts" arity="2"/>
+ <name name="getopts" arity="2" since=""/>
<fsummary>Get one or more options for a socket.</fsummary>
<desc>
<p>Gets one or more options for a socket. For a list of available
options, see
- <seealso marker="#setopts/2"><c>setopts/2</c></seealso>.</p>
+ <seealso marker="#setopts/2"><c>setopts/2</c></seealso>.
+ See also the description for the type
+ <seealso marker="gen_tcp#type-pktoptions_value">
+ <c>gen_tcp:pktoptions_value()</c>
+ </seealso>.</p>
<p>The number of elements in the returned
<c><anno>OptionValues</anno></c>
list does not necessarily correspond to the number of options
@@ -359,7 +487,7 @@ fe80::204:acff:fe17:bf38
socket options not (explicitly) supported by the emulator. The
use of raw socket options makes the code non-portable, but
allows the Erlang programmer to take advantage of unusual features
- present on the current platform.</p>
+ present on a particular platform.</p>
<p><c>RawOptReq</c> consists of tag <c>raw</c> followed
by the protocol level, the option number, and either a binary
or the size, in bytes, of the
@@ -404,8 +532,8 @@ get_tcpi_sacked(Sock) ->
</func>
<func>
- <name name="getstat" arity="1"/>
- <name name="getstat" arity="2"/>
+ <name name="getstat" arity="1" since=""/>
+ <name name="getstat" arity="2" since=""/>
<fsummary>Get one or more statistic options for a socket.</fsummary>
<type name="stat_option"/>
<desc>
@@ -461,7 +589,62 @@ get_tcpi_sacked(Sock) ->
</func>
<func>
- <name name="ntoa" arity="1" />
+ <name name="i" arity="0" since="OTP 21.0"/>
+ <name name="i" arity="1" since="OTP 21.0"/>
+ <name name="i" arity="2" since="OTP 21.0"/>
+ <fsummary>Displays information and statistics about sockets on the terminal</fsummary>
+ <desc>
+ <p>
+ Lists all TCP, UDP and SCTP sockets, including those that the Erlang runtime system uses as well as
+ those created by the application.
+ </p>
+ <p>
+ The following options are available:
+ </p>
+
+ <taglist>
+ <tag><c>port</c></tag>
+ <item>
+ <p>The internal index of the port.</p>
+ </item>
+ <tag><c>module</c></tag>
+ <item>
+ <p>The callback module of the socket.</p>
+ </item>
+ <tag><c>recv</c></tag>
+ <item>
+ <p>Number of bytes received by the socket.</p>
+ </item>
+ <tag><c>sent</c></tag>
+ <item>
+ <p>Number of bytes sent from the socket.</p>
+ </item>
+ <tag><c>owner</c></tag>
+ <item>
+ <p>The socket owner process.</p>
+ </item>
+ <tag><c>local_address</c></tag>
+ <item>
+ <p>The local address of the socket.</p>
+ </item>
+ <tag><c>foreign_address</c></tag>
+ <item>
+ <p>The address and port of the other end of the connection.</p>
+ </item>
+ <tag><c>state</c></tag>
+ <item>
+ <p>The connection state.</p>
+ </item>
+ <tag><c>type</c></tag>
+ <item>
+ <p>STREAM or DGRAM or SEQPACKET.</p>
+ </item>
+ </taglist>
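+ <p>
+ A minimal usage sketch: calling <c>i/0</c> in the Erlang shell prints
+ one row per socket with the fields listed above.
+ </p>
+ <code type="none"><![CDATA[
+1> inet:i().]]></code>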
+ </desc>
+ </func>
+
+ <func>
+ <name name="ntoa" arity="1" since="OTP R16B02"/>
<fsummary>Convert IPv6/IPV4 address to ASCII.</fsummary>
<desc>
<p>Parses an
@@ -471,7 +654,7 @@ get_tcpi_sacked(Sock) ->
</func>
<func>
- <name name="parse_address" arity="1" />
+ <name name="parse_address" arity="1" since="OTP R16B"/>
<fsummary>Parse an IPv4 or IPv6 address.</fsummary>
<desc>
<p>Parses an IPv4 or IPv6 address string and returns an
@@ -482,7 +665,7 @@ get_tcpi_sacked(Sock) ->
</func>
<func>
- <name name="parse_ipv4_address" arity="1" />
+ <name name="parse_ipv4_address" arity="1" since="OTP R16B"/>
<fsummary>Parse an IPv4 address.</fsummary>
<desc>
<p>Parses an IPv4 address string and returns an
@@ -492,7 +675,7 @@ get_tcpi_sacked(Sock) ->
</func>
<func>
- <name name="parse_ipv4strict_address" arity="1" />
+ <name name="parse_ipv4strict_address" arity="1" since="OTP R16B"/>
<fsummary>Parse an IPv4 address strict.</fsummary>
<desc>
<p>Parses an IPv4 address string containing four fields, that is,
@@ -503,7 +686,7 @@ get_tcpi_sacked(Sock) ->
</func>
<func>
- <name name="parse_ipv6_address" arity="1" />
+ <name name="parse_ipv6_address" arity="1" since="OTP R16B"/>
<fsummary>Parse an IPv6 address.</fsummary>
<desc>
<p>Parses an IPv6 address string and returns an
@@ -514,7 +697,7 @@ get_tcpi_sacked(Sock) ->
</func>
<func>
- <name name="parse_ipv6strict_address" arity="1" />
+ <name name="parse_ipv6strict_address" arity="1" since="OTP R16B"/>
<fsummary>Parse an IPv6 address strict.</fsummary>
<desc>
<p>Parses an IPv6 address string and returns an
@@ -524,7 +707,20 @@ get_tcpi_sacked(Sock) ->
</func>
<func>
- <name name="parse_strict_address" arity="1" />
+ <name name="ipv4_mapped_ipv6_address" arity="1" since="OTP 21.0"/>
+ <fsummary>Convert to and from IPv4-mapped IPv6 address.</fsummary>
+ <desc>
+ <p>
+ Converts an IPv4 address to an IPv4-mapped IPv6 address,
+ or the reverse. When converting from an IPv6 address,
+ all but the 2 low words are ignored, so this function also
+ works on other types of addresses than IPv4-mapped.
+ </p>
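+ <p>
+ For illustration only, the expected results follow directly from the
+ IPv4-mapped IPv6 address format:
+ </p>
+ <code type="none"><![CDATA[
+1> inet:ipv4_mapped_ipv6_address({192,168,1,1}).
+{0,0,0,0,0,65535,49320,257}
+2> inet:ipv4_mapped_ipv6_address({0,0,0,0,0,16#ffff,16#c0a8,16#0101}).
+{192,168,1,1}]]></code>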
+ </desc>
+ </func>
+
+ <func>
+ <name name="parse_strict_address" arity="1" since="OTP R16B"/>
<fsummary>Parse an IPv4 or IPv6 address strict.</fsummary>
<desc>
<p>Parses an IPv4 or IPv6 address string and returns an
@@ -535,7 +731,7 @@ get_tcpi_sacked(Sock) ->
</func>
<func>
- <name name="peername" arity="1"/>
+ <name name="peername" arity="1" since=""/>
<fsummary>Return the address and port for the other end of a connection.
</fsummary>
<desc>
@@ -548,7 +744,7 @@ get_tcpi_sacked(Sock) ->
</func>
<func>
- <name name="peernames" arity="1"/>
+ <name name="peernames" arity="1" since="OTP R16B03"/>
<fsummary>Return all address/port numbers for the other end of a
connection.</fsummary>
<desc>
@@ -562,7 +758,7 @@ get_tcpi_sacked(Sock) ->
</func>
<func>
- <name name="peernames" arity="2"/>
+ <name name="peernames" arity="2" since="OTP R16B03"/>
<fsummary>Return all address/port numbers for the other end of a
connection.</fsummary>
<desc>
@@ -581,7 +777,7 @@ get_tcpi_sacked(Sock) ->
</func>
<func>
- <name name="port" arity="1"/>
+ <name name="port" arity="1" since=""/>
<fsummary>Return the local port number for a socket.</fsummary>
<desc>
<p>Returns the local port number for a socket.</p>
@@ -589,7 +785,7 @@ get_tcpi_sacked(Sock) ->
</func>
<func>
- <name name="setopts" arity="2"/>
+ <name name="setopts" arity="2" since=""/>
<fsummary>Set one or more options for a socket.</fsummary>
<desc>
<p>Sets one or more options for a socket.</p>
@@ -665,22 +861,23 @@ get_tcpi_sacked(Sock) ->
</item>
<tag><c>{buffer, Size}</c></tag>
<item>
- <p>The size of the user-level software buffer used by
- the driver.
- Not to be confused with options <c>sndbuf</c>
+ <p>The size of the user-level buffer used by
+ the driver. Not to be confused with options <c>sndbuf</c>
and <c>recbuf</c>, which correspond to the
- Kernel socket buffers. It is recommended
- to have <c>val(buffer) &gt;= max(val(sndbuf),val(recbuf))</c> to
+ Kernel socket buffers. For TCP it is recommended
+ to have <c>val(buffer) &gt;= val(recbuf)</c> to
avoid performance issues because of unnecessary copying.
+ For UDP the same recommendation applies, but the max should
+ not be larger than the MTU of the network path.
<c>val(buffer)</c> is automatically set to the above
- maximum when values <c>sndbuf</c> or <c>recbuf</c> are set.
- However, as the sizes set for <c>sndbuf</c> and <c>recbuf</c>
+ maximum when <c>recbuf</c> is set.
+ However, as the size set for <c>recbuf</c>
usually become larger, you are encouraged to use
<seealso marker="#getopts/2"><c>getopts/2</c></seealso>
to analyze the behavior of your operating system.</p>
<p>Note that this is also the maximum amount of data that can be
- received from a single recv call. If you are using higher than
- normal MTU consider setting buffer higher.</p>
+ received from a single recv call. If you are using higher than
+ normal MTU consider setting buffer higher.</p>
</item>
<tag><c>{delay_send, Boolean}</c></tag>
<item>
@@ -855,20 +1052,29 @@ get_tcpi_sacked(Sock) ->
</item>
<tag><c>{mode, Mode :: binary | list}</c></tag>
<item>
- <p>Received <c>Packet</c> is delivered as defined by <c>Mode</c>.
+ <p>
+ Received <c>Packet</c> is delivered as defined by <c>Mode</c>.
</p>
</item>
- <tag><c>{netns, Namespace :: file:filename_all()}</c></tag>
+ <tag>
+ <marker id="option-netns"/>
+ <c>{netns, Namespace :: file:filename_all()}</c>
+ </tag>
<item>
- <p>Sets a network namespace for the socket. Parameter
+ <p>
+ Sets a network namespace for the socket. Parameter
<c>Namespace</c> is a filename defining the namespace, for
example, <c>"/var/run/netns/example"</c>, typically created by
command <c>ip netns add example</c>. This option must be used in
a function call that creates a socket, that is,
<seealso marker="gen_tcp#connect/3"><c>gen_tcp:connect/3,4</c></seealso>,
<seealso marker="gen_tcp#listen/2"><c>gen_tcp:listen/2</c></seealso>,
- <seealso marker="gen_udp#open/1"><c>gen_udp:open/1,2</c></seealso>, or
- <seealso marker="gen_sctp#open/0"><c>gen_sctp:open/0,1,2</c></seealso>.</p>
+ <seealso marker="gen_udp#open/1"><c>gen_udp:open/1,2</c></seealso>
+ or
+ <seealso marker="gen_sctp#open/0"><c>gen_sctp:open/0,1,2</c></seealso>,
+ and also
+ <seealso marker="#getifaddrs/1"><c>getifaddrs/1</c></seealso>.
+ </p>
<p>This option uses the Linux-specific syscall
<c>setns()</c>, such as in Linux kernel 3.0 or later,
and therefore only exists when the runtime system
@@ -944,6 +1150,18 @@ setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp</code>
is turned on for the socket, which means that also small
amounts of data are sent immediately.</p>
</item>
+ <tag><c>{nopush, Boolean}</c>(TCP/IP sockets)</tag>
+ <item>
+ <p>This translates to <c>TCP_NOPUSH</c> on BSD and
+ to <c>TCP_CORK</c> on Linux.</p>
+ <p>If <c>Boolean == true</c>, the corresponding option
+ is turned on for the socket, which means that small
+ amounts of data are accumulated until a full MSS-worth
+ of data is available or this option is turned off.</p>
+ <p>Note that while the <c>TCP_NOPUSH</c> socket option is available on OSX,
+ its semantics are very different (for example, unsetting it does not cause
+ immediate send of accumulated data). Hence, the <c>nopush</c> option is
+ intentionally ignored on OSX.</p>
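+ <p>For illustration only, a sketch where <c>Sock</c>, <c>Header</c>,
+ and <c>Body</c> are assumed to exist already:</p>
+ <code type="none"><![CDATA[
+ok = inet:setopts(Sock, [{nopush, true}]),
+ok = gen_tcp:send(Sock, Header),
+ok = gen_tcp:send(Sock, Body),
+%% Turning the option off again pushes out any buffered partial segment.
+ok = inet:setopts(Sock, [{nopush, false}])]]></code>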
+ </item>
<tag><c>{packet, PacketType}</c>(TCP/IP sockets)</tag>
<item>
<p><marker id="packet"/>Defines the type of packets to use for a socket.
@@ -1045,6 +1263,100 @@ setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp</code>
the socket. You are encouraged to use
<seealso marker="#getopts/2"><c>getopts/2</c></seealso>
to retrieve the size set by your operating system.</p>
+ <marker id="option-recvtclass"></marker>
+ </item>
+ <tag><c>{recvtclass, Boolean}</c></tag>
+ <item>
+ <p>
+ If set to <c>true</c>, activates returning the received
+ <c>TCLASS</c> value on platforms that implement
+ the protocol <c>IPPROTO_IPV6</c>
+ option <c>IPV6_RECVTCLASS</c> or <c>IPV6_2292RECVTCLASS</c>
+ for the socket.
+ The value is returned as a <c>{tclass,TCLASS}</c> tuple
+ regardless of whether the platform returns an <c>IPV6_TCLASS</c>
+ or an <c>IPV6_RECVTCLASS</c> CMSG value.
+ </p>
+ <p>
+ For packet oriented sockets that support receiving
+ ancillary data with the payload data
+ (<c>gen_udp</c> and <c>gen_sctp</c>),
+ the <c>TCLASS</c> value is returned
+ in an extended return tuple contained in an
+ <seealso marker="inet#type-ancillary_data">
+ ancillary data
+ </seealso>
+ list.
+ For stream oriented sockets (<c>gen_tcp</c>)
+ the only way to get the <c>TCLASS</c>
+ value is if the platform supports the
+ <seealso marker="gen_tcp#type-pktoptions_value">
+ <c>pktoptions</c>
+ </seealso>
+ option.
+ </p>
+ <marker id="option-recvtos"></marker>
+ </item>
+ <tag><c>{recvtos, Boolean}</c></tag>
+ <item>
+ <p>
+ If set to <c>true</c>, activates returning the received
+ <c>TOS</c> value on platforms that implement
+ the protocol <c>IPPROTO_IP</c> option <c>IP_RECVTOS</c>
+ for the socket.
+ The value is returned as a <c>{tos,TOS}</c> tuple
+ regardless of whether the platform returns an <c>IP_TOS</c>
+ or an <c>IP_RECVTOS</c> CMSG value.
+ </p>
+ <p>
+ For packet oriented sockets that support receiving
+ ancillary data with the payload data
+ (<c>gen_udp</c> and <c>gen_sctp</c>),
+ the <c>TOS</c> value is returned
+ in an extended return tuple contained in an
+ <seealso marker="inet#type-ancillary_data">
+ ancillary data
+ </seealso>
+ list.
+ For stream oriented sockets (<c>gen_tcp</c>)
+ the only way to get the <c>TOS</c>
+ value is if the platform supports the
+ <seealso marker="gen_tcp#type-pktoptions_value">
+ <c>pktoptions</c>
+ </seealso>
+ option.
+ </p>
+ <marker id="option-recvttl"></marker>
+ </item>
+ <tag><c>{recvttl, Boolean}</c></tag>
+ <item>
+ <p>
+ If set to <c>true</c>, activates returning the received
+ <c>TTL</c> value on platforms that implement
+ the protocol <c>IPPROTO_IP</c> option <c>IP_RECVTTL</c>
+ for the socket.
+ The value is returned as a <c>{ttl,TTL}</c> tuple
+ regardless of whether the platform returns an <c>IP_TTL</c>
+ or an <c>IP_RECVTTL</c> CMSG value.
+ </p>
+ <p>
+ For packet oriented sockets that support receiving
+ ancillary data with the payload data
+ (<c>gen_udp</c> and <c>gen_sctp</c>),
+ the <c>TTL</c> value is returned
+ in an extended return tuple contained in an
+ <seealso marker="inet#type-ancillary_data">
+ ancillary data
+ </seealso>
+ list.
+ For stream oriented sockets (<c>gen_tcp</c>)
+ the only way to get the <c>TTL</c>
+ value is if the platform supports the
+ <seealso marker="gen_tcp#type-pktoptions_value">
+ <c>pktoptions</c>
+ </seealso>
+ option.
+ </p>
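+ <p>
+ For illustration only, a sketch that requests TTL and TOS values
+ for datagrams received on a UDP socket:
+ </p>
+ <code type="none"><![CDATA[
+{ok, Socket} = gen_udp:open(0, [binary, {active, true}]),
+ok = inet:setopts(Socket, [{recvttl, true}, {recvtos, true}]),
+%% Received datagrams are now delivered as
+%% {udp, Socket, IP, Port, AncData, Packet} messages, where AncData
+%% is an ancillary data list such as [{ttl,64},{tos,0}].]]></code>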
</item>
<tag><c>{reuseaddr, Boolean}</c></tag>
<item>
@@ -1080,7 +1392,7 @@ setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp</code>
<seealso marker="gen_tcp#recv/2"><c>gen_tcp:recv/2</c></seealso>
gets <c>{error, closed}</c>. In active
mode, the controlling process receives a
- <c>{tcp_close, Socket}</c> message, indicating that the
+ <c>{tcp_closed, Socket}</c> message, indicating that the
peer has closed the connection.</p>
<p>Setting this option to <c>true</c> allows you to
distinguish between a connection that was closed normally,
@@ -1177,7 +1489,7 @@ inet:setopts(Sock,[{raw,6,8,<<30:32/native>>}]),]]></code>
</func>
<func>
- <name name="sockname" arity="1"/>
+ <name name="sockname" arity="1" since=""/>
<fsummary>Return the local address and port number for a socket.
</fsummary>
<desc>
@@ -1190,7 +1502,7 @@ inet:setopts(Sock,[{raw,6,8,<<30:32/native>>}]),]]></code>
</func>
<func>
- <name name="socknames" arity="1"/>
+ <name name="socknames" arity="1" since="OTP R16B03"/>
<fsummary>Return all local address/port numbers for a socket.</fsummary>
<desc>
<p>Equivalent to
@@ -1200,7 +1512,7 @@ inet:setopts(Sock,[{raw,6,8,<<30:32/native>>}]),]]></code>
</func>
<func>
- <name name="socknames" arity="2"/>
+ <name name="socknames" arity="2" since="OTP R16B03"/>
<fsummary>Return all local address/port numbers for a socket.</fsummary>
<desc>
<p>Returns a list of all local address/port number pairs for a socket
@@ -1214,7 +1526,7 @@ inet:setopts(Sock,[{raw,6,8,<<30:32/native>>}]),]]></code>
For one-to-many style sockets, the special value <c>0</c>
is defined to mean that the returned addresses must be
without any particular association.
- How different SCTP implementations interprets this varies somewhat.
+ How different SCTP implementations interpret this varies somewhat.
</p>
</desc>
</func>
diff --git a/lib/kernel/doc/src/inet_res.xml b/lib/kernel/doc/src/inet_res.xml
index 4ada4203c0..1904e371f7 100644
--- a/lib/kernel/doc/src/inet_res.xml
+++ b/lib/kernel/doc/src/inet_res.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>2009</year><year>2015</year>
+ <year>2009</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -28,7 +28,7 @@
<date>2009-09-11</date>
<rev>A</rev>
</header>
- <module>inet_res</module>
+ <module since="">inet_res</module>
<modulesummary>A rudimentary DNS client.</modulesummary>
<description>
<p>This module performs DNS name resolving to recursive name servers.</p>
@@ -130,7 +130,7 @@ dns_header() = DnsHeader
inet_dns:header(DnsHeader) ->
[ {id, integer()}
| {qr, boolean()}
- | {opcode, 'query' | iquery | status | integer()}
+ | {opcode, query | iquery | status | integer()}
| {aa, boolean()}
| {tc, boolean()}
| {rd, boolean()}
@@ -185,8 +185,8 @@ inet_dns:record_type(_) -> undefined.</pre>
<funcs>
<func>
- <name name="getbyname" arity="2"/>
- <name name="getbyname" arity="3"/>
+ <name name="getbyname" arity="2" since=""/>
+ <name name="getbyname" arity="3" since=""/>
<fsummary>Resolve a DNS record of the specified type for the specified
host.</fsummary>
<desc>
@@ -205,8 +205,8 @@ inet_dns:record_type(_) -> undefined.</pre>
</func>
<func>
- <name name="gethostbyaddr" arity="1"/>
- <name name="gethostbyaddr" arity="2"/>
+ <name name="gethostbyaddr" arity="1" since=""/>
+ <name name="gethostbyaddr" arity="2" since=""/>
<fsummary>Return a hostent record for the host with the specified
address.</fsummary>
<desc>
@@ -217,9 +217,9 @@ inet_dns:record_type(_) -> undefined.</pre>
</func>
<func>
- <name name="gethostbyname" arity="1"/>
- <name name="gethostbyname" arity="2"/>
- <name name="gethostbyname" arity="3"/>
+ <name name="gethostbyname" arity="1" since=""/>
+ <name name="gethostbyname" arity="2" since=""/>
+ <name name="gethostbyname" arity="3" since=""/>
<fsummary>Return a hostent record for the host with the specified name.
</fsummary>
<desc>
@@ -230,16 +230,14 @@ inet_dns:record_type(_) -> undefined.</pre>
<seealso marker="#getbyname/2"><c>getbyname/2,3</c></seealso>.
</p>
<p>If resolver option <c>inet6</c> is <c>true</c>,
- an IPv6 address is looked up. If that fails,
- the IPv4 address is looked up and returned on
- IPv6-mapped IPv4 format.</p>
+ an IPv6 address is looked up.</p>
</desc>
</func>
<func>
- <name name="lookup" arity="3"/>
- <name name="lookup" arity="4"/>
- <name name="lookup" arity="5"/>
+ <name name="lookup" arity="3" since=""/>
+ <name name="lookup" arity="4" since=""/>
+ <name name="lookup" arity="5" since=""/>
<fsummary>Resolve the DNS data for the record of the specified type
and class for the specified name.</fsummary>
<desc>
@@ -259,9 +257,9 @@ inet_dns:record_type(_) -> undefined.</pre>
</func>
<func>
- <name name="resolve" arity="3"/>
- <name name="resolve" arity="4"/>
- <name name="resolve" arity="5"/>
+ <name name="resolve" arity="3" since=""/>
+ <name name="resolve" arity="4" since=""/>
+ <name name="resolve" arity="5" since=""/>
<fsummary>Resolve a DNS record of the specified type and class
for the specified name.</fsummary>
<desc>
@@ -328,9 +326,9 @@ example_lookup(Name, Class, Type) ->
<funcs>
<func>
- <name name="nslookup" arity="3"/>
- <name name="nslookup" arity="4" clause_i="1"/>
- <name name="nslookup" arity="4" clause_i="2"/>
+ <name name="nslookup" arity="3" since=""/>
+ <name name="nslookup" arity="4" clause_i="1" since=""/>
+ <name name="nslookup" arity="4" clause_i="2" since=""/>
<fsummary>Resolve a DNS record of the specified type and class for the
specified name.</fsummary>
<type variable="Name"/>
@@ -346,8 +344,8 @@ example_lookup(Name, Class, Type) ->
</func>
<func>
- <name name="nnslookup" arity="4"/>
- <name name="nnslookup" arity="5"/>
+ <name name="nnslookup" arity="4" since=""/>
+ <name name="nnslookup" arity="5" since=""/>
<fsummary>Resolve a DNS record of the specified type and class
for the specified name.</fsummary>
<desc>
diff --git a/lib/kernel/doc/src/introduction_chapter.xml b/lib/kernel/doc/src/introduction_chapter.xml
new file mode 100644
index 0000000000..d02b1a2ee5
--- /dev/null
+++ b/lib/kernel/doc/src/introduction_chapter.xml
@@ -0,0 +1,63 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2017</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Introduction</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev></rev>
+ <file>introduction.xml</file>
+ </header>
+
+ <section>
+ <title>Scope</title>
+ <p>The Kernel application has all the code necessary to run
+ the Erlang runtime system: file servers, code servers,
+ and so on.</p>
+ <p>The Kernel application is the first application started. It is
+ mandatory in the sense that the minimal system based on
+ Erlang/OTP consists of Kernel and STDLIB. Kernel
+ contains the following functional areas:</p>
+ <list type="bulleted">
+ <item>Start, stop, supervision, configuration, and distribution of applications</item>
+ <item>Code loading</item>
+ <item>Logging</item>
+ <item>Global name service</item>
+ <item>Supervision of Erlang/OTP</item>
+ <item>Communication with sockets</item>
+ <item>Operating system interface</item>
+ </list>
+ </section>
+
+ <section>
+ <title>Prerequisites</title>
+ <p>It is assumed that the reader is familiar with the Erlang programming
+ language.</p>
+ </section>
+</chapter>
+
+
diff --git a/lib/kernel/doc/src/kernel_app.xml b/lib/kernel/doc/src/kernel_app.xml
index 0762cebc94..dbd83e1a6e 100644
--- a/lib/kernel/doc/src/kernel_app.xml
+++ b/lib/kernel/doc/src/kernel_app.xml
@@ -42,7 +42,6 @@
<item>Start, stop, supervision, configuration, and distribution of applications</item>
<item>Code loading</item>
<item>Logging</item>
- <item>Error logging</item>
<item>Global name service</item>
<item>Supervision of Erlang/OTP</item>
<item>Communication with sockets</item>
@@ -51,10 +50,13 @@
</description>
<section>
- <title>Error Logger Event Handlers</title>
- <p>Two standard error logger event handlers are defined in
- the Kernel application. These are described in
- <seealso marker="error_logger"><c>error_logger(3)</c></seealso>.</p>
+ <title>Logger Handlers</title>
+ <p>Two standard logger handlers are defined in
+ the Kernel application. These are described in the
+ <seealso marker="logger_chapter">Kernel User's Guide</seealso>,
+ and in the <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>
+ and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c>
+ </seealso> manual pages.</p>
</section>
<section>
@@ -113,26 +115,12 @@
</section>
<section>
+ <marker id="configuration"/>
<title>Configuration</title>
<p>The following configuration parameters are defined for the Kernel
application. For more information about configuration parameters,
see file <seealso marker="app"><c>app(4)</c></seealso>.</p>
<taglist>
- <tag><c>browser_cmd = string() | {M,F,A}</c></tag>
- <item>
- <p>When pressing the <em>Help</em> button in a tool such as Debugger,
- the help text (an HTML file <c>File</c>) is by default
- displayed in a Netscape browser, which is required to be
- operational. This parameter can be used to change the command for
- how to display the help text if another browser than Netscape
- is preferred, or if another platform than Unix or Windows is
- used.</p>
- <p>If set to a string <c>Command</c>, the command
- <c>"Command File"</c> is evaluated using
- <seealso marker="os#cmd/1"><c>os:cmd/1</c></seealso>.</p>
- <p>If set to a module-function-args tuple, <c>{M,F,A}</c>,
- the call <c>apply(M,F,[File|A])</c> is evaluated.</p>
- </item>
<tag><c>distributed = [Distrib]</c></tag>
<item>
<p>Specifies which applications that are distributed and on which
@@ -176,65 +164,74 @@
<p>Permissions are described in
<seealso marker="application#permit/2"><c>application:permit/2</c></seealso>.</p>
</item>
- <tag><c>error_logger = Value</c></tag>
- <item>
- <p><c>Value</c> is one of:</p>
- <taglist>
- <tag><c>tty</c></tag>
- <item><p>Installs the standard event handler, which prints error
- reports to <c>stdio</c>. This is the default option.</p></item>
- <tag><c>{file, FileName}</c></tag>
- <item><p>Installs the standard event handler, which prints error
- reports to file <c>FileName</c>, where <c>FileName</c>
- is a string. The file is opened with encoding UTF-8.</p></item>
- <tag><c>false</c></tag>
- <item>
- <p>No standard event handler is installed, but
- the initial, primitive event handler is kept, printing
- raw event messages to <c>tty</c>.</p>
- </item>
- <tag><c>silent</c></tag>
- <item>
- <p>Error logging is turned off.</p>
- </item>
- </taglist>
+ <tag><marker id="logger"/><c>logger = [Config]</c></tag>
+ <item>
+ <p>Specifies the configuration
+ for <seealso marker="logger">Logger</seealso>, except the
+ primary log level, which is specified
+ with <seealso marker="#logger_level"><c>logger_level</c></seealso>,
+ and the compatibility
+ with <seealso marker="sasl:error_logging">SASL Error
+ Logging</seealso>, which is specified
+ with <seealso marker="#logger_sasl_compatible">
+ <c>logger_sasl_compatible</c></seealso>.</p>
+          <p>The <c>logger</c> parameter is described in
+ section <seealso marker="logger_chapter#logger_parameter">
+ Logging</seealso> in the Kernel User's Guide.</p>
+ </item>
+ <tag><marker id="logger_level"/><c>logger_level = Level</c></tag>
+ <item>
+ <p>Specifies the primary log level for Logger. Log events with
+ the same, or a more severe level, pass through the primary
+ log level check. See
+ section <seealso marker="logger_chapter">Logging</seealso>
+ in the Kernel User's Guide for more information about Logger
+ and log levels.</p>
+ <p><c>Level = emergency | alert | critical | error | warning |
+ notice | info | debug | all | none</c></p>
+ <p>To change the primary log level at runtime, use
+ <seealso marker="logger#set_primary_config/2">
+ <c>logger:set_primary_config(level, Level)</c></seealso>.</p>
+ <p>Defaults to <c>notice</c>.</p>
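+          <p>For example, a minimal <c>sys.config</c> entry that sets
+            the primary log level to <c>warning</c> could look like
+            this:</p>
+          <code type="none">
+[{kernel, [{logger_level, warning}]}].
+          </code>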
+ </item>
+ <tag><marker id="logger_sasl_compatible"/>
+ <c>logger_sasl_compatible = true | false</c></tag>
+ <item>
+ <p>Specifies if Logger behaves backwards compatible with the
+ SASL error logging functionality from releases prior to
+ Erlang/OTP 21.0.</p>
+ <p>If this parameter is set to <c>true</c>, the default Logger
+ handler does not log any progress-, crash-, or supervisor
+ reports. If the SASL application is then started, it adds a
+ Logger handler named <c>sasl</c>, which logs these events
+ according to values of the SASL configuration
+ parameter <c>sasl_error_logger</c>
+ and <c>sasl_errlog_type</c>.</p>
+ <p>See section
+ <seealso marker="sasl:sasl_app#deprecated_error_logger_config">
+ Deprecated Error Logger Event Handlers and
+ Configuration</seealso> in the sasl(6) manual page for
+ information about the SASL configuration parameters.</p>
+ <p>See section <seealso marker="sasl:error_logging">SASL Error
+ Logging</seealso> in the SASL User's Guide, and
+ section <seealso marker="logger_chapter#compatibility">Backwards
+ Compatibility with error_logger</seealso> in the Kernel
+ User's Guide for information about the SASL error logging
+ functionality, and how Logger can be backwards compatible
+ with this.</p>
+ <p>Defaults to <c>false</c>.</p>
+ <note>
+ <p>If this parameter is set to <c>true</c>,
+ <c>sasl_errlog_type</c> indicates that progress reports
+ shall be logged, and the configured primary log level
+ is <c>notice</c> or more severe, then SASL automatically
+ sets the primary log level to <c>info</c>. That is, this
+ setting can potentially overwrite the value of the Kernel
+ configuration parameter <c>logger_level</c>. This is to
+ allow progress reports, which have log level <c>info</c>,
+ to be forwarded to the handlers.</p>
+ </note>
</item>
- <tag><c>error_logger_format_depth = Depth</c></tag>
- <item>
- <marker id="error_logger_format_depth"></marker>
- <p>Can be used to limit the size of the
- formatted output from the error logger event handlers.</p>
-
- <note><p>This configuration parameter was introduced in OTP 18.1
- and is experimental. Based on user feedback, it
- can be changed or improved in future releases, for example,
- to gain better control over how to limit the size of the
- formatted output. We have no plans to remove this
- new feature entirely, unless it turns out to be
- useless.</p></note>
-
- <p><c>Depth</c> is a positive integer representing the maximum
- depth to which terms are printed by the error logger event
- handlers included in OTP. This
- configuration parameter is used by the two event handlers
- defined by the Kernel application and the two event
- handlers in the SASL application.
- (If you have implemented your own error handlers, this configuration
- parameter has no effect on them.)</p>
-
- <p><c>Depth</c> is used as follows: Format strings
- passed to the event handlers are rewritten.
- The format controls <c>~p</c> and <c>~w</c> are replaced with
- <c>~P</c> and <c>~W</c>, respectively, and <c>Depth</c> is
- used as the depth parameter. For details, see
- <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso>
- in STDLIB.</p>
-
- <note><p>A reasonable starting value for <c>Depth</c> is
- <c>30</c>. We recommend to test crashing various processes in your
- application, examine the logs from the crashes, and then
- increase or decrease the value.</p></note>
- </item>
<tag><c>global_groups = [GroupTuple]</c></tag>
<item>
<marker id="global_groups"></marker>
@@ -286,9 +283,8 @@
</item>
<tag><c>inet_parse_error_log = silent</c></tag>
<item>
- <p>If set, no
- <c>error_logger</c> messages are generated when erroneous
- lines are found and skipped in the various Inet configuration
+ <p>If set, no log events are issued when erroneous lines are
+ found and skipped in the various Inet configuration
files.</p>
</item>
<tag><c>inetrc = Filename</c></tag>
@@ -314,24 +310,31 @@
<tag><c>net_ticktime = TickTime</c></tag>
<item>
<marker id="net_ticktime"></marker>
- <p>Specifies the <c>net_kernel</c> tick time. <c>TickTime</c>
- is specified in seconds. Once every <c>TickTime/4</c> second, all
- connected nodes are ticked (if anything else is written
- to a node). If nothing is received from another node
- within the last four tick times, that node is considered
- to be down. This ensures that nodes that are not responding,
- for reasons such as hardware errors, are considered to be
- down.</p>
- <p>The time <c>T</c>, in which a node that is not responding is
- detected, is calculated as <c><![CDATA[MinT < T < MaxT]]></c>, where:</p>
+ <p>Specifies the <c>net_kernel</c> tick time in seconds. This is the
+ approximate time a connected node may be unresponsive until it is
+ considered down and thereby disconnected.</p>
+ <p>Once every <c>TickTime/4</c> seconds, each connected node is ticked
+ if nothing has been sent to it during that last <c>TickTime/4</c>
+ interval. A tick is a small package sent on the connection. A connected
+ node is considered to be down if no ticks or payload packages have been
+ received during the last four <c>TickTime/4</c> intervals. This ensures
+ that nodes that are not responding, for reasons such as hardware errors,
+ are considered to be down.</p>
+ <p>As the availability is only checked every <c>TickTime/4</c> seconds,
+        the actual time <c>T</c> a node has been unresponsive when
+ detected may vary between <c>MinT</c> and <c>MaxT</c>,
+ where:</p>
<code type="none">
MinT = TickTime - TickTime / 4
MaxT = TickTime + TickTime / 4</code>
- <p><c>TickTime</c> defaults to <c>60</c> (seconds). Thus,
- <c><![CDATA[45 < T < 75]]></c> seconds.</p>
- <p>Notice that <em>all</em> communicating nodes are to have the <em>same</em>
- <c>TickTime</c> value specified.</p>
- <p>Normally, a terminating node is detected immediately.</p>
+ <p><c>TickTime</c> defaults to <c>60</c> seconds. Thus,
+ <c><![CDATA[45 < T < 75]]></c> seconds.</p>
+ <p>Notice that <em>all</em> communicating nodes are to have the
+ <em>same</em> <c>TickTime</c> value specified, as it determines both the
+      frequency of outgoing ticks and the expected frequency of incoming
+ ticks.</p>
+ <p>Normally, a terminating node is detected immediately by the transport
+ protocol (like TCP/IP).</p>
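+      <p>For example, a <c>sys.config</c> entry setting a shorter tick
+        time (the value <c>20</c> is only an example) could look like
+        this:</p>
+      <code type="none">
+[{kernel, [{net_ticktime, 20}]}].
+      </code>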
</item>
<tag><c>shutdown_timeout = integer() | infinity</c></tag>
<item>
@@ -497,6 +500,39 @@ MaxT = TickTime + TickTime / 4</code>
</section>
<section>
+ <title>Deprecated Configuration Parameters</title>
+ <p>In Erlang/OTP 21.0, a new API for logging was added. The
+ old <c>error_logger</c> event manager, and event handlers
+ running on this manager, still work, but they are no longer used
+ by default.</p>
+ <p>The following application configuration parameters can still be
+ set, but they are only used if the corresponding configuration
+ parameters for Logger are not set.</p>
+ <taglist>
+ <tag><c>error_logger</c></tag>
+ <item>Replaced by setting the <seealso
+ marker="logger_std_h#type"><c>type</c></seealso>, and possibly
+ <seealso marker="logger_std_h#file"><c>file</c></seealso> and
+ <seealso marker="logger_std_h#modes"><c>modes</c></seealso>
+ parameters of the default <c>logger_std_h</c> handler. Example:
+ <code type="none">
+erl -kernel logger '[{handler,default,logger_std_h,#{config=>#{file=>"/tmp/erlang.log"}}}]'
+ </code>
+ </item>
+ <tag><c>error_logger_format_depth</c></tag>
+ <item>Replaced by setting the <seealso marker="logger_formatter#depth"><c>depth</c></seealso>
+    parameter of the default handler's formatter. Example:
+ <code type="none">
+erl -kernel logger '[{handler,default,logger_std_h,#{formatter=>{logger_formatter,#{legacy_header=>true,template=>[{logger_formatter,header},"\n",msg,"\n"],depth=>10}}}}]'
+ </code>
+ </item>
+ </taglist>
+ <p>See <seealso marker="logger_chapter#compatibility">Backwards
+ compatibility with error_logger</seealso> for more
+ information.</p>
+ </section>
+
+ <section>
<title>See Also</title>
<p><seealso marker="app"><c>app(4)</c></seealso>,
<seealso marker="application"><c>application(3)</c></seealso>,
@@ -504,12 +540,12 @@ MaxT = TickTime + TickTime / 4</code>
<seealso marker="disk_log"><c>disk_log(3)</c></seealso>,
<seealso marker="erl_boot_server"><c>erl_boot_server(3)</c></seealso>,
<seealso marker="erl_ddll"><c>erl_ddll(3)</c></seealso>,
- <seealso marker="error_logger"><c>error_logger(3)</c></seealso>,
<seealso marker="file"><c>file(3)</c></seealso>,
<seealso marker="global"><c>global(3)</c></seealso>,
<seealso marker="global_group"><c>global_group(3)</c></seealso>,
<seealso marker="heart"><c>heart(3)</c></seealso>,
<seealso marker="inet"><c>inet(3)</c></seealso>,
+ <seealso marker="logger"><c>logger(3)</c></seealso>,
<seealso marker="net_kernel"><c>net_kernel(3)</c></seealso>,
<seealso marker="os"><c>os(3)</c></seealso>,
<seealso marker="pg2"><c>pg2(3)</c></seealso>,
diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml
new file mode 100644
index 0000000000..5bdfcf91db
--- /dev/null
+++ b/lib/kernel/doc/src/logger.xml
@@ -0,0 +1,1355 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger.xml</file>
+ </header>
+ <module since="OTP 21.0">logger</module>
+ <modulesummary>API module for Logger, the standard logging facility
+ in Erlang/OTP.</modulesummary>
+
+ <description>
+ <p>This module implements the main API for logging in
+ Erlang/OTP. To create a log event, use the
+ <seealso marker="#logging_API">API functions</seealso> or the
+ log
+ <seealso marker="#macros">macros</seealso>, for example:</p>
+ <code>
+?LOG_ERROR("error happened because: ~p", [Reason]). % With macro
+logger:error("error happened because: ~p", [Reason]). % Without macro
+ </code>
+ <p>To configure the Logger backend,
+ use <seealso marker="kernel_app#logger">Kernel configuration
+ parameters</seealso>
+ or <seealso marker="#configuration_API">configuration
+ functions</seealso> in the Logger API.</p>
+
+ <p>By default, the Kernel application installs one log handler at
+ system start. This handler is named <c>default</c>. It receives
+ and processes standard log events produced by the Erlang runtime
+ system, standard behaviours and different Erlang/OTP
+ applications. The log events are by default printed to the
+ terminal.</p>
+    <p>If you want your system's logs to be printed to a file instead,
+ you must configure the default handler to do so. The simplest
+ way is to include the following in
+ your <seealso marker="config"><c>sys.config</c></seealso>:</p>
+ <code>
+[{kernel,
+ [{logger,
+ [{handler, default, logger_std_h,
+ #{config => #{file => "path/to/file.log"}}}]}]}].
+ </code>
+ <p>
+ For more information about:
+ </p>
+ <list type="bulleted">
+ <item>the Logger facility in general, see
+ the <seealso marker="logger_chapter">User's
+ Guide</seealso>.</item>
+ <item>how to configure Logger, see
+ the <seealso marker="logger_chapter#configuration">Configuration</seealso>
+ section in the User's Guide.</item>
+ <item>the built-in handlers,
+ see <seealso marker="logger_std_h">logger_std_h</seealso> and
+ <seealso marker="logger_disk_log_h">logger_disk_log_h</seealso>.</item>
+ <item>the built-in formatter,
+ see <seealso marker="logger_formatter">logger_formatter</seealso>.</item>
+ <item>built-in filters,
+ see <seealso marker="logger_filters">logger_filters</seealso>.</item>
+ </list>
+
+ <note>
+ <p>Since Logger is new in Erlang/OTP 21.0, we do reserve the right
+ to introduce changes to the Logger API and functionality in
+ patches following this release. These changes might or might not
+ be backwards compatible with the initial version.</p>
+ </note>
+
+ </description>
+
+ <datatypes>
+ <datatype>
+ <name name="filter"/>
+ <desc>
+ <p>A filter which can be installed as a handler filter, or as
+ a primary filter in Logger.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="filter_arg"/>
+ <desc>
+ <p>The second argument to the filter fun.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="filter_id"/>
+ <desc>
+ <p>A unique identifier for a filter.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="filter_return"/>
+ <desc>
+ <p>The return value from the filter fun.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="formatter_config"/>
+ <desc>
+ <p>Configuration data for the
+ formatter. See <seealso marker="logger_formatter">
+ <c>logger_formatter(3)</c></seealso>
+ for an example of a formatter implementation.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="handler_config"/>
+ <desc>
+ <p>Handler configuration data for Logger. The following
+ default values apply:</p>
+ <list>
+ <item><c>level => all</c></item>
+ <item><c>filter_default => log</c></item>
+ <item><c>filters => []</c></item>
+        <item><c>formatter => {logger_formatter, DefaultFormatterConfig}</c></item>
+ </list>
+ <p>In addition to these, the following fields are
+ automatically inserted by Logger, values taken from the
+ two first parameters
+ to <seealso marker="#add_handler-3"><c>add_handler/3</c></seealso>:</p>
+ <list>
+ <item><c>id => HandlerId</c></item>
+ <item><c>module => Module</c></item>
+ </list>
+      <p>These are read-only and cannot be changed at runtime.</p>
+ <p>Handler specific configuration data is inserted by the
+ handler callback itself, in a sub structure associated with
+ the field named <c>config</c>. See
+ the <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>
+ and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>
+        manual pages for information about the specific configuration
+ for these handlers.</p>
+ <p>See the <seealso marker="logger_formatter#type-config">
+ <c>logger_formatter(3)</c></seealso> manual page for
+ information about the default configuration for this
+ formatter.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="handler_id"/>
+ <desc>
+ <p>A unique identifier for a handler instance.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="level"/>
+ <desc>
+ <p>The severity level for the message to be logged.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="log_event"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="metadata"/>
+ <desc>
+ <p>Metadata for the log event.</p>
+ <p>Logger adds the following metadata to each log event:</p>
+ <list>
+ <item><c>pid => self()</c></item>
+ <item><c>gl => group_leader()</c></item>
+ <item><c>time => logger:timestamp()</c></item>
+ </list>
+ <p>When a log macro is used, Logger also inserts location
+ information:</p>
+ <list>
+ <item><c>mfa => {?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY}</c></item>
+ <item><c>file => ?FILE</c></item>
+ <item><c>line => ?LINE</c></item>
+ </list>
+ <p>You can add custom metadata, either by specifying a map as
+ the last parameter to any of the log macros or the API
+ functions, or by setting process metadata
+ with <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c></seealso>
+ or <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
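+      <p>For example, a sketch where the <c>request_id</c> key and the
+        <c>my_app</c> domain are hypothetical, application-specific
+        values:</p>
+      <code>
+logger:set_process_metadata(#{request_id => Id}),
+?LOG_ERROR("no connection", #{domain => [my_app, db]}).
+      </code>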
+ <p>Logger merges all the metadata maps before forwarding the
+ log event to the handlers. If the same keys occur, values
+ from the log call overwrite process metadata, which in turn
+        overwrites values set by Logger.</p>
+ <p>The following custom metadata keys have special meaning:</p>
+ <taglist>
+ <tag><c>domain</c></tag>
+ <item>
+ <p>The value associated with this key is used by filters
+ for grouping log events originating from, for example,
+ specific functional
+ areas. See <seealso marker="logger_filters#domain-2">
+ <c>logger_filters:domain/2</c></seealso>
+ for a description of how this field can be used.</p>
+ </item>
+ <tag><c>report_cb</c></tag>
+ <item>
+ <p>If the log message is specified as
+ a <seealso marker="#type-report"><c>report()</c></seealso>,
+ the <c>report_cb</c> key can be associated with a fun
+ (report callback) that converts the report to a format
+ string and arguments, or directly to a string. See the
+ type definition
+ of <seealso marker="#type-report_cb"><c>report_cb()</c></seealso>,
+ and
+ section <seealso marker="logger_chapter#log_message">Log
+ Message</seealso> in the User's Guide for more
+ information about report callbacks.</p>
+ </item>
+ </taglist>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="msg_fun"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="olp_config"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="primary_config"/>
+ <desc>
+ <p>Primary configuration data for Logger. The following
+ default values apply:</p>
+ <list>
+ <item><c>level => info</c></item>
+ <item><c>filter_default => log</c></item>
+ <item><c>filters => []</c></item>
+ </list>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="report"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="report_cb"/>
+ <desc>
+ <p>A fun which converts a <seealso marker="#type-report"><c>report()</c>
+ </seealso> to a format string and arguments, or directly to a string.
+ See section <seealso marker="logger_chapter#log_message">Log
+ Message</seealso> in the User's Guide for more
+ information.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="report_cb_config"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="timestamp"/>
+ <desc>
+ <p>A timestamp produced
+ with <seealso marker="#timestamp-0">
+ <c>logger:timestamp()</c></seealso>.</p>
+ </desc>
+ </datatype>
+ </datatypes>
+
+ <section>
+ <title>Macros</title>
+ <p>The following macros are defined in <c>logger.hrl</c>, which
+ is included in a module with the directive</p>
+ <code>
+ -include_lib("kernel/include/logger.hrl").</code>
+
+ <list>
+ <item><c>?LOG_EMERGENCY(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_EMERGENCY(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_ALERT(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_ALERT(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_CRITICAL(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_CRITICAL(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_ERROR(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_ERROR(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_WARNING(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_WARNING(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_NOTICE(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_NOTICE(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_INFO(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_INFO(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_DEBUG(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_DEBUG(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG(Level,StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG(Level,FunOrFormat,Args[,Metadata])</c></item>
+ </list>
+
+ <p>All macros expand to a call to Logger, where <c>Level</c> is
+ taken from the macro name, or from the first argument in the
+ case of the <c>?LOG</c> macro. Location data is added to the
+ metadata as described under
+ the <seealso marker="#type-metadata"><c>metadata()</c></seealso>
+ type definition.</p>
+
+ <p>The call is wrapped in a case statement and will be evaluated
+ only if <c>Level</c> is equal to or below the configured log
+ level.</p>
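+
+    <p>For example, a minimal sketch (function and variable names are
+      examples only):</p>
+    <code>
+-include_lib("kernel/include/logger.hrl").
+
+handle_timeout(Reason) ->
+    ?LOG_WARNING("timeout: ~p", [Reason]),
+    ?LOG(debug, #{event => timeout, reason => Reason}).
+    </code>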
+ </section>
+
+ <section>
+ <marker id="logging_API"/>
+ <title>Logging API functions</title>
+ </section>
+ <funcs>
+ <func>
+ <name since="OTP 21.0">emergency(StringOrReport[,Metadata])</name>
+ <name since="OTP 21.0">emergency(Format,Args[,Metadata])</name>
+ <name since="OTP 21.0">emergency(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>emergency</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(emergency,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name since="OTP 21.0">alert(StringOrReport[,Metadata])</name>
+ <name since="OTP 21.0">alert(Format,Args[,Metadata])</name>
+ <name since="OTP 21.0">alert(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>alert</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(alert,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name since="OTP 21.0">critical(StringOrReport[,Metadata])</name>
+ <name since="OTP 21.0">critical(Format,Args[,Metadata])</name>
+ <name since="OTP 21.0">critical(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>critical</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(critical,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name since="OTP 21.0">error(StringOrReport[,Metadata])</name>
+ <name since="OTP 21.0">error(Format,Args[,Metadata])</name>
+ <name since="OTP 21.0">error(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>error</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(error,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name since="OTP 21.0">warning(StringOrReport[,Metadata])</name>
+ <name since="OTP 21.0">warning(Format,Args[,Metadata])</name>
+ <name since="OTP 21.0">warning(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>warning</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(warning,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name since="OTP 21.0">notice(StringOrReport[,Metadata])</name>
+ <name since="OTP 21.0">notice(Format,Args[,Metadata])</name>
+ <name since="OTP 21.0">notice(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>notice</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(notice,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name since="OTP 21.0">info(StringOrReport[,Metadata])</name>
+ <name since="OTP 21.0">info(Format,Args[,Metadata])</name>
+ <name since="OTP 21.0">info(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>info</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(info,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name since="OTP 21.0">debug(StringOrReport[,Metadata])</name>
+ <name since="OTP 21.0">debug(Format,Args[,Metadata])</name>
+ <name since="OTP 21.0">debug(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>debug</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(debug,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="log" arity="2" since="OTP 21.0"/>
+ <name name="log" arity="3" clause_i="1" since="OTP 21.0"/>
+ <name name="log" arity="3" clause_i="2" since="OTP 21.0"/>
+ <name name="log" arity="3" clause_i="3" since="OTP 21.0"/>
+ <name name="log" arity="4" clause_i="1" since="OTP 21.0"/>
+ <name name="log" arity="4" clause_i="2" since="OTP 21.0"/>
+ <fsummary>Logs the given message.</fsummary>
+ <type variable="Level"/>
+ <type variable="StringOrReport" name_i="1"/>
+ <type variable="Format" name_i="3"/>
+ <type variable="Args" name_i="3"/>
+ <type variable="Fun" name_i="4"/>
+ <type variable="FunArgs" name_i="4"/>
+ <type variable="Metadata"/>
+ <desc>
+ <p>Log the given message.</p>
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+ <marker id="configuration_API"/>
+ <title>Configuration API functions</title>
+ </section>
+ <funcs>
+ <func>
+ <name name="add_handler" arity="3" since="OTP 21.0"/>
+ <fsummary>Add a handler with the given configuration.</fsummary>
+ <desc>
+ <p>Add a handler with the given configuration.</p>
+ <p><c><anno>HandlerId</anno></c> is a unique identifier which
+ must be used in all subsequent calls referring to this
+ handler.</p>
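+      <p>For example, a sketch that adds a second standard handler
+        logging debug events to a file (the handler id and file path
+        are examples only):</p>
+      <code>
+ok = logger:add_handler(my_debug_h, logger_std_h,
+                        #{level => debug,
+                          config => #{file => "log/debug.log"}}).
+      </code>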
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_handler_filter" arity="3" since="OTP 21.0"/>
+ <fsummary>Add a filter to the specified handler.</fsummary>
+ <desc>
+ <p>Add a filter to the specified handler.</p>
+ <p>The filter fun is called with the log event as the first
+ parameter, and the specified <c>filter_args()</c> as the
+ second parameter.</p>
+ <p>The return value of the fun specifies if a log event is to
+ be discarded or forwarded to the handler callback:</p>
+ <taglist>
+ <tag><c>log_event()</c></tag>
+ <item>
+ <p>The filter <em>passed</em>. The next handler filter, if
+ any, is applied. If no more filters exist for this
+ handler, the log event is forwarded to the handler
+ callback.</p>
+ </item>
+ <tag><c>stop</c></tag>
+ <item>
+ <p>The filter <em>did not pass</em>, and the log event is
+ immediately discarded.</p>
+ </item>
+ <tag><c>ignore</c></tag>
+ <item>
+ <p>The filter has no knowledge of the log event. The next
+ handler filter, if any, is applied. If no more filters
+ exist for this handler, the value of
+ the <c>filter_default</c> configuration parameter for
+ the handler specifies if the log event shall be
+ discarded or forwarded to the handler callback.</p>
+ </item>
+ </taglist>
+ <p>See
+ section <seealso marker="logger_chapter#filters">Filters</seealso>
+ in the User's Guide for more information about filters.</p>
+ <p>Some built-in filters exist. These are defined in
+ <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p>
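+      <p>For example, a sketch using one of the built-in filters to
+        stop all events from the <c>[otp, sasl]</c> domain on the
+        default handler (the filter id <c>stop_sasl</c> is an example
+        only):</p>
+      <code>
+ok = logger:add_handler_filter(default, stop_sasl,
+                               {fun logger_filters:domain/2,
+                                {stop, sub, [otp, sasl]}}).
+      </code>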
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_handlers" arity="1" clause_i="1" since="OTP 21.0"/>
+ <fsummary>Set up log handlers from the application's
+ configuration parameters.</fsummary>
+ <desc>
+ <p>Reads the application configuration parameter <c>logger</c> and
+ calls <c>add_handlers/1</c> with its contents.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_handlers" arity="1" clause_i="2" since="OTP 21.0"/>
+ <fsummary>Setup logger handlers.</fsummary>
+ <type name="config_handler"/>
+ <desc>
+ <p>This function should be used by custom Logger handlers to make
+ configuration consistent no matter which handler the system uses.
+ Normal usage is to add a call to <c>logger:add_handlers/1</c>
+ just after the processes that the handler needs are started,
+ and pass the application's <c>logger</c> configuration as the argument.
+ For example:</p>
+ <code>
+-behaviour(application).
+start(_, []) ->
+ case supervisor:start_link({local, my_sup}, my_sup, []) of
+ {ok, Pid} ->
+ ok = logger:add_handlers(my_app),
+ {ok, Pid, []};
+ Error -> Error
+ end.</code>
+ <p>This reads the <c>logger</c> configuration parameter from
+ the <c>my_app</c> application and starts the configured
+ handlers. The contents of the configuration use the same
+ rules as the
+ <seealso marker="logger_chapter#handler-configuration">logger handler configuration</seealso>.
+ </p>
+ <p>If the handler is meant to replace the default handler, the Kernel's
+      default handler has to be disabled before the new handler is added.
+ A <c>sys.config</c> file that disables the Kernel handler and adds
+ a custom handler could look like this:</p>
+ <code>
+[{kernel,
+ [{logger,
+ %% Disable the default Kernel handler
+ [{handler, default, undefined}]}]},
+ {my_app,
+ [{logger,
+ %% Enable this handler as the default
+ [{handler, default, my_handler, #{}}]}]}].
+ </code>
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_primary_filter" arity="2" since="OTP 21.0"/>
+ <fsummary>Add a primary filter to Logger.</fsummary>
+ <desc>
+ <p>Add a primary filter to Logger.</p>
+ <p>The filter fun is called with the log event as the first
+ parameter, and the specified <c>filter_args()</c> as the
+ second parameter.</p>
+ <p>The return value of the fun specifies if a log event is to
+ be discarded or forwarded to the handlers:</p>
+ <taglist>
+ <tag><c>log_event()</c></tag>
+ <item>
+ <p>The filter <em>passed</em>. The next primary filter, if
+ any, is applied. If no more primary filters exist, the
+ log event is forwarded to the handler part of Logger,
+ where handler filters are applied.</p>
+ </item>
+ <tag><c>stop</c></tag>
+ <item>
+ <p>The filter <em>did not pass</em>, and the log event is
+ immediately discarded.</p>
+ </item>
+ <tag><c>ignore</c></tag>
+ <item>
+ <p>The filter has no knowledge of the log event. The next
+ primary filter, if any, is applied. If no more primary
+ filters exist, the value of the
+ primary <c>filter_default</c> configuration parameter
+ specifies if the log event shall be discarded or
+ forwarded to the handler part.</p>
+ </item>
+ </taglist>
+ <p>See section <seealso marker="logger_chapter#filters">
+ Filters</seealso> in the User's Guide for more information
+ about filters.</p>
+ <p>Some built-in filters exist. These are defined
+ in <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p>
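+      <p>For example, a sketch that discards all progress reports
+        before they reach any handler (the filter id is an example
+        only):</p>
+      <code>
+ok = logger:add_primary_filter(stop_progress,
+                               {fun logger_filters:progress/2, stop}).
+      </code>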
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_config" arity="0" since="OTP 21.0"/>
+ <fsummary>Look up the current Logger configuration</fsummary>
+ <desc>
+ <p>Look up all current Logger configuration, including primary,
+ handler, and proxy configuration, and module level settings.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_handler_config" arity="0" since="OTP 21.0"/>
+ <fsummary>Look up the current configuration for all handlers.</fsummary>
+ <desc>
+ <p>Look up the current configuration for all handlers.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_handler_config" arity="1" since="OTP 21.0"/>
+ <fsummary>Look up the current configuration for the given
+ handler.</fsummary>
+ <desc>
+ <p>Look up the current configuration for the given handler.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_handler_ids" arity="0" since="OTP 21.0"/>
+ <fsummary>Look up the identities for all installed handlers.</fsummary>
+ <desc>
+ <p>Look up the identities for all installed handlers.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_primary_config" arity="0" since="OTP 21.0"/>
+ <fsummary>Look up the current primary configuration for Logger.</fsummary>
+ <desc>
+ <p>Look up the current primary configuration for Logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_proxy_config" arity="0" since="OTP 21.3"/>
+ <fsummary>Look up the current configuration for the Logger proxy.</fsummary>
+ <desc>
+ <p>Look up the current configuration for the Logger proxy.</p>
+ <p>For more information about the proxy, see
+ section <seealso marker="logger_chapter#proxy">Logger
+ Proxy</seealso> in the Kernel User's Guide.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_module_level" arity="0" since="OTP 21.0"/>
+ <fsummary>Look up all current module levels.</fsummary>
+ <desc>
+ <p>Look up all current module levels. Returns a list
+ containing one <c>{Module,Level}</c> element for each module
+ for which the module level was previously set
+ with <seealso marker="#set_module_level-2">
+ <c>set_module_level/2</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_module_level" arity="1" since="OTP 21.0"/>
+ <fsummary>Look up the current level for the given modules.</fsummary>
+ <desc>
+ <p>Look up the current level for the given modules. Returns a
+ list containing one <c>{Module,Level}</c> element for each
+ of the given modules for which the module level was
+ previously set with <seealso marker="#set_module_level-2">
+ <c>set_module_level/2</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_process_metadata" arity="0" since="OTP 21.0"/>
+ <fsummary>Retrieve data set with set_process_metadata/1.</fsummary>
+ <desc>
+ <p>Retrieve data set
+ with <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c></seealso> or
+ <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="i" arity="0" since="OTP 21.3"/>
+ <name name="i" arity="1" since="OTP 21.3"/>
+ <fsummary>Pretty print the Logger configuration.</fsummary>
+ <desc>
+ <p>Pretty print the Logger configuration.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="remove_handler" arity="1" since="OTP 21.0"/>
+ <fsummary>Remove the handler with the specified identity.</fsummary>
+ <desc>
+ <p>Remove the handler identified by <c><anno>HandlerId</anno></c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="remove_handler_filter" arity="2" since="OTP 21.0"/>
+ <fsummary>Remove a filter from the specified handler.</fsummary>
+ <desc>
+ <p>Remove the filter identified
+ by <c><anno>FilterId</anno></c> from the handler identified
+ by <c><anno>HandlerId</anno></c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="remove_primary_filter" arity="1" since="OTP 21.0"/>
+ <fsummary>Remove a primary filter from Logger.</fsummary>
+ <desc>
+ <p>Remove the primary filter identified
+ by <c><anno>FilterId</anno></c> from Logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_application_level" arity="2" since="OTP 21.1"/>
+ <fsummary>Set the log level for all modules in the specified application.</fsummary>
+ <desc>
+ <p>Set the log level for all the modules of the specified application.</p>
+ <p>This function is a convenience function that calls
+ <seealso marker="#set_module_level/2">logger:set_module_level/2</seealso>
+ for each module associated with an application.</p>
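+      <p>For example, to set the level <c>debug</c> for all modules in
+        the <c>mnesia</c> application (assuming that application is
+        loaded):</p>
+      <code>
+ok = logger:set_application_level(mnesia, debug).
+      </code>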
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_handler_config" arity="2" since="OTP 21.0"/>
+ <fsummary>Set configuration data for the specified handler.</fsummary>
+ <desc>
+ <p>Set configuration data for the specified handler. This
+ overwrites the current handler configuration.</p>
+ <p>To modify the existing configuration,
+ use <seealso marker="#update_handler_config-2">
+ <c>update_handler_config/2</c></seealso>, or, if a more
+ complex merge is needed, read the current configuration
+ with <seealso marker="#get_handler_config-1"><c>get_handler_config/1</c>
+ </seealso>, then do the merge before writing the new
+ configuration back with this function.</p>
+ <p>If a key is removed compared to the current configuration,
+ and the key is known by Logger, the default value is used. If
+ it is a custom key, then it is up to the handler
+ implementation if the value is removed or a default value is
+ inserted.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_handler_config" arity="3" clause_i="1" since="OTP 21.0"/>
+ <name name="set_handler_config" arity="3" clause_i="2" since="OTP 21.0"/>
+ <name name="set_handler_config" arity="3" clause_i="3" since="OTP 21.0"/>
+ <name name="set_handler_config" arity="3" clause_i="4" since="OTP 21.0"/>
+ <name name="set_handler_config" arity="3" clause_i="5" since="OTP 21.0"/>
+ <fsummary>Add or update configuration data for the specified
+ handler.</fsummary>
+ <type variable="HandlerId"/>
+ <type variable="Level" name_i="1"/>
+ <type variable="FilterDefault" name_i="2"/>
+ <type variable="Filters" name_i="3"/>
+ <type variable="Formatter" name_i="4"/>
+ <type variable="Config" name_i="5"/>
+ <type variable="Return"/>
+ <desc>
+ <p>Add or update configuration data for the specified
+ handler. If the given <c><anno>Key</anno></c> already
+ exists, its associated value will be changed
+ to the given value. If it does not exist, it will
+ be added.</p>
+ <p>If the value is incomplete, which for example can be the
+ case for the <c>config</c> key, it is up to the handler
+ implementation how the unspecified parts are set. For all
+ handlers in the Kernel application, unspecified data for
+ the <c>config</c> key is set to default values. To update
+ only specified data, and keep the existing configuration for
+ the rest, use <seealso marker="#update_handler_config-3">
+ <c>update_handler_config/3</c></seealso>.</p>
+ <p>See the definition of
+ the <seealso marker="#type-handler_config">
+ <c>handler_config()</c></seealso> type for more
+ information about the different parameters.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_primary_config" arity="1" since="OTP 21.0"/>
+ <fsummary>Set primary configuration data for Logger.</fsummary>
+ <desc>
+ <p>Set primary configuration data for Logger. This
+ overwrites the current configuration.</p>
+ <p>To modify the existing configuration,
+ use <seealso marker="#update_primary_config-1">
+ <c>update_primary_config/1</c></seealso>, or, if a more
+ complex merge is needed, read the current configuration
+ with <seealso marker="#get_primary_config-0"><c>get_primary_config/0</c>
+ </seealso>, then do the merge before writing the new
+ configuration back with this function.</p>
+ <p>If a key is removed compared to the current configuration,
+ the default value is used.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_primary_config" arity="2" clause_i="1" since="OTP 21.0"/>
+ <name name="set_primary_config" arity="2" clause_i="2" since="OTP 21.0"/>
+ <name name="set_primary_config" arity="2" clause_i="3" since="OTP 21.0"/>
+ <fsummary>Add or update primary configuration data for Logger.</fsummary>
+ <type variable="Level" name_i="1"/>
+ <type variable="FilterDefault" name_i="2"/>
+ <type variable="Filters" name_i="3"/>
+ <desc>
+ <p>Add or update primary configuration data for Logger. If the
+ given <c><anno>Key</anno></c> already exists, its associated
+ value will be changed to the given value. If it does not
+ exist, it will be added.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_proxy_config" arity="1" since="OTP 21.3"/>
+ <fsummary>Set configuration data for the Logger proxy.</fsummary>
+ <desc>
+ <p>Set configuration data for the Logger proxy. This
+ overwrites the current proxy configuration. Keys that are not
+      specified in the <c><anno>Config</anno></c> map get default
+ values.</p>
+ <p>To modify the existing configuration,
+ use <seealso marker="#update_proxy_config-1">
+ <c>update_proxy_config/1</c></seealso>, or, if a more
+ complex merge is needed, read the current configuration
+ with <seealso marker="#get_proxy_config-0"><c>get_proxy_config/0</c>
+ </seealso>, then do the merge before writing the new
+ configuration back with this function.</p>
+ <p>For more information about the proxy, see
+ section <seealso marker="logger_chapter#proxy">Logger
+ Proxy</seealso> in the Kernel User's Guide.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_module_level" arity="2" since="OTP 21.0"/>
+ <fsummary>Set the log level for the specified modules.</fsummary>
+ <desc>
+ <p>Set the log level for the specified modules.</p>
+ <p>The log level for a module overrides the primary log level
+ of Logger for log events originating from the module in
+ question. Notice, however, that it does not override the
+ level configuration for any handler.</p>
+ <p>For example: Assume that the primary log level for Logger
+ is <c>info</c>, and there is one handler, <c>h1</c>, with
+ level <c>info</c> and one handler, <c>h2</c>, with
+ level <c>debug</c>.</p>
+ <p>With this configuration, no debug messages will be logged,
+ since they are all stopped by the primary log level.</p>
+ <p>If the level for <c>mymodule</c> is now set
+ to <c>debug</c>, then debug events from this module will be
+ logged by the handler <c>h2</c>, but not by
+ handler <c>h1</c>.</p>
+ <p>Debug events from other modules are still not logged.</p>
+ <p>To change the primary log level for Logger, use
+ <seealso marker="#set_primary_config/2">
+ <c>set_primary_config(level, Level)</c></seealso>.</p>
+ <p>To change the log level for a handler, use
+ <seealso marker="#set_handler_config/3">
+ <c>set_handler_config(HandlerId, level, Level)</c>
+ </seealso>.</p>
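+      <p>A sketch of the scenario above (<c>mymodule</c>, <c>h1</c>,
+        and <c>h2</c> are example names):</p>
+      <code>
+ok = logger:set_primary_config(level, info),
+ok = logger:set_handler_config(h1, level, info),
+ok = logger:set_handler_config(h2, level, debug),
+ok = logger:set_module_level([mymodule], debug).
+      </code>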
+ <note>
+ <p>The originating module for a log event is only detected
+ if the key <c>mfa</c> exists in the metadata, and is
+ associated with <c>{Module, Function, Arity}</c>. When log
+ macros are used, this association is automatically added
+ to all log events. If an API function is called directly,
+ without using a macro, the logging client must explicitly
+ add this information if module levels shall have any
+ effect.</p>
+ </note>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_process_metadata" arity="1" since="OTP 21.0"/>
+ <fsummary>Set metadata to use when logging from current process.</fsummary>
+ <desc>
+ <p>Set metadata which Logger shall automatically insert in
+ all log events produced on the current process.</p>
+ <p>Location data produced by the log macros, and/or metadata
+ given as argument to the log call (API function or macro),
+ are merged with the process metadata. If the same keys
+ occur, values from the metadata argument to the log call
+ overwrite values from the process metadata, which in turn
+ overwrite values from the location data.</p>
+      <p>Subsequent calls to this function overwrite previously set
+        data. To update existing data instead of overwriting it,
+ see <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="unset_application_level" arity="1" since="OTP 21.1"/>
+ <fsummary>Unset the log level for all modules in the specified application.</fsummary>
+ <desc>
+ <p>Unset the log level for all the modules of the specified application.</p>
+      <p>This function is a convenience function that calls
+      <seealso marker="#unset_module_level/1">logger:unset_module_level/1</seealso>
+ for each module associated with an application.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="unset_module_level" arity="0" since="OTP 21.0"/>
+ <fsummary>Remove module specific log settings for all modules.</fsummary>
+ <desc>
+ <p>Remove module specific log settings. After this, the
+ primary log level is used for all modules.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="unset_module_level" arity="1" since="OTP 21.0"/>
+ <fsummary>Remove module specific log settings for the given
+ modules.</fsummary>
+ <desc>
+ <p>Remove module specific log settings. After this, the
+ primary log level is used for the specified modules.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="unset_process_metadata" arity="0" since="OTP 21.0"/>
+ <fsummary>Delete data set with set_process_metadata/1.</fsummary>
+ <desc>
+ <p>Delete data set
+ with <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c></seealso> or
+ <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_formatter_config" arity="2" since="OTP 21.0"/>
+ <fsummary>Update the formatter configuration for the specified handler.</fsummary>
+ <desc>
+ <p>Update the formatter configuration for the specified handler.</p>
+ <p>The new configuration is merged with the existing formatter
+ configuration.</p>
+ <p>To overwrite the existing configuration without any merge,
+ use</p>
+ <pre>
+<seealso marker="#set_handler_config-3">set_handler_config(HandlerId, formatter,
+ {FormatterModule, FormatterConfig})</seealso>.</pre>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_formatter_config" arity="3" since="OTP 21.0"/>
+ <fsummary>Update the formatter configuration for the specified handler.</fsummary>
+ <desc>
+ <p>Update the formatter configuration for the specified handler.</p>
+ <p>This is equivalent to</p>
+ <pre>
+<seealso marker="#update_formatter_config-2">update_formatter_config(<anno>HandlerId</anno>, #{<anno>Key</anno> => <anno>Value</anno>})</seealso></pre>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_handler_config" arity="2" since="OTP 21.0"/>
+ <fsummary>Update configuration data for the specified handler.</fsummary>
+ <desc>
+ <p>Update configuration data for the specified handler. This function
+ behaves as if it was implemented as follows:</p>
+ <code type="erl">
+{ok, {_, Old}} = logger:get_handler_config(HandlerId),
+logger:set_handler_config(HandlerId, maps:merge(Old, Config)).
+ </code>
+ <p>To overwrite the existing configuration without any merge,
+ use <seealso marker="#set_handler_config-2"><c>set_handler_config/2</c>
+ </seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_handler_config" arity="3" clause_i="1" since="OTP 21.2"/>
+ <name name="update_handler_config" arity="3" clause_i="2" since="OTP 21.2"/>
+ <name name="update_handler_config" arity="3" clause_i="3" since="OTP 21.2"/>
+ <name name="update_handler_config" arity="3" clause_i="4" since="OTP 21.2"/>
+ <name name="update_handler_config" arity="3" clause_i="5" since="OTP 21.2"/>
+ <fsummary>Add or update configuration data for the specified
+ handler.</fsummary>
+ <type variable="HandlerId"/>
+ <type variable="Level" name_i="1"/>
+ <type variable="FilterDefault" name_i="2"/>
+ <type variable="Filters" name_i="3"/>
+ <type variable="Formatter" name_i="4"/>
+ <type variable="Config" name_i="5"/>
+ <type variable="Return"/>
+ <desc>
+ <p>Add or update configuration data for the specified
+ handler. If the given <c><anno>Key</anno></c> already
+ exists, its associated value will be changed
+ to the given value. If it does not exist, it will
+ be added.</p>
+ <p>If the value is incomplete, which for example can be the
+ case for the <c>config</c> key, it is up to the handler
+ implementation how the unspecified parts are set. For all
+ handlers in the Kernel application, unspecified data for
+ the <c>config</c> key is not changed. To reset unspecified
+ data to default values,
+ use <seealso marker="#set_handler_config-3">
+ <c>set_handler_config/3</c></seealso>.</p>
+ <p>See the definition of
+ the <seealso marker="#type-handler_config">
+ <c>handler_config()</c></seealso> type for more
+ information about the different parameters.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_primary_config" arity="1" since="OTP 21.0"/>
+ <fsummary>Update primary configuration data for Logger.</fsummary>
+ <desc>
+ <p>Update primary configuration data for Logger. This function
+ behaves as if it was implemented as follows:</p>
+ <code type="erl">
+Old = logger:get_primary_config(),
+logger:set_primary_config(maps:merge(Old, Config)).
+ </code>
+ <p>To overwrite the existing configuration without any merge,
+ use <seealso marker="#set_primary_config-1"><c>set_primary_config/1</c>
+ </seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_process_metadata" arity="1" since="OTP 21.0"/>
+ <fsummary>Set or update metadata to use when logging from
+ current process.</fsummary>
+ <desc>
+ <p>Set or update metadata to use when logging from current
+        process.</p>
+ <p>If process metadata exists for the current process, this
+ function behaves as if it was implemented as follows:</p>
+ <code type="erl">
+logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)).
+ </code>
+ <p>If no process metadata exists, the function behaves as
+ <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c>
+ </seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="update_proxy_config" arity="1" since="OTP 21.3"/>
+ <fsummary>Update configuration data for the Logger proxy.</fsummary>
+ <desc>
+ <p>Update configuration data for the Logger proxy. This function
+ behaves as if it was implemented as follows:</p>
+ <code type="erl">
+Old = logger:get_proxy_config(),
+logger:set_proxy_config(maps:merge(Old, Config)).
+ </code>
+ <p>To overwrite the existing configuration without any merge,
+ use <seealso marker="#set_proxy_config-1"><c>set_proxy_config/1</c>
+ </seealso>.</p>
+ <p>For more information about the proxy, see
+ section <seealso marker="logger_chapter#proxy">Logger
+ Proxy</seealso> in the Kernel User's Guide.</p>
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+ <marker id="misc_API"/>
+ <title>Miscellaneous API functions</title>
+ </section>
+ <funcs>
+ <func>
+ <name name="compare_levels" arity="2" since="OTP 21.0"/>
+ <fsummary>Compare the severity of two log levels.</fsummary>
+ <desc>
+ <p>Compare the severity of two log levels. Returns <c>gt</c>
+ if <c>Level1</c> is more severe than
+ <c>Level2</c>, <c>lt</c> if <c>Level1</c> is less severe,
+ and <c>eq</c> if the levels are equal.</p>
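+        <p>For example:</p>
+        <code>
+gt = logger:compare_levels(error, info),
+lt = logger:compare_levels(info, error),
+eq = logger:compare_levels(debug, debug).
+        </code>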
+ </desc>
+ </func>
+
+ <func>
+ <name name="format_report" arity="1" since="OTP 21.0"/>
+ <fsummary>Convert a log message on report form to {Format, Args}.</fsummary>
+ <desc>
+ <p>Convert a log message on report form to <c>{Format,
+ Args}</c>. This is the default report callback used
+ by <seealso marker="logger_formatter">
+ <c>logger_formatter</c></seealso> when no custom report
+ callback is found. See
+ section <seealso marker="logger_chapter#log_message">Log
+ Message</seealso> in the Kernel User's Guide for
+ information about report callbacks and valid forms of log
+ messages.</p>
+ <p>The function produces lines of <c>Key: Value</c> from
+ key-value lists. Strings are printed with <c>~ts</c> and
+ other terms with <c>~tp</c>.</p>
+ <p>If <c><anno>Report</anno></c> is a map, it is converted to
+ a key-value list before formatting as such.</p>
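+      <p>For example, a sketch (the report map below is an example
+        only):</p>
+      <code>
+{Format, Args} = logger:format_report(#{reason => timeout, retries => 3}),
+io:format(Format, Args).
+      </code>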
+ </desc>
+ </func>
+
+ <func>
+ <name name="timestamp" arity="0" since="OTP 21.3"/>
+ <fsummary>Return a timestamp to insert in meta data for a log
+ event.</fsummary>
+ <desc>
+ <p>Return a timestamp that can be inserted as the <c>time</c>
+ field in the metadata for a log event. It is produced with
+ <seealso marker="kernel:os#system_time-1">
+ <c>os:system_time(microsecond)</c></seealso>.</p>
+ <p>Notice that Logger automatically inserts a timestamp in the
+ metadata unless it already exists. This function is
+ exported for the rare case when the timestamp must be taken
+ at a different point in time than when the log event is
+ issued.</p>
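+ <p>A minimal sketch of such a case, where the timestamp is taken
+ before the event is actually issued:</p>
+ <code type="erl">
+Time = logger:timestamp(),
+%% ... some time passes ...
+logger:log(info, "delayed event", #{time => Time}).
+ </code>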
+ </desc>
+ </func>
+
+ </funcs>
+
+ <section>
+ <marker id="handler_callback_functions"/>
+ <title>Handler Callback Functions</title>
+ <p>The following functions are to be exported from a handler
+ callback module.</p>
+ </section>
+
+ <funcs>
+ <func>
+ <name since="OTP 21.0">HModule:adding_handler(Config1) -> {ok, Config2} | {error,
+ Reason}</name>
+ <fsummary>An instance of this handler is about to be added.</fsummary>
+ <type>
+ <v>Config1 = Config2 =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called on a temporary process when a new
+ handler is about to be added. The purpose is to verify the
+ configuration and initiate all resources needed by the
+ handler.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>Config1</c>.</p>
+ <p>If everything succeeds, the callback function can add
+ possible default values or internal state values to the
+ configuration, and return the adjusted map
+ in <c>{ok,Config2}</c>.</p>
+ <p>If the configuration is faulty, or if the initiation fails,
+ the callback function must return <c>{error,Reason}</c>.</p>
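+ <p>A minimal sketch, where the started resource and
+ the <c>my_resource</c> module are hypothetical:</p>
+ <code type="erl">
+adding_handler(Config) ->
+    HConf = maps:get(config, Config, #{}),
+    case my_resource:start(HConf) of
+        {ok, Pid} ->
+            {ok, Config#{config => HConf#{pid => Pid}}};
+        {error, Reason} ->
+            {error, Reason}
+    end.
+ </code>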
+ </desc>
+ </func>
+
+ <func>
+ <name since="OTP 21.2">HModule:changing_config(SetOrUpdate, OldConfig, NewConfig) -> {ok, Config} | {error, Reason}</name>
+ <fsummary>The configuration for this handler is about to change.</fsummary>
+ <type>
+ <v>SetOrUpdate = set | update</v>
+ <v>OldConfig = NewConfig = Config =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called on a temporary process when the
+ configuration for a handler is about to change. The purpose
+ is to verify and act on the new configuration.</p>
+ <p><c>OldConfig</c> is the existing configuration
+ and <c>NewConfig</c> is the new configuration.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>OldConfig</c>.</p>
+ <p><c>SetOrUpdate</c> has the value <c>set</c> if the
+ configuration change originates from a call to
+ <seealso marker="#set_handler_config-2">
+ <c>set_handler_config/2,3</c></seealso>, and <c>update</c>
+ if it originates from <seealso marker="#update_handler_config-2">
+ <c>update_handler_config/2,3</c></seealso>. The handler can
+ use this parameter to decide how to update the value of
+ the <c>config</c> field, that is, the handler specific
+ configuration data. Typically, if <c>SetOrUpdate</c>
+ equals <c>set</c>, values that are not specified must be
+ given their default values. If <c>SetOrUpdate</c>
+ equals <c>update</c>, the values found in <c>OldConfig</c>
+ must be used instead.</p>
+ <p>If everything succeeds, the callback function must return a
+ possibly adjusted configuration in <c>{ok,Config}</c>.</p>
+ <p>If the configuration is faulty, the callback function must
+ return <c>{error,Reason}</c>.</p>
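+ <p>A sketch of how <c>SetOrUpdate</c> can be used when merging
+ the handler specific configuration data (the <c>file</c>
+ default is a hypothetical example):</p>
+ <code type="erl">
+changing_config(SetOrUpdate, OldConfig, NewConfig) ->
+    Default = #{file => "myhandler.log"},
+    OldHConf = maps:get(config, OldConfig, Default),
+    NewHConf0 = maps:get(config, NewConfig, #{}),
+    NewHConf = case SetOrUpdate of
+                   set -> maps:merge(Default, NewHConf0);
+                   update -> maps:merge(OldHConf, NewHConf0)
+               end,
+    {ok, NewConfig#{config => NewHConf}}.
+ </code>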
+ </desc>
+ </func>
+
+ <func>
+ <name since="OTP 21.2">HModule:filter_config(Config) -> FilteredConfig</name>
+ <fsummary>Remove internal data from configuration.</fsummary>
+ <type>
+ <v>Config = FilteredConfig =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called when one of the Logger API functions
+ for fetching the handler configuration is called, for
+ example
+ <seealso marker="#get_handler_config-1">
+ <c>logger:get_handler_config/1</c></seealso>.</p>
+ <p>It allows the handler to remove internal data fields from
+ its configuration data before it is returned to the
+ caller.</p>
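+ <p>A minimal sketch, assuming the handler keeps an internal
+ <c>pid</c> field in its configuration data:</p>
+ <code type="erl">
+filter_config(Config) ->
+    HConf = maps:get(config, Config, #{}),
+    Config#{config => maps:remove(pid, HConf)}.
+ </code>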
+ </desc>
+ </func>
+
+ <func>
+ <name since="OTP 21.0">HModule:log(LogEvent, Config) -> void()</name>
+ <fsummary>Log the given log event.</fsummary>
+ <type>
+ <v>LogEvent =
+ <seealso marker="#type-log_event">log_event()</seealso></v>
+ <v>Config =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is mandatory.</p>
+ <p>The function is called when all primary filters and all
+ handler filters for the handler in question have passed for
+ the given log event. It is called on the client process, that
+ is, the process that issued the log event.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>Config</c>.</p>
+ <p>The handler must log the event.</p>
+ <p>The return value from this function is ignored by
+ Logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name since="OTP 21.0">HModule:removing_handler(Config) -> ok</name>
+ <fsummary>The given handler is about to be removed.</fsummary>
+ <type>
+ <v>Config =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called on a temporary process when a
+ handler is about to be removed. The purpose is to release
+ all resources used by the handler.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>Config</c>.</p>
+ <p>The return value is ignored by Logger.</p>
+ </desc>
+ </func>
+
+ </funcs>
+
+ <section>
+ <marker id="formatter_callback_functions"/>
+ <title>Formatter Callback Functions</title>
+ <p>The following functions are to be exported from a formatter
+ callback module.</p>
+ </section>
+
+ <funcs>
+ <func>
+ <name since="OTP 21.0">FModule:check_config(FConfig) -> ok | {error, Reason}</name>
+ <fsummary>Validate the given formatter configuration.</fsummary>
+ <type>
+ <v>FConfig =
+ <seealso marker="#type-formatter_config">formatter_config()</seealso></v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called when the formatter configuration
+ for a handler is set or modified. The formatter must
+ validate the given configuration and return <c>ok</c> if it
+ is correct, and <c>{error,Reason}</c> if it is faulty.</p>
+ <p>The following Logger API functions can trigger this callback:</p>
+ <list>
+ <item><seealso marker="logger#add_handler-3">
+ <c>logger:add_handler/3</c></seealso></item>
+ <item><seealso marker="logger#set_handler_config-2">
+ <c>logger:set_handler_config/2,3</c></seealso></item>
+ <item><seealso marker="logger#update_handler_config-2">
+ <c>logger:update_handler_config/2,3</c></seealso></item>
+ <item><seealso marker="logger#update_formatter_config-2">
+ <c>logger:update_formatter_config/2</c></seealso></item>
+ </list>
+ <p>See <seealso marker="logger_formatter">
+ <c>logger_formatter(3)</c></seealso>
+ for an example implementation. <c>logger_formatter</c> is the
+ default formatter used by Logger.</p>
+ </desc>
+ </func>
+ <func>
+ <name since="OTP 21.0">FModule:format(LogEvent, FConfig) -> FormattedLogEntry</name>
+ <fsummary>Format the given log event.</fsummary>
+ <type>
+ <v>LogEvent =
+ <seealso marker="#type-log_event">log_event()</seealso></v>
+ <v>FConfig =
+ <seealso marker="#type-formatter_config">formatter_config()</seealso></v>
+ <v>FormattedLogEntry =
+ <seealso marker="unicode#type-chardata">unicode:chardata()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is mandatory.</p>
+ <p>The function can be called by a log handler to convert a
+ log event term to a printable string. The returned value
+ can, for example, be printed as a log entry to the console
+ or a file using <seealso marker="stdlib:io#put_chars-1">
+ <c>io:put_chars/1,2</c></seealso>.</p>
+ <p>See <seealso marker="logger_formatter">
+ <c>logger_formatter(3)</c></seealso>
+ for an example implementation. <c>logger_formatter</c> is the
+ default formatter used by Logger.</p>
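+ <p>A minimal formatter sketch (the module name is hypothetical,
+ and no <c>FConfig</c> options are honoured):</p>
+ <code type="erl">
+-module(myformatter).
+-export([format/2]).
+
+format(#{level := Level, msg := Msg, meta := _Meta}, _FConfig) ->
+    Text = case Msg of
+               {string, String} -> String;
+               {report, Report} -> io_lib:format("~tp", [Report]);
+               {Format, Args} -> io_lib:format(Format, Args)
+           end,
+    io_lib:format("[~p] ~ts~n", [Level, Text]).
+ </code>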
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+ <title>See Also</title>
+ <p>
+ <seealso marker="config"><c>config(4)</c></seealso>,
+ <seealso marker="erts:erlang"><c>erlang(3)</c></seealso>,
+ <seealso marker="stdlib:io"><c>io(3)</c></seealso>,
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c></seealso>,
+ <seealso marker="logger_filters"><c>logger_filters(3)</c></seealso>,
+ <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>,
+ <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>,
+ <seealso marker="stdlib:unicode"><c>unicode(3)</c></seealso>
+ </p>
+ </section>
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_arch.dia b/lib/kernel/doc/src/logger_arch.dia
new file mode 100644
index 0000000000..97be31856e
--- /dev/null
+++ b/lib/kernel/doc/src/logger_arch.dia
Binary files differ
diff --git a/lib/kernel/doc/src/logger_arch.png b/lib/kernel/doc/src/logger_arch.png
new file mode 100644
index 0000000000..70933a5a41
--- /dev/null
+++ b/lib/kernel/doc/src/logger_arch.png
Binary files differ
diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml
new file mode 100644
index 0000000000..bfd0acf634
--- /dev/null
+++ b/lib/kernel/doc/src/logger_chapter.xml
@@ -0,0 +1,1399 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2017</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Logging</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ <file>logger_chapter.xml</file>
+ </header>
+
+ <p>Erlang/OTP 21.0 provides a standard API for logging
+ through <c>Logger</c>, which is part of the Kernel
+ application. Logger consists of the API for issuing log events,
+ and a customizable backend where log handlers, filters and
+ formatters can be plugged in.</p>
+ <p>By default, the Kernel application installs one log handler at
+ system start. This handler is named <c>default</c>. It receives
+ and processes standard log events produced by the Erlang runtime
+ system, standard behaviours and different Erlang/OTP
+ applications. The log events are by default written to the
+ terminal.</p>
+ <p>You can also configure the system so that the default handler
+ prints log events to a single file, or to a set of wrap logs
+ via <seealso marker="disk_log"><c>disk_log</c></seealso>.</p>
+ <p>By configuration, you can also modify or disable the default
+ handler, replace it by a custom handler, and install additional
+ handlers.</p>
+
+ <note>
+ <p>Since Logger is new in Erlang/OTP 21.0, we do reserve the right
+ to introduce changes to the Logger API and functionality in
+ patches following this release. These changes might or might not
+ be backwards compatible with the initial version.</p>
+ </note>
+
+ <section>
+ <title>Overview</title>
+ <p>A <em>log event</em> consists of a <em>log level</em>, the
+ <em>message</em> to be logged, and <em>metadata</em>.</p>
+ <p>The Logger backend forwards log events from the API, first
+ through a set of <em>primary filters</em>, then through a set of
+ secondary filters attached to each log handler. In the following,
+ the secondary filters are referred to as <em>handler filters</em>.</p>
+ <p>Each filter set consists of a <em>log level check</em>,
+ followed by zero or more <em>filter functions</em>.</p>
+ <p>The following figure shows a conceptual overview of Logger. The
+ figure shows two log handlers, but any number of handlers can be
+ installed.</p>
+
+ <!-- The image is edited with dia in logger_arch.dia file,
+ and .png file generated with make target 'png'. -->
+ <image file="logger_arch.png">
+ <icaption>Conceptual Overview</icaption>
+ </image>
+
+ <p>Log levels are expressed as atoms. Internally in Logger, the
+ atoms are mapped to integer values, and a log event passes the
+ log level check if the integer value of its log level is less
+ than or equal to the currently configured log level. That is,
+ the check passes if the event is equally or more severe than the
+ configured level. See section <seealso marker="#log_level">Log
+ Level</seealso> for a listing and description of all log
+ levels.</p>
+ <p>The primary log level can be overridden by a log level
+ configured per module. This is to, for instance, allow more
+ verbose logging from a specific part of the system.</p>
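+ <p>For example, debug events from one module can be let through
+ while the primary level stays unchanged
+ (<c>mymodule</c> is a placeholder name):</p>
+ <code>logger:set_module_level(mymodule, debug).</code>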
+ <p>Filter functions can be used for more sophisticated filtering
+ than the log level check provides. A filter function can stop or
+ pass a log event, based on any of the event's contents. It can
+ also modify all parts of the log event. See
+ section <seealso marker="#filters">Filters</seealso> for more
+ details.</p>
+ <p>If a log event passes through all primary filters and all
+ handler filters for a specific handler, Logger forwards the
+ event to the <em>handler callback</em>. The handler formats and
+ prints the event to its destination. See
+ section <seealso marker="#handlers">Handlers</seealso> for more
+ details.</p>
+ <p>Everything up to and including the call to the handler
+ callbacks is executed on the client process, that is, the
+ process where the log event was issued. It is up to the handler
+ implementation if other processes are involved or not.</p>
+ <p>The handlers are called in sequence, and the order is not
+ defined.</p>
+ </section>
+ <section>
+ <marker id="logger_api"/>
+ <title>Logger API</title>
+ <p>The API for logging consists of a set
+ of <seealso marker="logger#macros">macros</seealso>, and a set
+ of functions of the form <c>logger:Level/1,2,3</c>, which are
+ all shortcuts
+ for <seealso marker="logger#log-2">
+ <c>logger:log(Level,Arg1[,Arg2[,Arg3]])</c></seealso>.</p>
+ <p>The macros are defined in <c>logger.hrl</c>, which is included
+ in a module with the directive</p>
+ <code>-include_lib("kernel/include/logger.hrl").</code>
+ <p>The difference between using the macros and the exported
+ functions is that macros add location (originator) information
+ to the metadata, and perform lazy evaluation by wrapping the
+ logger call in a case statement, so it is only evaluated if the
+ log level of the event passes the primary log level check.</p>
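+ <p>For example, the following two calls produce similar log events,
+ but only the first one adds location information to the metadata:</p>
+ <code>
+?LOG_ERROR("Connection timed out after ~p ms", [Timeout]),
+logger:error("Connection timed out after ~p ms", [Timeout])
+ </code>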
+ <section>
+ <marker id="log_level"/>
+ <title>Log Level</title>
+ <p>The log level indicates the severity of an event. In
+ accordance with the Syslog protocol,
+ <url href="https://www.ietf.org/rfc/rfc5424.txt">RFC
+ 5424</url>, eight log levels can be specified. The following
+ table lists all possible log levels by name (atom), integer
+ value, and description:</p>
+
+ <table align="left">
+ <row>
+ <cell><strong>Level</strong></cell>
+ <cell align="center"><strong>Integer</strong></cell>
+ <cell><strong>Description</strong></cell>
+ </row>
+ <row>
+ <cell>emergency</cell>
+ <cell align="center">0</cell>
+ <cell>system is unusable</cell>
+ </row>
+ <row>
+ <cell>alert</cell>
+ <cell align="center">1</cell>
+ <cell>action must be taken immediately</cell>
+ </row>
+ <row>
+ <cell>critical</cell>
+ <cell align="center">2</cell>
+ <cell>critical conditions</cell>
+ </row>
+ <row>
+ <cell>error</cell>
+ <cell align="center">3</cell>
+ <cell>error conditions</cell>
+ </row>
+ <row>
+ <cell>warning</cell>
+ <cell align="center">4</cell>
+ <cell>warning conditions</cell>
+ </row>
+ <row>
+ <cell>notice</cell>
+ <cell align="center">5</cell>
+ <cell>normal but significant conditions</cell>
+ </row>
+ <row>
+ <cell>info</cell>
+ <cell align="center">6</cell>
+ <cell>informational messages</cell>
+ </row>
+ <row>
+ <cell>debug</cell>
+ <cell align="center">7</cell>
+ <cell>debug-level messages</cell>
+ </row>
+ <tcaption>Log Levels</tcaption>
+ </table>
+ <p>Notice that the integer value is only used internally in
+ Logger. In the API, you must always use the atom. To compare
+ the severity of two log levels,
+ use <seealso marker="logger#compare_levels-2">
+ <c>logger:compare_levels/2</c></seealso>.</p>
+ </section>
+ <section>
+ <marker id="log_message"/>
+ <title>Log Message</title>
+ <p>The log message contains the information to be logged. The
+ message can consist of a format string and arguments (given as
+ two separate parameters in the Logger API), a string or a
+ report. The latter, which is either a map or a key-value list,
+ can be accompanied by a <em>report callback</em> specified in
+ the log event's <seealso marker="#metadata">metadata</seealso>.
+ The report callback is a convenience function that
+ the <seealso marker="#formatters">formatter</seealso> can use
+ to convert the report to a format string and arguments, or
+ directly to a string. The
+ formatter can also use its own conversion function, if no
+ callback is provided, or if customized formatting is
+ desired.</p>
+ <p>The report callback must be a fun with one or two
+ arguments. If it takes one argument, this is the report
+ itself, and the fun returns a format string and arguments:</p>
+ <pre>fun((<seealso marker="logger#type-report"><c>logger:report()</c></seealso>) -> {<seealso marker="stdlib:io#type-format"><c>io:format()</c></seealso>,[term()]})</pre>
+ <p>If it takes two arguments, the first is the report, and the
+ second is a map containing extra data that allows direct
+ conversion to a string:</p>
+ <pre>fun((<seealso marker="logger#type-report"><c>logger:report()</c></seealso>,<seealso marker="logger#type-report_cb_config"><c>logger:report_cb_config()</c></seealso>) -> <seealso marker="stdlib:unicode#type-chardata"><c>unicode:chardata()</c></seealso>)
+ </pre>
+ <p>The fun must obey the <c>depth</c> and <c>chars_limit</c>
+ parameters provided in the second argument, since the formatter
+ cannot apply these parameters to the string returned from the
+ fun. The extra data also contains a field named
+ <c>single_line</c>, indicating if the printed log message may
+ contain line breaks or not. This variant is used when the
+ formatting of the report depends on the size or single line
+ parameters.</p>
+ <p>Example, format string and arguments:</p>
+ <code>logger:error("The file does not exist: ~ts",[Filename])</code>
+ <p>Example, string:</p>
+ <code>logger:notice("Something strange happened!")</code>
+ <p>Example, report, and metadata with report callback:</p>
+ <code>
+logger:debug(#{got => connection_request, id => Id, state => State},
+ #{report_cb => fun(R) -> {"~p",[R]} end})</code>
+ <p>The log message can also be provided through a fun for lazy
+ evaluation. The fun is only evaluated if the primary log level
+ check passes, and is therefore recommended if it is expensive
+ to generate the message. The lazy fun must return a string, a
+ report, or a tuple with format string and arguments.</p>
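+ <p>Example, lazy message through a fun, where the potentially
+ expensive call is deferred to the fun. The fun is called with the
+ term given as the extra argument, here <c>self()</c>:</p>
+ <code>
+logger:debug(fun(Pid) -> {"Process info: ~tp", [erlang:process_info(Pid)]} end,
+             self())
+ </code>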
+ </section>
+ <section>
+ <title>Metadata</title>
+ <p>Metadata contains additional data associated with a log
+ message. Logger inserts some metadata fields by default, and
+ the client can add custom metadata in two different ways:</p>
+ <taglist>
+ <tag>Set process metadata</tag>
+ <item>
+ <p>Process metadata is set and updated
+ with <seealso marker="logger#set_process_metadata-1">
+ <c>logger:set_process_metadata/1</c></seealso>
+ and <seealso marker="logger#update_process_metadata-1">
+ <c>logger:update_process_metadata/1</c></seealso>,
+ respectively. This metadata applies to the process on
+ which these calls are made, and Logger adds the metadata
+ to all log events issued on that process.</p>
+ </item>
+ <tag>Add metadata to a specific log event</tag>
+ <item>
+ <p>Metadata associated with one specific log event is given
+ as the last parameter to the log macro or Logger API
+ function when the event is issued. For example:</p>
+ <code>?LOG_ERROR("Connection closed",#{context => server})</code>
+ </item>
+ </taglist>
+ <p>See the description of
+ the <seealso marker="logger#type-metadata">
+ <c>logger:metadata()</c></seealso> type for information
+ about which default keys Logger inserts, and how the different
+ metadata maps are merged.</p>
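+ <p>For example, to attach a request identifier to all subsequent
+ log events issued on the current process (<c>ReqId</c> is assumed
+ to be bound):</p>
+ <code>
+logger:set_process_metadata(#{request_id => ReqId}),
+%% ... later, on the same process:
+?LOG_NOTICE("request handled")
+ </code>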
+ </section>
+ </section>
+ <section>
+ <marker id="filters"/>
+ <title>Filters</title>
+ <p>Filters can be primary, or attached to a specific
+ handler. Logger calls the primary filters first, and if they all
+ pass, it calls the handler filters for each handler. Logger
+ calls the handler callback only if all filters attached to the
+ handler in question also pass.</p>
+ <p>A filter is defined as:</p>
+ <pre>{FilterFun, Extra}</pre>
+ <p>where <c>FilterFun</c> is a function of arity 2,
+ and <c>Extra</c> is any term. When applying the filter, Logger
+ calls the function with the log event as the first argument,
+ and the value of <c>Extra</c> as the second
+ argument. See <seealso marker="logger#type-filter">
+ <c>logger:filter()</c></seealso> for type definitions.</p>
+ <p>The filter function can return <c>stop</c>, <c>ignore</c> or
+ the (possibly modified) log event.</p>
+ <p>If <c>stop</c> is returned, the log event is immediately
+ discarded. If the filter is primary, no handler filters or
+ callbacks are called. If it is a handler filter, the
+ corresponding handler callback is not called, but the log event
+ is forwarded to filters attached to the next handler, if
+ any.</p>
+ <p>If the log event is returned, the next filter function is
+ called with the returned value as the first argument. That is,
+ if a filter function modifies the log event, the next filter
+ function receives the modified event. The value returned from
+ the last filter function is the value that the handler callback
+ receives.</p>
+ <p>If the filter function returns <c>ignore</c>, it means that it
+ did not recognize the log event, and thus leaves it to other
+ filters to decide the event's destiny.</p>
+ <p>The configuration option <c>filter_default</c> specifies the
+ behaviour if all filter functions return <c>ignore</c>, or if no
+ filters exist. <c>filter_default</c> is by default set
+ to <c>log</c>, meaning that if all existing filters ignore a log
+ event, Logger forwards the event to the handler
+ callback. If <c>filter_default</c> is set to <c>stop</c>, Logger
+ discards such events.</p>
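+ <p>The following sketch adds a primary filter that stops all log
+ events issued from one specific module and ignores everything
+ else. <c>my_noisy_module</c> is a placeholder name, and the
+ <c>mfa</c> field is only present when the event is issued through
+ the Logger macros:</p>
+ <code>
+Fun = fun(#{meta := #{mfa := {Module, _F, _A}}}, Module) -> stop;
+         (_LogEvent, _Extra) -> ignore
+      end,
+logger:add_primary_filter(stop_noisy, {Fun, my_noisy_module}).
+ </code>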
+ <p>Primary filters are added
+ with <seealso marker="logger#add_primary_filter-2">
+ <c>logger:add_primary_filter/2</c></seealso>
+ and removed
+ with <seealso marker="logger#remove_primary_filter-1">
+ <c>logger:remove_primary_filter/1</c></seealso>. They can also
+ be added at system start via the Kernel configuration
+ parameter <seealso marker="#logger_parameter"><c>logger</c></seealso>.</p>
+ <p>Handler filters are added
+ with <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso>
+ and removed
+ with <seealso marker="logger#remove_handler_filter-2">
+ <c>logger:remove_handler_filter/2</c></seealso>. They can also
+ be specified directly in the configuration when adding a handler
+ with <seealso marker="logger#add_handler-3">
+ <c>logger:add_handler/3</c></seealso>
+ or via the Kernel configuration
+ parameter <seealso marker="#logger_parameter"><c>logger</c></seealso>.</p>
+
+ <p>To see which filters are currently installed in the system,
+ use <seealso marker="logger#get_config-0">
+ <c>logger:get_config/0</c></seealso>,
+ or <seealso marker="logger#get_primary_config-0">
+ <c>logger:get_primary_config/0</c></seealso>
+ and <seealso marker="logger#get_handler_config-1">
+ <c>logger:get_handler_config/1</c></seealso>. Filters are
+ listed in the order they are applied, that is, the first
+ filter in the list is applied first, and so on.</p>
+
+ <p>For convenience, the following built-in filters exist:</p>
+
+ <taglist>
+ <tag><seealso marker="logger_filters#domain-2">
+ <c>logger_filters:domain/2</c></seealso></tag>
+ <item>
+ <p>Provides a way of filtering log events based on a
+ <c>domain</c> field in <c>Metadata</c>.</p>
+ </item>
+ <tag><seealso marker="logger_filters#level-2">
+ <c>logger_filters:level/2</c></seealso></tag>
+ <item>
+ <p>Provides a way of filtering log events based on the log
+ level.</p>
+ </item>
+ <tag><seealso marker="logger_filters#progress-2">
+ <c>logger_filters:progress/2</c></seealso></tag>
+ <item>
+ <p>Stops or allows progress reports from <c>supervisor</c>
+ and <c>application_controller</c>.</p>
+ </item>
+ <tag><seealso marker="logger_filters#remote_gl-2">
+ <c>logger_filters:remote_gl/2</c></seealso></tag>
+ <item>
+ <p>Stops or allows log events originating from a process
+ that has its group leader on a remote node.</p>
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <marker id="handlers"/>
+ <title>Handlers</title>
+ <p>A handler is defined as a module exporting at least the
+ following callback function:</p>
+
+ <pre><seealso marker="logger#HModule:log-2">log(LogEvent, Config) -> void()</seealso></pre>
+
+ <p>This function is called when a log event has passed through all
+ primary filters, and all handler filters attached to the handler
+ in question. The function call is executed on the client
+ process, and it is up to the handler implementation if other
+ processes are involved or not.</p>
+
+ <p>Logger allows adding multiple instances of a handler
+ callback. That is, if a callback module implementation allows
+ it, you can add multiple handler instances using the same
+ callback module. The different instances are identified by
+ unique handler identities.</p>
+
+ <p>In addition to the mandatory callback function <c>log/2</c>, a
+ handler module can export the optional callback
+ functions <c>adding_handler/1</c>, <c>changing_config/3</c>,
+ <c>filter_config/1</c>, and <c>removing_handler/1</c>. See
+ section <seealso marker="logger#handler_callback_functions">Handler
+ Callback Functions</seealso> in the logger(3) manual page for
+ more information about these functions.</p>
+
+ <p>The following built-in handlers exist:</p>
+
+ <taglist>
+ <tag><c>logger_std_h</c></tag>
+ <item>
+ <p>This is the default handler used by OTP. Multiple instances
+ can be started, and each instance will write log events to a
+ given destination, terminal or file.</p>
+ </item>
+
+ <tag><c>logger_disk_log_h</c></tag>
+ <item>
+ <p>This handler behaves much like <c>logger_std_h</c>, except it uses
+ <seealso marker="disk_log"><c>disk_log</c></seealso> as its
+ destination.</p>
+ </item>
+
+ <tag><marker id="ErrorLoggerManager"/><c>error_logger</c></tag>
+ <item>
+ <p>This handler is provided for backwards compatibility
+ only. It is not started by default, but will be
+ automatically started the first time an <c>error_logger</c>
+ event handler is added
+ with <seealso marker="error_logger#add_report_handler-1">
+ <c>error_logger:add_report_handler/1,2</c></seealso>.</p>
+
+ <p>The old <c>error_logger</c> event handlers in STDLIB and
+ SASL still exist, but they are not added by Erlang/OTP 21.0
+ or later.</p>
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <marker id="formatters"/>
+ <title>Formatters</title>
+ <p>A formatter can be used by the handler implementation to do the
+ final formatting of a log event, before printing to the
+ handler's destination. The handler callback receives the
+ formatter information as part of the handler configuration,
+ which is passed as the second argument
+ to <seealso marker="logger#HModule:log-2">
+ <c>HModule:log/2</c></seealso>.</p>
+ <p>The formatter information consists of a formatter
+ module, <c>FModule</c>, and its
+ configuration, <c>FConfig</c>. <c>FModule</c> must export the
+ following function, which can be called by the handler:</p>
+ <pre><seealso marker="logger#FModule:format-2">format(LogEvent,FConfig)
+ -> FormattedLogEntry</seealso></pre>
+ <p>The formatter information for a handler is set as a part of its
+ configuration when the handler is added. It can also be changed
+ during runtime
+ with <seealso marker="logger#set_handler_config-3">
+ <c>logger:set_handler_config(HandlerId,formatter,{FModule,FConfig})</c>
+ </seealso>, which overwrites the current formatter information,
+ or with <seealso marker="logger#update_formatter_config-2">
+ <c>logger:update_formatter_config/2,3</c></seealso>, which
+ only modifies the formatter configuration.</p>
+ <p>If the formatter module exports the optional callback
+ function <seealso marker="logger#FModule:check_config-1">
+ <c>check_config(FConfig)</c></seealso>, Logger calls this
+ function when the formatter information is set or modified, to
+ verify the validity of the formatter configuration.</p>
+ <p>If no formatter information is specified for a handler, Logger
+ uses <c>logger_formatter</c> as default. See
+ the <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>
+ manual page for more information about this module.</p>
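+ <p>For example, the default handler can be switched to single line
+ formatting during runtime:</p>
+ <code>logger:update_formatter_config(default, #{single_line => true}).</code>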
+ </section>
+
+ <section>
+ <title>Configuration</title>
+
+ <p>At system start, Logger is configured through Kernel
+ configuration parameters. The parameters that apply to Logger
+ are described in
+ section <seealso marker="#kernel_config_params">Kernel
+ Configuration Parameters</seealso>. Examples are found in
+ section <seealso marker="#config_examples">Configuration
+ Examples</seealso>.</p>
+ <p>During runtime, Logger configuration is changed via API
+ functions. See
+ section <seealso marker="logger#configuration_API">Configuration
+ API Functions</seealso> in the <c>logger(3)</c> manual page.</p>
+
+ <section>
+ <title>Primary Logger Configuration</title>
+ <p>Logger API functions that apply to the primary Logger
+ configuration are:</p>
+ <list>
+ <item><seealso marker="logger#get_primary_config-0">
+ <c>get_primary_config/0</c></seealso></item>
+ <item><seealso marker="logger#set_primary_config-1">
+ <c>set_primary_config/1,2</c></seealso></item>
+ <item><seealso marker="logger#update_primary_config-1">
+ <c>update_primary_config/1</c></seealso></item>
+ <item><seealso marker="logger#add_primary_filter-2">
+ <c>add_primary_filter/2</c></seealso></item>
+ <item><seealso marker="logger#remove_primary_filter-1">
+ <c>remove_primary_filter/1</c></seealso></item>
+ </list>
+ <p>The primary Logger configuration is a map with the following
+ keys:</p>
+ <taglist>
+ <tag><marker id="primary_level"/>
+ <c>level = </c><seealso marker="logger#type-level">
+ <c>logger:level()</c></seealso><c> | all | none</c></tag>
+ <item>
+ <p>Specifies the primary log level, that is, log events that
+ are equally or more severe than this level are forwarded
+ to the primary filters. Less severe log events are
+ immediately discarded.</p>
+ <p>See section <seealso marker="#log_level">Log
+ Level</seealso> for a listing and description of
+ possible log levels.</p>
+ <p>The initial value of this option is set by the Kernel
+ configuration parameter <seealso marker="#logger_level">
+ <c>logger_level</c></seealso>. It is changed during
+ runtime with <seealso marker="logger#set_primary_config-2">
+ <c>logger:set_primary_config(level,Level)</c></seealso>.</p>
+ <p>Defaults to <c>notice</c>.</p>
+ </item>
+ <tag><c>filters = [{FilterId,Filter}]</c></tag>
+ <item>
+ <p>Specifies the primary filters.</p>
+ <list>
+ <item><c>FilterId = </c><seealso marker="logger#type-filter_id">
+ <c>logger:filter_id()</c></seealso></item>
+ <item><c>Filter = </c><seealso marker="logger#type-filter">
+ <c>logger:filter()</c></seealso></item>
+ </list>
+ <p>The initial value of this option is set by the Kernel
+ configuration
+ parameter <seealso marker="#logger_parameter"><c>logger</c></seealso>.
+ During runtime, primary filters are added and removed with
+ <seealso marker="logger#add_primary_filter-2">
+ <c>logger:add_primary_filter/2</c></seealso> and
+ <seealso marker="logger#remove_primary_filter-1">
+ <c>logger:remove_primary_filter/1</c></seealso>,
+ respectively.</p>
+ <p>See section <seealso marker="#filters">Filters</seealso>
+ for more detailed information.</p>
+ <p>Defaults to <c>[]</c>.</p>
+ </item>
+ <tag><c>filter_default = log | stop</c></tag>
+ <item>
+ <p>Specifies what happens to a log event if all filters
+ return <c>ignore</c>, or if no filters exist.</p>
+ <p>See section <seealso marker="#filters">Filters</seealso>
+ for more information about how this option is used.</p>
+ <p>Defaults to <c>log</c>.</p>
+ </item>
+ </taglist>
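+ <p>For example, to change only the primary log level during
+ runtime, leaving the other primary configuration keys as they
+ are:</p>
+ <code>logger:update_primary_config(#{level => info}).</code>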
+ </section>
+
+ <section>
+ <marker id="handler_configuration"/>
+ <title>Handler Configuration</title>
+ <p>Logger API functions that apply to handler configuration
+ are:</p>
+ <list>
+ <item><seealso marker="logger#get_handler_config-0">
+ <c>get_handler_config/0,1</c></seealso></item>
+ <item><seealso marker="logger#set_handler_config-2">
+ <c>set_handler_config/2,3</c></seealso></item>
+ <item><seealso marker="logger#update_handler_config-2">
+ <c>update_handler_config/2,3</c></seealso></item>
+ <item><seealso marker="logger#add_handler_filter-3">
+ <c>add_handler_filter/3</c></seealso></item>
+ <item><seealso marker="logger#remove_handler_filter-2">
+ <c>remove_handler_filter/2</c></seealso></item>
+ <item><seealso marker="logger#update_formatter_config-2">
+ <c>update_formatter_config/2,3</c></seealso></item>
+ </list>
+ <p>The configuration for a handler is a map with the following keys:</p>
+ <taglist>
+ <tag><c>id = </c><seealso marker="logger#type-handler_id">
+ <c>logger:handler_id()</c></seealso></tag>
+ <item>
+ <p>Automatically inserted by Logger. The value is the same
+ as the <c>HandlerId</c> specified when adding the handler,
+ and it cannot be changed.</p>
+ </item>
+ <tag><c>module = module()</c></tag>
+ <item>
+ <p>Automatically inserted by Logger. The value is the same
+ as the <c>Module</c> specified when adding the handler,
+ and it cannot be changed.</p>
+ </item>
+ <tag><c>level = </c><seealso marker="logger#type-level">
+ <c>logger:level()</c></seealso><c> | all | none</c></tag>
+ <item>
+ <p>Specifies the log level for the handler, that is, log
+ events that are equally or more severe than this level
+ are forwarded to the handler filters for this
+ handler.</p>
+ <p>See section <seealso marker="#log_level">Log
+ Level</seealso> for a listing and description of
+ possible log levels.</p>
+ <p>The log level is specified when adding the handler, or
+ changed during runtime with, for
+ instance, <seealso marker="logger#set_handler_config-3">
+ <c>logger:set_handler_config(HandlerId,level,Level)</c></seealso>.
+ </p>
+ <p>Defaults to <c>all</c>.</p>
+ </item>
+ <tag><c>filters = [{FilterId,Filter}]</c></tag>
+ <item>
+ <p>Specifies the handler filters.</p>
+ <list>
+ <item><c>FilterId = </c><seealso marker="logger#type-filter_id">
+ <c>logger:filter_id()</c></seealso></item>
+ <item><c>Filter = </c><seealso marker="logger#type-filter">
+ <c>logger:filter()</c></seealso></item>
+ </list>
+ <p>Handler filters are specified when adding the handler,
+ or added or removed during runtime with
+ <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso> and
+ <seealso marker="logger#remove_handler_filter-2">
+ <c>logger:remove_handler_filter/2</c></seealso>,
+ respectively.</p>
+ <p>See <seealso marker="#filters">Filters</seealso> for more
+ detailed information.</p>
+ <p>Defaults to <c>[]</c>.</p>
+ </item>
+ <tag><c>filter_default = log | stop</c></tag>
+ <item>
+ <p>Specifies what happens to a log event if all filters
+ return <c>ignore</c>, or if no filters exist.</p>
+ <p>See section <seealso marker="#filters">Filters</seealso>
+ for more information about how this option is used.</p>
+ <p>Defaults to <c>log</c>.</p>
+ </item>
+ <tag><c>formatter = {FormatterModule,FormatterConfig}</c></tag>
+ <item>
+ <p>Specifies a formatter that the handler can use for
+ converting the log event term to a printable string.</p>
+ <list>
+ <item><c>FormatterModule = module()</c></item>
+ <item><c>FormatterConfig = </c>
+ <seealso marker="logger#type-formatter_config">
+ <c>logger:formatter_config()</c></seealso></item>
+ </list>
+ <p>The formatter information is specified when adding the
+ handler. The formatter configuration can be changed during
+ runtime
+ with <seealso marker="logger#update_formatter_config-2">
+ <c>logger:update_formatter_config/2,3</c></seealso>,
+ or the complete formatter information can be overwritten
+ with, for
+ instance, <seealso marker="logger#set_handler_config-3">
+ <c>logger:set_handler_config/3</c></seealso>.</p>
+ <p>See
+ section <seealso marker="#formatters">Formatters</seealso>
+ for more detailed information.</p>
+ <p>Defaults
+ to <c>{logger_formatter,DefaultFormatterConfig}</c>. See
+ the <seealso marker="logger_formatter">
+ <c>logger_formatter(3)</c></seealso> manual page for
+ information about this formatter and its default
+ configuration.</p>
+ </item>
+ <tag><c>config = term()</c></tag>
+ <item>
+ <p>Handler specific configuration, that is, configuration
+ data related to a specific handler implementation.</p>
+ <p>The configuration for the built-in handlers is described
+ in
+ the <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>
+ and
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c>
+ </seealso> manual pages.</p>
+ </item>
+ </taglist>
+
+ <p>Notice that <c>level</c> and <c>filters</c> are obeyed by
+ Logger itself before forwarding the log events to each
+ handler, while <c>formatter</c> and all handler specific
+ options are left to the handler implementation.</p>
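+ <p>For example, to read the current configuration of the default
+ handler, and then let only error and more severe events through
+ to it (other configuration keys are left untouched by the
+ update):</p>
+ <code>
+{ok, _Config} = logger:get_handler_config(default),
+logger:update_handler_config(default, #{level => error}).
+ </code>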
+ </section>
+
+ <section>
+ <marker id="kernel_config_params"/>
+ <title>Kernel Configuration Parameters</title>
+
+ <p>The following Kernel configuration parameters apply to
+ Logger:</p>
+ <taglist>
+ <tag><marker id="logger_parameter"/><c>logger = [Config]</c></tag>
+ <item>
+ <p>Specifies the configuration
+ for <seealso marker="logger">Logger</seealso>, except the
+ primary log level, which is specified
+ with <seealso marker="#logger_level"><c>logger_level</c></seealso>,
+ and the compatibility
+ with <seealso marker="sasl:error_logging">SASL Error
+ Logging</seealso>, which is specified
+ with <seealso marker="#logger_sasl_compatible">
+ <c>logger_sasl_compatible</c></seealso>.</p>
+ <p>With this parameter, you can modify or disable the default
+ handler, add custom handlers and primary logger filters, set
+ log levels per module, and modify
+ the <seealso marker="#proxy">proxy</seealso>
+ configuration.</p>
+ <p><c>Config</c> is any (zero or more) of the following:</p>
+ <taglist>
+ <tag><c>{handler, default, undefined}</c></tag>
+ <item>
+ <p>Disables the default handler. This allows another
+ application to add its own default handler.</p>
+ <p>Only one entry of this type is allowed.</p>
+ </item>
+ <tag><c>{handler, HandlerId, Module, HandlerConfig}</c></tag>
+ <item>
+ <p>If <c>HandlerId</c> is <c>default</c>, then this entry
+ modifies the default handler, equivalent to calling</p>
+ <pre><seealso marker="logger#remove_handler-1">
+ logger:remove_handler(default)
+ </seealso></pre>
+ <p>followed by</p>
+ <pre><seealso marker="logger#add_handler-3">
+ logger:add_handler(default, Module, HandlerConfig)
+ </seealso></pre>
+ <p>For all other values of <c>HandlerId</c>, this entry
+ adds a new handler, equivalent to calling</p>
+ <pre><seealso marker="logger#add_handler-3">
+ logger:add_handler(HandlerId, Module, HandlerConfig)
+ </seealso></pre>
+ <p>Multiple entries of this type are allowed.</p></item>
+ <tag><c>{filters, FilterDefault, [Filter]}</c></tag>
+ <item>
+ <p>Adds the specified primary filters.</p>
+ <list>
+ <item><c>FilterDefault = log | stop</c></item>
+ <item><c>Filter = {FilterId, {FilterFun, FilterConfig}}</c></item>
+ </list>
+ <p>Equivalent to calling</p>
+ <pre><seealso marker="logger#add_primary_filter-2">
+ logger:add_primary_filter(FilterId, {FilterFun, FilterConfig})
+ </seealso></pre>
+ <p>for each <c>Filter</c>.</p>
+ <p><c>FilterDefault</c> specifies the behaviour if all
+ primary filters return <c>ignore</c>, see
+ section <seealso marker="#filters">Filters</seealso>.</p>
+ <p>Only one entry of this type is allowed.</p>
+ </item>
+ <tag><c>{module_level, Level, [Module]}</c></tag>
+ <item>
+ <p>Sets module log level for the given modules. Equivalent
+ to calling</p>
+ <pre><seealso marker="logger#set_module_level-2">
+ logger:set_module_level(Module, Level)</seealso></pre>
+ <p>for each <c>Module</c>.</p>
+ <p>Multiple entries of this type are allowed.</p>
+ </item>
+ <tag><c>{proxy, ProxyConfig}</c></tag>
+ <item>
+ <p>Sets the proxy configuration, equivalent to calling</p>
+ <pre><seealso marker="logger#set_proxy_config-1">
+ logger:set_proxy_config(ProxyConfig)
+ </seealso></pre>
+ <p>Only one entry of this type is allowed.</p>
+ </item>
+ </taglist>
+ <p>See
+ section <seealso marker="#config_examples">Configuration
+ Examples</seealso> for examples using the <c>logger</c>
+ parameter for system configuration.</p>
+ </item>
+ <tag><marker id="logger_level"/>
+ <c>logger_level = Level</c></tag>
+ <item>
+ <p>Specifies the primary log level. See
+ the <seealso marker="kernel_app#logger_level"><c>kernel(6)</c></seealso>
+ manual page for more information about this parameter.</p>
+ </item>
+ <tag><marker id="logger_sasl_compatible"/>
+ <c>logger_sasl_compatible = true | false</c></tag>
+ <item>
+ <p>Specifies Logger's compatibility
+ with <seealso marker="sasl:error_logging">SASL Error
+ Logging</seealso>. See
+ the <seealso marker="kernel_app#logger_sasl_compatible">
+ <c>kernel(6)</c></seealso> manual page for more
+ information about this parameter.</p>
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <marker id="config_examples"/>
+ <title>Configuration Examples</title>
+ <p>The value of the Kernel configuration parameter <c>logger</c>
+ is a list of tuples. It is possible to write the term on the
+ command line when starting an Erlang node, but as the term
+ grows, a better approach is to use the system configuration
+ file. See
+ the <seealso marker="config"><c>config(4)</c></seealso> manual
+ page for more information about this file.</p>
+ <p>Each of the following examples shows a simple system
+ configuration file that configures Logger according to the
+ description.</p>
+ <p>Modify the default handler to print to a file instead of
+ <c>standard_io</c>:</p>
+ <code>
+[{kernel,
+ [{logger,
+ [{handler, default, logger_std_h, % {handler, HandlerId, Module,
+ #{config => #{file => "log/erlang.log"}}} % Config}
+ ]}]}].
+ </code>
+ <p>Modify the default handler to print each log event as a
+ single line:</p>
+ <code>
+[{kernel,
+ [{logger,
+ [{handler, default, logger_std_h,
+ #{formatter => {logger_formatter, #{single_line => true}}}}
+ ]}]}].
+ </code>
+ <p>Modify the default handler to print the pid of the logging
+ process for each log event:</p>
+ <code>
+[{kernel,
+ [{logger,
+ [{handler, default, logger_std_h,
+ #{formatter => {logger_formatter,
+ #{template => [time," ",pid," ",msg,"\n"]}}}}
+ ]}]}].
+ </code>
+ <p>Modify the default handler to only print errors and more
+ severe log events to "log/erlang.log", and add another handler
+ to print all log events to "log/debug.log":</p>
+ <code>
+[{kernel,
+ [{logger,
+ [{handler, default, logger_std_h,
+ #{level => error,
+ config => #{file => "log/erlang.log"}}},
+ {handler, info, logger_std_h,
+ #{level => debug,
+ config => #{file => "log/debug.log"}}}
+ ]}]}].
+ </code>
+ </section>
+
+ </section>
+
+ <section>
+ <marker id="compatibility"/>
+ <title>Backwards Compatibility with error_logger</title>
+ <p>Logger provides backwards compatibility with
+ <c>error_logger</c> in the following ways:</p>
+
+ <taglist>
+ <tag>API for Logging</tag>
+ <item>
+ <p>The <c>error_logger</c> API still exists, but should only
+ be used by legacy code. It will be removed in a later
+ release.</p>
+ <p>Calls
+ to <seealso marker="error_logger#error_report-1">
+ <c>error_logger:error_report/1,2</c></seealso>,
+ <seealso marker="error_logger#error_msg-1">
+ <c>error_logger:error_msg/1,2</c></seealso>, and
+ corresponding functions for warning and info messages, are
+ all forwarded to Logger as calls
+ to <seealso marker="logger#log-3">
+ <c>logger:log(Level,Report,Metadata)</c></seealso>.</p>
+ <p><c>Level = error | warning | info</c> and is taken
+ from the function name. <c>Report</c> contains the actual
+ log message, and <c>Metadata</c> contains additional
+ information which can be used for creating backwards
+ compatible events for legacy <c>error_logger</c> event
+ handlers, see
+ section <seealso marker="#legacy_event_handlers">Legacy
+ Event Handlers</seealso>.</p>
+ </item>
+ <tag>Output Format</tag>
+ <item>
+ <p>To get log events in the same format as produced
+ by <c>error_logger_tty_h</c> and <c>error_logger_file_h</c>,
+ use the default formatter, <c>logger_formatter</c>, with
+ configuration parameter <c>legacy_header</c> set
+ to <c>true</c>. This is the default configuration of
+ the <c>default</c> handler started by Kernel.</p>
+ </item>
+ <tag>Default Format of Log Events from OTP</tag>
+ <item>
+ <p>By default, all log events originating from within OTP,
+ except the former so-called "SASL reports", look the same as
+ before.</p>
+ </item>
+ <tag><marker id="sasl_reports"/>SASL Reports</tag>
+ <item>
+ <p>By SASL reports we mean supervisor reports, crash reports
+ and progress reports.</p>
+ <p>Prior to Erlang/OTP 21.0, these reports were only logged
+ when the SASL application was running, and they were printed
+ through SASL's own event handlers <c>sasl_report_tty_h</c>
+ and <c>sasl_report_file_h</c>.</p>
+ <p>The destination of these log events was configured by
+ <seealso marker="sasl:sasl_app#deprecated_error_logger_config">SASL
+ configuration parameters</seealso>.</p>
+ <p>Due to the specific event handlers, the output format
+ slightly differed from other log events.</p>
+ <p>As of Erlang/OTP 21.0, the concept of SASL reports is
+ removed, meaning that the default behaviour is as
+ follows:</p>
+ <list>
+ <item>Supervisor reports, crash reports, and progress reports
+ are no longer connected to the SASL application.</item>
+ <item>Supervisor reports and crash reports are issued
+ as <c>error</c> level log events, and are logged through
+ the default handler started by Kernel.</item>
+ <item>Progress reports are issued as <c>info</c> level log
+ events, and since the default primary log level
+ is <c>notice</c>, these are not logged by default. To
+ enable printing of progress reports, set
+ the <seealso marker="#primary_level">primary log
+ level</seealso> to <c>info</c>.</item>
+ <item>The output format is the same for all log
+ events.</item>
+ </list>
+ <p>If the old behaviour is preferred, the Kernel configuration
+ parameter <seealso marker="kernel_app#logger_sasl_compatible">
+ <c>logger_sasl_compatible</c></seealso> can be set
+ to <c>true</c>. The
+ <seealso marker="sasl:sasl_app#deprecated_error_logger_config">SASL
+ configuration parameters</seealso> can then be used as
+ before, and the SASL reports will only be printed if the
+ SASL application is running, through a second log handler
+ named <c>sasl</c>.</p>
+ <p>All SASL reports have a metadata field <c>domain</c> which
+ is set to <c>[otp,sasl]</c>. This field can be
+ used by filters to stop or allow the log events.</p>
+ <p>See section <seealso marker="sasl:error_logging">SASL User's
+ Guide</seealso> for more information about the old SASL
+ error logging functionality.</p>
+ </item>
+ <tag><marker id="legacy_event_handlers"/>Legacy Event Handlers</tag>
+ <item>
+ <p>To use event handlers written for <c>error_logger</c>, just
+ add your event handler with</p>
+ <code>
+error_logger:add_report_handler/1,2.
+ </code>
+ <p>This automatically starts the error logger event manager,
+ and adds <c>error_logger</c> as a handler to Logger, with
+ the following configuration:</p>
+<code>
+#{level => info,
+ filter_default => log,
+ filters => []}.
+</code>
+ <note>
+ <p>This handler ignores events that do not originate from
+ the <c>error_logger</c> API, or from within OTP. This
+ means that if your code uses the Logger API for logging,
+ then your log events will be discarded by this
+ handler.</p>
+ <p>The handler is not overload protected.</p>
+ </note>
+ </item>
+ </taglist>
+ </section>
+
+
+ <section>
+ <title>Error Handling</title>
+ <p>Logger does, to a certain extent, check its input data before
+ forwarding a log event to filters and handlers. It does not,
+ however, evaluate report callbacks or check the validity of
+ format strings and arguments. This means that all filters and
+ handlers must be careful when formatting the data of a log
+ event, making sure that it does not crash due to bad input data
+ or faulty callbacks.</p>
+ <p>If a filter or handler still crashes, Logger will remove the
+ filter or handler in question from the configuration, and print
+ a short error message to the terminal. A debug event containing
+ the crash reason and other details is also issued.</p>
+ <p>See section <seealso marker="#log_message">Log
+ Message</seealso> for more information about report callbacks
+ and valid forms of log messages.</p>
+ </section>
+
+ <section>
+ <title>Example: Add a handler to log info events to file</title>
+ <p>When starting an Erlang node, the default behaviour is that all
+ log events on level <c>notice</c> or more severe, are logged to
+ the terminal via the default handler. To also log info events,
+ you can either change the primary log level to <c>info</c>:</p>
+ <pre>
+1> <input>logger:set_primary_config(level, info).</input>
+ok</pre>
+ <p>or set the level for one or a few modules only:</p>
+ <pre>
+2> <input>logger:set_module_level(mymodule, info).</input>
+ok</pre>
+ <p>This allows info events to pass through to the default handler,
+ and be printed to the terminal as well. If there are many info
+ events, it can be useful to print these to a file instead.</p>
+ <p>First, set the log level of the default handler
+ to <c>notice</c>, preventing it from printing info events to the
+ terminal:</p>
+ <pre>
+3> <input>logger:set_handler_config(default, level, notice).</input>
+ok</pre>
+ <p>Then, add a new handler which prints to file. You can use the
+ handler
+ module <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>,
+ and configure it to log to file:</p>
+ <pre>
+4> <input>Config = #{config => #{file => "./info.log"}, level => info}.</input>
+#{config => #{file => "./info.log"},level => info}
+5> <input>logger:add_handler(myhandler, logger_std_h, Config).</input>
+ok</pre>
+ <p>Since <c>filter_default</c> defaults to <c>log</c>, this
+ handler now receives all log events. If you want info events
+ only in the file, you must add a filter to stop all non-info
+ events. The built-in
+ filter <seealso marker="logger_filters#level-2">
+ <c>logger_filters:level/2</c></seealso>
+ can do this:</p>
+ <pre>
+6> <input>logger:add_handler_filter(myhandler, stop_non_info,
+ {fun logger_filters:level/2, {stop, neq, info}}).</input>
+ok</pre>
+ <p>See section <seealso marker="#filters">Filters</seealso> for
+ more information about the filters and the <c>filter_default</c>
+ configuration parameter.</p>
+
+ </section>
+
+ <section>
+ <title>Example: Implement a handler</title>
+ <p>Section <seealso marker="logger#handler_callback_functions">Handler
+ Callback Functions</seealso> in the logger(3) manual page
+ describes the callback functions that can be implemented for a
+ Logger handler.</p>
+ <p>A handler callback module must export:</p>
+ <list>
+ <item><c>log(Log, Config)</c></item>
+ </list>
+ <p>It can optionally also export some, or all, of the following:</p>
+ <list>
+ <item><c>adding_handler(Config)</c></item>
+ <item><c>removing_handler(Config)</c></item>
+ <item><c>changing_config(SetOrUpdate, OldConfig, NewConfig)</c></item>
+ <item><c>filter_config(Config)</c></item>
+ </list>
+ <p>When a handler is added, by for example a call
+ to <seealso marker="logger#add_handler-3">
+ <c>logger:add_handler(Id, HModule, Config)</c></seealso>,
+ Logger first calls <c>HModule:adding_handler(Config)</c>. If
+ this function returns <c>{ok,Config1}</c>, Logger
+ writes <c>Config1</c> to the configuration database, and
+ the <c>logger:add_handler/3</c> call returns. After this, the
+ handler is installed and must be ready to receive log events as
+ calls to <c>HModule:log/2</c>.</p>
+ <p>A handler can be removed by calling
+ <seealso marker="logger#remove_handler-1">
+ <c>logger:remove_handler(Id)</c></seealso>. Logger calls
+ <c>HModule:removing_handler(Config)</c>, and removes the
+ handler's configuration from the configuration database.</p>
+ <p>When <seealso marker="logger#set_handler_config-2">
+ <c>logger:set_handler_config/2,3</c></seealso>
+ or <seealso marker="logger#update_handler_config-2">
+ <c>logger:update_handler_config/2,3</c></seealso> is called,
+ Logger
+ calls <c>HModule:changing_config(SetOrUpdate, OldConfig, NewConfig)</c>. If
+ this function returns <c>{ok,NewConfig1}</c>, Logger
+ writes <c>NewConfig1</c> to the configuration database.</p>
+ <p>When <seealso marker="logger#get_config-0">
+ <c>logger:get_config/0</c></seealso> or
+ <seealso marker="logger#get_handler_config-0">
+ <c>logger:get_handler_config/0,1</c></seealso> is called,
+ Logger calls <c>HModule:filter_config(Config)</c>. This function
+ must return the handler configuration where internal data is
+ removed.</p>
+
+ <p>A simple handler that prints to the terminal can be implemented
+ as follows:</p>
+ <code>
+-module(myhandler1).
+-export([log/2]).
+
+log(LogEvent, #{formatter := {FModule, FConfig}}) ->
+ io:put_chars(FModule:format(LogEvent, FConfig)).
+ </code>
+
+ <p>Notice that the above handler does not have any overload
+ protection, and all log events are printed directly from the
+ client process.</p>
+ <p>For information and examples of overload protection, please
+ refer to
+ section <seealso marker="#overload_protection">Protecting the
+ Handler from Overload</seealso>, and the implementation
+ of <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>
+ and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c>
+ </seealso>.</p>
+ <p>The following is a simpler example than those handlers; it logs
+ to a file through one single process:</p>
+ <code>
+-module(myhandler2).
+-export([adding_handler/1, removing_handler/1, log/2]).
+-export([init/1, handle_call/3, handle_cast/2, terminate/2]).
+
+adding_handler(Config) ->
+ MyConfig = maps:get(config,Config,#{file => "myhandler2.log"}),
+ {ok, Pid} = gen_server:start(?MODULE, MyConfig, []),
+ {ok, Config#{config => MyConfig#{pid => Pid}}}.
+
+removing_handler(#{config := #{pid := Pid}}) ->
+ gen_server:stop(Pid).
+
+log(LogEvent,#{config := #{pid := Pid}} = Config) ->
+ gen_server:cast(Pid, {log, LogEvent, Config}).
+
+init(#{file := File}) ->
+ {ok, Fd} = file:open(File, [append, {encoding, utf8}]),
+ {ok, #{file => File, fd => Fd}}.
+
+handle_call(_, _, State) ->
+ {reply, {error, bad_request}, State}.
+
+handle_cast({log, LogEvent, Config}, #{fd := Fd} = State) ->
+ do_log(Fd, LogEvent, Config),
+ {noreply, State}.
+
+terminate(_Reason, #{fd := Fd}) ->
+ _ = file:close(Fd),
+ ok.
+
+do_log(Fd, LogEvent, #{formatter := {FModule, FConfig}}) ->
+ String = FModule:format(LogEvent, FConfig),
+ io:put_chars(Fd, String).
+ </code>
+ </section>
+
+ <section>
+ <marker id="overload_protection"/>
+ <title>Protecting the Handler from Overload</title>
+ <p>The default handlers, <seealso marker="logger_std_h">
+ <c>logger_std_h</c></seealso> and <seealso marker="logger_disk_log_h">
+ <c>logger_disk_log_h</c></seealso>, feature an overload protection
+ mechanism, which makes it possible for the handlers to survive,
+ and stay responsive, during periods of high load (when huge
+ numbers of incoming log requests must be handled).
+ The mechanism works as follows:</p>
+
+ <section>
+ <title>Message Queue Length</title>
+ <p>The handler process keeps track of the length of its message
+ queue and takes some form of action when the current length exceeds a
+ configurable threshold. The purpose is to keep the handler in, or
+ quickly return it to, a state where it can
+ keep up with the pace of incoming log events. The memory use of the
+ handler must never be allowed to grow without bound, since that will
+ eventually cause the handler to crash. These three thresholds, with associated
+ actions, exist:</p>
+
+ <taglist>
+ <tag><c>sync_mode_qlen</c></tag>
+ <item>
+ <p>As long as the length of the message queue is lower than this
+ value, all log events are handled asynchronously. This means that
+ the client process sending the log event, by calling a log function
+ in the <seealso marker="logger_chapter#logger_api">Logger API</seealso>,
+ does not wait for a response from the handler but continues
+ executing immediately after the event is sent. It is not affected
+ by the time it takes the handler to print the event to the log
+ device. If the message queue grows larger than this value,
+ the handler starts handling log events synchronously instead,
+ meaning that the client process sending the event must wait for a
+ response. When the handler reduces the message queue to a
+ level below the <c>sync_mode_qlen</c> threshold, asynchronous
+ operation is resumed. The switch from asynchronous to synchronous
+ mode can slow down the logging tempo of one, or a few, busy senders,
+ but cannot protect the handler sufficiently in a situation of many
+ busy concurrent senders.</p>
+ <p>Defaults to <c>10</c> messages.</p>
+ </item>
+ <tag><c>drop_mode_qlen</c></tag>
+ <item>
+ <p>When the message queue grows larger than this threshold, the
+ handler switches to a mode in which it drops all new events that
+ senders want to log. Dropping an event in this mode means that the
+ call to the log function never results in a message being sent to
+ the handler, but the function returns without taking any action.
+ The handler keeps logging the events that are already in its message
+ queue, and when the length of the message queue is reduced to a level
+ below the threshold, synchronous or asynchronous mode is resumed.
+ Notice that when the handler activates or deactivates drop mode,
+ information about it is printed in the log.</p>
+ <p>Defaults to <c>200</c> messages.</p>
+ </item>
+ <tag><c>flush_qlen</c></tag>
+ <item>
+ <p>If the length of the message queue grows larger than this threshold,
+ a flush (delete) operation takes place. To flush events, the handler
+ discards the messages in the message queue by receiving them in a
+ loop without logging. Client processes waiting for a response from a
+ synchronous log request receive a reply from the handler indicating
+ that the request is dropped. The handler process increases its
+ priority during the flush loop to make sure that no new events
+ are received during the operation. Notice that after the flush operation
+ is performed, the handler prints information in the log about how many
+ events have been deleted.</p>
+ <p>Defaults to <c>1000</c> messages.</p>
+ </item>
+ </taglist>
+
+ <p>For the overload protection algorithm to work properly, it is
+ required that:</p>
+
+ <p><c>sync_mode_qlen =&lt; drop_mode_qlen =&lt; flush_qlen</c></p>
+
+ <p>and that:</p>
+
+ <p><c>drop_mode_qlen &gt; 1</c></p>
+
+ <p>To disable certain modes, do the following:</p>
+ <list>
+ <item>If <c>sync_mode_qlen</c> is set to <c>0</c>, all log events are handled
+ synchronously. That is, asynchronous logging is disabled.</item>
+ <item>If <c>sync_mode_qlen</c> is set to the same value as
+ <c>drop_mode_qlen</c>, synchronous mode is disabled. That is, the handler
+ always runs in asynchronous mode, unless dropping or flushing is invoked.</item>
+ <item>If <c>drop_mode_qlen</c> is set to the same value as <c>flush_qlen</c>,
+ drop mode is disabled and can never occur.</item>
+ </list>
+
+ <p>During high load scenarios, the length of the handler message queue
+ rarely grows in a linear and predictable way. Instead, whenever the
+ handler process is scheduled in, it can have an almost arbitrary number
+ of messages waiting in the message queue. It is for this reason that the overload
+ protection mechanism is focused on acting quickly, and quite drastically,
+ such as immediately dropping or flushing messages, when a large queue length
+ is detected.</p>
+
+ <p>The values of the previously listed thresholds can be specified by the user.
+ This way, a handler can be configured to, for example, not drop or flush
+ messages unless the message queue length of the handler process grows extremely
+ large. Notice that large amounts of memory can be required for the node under such
+ circumstances. Another example of user configuration is when, for performance
+ reasons, the client processes must never be blocked by synchronous log requests.
+      In such a case, dropping or flushing events may still be acceptable, since
+      it does not affect the performance of the client processes sending the log events.</p>
+
+ <p>A configuration example:</p>
+ <code type="none">
+logger:add_handler(my_standard_h, logger_std_h,
+ #{config => #{file => "./system_info.log",
+ sync_mode_qlen => 100,
+ drop_mode_qlen => 1000,
+ flush_qlen => 2000}}).
+ </code>
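+
+      <p>As a sketch of the second scenario mentioned above, where client
+      processes must never be blocked by synchronous log requests,
+      synchronous mode can be disabled by setting <c>sync_mode_qlen</c>
+      to the same value as <c>drop_mode_qlen</c>. The handler name and
+      file name below are examples only:</p>
+      <code type="none">
+logger:add_handler(my_async_h, logger_std_h,
+                   #{config => #{file => "./async_info.log",
+                                 sync_mode_qlen => 500,
+                                 drop_mode_qlen => 500,
+                                 flush_qlen => 2000}}).
+      </code>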
+ </section>
+
+ <section>
+ <title>Controlling Bursts of Log Requests</title>
+      <p>Large bursts of log events (many events received by the handler
+      within a short period of time) can potentially cause problems, such as:</p>
+ <list>
+ <item>Log files grow very large, very quickly.</item>
+ <item>Circular logs wrap too quickly so that important data is overwritten.</item>
+ <item>Write buffers grow large, which slows down file sync operations.</item>
+ </list>
+
+ <p>For this reason, both built-in handlers offer the possibility to specify the
+ maximum number of events to be handled within a certain time frame.
+ With this burst control feature enabled, the handler can avoid choking the log with
+ massive amounts of printouts. The configuration parameters are:</p>
+ <taglist>
+ <tag><c>burst_limit_enable</c></tag>
+ <item>
+ <p>Value <c>true</c> enables burst control and <c>false</c> disables it.</p>
+ <p>Defaults to <c>true</c>.</p>
+ </item>
+ <tag><c>burst_limit_max_count</c></tag>
+ <item>
+ <p>This is the maximum number of events to handle within a
+ <c>burst_limit_window_time</c> time frame. After the limit is
+ reached, successive events are dropped until the end of the time frame.</p>
+ <p>Defaults to <c>500</c> events.</p>
+ </item>
+ <tag><c>burst_limit_window_time</c></tag>
+ <item>
+ <p>See the previous description of <c>burst_limit_max_count</c>.</p>
+ <p>Defaults to <c>1000</c> milliseconds.</p>
+ </item>
+ </taglist>
+
+ <p>A configuration example:</p>
+ <code type="none">
+logger:add_handler(my_disk_log_h, logger_disk_log_h,
+ #{config => #{file => "./my_disk_log",
+ burst_limit_enable => true,
+ burst_limit_max_count => 20,
+ burst_limit_window_time => 500}}).
+ </code>
+ </section>
+
+ <section>
+ <title>Terminating an Overloaded Handler</title>
+      <p>Even if a handler can successfully manage peaks of high load
+      without crashing, it can build up a large message queue, or use a
+      large amount of memory. The overload protection mechanism includes
+      an automatic termination and restart feature to guarantee that a
+      handler does not grow out of bounds. The feature is configured
+      with the following parameters:</p>
+ <taglist>
+ <tag><c>overload_kill_enable</c></tag>
+ <item>
+ <p>Value <c>true</c> enables the feature and <c>false</c> disables it.</p>
+ <p>Defaults to <c>false</c>.</p>
+ </item>
+ <tag><c>overload_kill_qlen</c></tag>
+ <item>
+ <p>This is the maximum allowed queue length. If the message queue grows
+ larger than this, the handler process is terminated.</p>
+ <p>Defaults to <c>20000</c> messages.</p>
+ </item>
+ <tag><c>overload_kill_mem_size</c></tag>
+ <item>
+ <p>This is the maximum memory size that the handler process is allowed to use.
+ If the handler grows larger than this, the process is terminated.</p>
+ <p>Defaults to <c>3000000</c> bytes.</p>
+ </item>
+ <tag><c>overload_kill_restart_after</c></tag>
+ <item>
+ <p>If the handler is terminated, it restarts automatically after a
+ delay specified in milliseconds. The value <c>infinity</c> prevents
+ restarts.</p>
+ <p>Defaults to <c>5000</c> milliseconds.</p>
+ </item>
+ </taglist>
+      <p>If the handler process is terminated because of overload, it prints
+      information about it in the log. It also prints information when a
+      restart has taken place and the handler is back in action.</p>
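+      <p>A configuration example, where the handler is terminated and
+      later restarted if its message queue grows too long. The handler
+      name and file name are examples only:</p>
+      <code type="none">
+logger:add_handler(my_protected_h, logger_std_h,
+                   #{config => #{file => "./info.log",
+                                 overload_kill_enable => true,
+                                 overload_kill_qlen => 100000,
+                                 overload_kill_restart_after => 10000}}).
+      </code>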
+ <note>
+ <p>The sizes of the log events affect the memory needs of the handler.
+ For information about how to limit the size of log events, see the
+ <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>
+ manual page.</p>
+ </note>
+ </section>
+ </section>
+
+ <section>
+ <marker id="proxy"/>
+ <title>Logger Proxy</title>
+ <p>The Logger proxy is an Erlang process which is part of the
+ Kernel application's supervision tree. During startup, the proxy
+ process registers itself as the <c>system_logger</c>, meaning
+ that log events produced by the emulator are sent to this
+ process.</p>
+ <p>When a log event is issued on a process which has its group
+ leader on a remote node, Logger automatically forwards the log
+ event to the group leader's node. To achieve this, it first
+ sends the log event as an Erlang message from the original
+ client process to the proxy on the local node, and the proxy in
+ turn forwards the event to the proxy on the remote node.</p>
+ <p>When receiving a log event, either from the emulator or from a
+ remote node, the proxy calls the Logger API to log the event.</p>
+ <p>The proxy process is overload protected in the same way as
+ described in
+ section <seealso marker="#overload_protection">Protecting the
+ Handler from Overload</seealso>, but with the following default
+ values:</p>
+ <code>
+ #{sync_mode_qlen => 500,
+ drop_mode_qlen => 1000,
+ flush_qlen => 5000,
+ burst_limit_enable => false,
+ overload_kill_enable => false}</code>
+ <p>For log events from the emulator, synchronous message passing
+ mode is not applicable, since all messages are passed
+ asynchronously by the emulator. Drop mode is achieved by setting
+ the <c>system_logger</c> to <c>undefined</c>, forcing the
+ emulator to drop events until it is set back to the proxy pid
+ again.</p>
+ <p>The proxy uses <seealso marker="erts:erlang#send_nosuspend/2">
+ <c>erlang:send_nosuspend/2</c></seealso> when sending log
+ events to a remote node. If the message could not be sent
+ without suspending the sender, it is dropped. This is to avoid
+ blocking the proxy process.</p>
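+    <p>If these defaults do not suit the application, the proxy
+      configuration can be adjusted. The following is only a sketch,
+      assuming that the proxy configuration functions, such
+      as <c>logger:update_proxy_config/1</c>, are available in the
+      OTP version at hand:</p>
+    <code>
+%% Raise the drop mode threshold of the proxy.
+logger:update_proxy_config(#{drop_mode_qlen => 2000}).</code>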
+ </section>
+
+ <section>
+ <title>See Also</title>
+ <p>
+ <seealso marker="disk_log"><c>disk_log(3)</c></seealso>,
+ <seealso marker="erts:erlang"><c>erlang(3)</c></seealso>,
+ <seealso marker="error_logger"><c>error_logger(3)</c></seealso>,
+ <seealso marker="logger"><c>logger(3)</c></seealso>,
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c></seealso>,
+ <seealso marker="logger_filters"><c>logger_filters(3)</c></seealso>,
+ <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>,
+ <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>,
+ <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso></p>
+ </section>
+</chapter>
diff --git a/lib/kernel/doc/src/logger_disk_log_h.xml b/lib/kernel/doc/src/logger_disk_log_h.xml
new file mode 100644
index 0000000000..5b2374690e
--- /dev/null
+++ b/lib/kernel/doc/src/logger_disk_log_h.xml
@@ -0,0 +1,168 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_disk_log_h</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_disk_log_h.xml</file>
+ </header>
+ <module since="OTP 21.0">logger_disk_log_h</module>
+ <modulesummary>A disk_log based handler for Logger</modulesummary>
+
+ <description>
+ <p>This is a handler for Logger that offers circular
+ (wrapped) logs by using <seealso marker="disk_log"><c>disk_log</c></seealso>.
+ Multiple instances of this handler can be added to Logger, and each instance
+ prints to its own disk log file, created with the name and settings specified
+ in the handler configuration.</p>
+ <p>The default standard handler,
+ <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>, can be
+ replaced by a disk_log handler at startup of the Kernel application.
+ See an example of this below.</p>
+ <p>The handler has an overload protection mechanism that keeps the handler
+ process and the Kernel application alive during high loads of log
+ events. How overload protection works, and how to configure it, is
+ described in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+ <p>To add a new instance of the disk_log handler, use
+ <seealso marker="logger#add_handler-3"><c>logger:add_handler/3</c>
+ </seealso>. The handler configuration argument is a map which can contain
+ general configuration parameters, as documented in the
+ <seealso marker="logger_chapter#handler_configuration"><c>User's Guide</c>
+ </seealso>, and handler specific parameters. The specific data
+ is stored in a sub map with the key <c>config</c>, and can contain the
+ following parameters:</p>
+ <taglist>
+ <tag><c>file</c></tag>
+ <item>
+ <p>This is the full name of the disk log file. The option
+ corresponds to the <c>name</c> property in the
+ <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
+ datatype.</p>
+ <p>The value is set when the handler is added, and it can not
+ be changed in runtime.</p>
+ <p>Defaults to the same name as the handler identity, in the
+ current directory.</p>
+ </item>
+ <tag><c>type</c></tag>
+ <item>
+ <p>This is the disk log type, <c>wrap</c> or <c>halt</c>. The option
+ corresponds to the <c>type</c> property in the
+ <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
+ datatype.</p>
+ <p>The value is set when the handler is added, and it can not
+ be changed in runtime.</p>
+ <p>Defaults to <c>wrap</c>.</p>
+ </item>
+ <tag><c>max_no_files</c></tag>
+ <item>
+ <p>This is the maximum number of files that disk_log uses
+ for its circular logging. The option
+ corresponds to the <c>MaxNoFiles</c> element in the <c>size</c> property in the
+ <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
+ datatype.</p>
+ <p>The value is set when the handler is added, and it can not
+ be changed in runtime.</p>
+ <p>Defaults to <c>10</c>.</p>
+ <p>The setting has no effect on a halt log.</p>
+ </item>
+ <tag><c>max_no_bytes</c></tag>
+ <item>
+ <p>This is the maximum number of bytes that is written to
+ a log file before disk_log proceeds with the next file in order, or
+ generates an error in case of a full halt log. The option
+ corresponds to the <c>MaxNoBytes</c> element in the <c>size</c> property in the
+ <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
+ datatype.</p>
+ <p>The value is set when the handler is added, and it can not
+ be changed in runtime.</p>
+ <p>Defaults to <c>1048576</c> bytes for a wrap log, and
+ <c>infinity</c> for a halt log.</p>
+ </item>
+ <tag><c>filesync_repeat_interval</c></tag>
+ <item>
+ <p>This value, in milliseconds, specifies how often the handler does
+ a disk_log sync operation to write buffered data to disk. The handler attempts
+ the operation repeatedly, but only performs a new sync if something has
+ actually been logged.</p>
+ <p>Defaults to <c>5000</c> milliseconds.</p>
+	 <p>If the value is set to <c>no_repeat</c>, the repeated sync operation
+	   is disabled. The user can also call the
+ <seealso marker="logger_disk_log_h#filesync-1"><c>filesync/1</c>
+ </seealso> function to perform a disk_log sync.</p>
+ </item>
+ </taglist>
+ <p>Other configuration parameters exist, to be used for customizing
+ the overload protection behaviour. The same parameters are used both in the
+ standard handler and the disk_log handler, and are documented in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+ <p>Notice that when changing the configuration of the handler in runtime, the
+ disk_log options (<c>file</c>, <c>type</c>, <c>max_no_files</c>,
+ <c>max_no_bytes</c>) must not be modified.</p>
+ <p>Example of adding a disk_log handler:</p>
+ <code type="none">
+logger:add_handler(my_disk_log_h, logger_disk_log_h,
+ #{config => #{file => "./my_disk_log",
+ type => wrap,
+ max_no_files => 4,
+                                 max_no_bytes => 10000,
+                                 filesync_repeat_interval => 1000}}).
+ </code>
+ <p>To use the disk_log handler instead of the default standard
+ handler when starting an Erlang node, change the Kernel default logger to
+ use <c>logger_disk_log_h</c>. Example:</p>
+ <code type="none">
+erl -kernel logger '[{handler,default,logger_disk_log_h,
+ #{config => #{file => "./system_disk_log"}}}]'
+ </code>
+ </description>
+
+ <funcs>
+
+ <func>
+ <name name="filesync" arity="1" clause_i="1" since="OTP 21.0"/>
+ <fsummary>Writes buffered data to disk.</fsummary>
+ <desc>
+ <p>Write buffered data to disk.</p>
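+      <p>For example, to make sure that everything logged by the handler
+        added in the example above has been written to disk (the handler
+        name is an example only):</p>
+      <code type="none">
+logger_disk_log_h:filesync(my_disk_log_h).
+      </code>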
+ </desc>
+ </func>
+
+ </funcs>
+
+ <section>
+ <title>See Also</title>
+ <p><seealso marker="logger"><c>logger(3)</c></seealso>,
+ <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>,
+ <seealso marker="disk_log"><c>disk_log(3)</c></seealso></p>
+ </section>
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_filters.xml b/lib/kernel/doc/src/logger_filters.xml
new file mode 100644
index 0000000000..0a02342864
--- /dev/null
+++ b/lib/kernel/doc/src/logger_filters.xml
@@ -0,0 +1,254 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_filters</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_filters.xml</file>
+ </header>
+ <module since="OTP 21.0">logger_filters</module>
+ <modulesummary>Filters to use with Logger.</modulesummary>
+
+ <description>
+ <p>All functions exported from this module can be used as primary
+ or handler
+ filters. See <seealso marker="logger#add_primary_filter-2">
+ <c>logger:add_primary_filter/2</c></seealso>
+ and <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso> for more information
+ about how filters are added.</p>
+ <p>Filters are removed with <seealso marker="logger#remove_primary_filter-1">
+ <c>logger:remove_primary_filter/1</c></seealso>
+ and <seealso marker="logger#remove_handler_filter-2">
+ <c>logger:remove_handler_filter/2</c></seealso>.</p>
+ </description>
+
+ <funcs>
+ <func>
+ <name name="domain" arity="2" since="OTP 21.0"/>
+ <fsummary>Filter log events based on the domain field in
+ metadata.</fsummary>
+ <desc>
+ <p>This filter provides a way of filtering log events based on a
+ <c>domain</c> field in <c>Metadata</c>. This field is
+ optional, and the purpose of using it is to group log events
+ from, for example, a specific functional area. This allows
+ filtering or other specialized treatment in a Logger
+ handler.</p>
+
+ <p>A domain field must be a list of atoms, creating smaller
+ and more specialized domains as the list grows longer. The
+ greatest domain is <c>[]</c>, which comprises all possible
+ domains.</p>
+
+ <p>For example, consider the following domains:</p>
+ <pre>
+D1 = [otp]
+D2 = [otp, sasl]</pre>
+
+ <p><c>D1</c> is the greatest of the two, and is said to be a
+ super-domain of <c>D2</c>. <c>D2</c> is a
+	  sub-domain of <c>D1</c>. Both <c>D1</c> and <c>D2</c> are
+ sub-domains of <c>[]</c>.</p>
+
+ <p>The above domains are used for logs originating from
+ Erlang/OTP. D1 specifies that the log event comes from
+ Erlang/OTP in general, and D2 indicates that the log event
+ is a so
+ called <seealso marker="logger_chapter#sasl_reports">SASL
+ report</seealso>.</p>
+
+ <p>The <c><anno>Extra</anno></c> parameter to
+ the <c>domain/2</c> function is specified when adding the
+ filter via <seealso marker="logger#add_primary_filter-2">
+ <c>logger:add_primary_filter/2</c></seealso>
+ or <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso>.</p>
+
+ <p>The filter compares the value of the <c>domain</c> field in
+ the log event's metadata (<c>Domain</c>) against
+ <c><anno>MatchDomain</anno></c>. The filter matches if the
+ value of <c>Compare</c> is:</p>
+
+ <taglist>
+ <tag><c>sub</c></tag>
+ <item>
+ <p>and <c>Domain</c> is equal to or a sub-domain
+ of <c>MatchDomain</c>, that is, if <c>MatchDomain</c> is
+ a prefix of <c>Domain</c>.</p>
+ </item>
+ <tag><c>super</c></tag>
+ <item>
+ <p>and <c>Domain</c> is equal to or a super-domain
+ of <c>MatchDomain</c>, that is, if <c>Domain</c> is a
+ prefix of <c>MatchDomain</c>.</p>
+ </item>
+ <tag><c>equal</c></tag>
+ <item>
+ <p>and <c>Domain</c> is equal to <c>MatchDomain</c>.</p>
+ </item>
+ <tag><c>not_equal</c></tag>
+ <item>
+ <p>and <c>Domain</c> differs from <c>MatchDomain</c>, or
+ if there is no domain field in metadata.</p>
+ </item>
+ <tag><c>undefined</c></tag>
+ <item>
+ <p>and there is no domain field in metadata. In this
+ case <c><anno>MatchDomain</anno></c> must be set
+ to <c>[]</c>.</p>
+ </item>
+ </taglist>
+
+ <p>If the filter matches and <c><anno>Action</anno></c> is
+ <c>log</c>, the log event is allowed. If the filter matches
+ and <c><anno>Action</anno></c> is <c>stop</c>, the log event
+ is stopped.</p>
+
+ <p>If the filter does not match, it returns <c>ignore</c>,
+ meaning that other filters, or the value of the
+ configuration parameter <c>filter_default</c>, decide if the
+ event is allowed or not.</p>
+
+	<p>Log events that do not contain any domain field match only
+ when <c><anno>Compare</anno></c> is equal
+ to <c>undefined</c> or <c>not_equal</c>.</p>
+
+ <p>Example: stop all events with domain <c>[otp,
+ sasl | _]</c></p>
+
+ <code>
+logger:set_handler_config(h1, filter_default, log). % this is the default
+Filter = {fun logger_filters:domain/2, {stop, sub, [otp, sasl]}}.
+logger:add_handler_filter(h1, no_sasl, Filter).
+ok</code>
+ </desc>
+ </func>
+
+ <func>
+ <name name="level" arity="2" since="OTP 21.0"/>
+ <fsummary>Filter log events based on the log level.</fsummary>
+ <desc>
+ <p>This filter provides a way of filtering log events based
+ on the log level. It matches log events by comparing the
+        log level with a specified <c>MatchLevel</c>.</p>
+
+ <p>The <c><anno>Extra</anno></c> parameter is specified when
+ adding the filter
+ via <seealso marker="logger#add_primary_filter-2">
+ <c>logger:add_primary_filter/2</c></seealso>
+ or <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso>.</p>
+
+ <p>The filter compares the value of the event's log level
+ (<c>Level</c>) to <c><anno>MatchLevel</anno></c> by
+ calling <seealso marker="logger#compare_levels-2">
+ <c>logger:compare_levels(Level, MatchLevel)</c></seealso>.
+ The filter matches if the value
+ of <c><anno>Operator</anno></c> is:</p>
+
+ <taglist>
+ <tag><c>neq</c></tag>
+ <item><p>and the compare function returns <c>lt</c>
+ or <c>gt</c>.</p></item>
+ <tag><c>eq</c></tag>
+ <item><p>and the compare function returns <c>eq</c>.</p></item>
+ <tag><c>lt</c></tag>
+ <item><p>and the compare function returns <c>lt</c>.</p></item>
+ <tag><c>gt</c></tag>
+ <item><p>and the compare function returns <c>gt</c>.</p></item>
+ <tag><c>lteq</c></tag>
+ <item><p>and the compare function returns <c>lt</c>
+ or <c>eq</c>.</p></item>
+ <tag><c>gteq</c></tag>
+ <item><p>and the compare function returns <c>gt</c>
+ or <c>eq</c>.</p></item>
+ </taglist>
+
+ <p>If the filter matches and <c><anno>Action</anno></c> is
+ <c>log</c>, the log event is allowed. If the filter
+ matches and <c><anno>Action</anno></c> is <c>stop</c>, the
+ log event is stopped.</p>
+
+ <p>If the filter does not match, it returns <c>ignore</c>,
+ meaning that other filters, or the value of the
+ configuration parameter <c>filter_default</c>, will decide
+ if the event is allowed or not.</p>
+
+ <p>Example: only allow debug level log events</p>
+
+ <code>
+logger:set_handler_config(h1, filter_default, stop).
+Filter = {fun logger_filters:level/2, {log, eq, debug}}.
+logger:add_handler_filter(h1, debug_only, Filter).
+ok</code>
+ </desc>
+ </func>
+
+ <func>
+ <name name="progress" arity="2" since="OTP 21.0"/>
+ <fsummary>Filter progress reports from supervisor and application_controller.</fsummary>
+ <desc>
+ <p>This filter matches all progress reports
+ from <c>supervisor</c> and <c>application_controller</c>.</p>
+
+ <p>If <c><anno>Extra</anno></c> is <c>log</c>, the progress
+ reports are allowed. If <c><anno>Extra</anno></c>
+ is <c>stop</c>, the progress reports are stopped.</p>
+
+ <p>The filter returns <c>ignore</c> for all other log events.</p>
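+
+	<p>Example: stop all progress reports (the filter name
+	  <c>stop_progress</c> is an example only):</p>
+
+	<code>
+logger:add_primary_filter(stop_progress, {fun logger_filters:progress/2, stop}).
+ok</code>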
+ </desc>
+ </func>
+
+ <func>
+ <name name="remote_gl" arity="2" since="OTP 21.0"/>
+ <fsummary>Filter events with group leader on remote node.</fsummary>
+ <desc>
+ <p>This filter matches all events originating from a process
+ that has its group leader on a remote node.</p>
+
+ <p>If <c><anno>Extra</anno></c> is <c>log</c>, the matching
+ events are allowed. If <c><anno>Extra</anno></c>
+ is <c>stop</c>, the matching events are stopped.</p>
+
+ <p>The filter returns <c>ignore</c> for all other log events.</p>
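+
+	<p>Example: stop all events from processes with their group
+	  leader on a remote node (the filter name <c>no_remote</c> is
+	  an example only):</p>
+
+	<code>
+logger:add_primary_filter(no_remote, {fun logger_filters:remote_gl/2, stop}).
+ok</code>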
+ </desc>
+ </func>
+
+ </funcs>
+
+ <section>
+ <title>See Also</title>
+ <p>
+ <seealso marker="logger"><c>logger(3)</c></seealso>
+ </p>
+ </section>
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_formatter.xml b/lib/kernel/doc/src/logger_formatter.xml
new file mode 100644
index 0000000000..6dc83d24e1
--- /dev/null
+++ b/lib/kernel/doc/src/logger_formatter.xml
@@ -0,0 +1,354 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_formatter</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_formatter.xml</file>
+ </header>
+ <module since="OTP 21.0">logger_formatter</module>
+ <modulesummary>Default formatter for Logger.</modulesummary>
+
+ <description>
+ <p>Each Logger handler has a configured formatter specified as a
+ module and a configuration term. The purpose of the formatter is
+ to translate the log events to a final printable string
+ (<seealso marker="stdlib:unicode#type-chardata"><c>unicode:chardata()</c>
+ </seealso>) which can be written to the output device of the
+ handler. See
+ sections <seealso marker="logger_chapter#handlers">Handlers</seealso>
+ and <seealso marker="logger_chapter#formatters">Formatters</seealso>
+ in the Kernel User's Guide for more information.</p>
+ <p><c>logger_formatter</c> is the default formatter used by
+ Logger.</p>
+ </description>
+
+
+ <datatypes>
+ <datatype>
+ <name name="config"/>
+ <desc>
+ <p>The configuration term for <c>logger_formatter</c> is a
+ <seealso marker="stdlib:maps">map</seealso>, and the
+ following keys can be set as configuration parameters:</p>
+ <taglist>
+ <tag><marker id="chars_limit"/>
+ <c>chars_limit = integer() > 0 | unlimited</c></tag>
+ <item>
+ <p>A positive integer representing the value of the option
+ with the same name to be used when calling
+ <seealso marker="stdlib:io_lib#format-3">
+ <c>io_lib:format/3</c></seealso>.
+ This value limits the total number of characters printed
+ for each log event. Notice that this is a soft limit. For a
+ hard truncation limit, see option <c>max_size</c>.</p>
+ <p>Defaults to <c>unlimited</c>.</p>
+ </item>
+ <tag><marker id="depth"/><c>depth = integer() > 0 | unlimited</c></tag>
+ <item>
+ <p>A positive integer representing the maximum depth to
+ which terms shall be printed by this formatter. Format
+ strings passed to this formatter are rewritten. The
+ format controls ~p and ~w are replaced with ~P and ~W,
+ respectively, and the value is used as the depth
+ parameter. For details, see
+ <seealso marker="stdlib:io#format-2"><c>io:format/2,3</c></seealso>
+ in STDLIB.</p>
+ <p>Defaults to <c>unlimited</c>.</p>
+ </item>
+ <tag><c>legacy_header = boolean()</c></tag>
+ <item>
+ <p>If set to <c>true</c> a header field is added to
+ logger_formatter's part of <c>Metadata</c>. The value of
+ this field is a string similar to the header created by
+ the
+ old <seealso marker="error_logger"><c>error_logger</c></seealso>
+ event handlers. It can be included in the log event by
+ adding the list <c>[logger_formatter,header]</c> to the
+ template. See the description of
+ the <seealso marker="#type-template"><c>template()</c></seealso>
+ type for more information.</p>
+ <p>Defaults to <c>false</c>.</p>
+ </item>
+ <tag><marker id="max_size"/>
+ <c>max_size = integer() > 0 | unlimited</c></tag>
+ <item>
+ <p>A positive integer representing the absolute maximum size a
+ string returned from this formatter can have. If the
+ formatted string is longer, after possibly being limited
+ by <c>chars_limit</c> or <c>depth</c>, it is truncated.</p>
+ <p>Defaults to <c>unlimited</c>.</p>
+ </item>
+ <tag><c>report_cb = </c><seealso marker="logger#type-report_cb">
+ <c>logger:report_cb()</c></seealso></tag>
+ <item>
+ <p>A report callback is used by the formatter to transform
+ log messages on report form to a format string and
+ arguments. The report callback can be specified in the
+ metadata for the log event. If no report callback exists
+ in metadata, <c>logger_formatter</c> will
+ use <seealso marker="logger#format_report-1">
+ <c>logger:format_report/1</c></seealso> as default
+ callback.</p>
+ <p>If this configuration parameter is set, it replaces
+ both the default report callback, and any report
+ callback found in metadata. That is, all reports are
+ converted by this configured function.</p>
+ </item>
+ <tag><c>single_line = boolean()</c></tag>
+ <item>
+ <p>If set to <c>true</c>, each log event is printed as a
+ single line. To achieve this, <c>logger_formatter</c>
+ sets the field width to <c>0</c> for all <c>~p</c>
+	  and <c>~P</c> control sequences in the format string
+ (see <seealso marker="stdlib:io#format-2">
+ <c>io:format/2</c></seealso>), and replaces all
+ newlines in the message with <c>", "</c>. White spaces
+ following directly after newlines are removed. Notice
+ that newlines added by the <c>template</c> parameter are
+ not replaced.</p>
+ <p>Defaults to <c>true</c>.</p>
+ </item>
+ <tag><marker id="template"/>
+ <c>template = </c><seealso marker="#type-template"><c>template()</c>
+ </seealso></tag>
+ <item>
+ <p>The template describes how the formatted string is
+ composed by combining different data values from the log
+ event. See the description of
+ the <seealso marker="#type-template"><c>template()</c></seealso>
+ type for more information about this.</p>
+ </item>
+ <tag><c>time_designator = byte()</c></tag>
+ <item>
+ <p>Timestamps are formatted according to RFC3339, and the
+ time designator is the character used as date and time
+ separator.</p>
+ <p>Defaults to <c>$T</c>.</p>
+ <p>The value of this parameter is used as
+ the <c>time_designator</c> option
+ to <seealso marker="stdlib:calendar#system_time_to_rfc3339-2">
+	  <c>calendar:system_time_to_rfc3339/2</c></seealso>.</p>
+ </item>
+ <tag><c>time_offset = integer() | [byte()]</c></tag>
+ <item>
+ <p>The time offset, either a string or an integer, to be
+ used when formatting the timestamp.</p>
+ <p>An empty string is interpreted as local time. The
+ values <c>"Z"</c>, <c>"z"</c> or <c>0</c> are
+ interpreted as Universal Coordinated Time (UTC).</p>
+ <p>Strings, other than <c>"Z"</c>, <c>"z"</c>,
+ or <c>""</c>, must be on the form <c>±[hh]:[mm]</c>, for
+ example <c>"-02:00"</c> or <c>"+00:00"</c>.</p>
+ <p>Integers must be in microseconds, meaning that the
+ offset <c>7200000000</c> is equivalent
+ to <c>"+02:00"</c>.</p>
+ <p>Defaults to an empty string, meaning that timestamps
+ are displayed in local time. However, for backwards
+ compatibility, if the SASL configuration
+ parameter <seealso marker="sasl:sasl_app#utc_log">
+ <c>utc_log</c></seealso><c>=true</c>, the default is
+ changed to <c>"Z"</c>, meaning that timestamps are displayed
+ in UTC.</p>
+ <p>The value of this parameter is used as
+ the <c>offset</c> option
+ to <seealso marker="stdlib:calendar#system_time_to_rfc3339-2">
+	  <c>calendar:system_time_to_rfc3339/2</c></seealso>.</p>
+ </item>
+ </taglist>
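+	<p>For example, the formatter configuration of an existing
+	  handler can be updated to print each log event as a single
+	  line of at most 256 characters. The handler
+	  name <c>my_standard_h</c> below is an example only:</p>
+	<code>
+logger:update_formatter_config(my_standard_h,
+                               #{single_line => true,
+                                 max_size => 256}).</code>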
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="metakey"/>
+ <desc>
+      <p>A metadata key, which is either a single atom matching a top
+        level key in the metadata map, or a list of atoms representing
+        the path to a sub key in a nested metadata map. See the
+        description of
+        the <seealso marker="#type-template"><c>template()</c></seealso>
+        type for more information.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="template"/>
+ <desc>
+ <p>The template is a list of atoms, atom lists, tuples and strings. The
+ atoms <c>level</c> or <c>msg</c>, are treated as
+ placeholders for the severity level and the log message,
+ respectively. Other atoms or atom lists are interpreted as
+ placeholders for metadata, where atoms are expected to match
+ top level keys, and atom lists represent paths to sub keys when
+ the metadata is a nested map. For example the
+ list <c>[key1,key2]</c> is replaced by the value of
+ the <c>key2</c> field in the nested map below. The
+ atom <c>key1</c> on its own is replaced by the complete
+ value of the <c>key1</c> field. The values are converted to
+ strings.</p>
+
+ <code>
+#{key1 => #{key2 => my_value,
+ ...}
+ ...}</code>
+
+ <p>Tuples in the template express if-exist tests for metadata
+ keys. For example, the following tuple says that
+ if <c>key1</c> exists in the metadata map,
+ print <c>"key1=Value"</c>, where <c>Value</c> is the value
+ that <c>key1</c> is associated with in the metadata map. If
+ <c>key1</c> does not exist, print nothing.</p>
+ <code>
+{key1, ["key1=",key1], []}</code>
+
+ <p>Strings in the template are printed literally.</p>
+ <p>The default value for the <c>template</c> configuration
+ parameter depends on the value of the <c>single_line</c>
+ and <c>legacy_header</c> configuration parameters as
+ follows.</p>
+
+ <p>The log event used in the examples is:</p>
+ <code>
+?LOG_ERROR("name: ~p~nexit_reason: ~p", [my_name, "It crashed"])</code>
+
+ <taglist>
+ <tag><c>legacy_header = true, single_line = false</c></tag>
+ <item>
+ <p>Default
+ template: <c>[[logger_formatter,header],"\n",msg,"\n"]</c></p>
+
+ <p>Example log entry:</p>
+ <code type="none">
+=ERROR REPORT==== 17-May-2018::18:30:19.453447 ===
+name: my_name
+exit_reason: "It crashed"</code>
+
+ <p>Notice that all eight levels can occur in the heading,
+ not only <c>ERROR</c>, <c>WARNING</c> or <c>INFO</c> as
+ <seealso marker="error_logger"><c>error_logger</c></seealso>
+	    produces. Microseconds are also added at the end of the
+	    timestamp.</p>
+ </item>
+
+ <tag><c>legacy_header = true, single_line = true</c></tag>
+ <item>
+ <p>Default
+ template: <c>[[logger_formatter,header],"\n",msg,"\n"]</c></p>
+
+ <p>Notice that the template is here the same as
+ for <c>single_line=false</c>, but the resulting log entry
+ differs in that there is only one line after the
+ heading:</p>
+ <code type="none">
+=ERROR REPORT==== 17-May-2018::18:31:06.952665 ===
+name: my_name, exit_reason: "It crashed"</code>
+ </item>
+
+ <tag><c>legacy_header = false, single_line = true</c></tag>
+ <item>
+ <p>Default template: <c>[time," ",level,": ",msg,"\n"]</c></p>
+
+ <p>Example log entry:</p>
+ <code type="none">
+2018-05-17T18:31:31.152864+02:00 error: name: my_name, exit_reason: "It crashed"</code>
+ </item>
+
+ <tag><c>legacy_header = false, single_line = false</c></tag>
+ <item>
+ <p>Default template: <c>[time," ",level,":\n",msg,"\n"]</c></p>
+
+ <p>Example log entry:</p>
+ <code type="none">
+2018-05-17T18:32:20.105422+02:00 error:
+name: my_name
+exit_reason: "It crashed"</code>
+ </item>
+ </taglist>
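+
+      <p>As a sketch, the template can be changed to also include the
+	pid of the logging process. The handler
+	name <c>my_standard_h</c> below is an example only:</p>
+      <code>
+logger:update_formatter_config(my_standard_h,
+                               #{template => [time," ",level," ",pid,": ",msg,"\n"]}).</code>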
+ </desc>
+ </datatype>
+ </datatypes>
+
+ <funcs>
+ <func>
+ <name name="check_config" arity="1" since="OTP 21.0"/>
+ <fsummary>Validates the given formatter configuration.</fsummary>
+ <desc>
+ <p>The function is called by Logger when the formatter
+ configuration for a handler is set or modified. It
+ returns <c>ok</c> if the configuration is valid,
+ and <c>{error,term()}</c> if it is faulty.</p>
+ <p>The following Logger API functions can trigger this callback:</p>
+ <list>
+ <item><seealso marker="logger#add_handler-3">
+ <c>logger:add_handler/3</c></seealso></item>
+ <item><seealso marker="logger#set_handler_config-2">
+ <c>logger:set_handler_config/2,3</c></seealso></item>
+ <item><seealso marker="logger#update_handler_config-2">
+ <c>logger:update_handler_config/2</c></seealso></item>
+ <item><seealso marker="logger#update_formatter_config-2">
+ <c>logger:update_formatter_config/2</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+ <func>
+ <name name="format" arity="2" since="OTP 21.0"/>
+ <fsummary>Formats the given message.</fsummary>
+ <desc>
+      <p>This is the formatter callback function to be called from
+ handlers. The log event is processed as follows:</p>
+ <list>
+ <item>If the message is on report form, it is converted to
+ <c>{Format,Args}</c> by calling the report callback. See
+ section <seealso marker="logger_chapter#log_message">Log
+ Message</seealso> in the Kernel User's Guide for more
+ information about report callbacks and valid forms of log
+ messages.</item>
+ <item>The message size is limited according to the values of
+ configuration parameters <seealso marker="#chars_limit">
+ <c>chars_limit</c></seealso>
+ and <seealso marker="#depth"><c>depth</c></seealso>.</item>
+ <item>The full log entry is composed according to
+ the <seealso marker="#template"><c>template</c></seealso>.</item>
+ <item>If the final string is too long, it is truncated
+ according to the value of configuration
+ parameter <seealso marker="#max_size"><c>max_size</c></seealso>.</item>
+ </list>
+ </desc>
+ </func>
+ </funcs>
+
+ <section>
+ <title>See Also</title>
+ <p>
+ <seealso marker="stdlib:calendar"><c>calendar(3)</c></seealso>,
+ <seealso marker="error_logger"><c>error_logger(3)</c></seealso>,
+ <seealso marker="stdlib:io"><c>io(3)</c></seealso>,
+ <seealso marker="stdlib:io_lib"><c>io_lib(3)</c></seealso>,
+ <seealso marker="logger"><c>logger(3)</c></seealso>,
+ <seealso marker="stdlib:maps"><c>maps(3)</c></seealso>,
+ <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso>,
+ <seealso marker="stdlib:unicode"><c>unicode(3)</c></seealso>
+ </p>
+ </section>
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml
new file mode 100644
index 0000000000..5ed1a2f210
--- /dev/null
+++ b/lib/kernel/doc/src/logger_std_h.xml
@@ -0,0 +1,217 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_std_h</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_std_h.xml</file>
+ </header>
+ <module since="OTP 21.0">logger_std_h</module>
+ <modulesummary>Standard handler for Logger.</modulesummary>
+
+ <description>
+ <p>This is the standard handler for Logger.
+ Multiple instances of this handler can be added to
+ Logger, and each instance prints logs to <c>standard_io</c>,
+ <c>standard_error</c>, or to file.</p>
+ <p>The handler has an overload protection mechanism that keeps the handler
+ process and the Kernel application alive during high loads of log
+ events. How overload protection works, and how to configure it, is
+ described in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+ <p>To add a new instance of the standard handler, use
+ <seealso marker="logger#add_handler-3"><c>logger:add_handler/3</c>
+ </seealso>. The handler configuration argument is a map which can contain
+ general configuration parameters, as documented in the
+ <seealso marker="logger_chapter#handler_configuration"><c>User's Guide</c>
+ </seealso>, and handler specific parameters. The specific data
+ is stored in a sub map with the key <c>config</c>, and can contain the
+ following parameters:</p>
+ <taglist>
+ <tag><marker id="type"/><c>type = standard_io | standard_error | file</c></tag>
+ <item>
+ <p>Specifies the log destination.</p>
+ <p>The value is set when the handler is added, and it can not
+ be changed in runtime.</p>
+ <p>Defaults to <c>standard_io</c>, unless
+ parameter <seealso marker="#file"><c>file</c></seealso> is
+ given, in which case it defaults to <c>file</c>.</p>
+ </item>
+ <tag><marker id="file"/><c>file = </c><seealso marker="file#type-filename"><c>file:filename()</c></seealso></tag>
+ <item>
+ <p>This specifies the name of the log file when the handler is
+ of type <c>file</c>.</p>
+ <p>The value is set when the handler is added, and it can not
+ be changed in runtime.</p>
+ <p>Defaults to the same name as the handler identity, in the
+ current directory.</p>
+ </item>
+ <tag><marker id="modes"/><c>modes = [</c><seealso marker="file#type-mode"><c>file:mode()</c></seealso><c>]</c></tag>
+ <item>
+ <p>This specifies the file modes to use when opening the log
+ file,
+ see <seealso marker="file#open-2"><c>file:open/2</c></seealso>.
+ If <c>modes</c> are not specified, the default list used
+ is <c>[raw,append,delayed_write]</c>. If <c>modes</c> are
+ specified, the list replaces the default modes list with the
+ following adjustments:</p>
+ <list>
+ <item>
+ If <c>raw</c> is not found in the list, it is added.
+ </item>
+ <item>
+ If none of <c>write</c>, <c>append</c> or <c>exclusive</c> is
+ found in the list, <c>append</c> is added.</item>
+ <item>If none of <c>delayed_write</c>
+ or <c>{delayed_write,Size,Delay}</c> is found in the
+ list, <c>delayed_write</c> is added.</item>
+ </list>
+ <p>Log files are always UTF-8 encoded. The encoding can not be
+ changed by setting the mode <c>{encoding,Encoding}</c>.</p>
+ <p>The value is set when the handler is added, and it can not
+ be changed in runtime.</p>
+ <p>Defaults to <c>[raw,append,delayed_write]</c>.</p>
+ </item>
+ <tag><marker id="max_no_bytes"/><c>max_no_bytes = pos_integer() | infinity</c></tag>
+ <item>
+ <p>This parameter specifies if the log file should be rotated
+ or not. The value <c>infinity</c> means the log file will
+ grow indefinitely, while an integer value specifies at which
+ file size (bytes) the file is rotated.</p>
+ <p>Defaults to <c>infinity</c>.</p>
+ </item>
+ <tag><marker id="max_no_files"/><c>max_no_files = non_neg_integer()</c></tag>
+ <item>
+ <p>This parameter specifies the number of rotated log file
+ archives to keep. This has meaning only
+ if <seealso marker="#max_no_bytes"><c>max_no_bytes</c></seealso>
+ is set to an integer value.</p>
+ <p>The log archives are
+ named <c>FileName.0</c>, <c>FileName.1</c>,
+ ... <c>FileName.N</c>, where <c>FileName</c> is the name of
+ the current log file. <c>FileName.0</c> is the newest of the
+ archives. The maximum value for <c>N</c> is the value
+ of <c>max_no_files</c> minus 1.</p>
+	 <p>Notice that setting this value to <c>0</c> does not turn off
+ rotation. It only specifies that no archives are kept.</p>
+ <p>Defaults to <c>0</c>.</p>
+ </item>
+ <tag><marker id="compress_on_rotate"/><c>compress_on_rotate = boolean()</c></tag>
+ <item>
+ <p>This parameter specifies if the rotated log file archives
+ shall be compressed or not. If set to <c>true</c>, all
+ archives are compressed with <c>gzip</c>, and renamed
+	   to <c>FileName.N.gz</c>.</p>
+ <p><c>compress_on_rotate</c> has no meaning if <seealso
+ marker="#max_no_bytes"><c>max_no_bytes</c></seealso> has the
+ value <c>infinity</c>.</p>
+ <p>Defaults to <c>false</c>.</p>
+ </item>
+ <tag><marker id="file_check"/><c>file_check = non_neg_integer()</c></tag>
+ <item>
+ <p>When <c>logger_std_h</c> logs to a file, it reads the file
+ information of the log file prior to each write
+ operation. This is to make sure the file still exists and
+ has the same inode as when it was opened. This implies some
+ performance loss, but ensures that no log events are lost in
+ the case when the file has been removed or renamed by an
+ external actor.</p>
+ <p>In order to allow minimizing the performance loss, the
+ <c>file_check</c> parameter can be set to a positive integer
+ value, <c>N</c>. The handler will then skip reading the file
+ information prior to writing, as long as no more
+ than <c>N</c> milliseconds have passed since it was last
+ read.</p>
+	 <p>Notice that the risk of losing log events grows when
+	   the <c>file_check</c> value grows.</p>
+	 <p>Defaults to <c>0</c>.</p>
+ </item>
+ <tag><c>filesync_repeat_interval = pos_integer() | no_repeat</c></tag>
+ <item>
+ <p>This value, in milliseconds, specifies how often the handler does
+ a file sync operation to write buffered data to disk. The handler attempts
+ the operation repeatedly, but only performs a new sync if something has
+ actually been logged.</p>
+	 <p>If the value is set to <c>no_repeat</c>, the repeated file sync operation
+	   is disabled, and the operating system settings determine
+	   how quickly or slowly data is written to disk. The user can also call
+ the <seealso marker="logger_std_h#filesync-1"><c>filesync/1</c></seealso>
+ function to perform a file sync.</p>
+ <p>Defaults to <c>5000</c> milliseconds.</p>
+ </item>
+ </taglist>
+ <p>Other configuration parameters exist, to be used for customizing
+ the overload protection behaviour. The same parameters are used both in the
+ standard handler and the disk_log handler, and are documented in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+    <p>Notice that when changing the configuration of the handler in
+ runtime, the <c>type</c>, <c>file</c>, or <c>modes</c> parameters
+ must not be modified.</p>
+ <p>Example of adding a standard handler:</p>
+ <code type="none">
+logger:add_handler(my_standard_h, logger_std_h,
+ #{config => #{file => "./system_info.log",
+ filesync_repeat_interval => 1000}}).
+ </code>
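+  <p>A sketch of a handler set up for log rotation, keeping at most
+    five compressed archive files of at most one megabyte each. The
+    handler name and file name are examples only:</p>
+  <code type="none">
+logger:add_handler(my_rotating_h, logger_std_h,
+                   #{config => #{file => "./rotating.log",
+                                 max_no_bytes => 1048576,
+                                 max_no_files => 5,
+                                 compress_on_rotate => true}}).
+  </code>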
+  <p>To set the default handler, which is started when
+    the Kernel application starts, to log to file instead of <c>standard_io</c>,
+    change the Kernel default logger configuration. Example:</p>
+ <code type="none">
+erl -kernel logger '[{handler,default,logger_std_h,
+ #{config => #{file => "./log.log"}}}]'
+ </code>
+ <p>An example of how to replace the standard handler with a disk_log handler
+ at startup is found in the
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>
+ manual.</p>
+ </description>
+
+ <funcs>
+
+ <func>
+ <name name="filesync" arity="1" clause_i="1" since="OTP 21.0"/>
+ <fsummary>Writes buffered data to disk.</fsummary>
+ <desc>
+ <p>Write buffered data to disk.</p>
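+      <p>For example, to make sure that everything logged by the handler
+        added in the example above has been written to disk (the handler
+        name is an example only):</p>
+      <code type="none">
+logger_std_h:filesync(my_standard_h).
+      </code>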
+ </desc>
+ </func>
+
+ </funcs>
+
+ <section>
+ <title>See Also</title>
+ <p><seealso marker="logger"><c>logger(3)</c></seealso>,
+ <seealso marker="logger_disk_log_h">
+ <c>logger_disk_log_h(3)</c></seealso></p>
+ </section>
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/net_adm.xml b/lib/kernel/doc/src/net_adm.xml
index 6957a3b5e4..c3e1619f1b 100644
--- a/lib/kernel/doc/src/net_adm.xml
+++ b/lib/kernel/doc/src/net_adm.xml
@@ -28,7 +28,7 @@
<date>1996-09-10</date>
<rev>A</rev>
</header>
- <module>net_adm</module>
+ <module since="">net_adm</module>
<modulesummary>Various Erlang net administration routines.</modulesummary>
<description>
<p>This module contains various network utility functions.</p>
@@ -36,7 +36,7 @@
<funcs>
<func>
- <name name="dns_hostname" arity="1"/>
+ <name name="dns_hostname" arity="1" since=""/>
<fsummary>Official name of a host.</fsummary>
<desc>
<p>Returns the official name of <c><anno>Host</anno></c>, or
@@ -46,7 +46,7 @@
</func>
<func>
- <name name="host_file" arity="0"/>
+ <name name="host_file" arity="0" since=""/>
<fsummary>Read file <c>.hosts.erlang</c>.</fsummary>
<desc>
<p>Reads file <c>.hosts.erlang</c>, see section
@@ -58,7 +58,7 @@
</func>
<func>
- <name name="localhost" arity="0"/>
+ <name name="localhost" arity="0" since=""/>
<fsummary>Name of the local host.</fsummary>
<desc>
<p>Returns the name of the local host. If Erlang was started
@@ -68,8 +68,8 @@
</func>
<func>
- <name name="names" arity="0"/>
- <name name="names" arity="1"/>
+ <name name="names" arity="0" since=""/>
+ <name name="names" arity="1" since=""/>
<fsummary>Names of Erlang nodes at a host.</fsummary>
<desc>
<p>Similar to <c>epmd -names</c>, see
@@ -86,7 +86,7 @@
</func>
<func>
- <name name="ping" arity="1"/>
+ <name name="ping" arity="1" since=""/>
<fsummary>Set up a connection to a node.</fsummary>
<desc>
<p>Sets up a connection to <c><anno>Node</anno></c>. Returns
@@ -95,8 +95,8 @@
</func>
<func>
- <name name="world" arity="0"/>
- <name name="world" arity="1"/>
+ <name name="world" arity="0" since=""/>
+ <name name="world" arity="1" since=""/>
<fsummary>Lookup and connect to all nodes at all hosts in
<c>.hosts.erlang</c>.</fsummary>
<type name="verbosity"/>
@@ -117,8 +117,8 @@
</func>
<func>
- <name name="world_list" arity="1"/>
- <name name="world_list" arity="2"/>
+ <name name="world_list" arity="1" since=""/>
+ <name name="world_list" arity="2" since=""/>
<fsummary>Lookup and connect to all nodes at specified hosts.</fsummary>
<type name="verbosity"/>
<desc>
diff --git a/lib/kernel/doc/src/net_kernel.xml b/lib/kernel/doc/src/net_kernel.xml
index 0b94fc0fa6..419d3cad84 100644
--- a/lib/kernel/doc/src/net_kernel.xml
+++ b/lib/kernel/doc/src/net_kernel.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2017</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -28,7 +28,7 @@
<date>1996-09-10</date>
<rev>A</rev>
</header>
- <module>net_kernel</module>
+ <module since="">net_kernel</module>
<modulesummary>Erlang networking kernel.</modulesummary>
<description>
<p>The net kernel is a system process, registered as
@@ -56,7 +56,7 @@ $ <input>erl -sname foobar</input></pre>
<p>Normally, connections are established automatically when
another node is referenced. This functionality can be disabled
by setting Kernel configuration parameter
- <c>dist_auto_connect</c> to <c>false</c>, see
+ <c>dist_auto_connect</c> to <c>never</c>, see
<seealso marker="kernel_app"><c>kernel(6)</c></seealso>. In this case,
connections must be established explicitly by calling
<seealso marker="#connect_node/1"><c>connect_node/1</c></seealso>.</p>
@@ -81,7 +81,7 @@ $ <input>erl -sname foobar</input></pre>
<funcs>
<func>
- <name name="allow" arity="1"/>
+ <name name="allow" arity="1" since=""/>
<fsummary>Permit access to a specified set of nodes</fsummary>
<desc>
<p>Permits access to the specified set of nodes.</p>
@@ -98,17 +98,19 @@ $ <input>erl -sname foobar</input></pre>
</func>
<func>
- <name name="connect_node" arity="1"/>
+ <name name="connect_node" arity="1" since=""/>
<fsummary>Establish a connection to a node.</fsummary>
<desc>
<p>Establishes a connection to <c><anno>Node</anno></c>. Returns
- <c>true</c> if successful, <c>false</c> if not, and <c>ignored</c>
- if the local node is not alive.</p>
+ <c>true</c> if a connection was established or was already
+ established or if <c><anno>Node</anno></c> is the local node
+ itself. Returns <c>false</c> if the connection attempt failed, and
+ <c>ignored</c> if the local node is not alive.</p>
</desc>
</func>
<func>
- <name name="get_net_ticktime" arity="0"/>
+ <name name="get_net_ticktime" arity="0" since=""/>
<fsummary>Get <c>net_ticktime</c>.</fsummary>
<desc>
<p>Gets <c>net_ticktime</c> (see
@@ -129,7 +131,7 @@ $ <input>erl -sname foobar</input></pre>
</func>
<func>
- <name name="getopts" arity="2"/>
+ <name name="getopts" arity="2" since="OTP 19.1"/>
<fsummary>Get distribution socket options.</fsummary>
<desc>
<p>Get one or more options for the distribution socket
@@ -144,8 +146,8 @@ $ <input>erl -sname foobar</input></pre>
</func>
<func>
- <name name="monitor_nodes" arity="1"/>
- <name name="monitor_nodes" arity="2"/>
+ <name name="monitor_nodes" arity="1" since=""/>
+ <name name="monitor_nodes" arity="2" since=""/>
<fsummary>Subscribe to node status change messages.</fsummary>
<desc>
<p>The calling process subscribes or unsubscribes to node
@@ -230,7 +232,12 @@ $ <input>erl -sname foobar</input></pre>
<item>
<p>The tuple <c>{nodedown_reason, Reason}</c> is included in
<c>InfoList</c> in <c>nodedown</c> messages.</p>
- <p><c>Reason</c> can be any of the following:</p>
+ <p>
+            <c>Reason</c> can, depending on which
+	    distribution module or process is used, be any term,
+ but for the standard TCP distribution module it is
+ any of the following:
+ </p>
<taglist>
<tag><c>connection_setup_failed</c></tag>
<item><p>The connection setup failed (after <c>nodeup</c>
@@ -260,8 +267,8 @@ $ <input>erl -sname foobar</input></pre>
</func>
<func>
- <name name="set_net_ticktime" arity="1"/>
- <name name="set_net_ticktime" arity="2"/>
+ <name name="set_net_ticktime" arity="1" since=""/>
+ <name name="set_net_ticktime" arity="2" since=""/>
<fsummary>Set <c>net_ticktime</c>.</fsummary>
<desc>
<p>Sets <c>net_ticktime</c> (see
@@ -317,7 +324,7 @@ $ <input>erl -sname foobar</input></pre>
</func>
<func>
- <name name="setopts" arity="2"/>
+ <name name="setopts" arity="2" since="OTP 19.1"/>
<fsummary>Set distribution socket options.</fsummary>
<desc>
<p>Set one or more options for distribution sockets.
@@ -338,9 +345,9 @@ $ <input>erl -sname foobar</input></pre>
</func>
<func>
- <name>start([Name]) -> {ok, pid()} | {error, Reason}</name>
- <name>start([Name, NameType]) -> {ok, pid()} | {error, Reason}</name>
- <name>start([Name, NameType, Ticktime]) -> {ok, pid()} | {error, Reason}</name>
+ <name since="">start([Name]) -> {ok, pid()} | {error, Reason}</name>
+ <name since="">start([Name, NameType]) -> {ok, pid()} | {error, Reason}</name>
+ <name since="">start([Name, NameType, Ticktime]) -> {ok, pid()} | {error, Reason}</name>
<fsummary>Turn an Erlang runtime system into a distributed node.</fsummary>
<type>
<v>Name = atom()</v>
@@ -357,7 +364,7 @@ $ <input>erl -sname foobar</input></pre>
</func>
<func>
- <name name="stop" arity="0"/>
+ <name name="stop" arity="0" since=""/>
<fsummary>Turn a node into a non-distributed Erlang runtime system.</fsummary>
<desc>
<p>Turns a distributed node into a non-distributed node. For
diff --git a/lib/kernel/doc/src/notes.xml b/lib/kernel/doc/src/notes.xml
index 09844f1502..61bd598145 100644
--- a/lib/kernel/doc/src/notes.xml
+++ b/lib/kernel/doc/src/notes.xml
@@ -4,7 +4,7 @@
<chapter>
<header>
<copyright>
- <year>2004</year><year>2017</year>
+ <year>2004</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -31,6 +31,777 @@
</header>
<p>This document describes the changes made to the Kernel application.</p>
+<section><title>Kernel 6.3.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>Fixed a performance regression when reading files
+ opened with the <c>compressed</c> flag.</p>
+ <p>
+ Own Id: OTP-15706 Aux Id: ERIERL-336 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 6.3</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+            If, for example, the <c>/etc/hosts</c> file did not come into
+            existence until after the kernel application had started,
+ its content was never read. This bug has now been
+ corrected.</p>
+ <p>
+ Own Id: OTP-14702 Aux Id: PR-2066 </p>
+ </item>
+ <item>
+ <p>
+ Fix bug where doing <c>seq_trace:reset_trace()</c> while
+ another process was doing a garbage collection could
+ cause the run-time system to segfault.</p>
+ <p>
+ Own Id: OTP-15490</p>
+ </item>
+ <item>
+ <p>
+ Fix <c>erl_epmd:port_please</c> spec to include
+ <c>atom()</c> and <c>string()</c>.</p>
+ <p>
+ Own Id: OTP-15557 Aux Id: PR-2117 </p>
+ </item>
+ <item>
+ <p>
+ The Logger handler logger_std_h now keeps track of the
+ inode of its log file in order to re-open the file if the
+ inode changes. This may happen, for instance, if the log
+ file is opened and saved by an editor.</p>
+ <p>
+ Own Id: OTP-15578 Aux Id: ERL-850 </p>
+ </item>
+ <item>
+ <p>
+ When user specific file modes are given to the logger
+ handler <c>logger_std_h</c>, they were earlier accepted
+            without any control. This is now changed, so Logger will
+ adjust the file modes as follows:</p>
+ <p>
+ - If <c>raw</c> is not found in the list, it is
+ added.<br/> - If none of <c>write</c>, <c>append</c> or
+ <c>exclusive</c> are found in the list, <c>append</c> is
+ added.<br/> - If none of <c>delayed_write</c> or
+ <c>{delayed_write,Size,Delay}</c> are found in the list,
+ <c>delayed_write</c> is added.</p>
+ <p>
+ Own Id: OTP-15602</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ The standard logger handler, <c>logger_std_h</c>, now has
+ a new internal feature for log rotation. The rotation
+ scheme is as follows:</p>
+ <p>
+ The log file to which the handler currently writes always
+ has the same name, i.e. the name which is configured for
+ the handler. The archived files have the same name, but
+ with extension ".N", where N is an integer. The newest
+ archive has extension ".0", and the oldest archive has
+ the highest number. </p>
+ <p>
+ The size at which the log file is rotated, and the number
+ of archive files that are kept, is specified with the
+ handler specific configuration parameters
+ <c>max_no_bytes</c> and <c>max_no_files</c> respectively. </p>
+ <p>
+ Archives can be compressed, in which case they get a
+ ".gz" file extension after the integer. Compression is
+ specified with the handler specific configuration
+ parameter <c>compress_on_rotate</c>.</p>
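+            <p>
+            A minimal sys.config sketch of such a handler configuration
+            (the file name and limits below are examples only):</p>
+            <code type="none">
+[{kernel,
+  [{logger,
+    [{handler, default, logger_std_h,
+      #{config => #{file => "log/my_app.log",
+                    max_no_bytes => 10485760,  % rotate at ~10 MB
+                    max_no_files => 5,         % keep 5 archives
+                    compress_on_rotate => true}}}]}]}].</code>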
+ <p>
+ Own Id: OTP-15479</p>
+ </item>
+ <item>
+ <p>
+ The new functions <c>logger:i/0</c> and <c>logger:i/1</c>
+ are added. These provide the same information as
+ <c>logger:get_config/0</c> and other
+ <c>logger:get_*_config</c> functions, but the information
+ is more logically sorted and more readable.</p>
+ <p>
+ Own Id: OTP-15600</p>
+ </item>
+ <item>
+ <p>
+ Logger is now protected against overload due to massive
+ amounts of log events from the emulator or from remote
+ nodes.</p>
+ <p>
+ Own Id: OTP-15601</p>
+ </item>
+ <item>
+ <p>
+ Logger now uses os:system_time/1 instead of
+ erlang:system_time/1 to generate log event timestamps.</p>
+ <p>
+ Own Id: OTP-15625</p>
+ </item>
+ <item>
+ <p>
+            Add functions <c>application:set_env/1</c> and
+            <c>application:set_env/2</c>. These take a list of
+ application configuration parameters, and the behaviour
+ is equivalent to calling <c>application:set_env/4</c>
+ individually for each application/key combination, except
+ it is more efficient.</p>
+ <p>
+ <c>set_env/1,2</c> warns about duplicated applications or
+ keys. The warning is also emitted during boot, if
+ applications or keys are duplicated within one
+ configuration file, e.g. sys.config.</p>
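+            <p>
+            A small sketch of such a call (the application names and
+            values are examples only):</p>
+            <code type="none">
+ok = application:set_env([{kernel, [{logger_level, warning}]},
+                          {my_app, [{pool_size, 10}]}]).</code>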
+ <p>
+ Own Id: OTP-15642 Aux Id: PR-2164 </p>
+ </item>
+ <item>
+ <p>
+ Handler specific configuration parameters for the
+ standard handler <c>logger_std_h</c> are changed to be
+ more intuitive and more similar to the disk_log handler.</p>
+ <p>
+ Earlier there was only one parameter, <c>type</c>, which
+ could have the values <c>standard_io</c>,
+ <c>standard_error</c>, <c>{file,FileName}</c> or
+ <c>{file,FileName,Modes}</c>.</p>
+ <p>
+ This is now changed, so the following parameters are
+ allowed:</p>
+ <p>
+ <c>type = standard_io | standard_error | file</c><br/>
+ <c>file = file:filename()</c><br/> <c>modes =
+ [file:mode()]</c></p>
+ <p>
+ All parameters are optional. <c>type</c> defaults to
+ <c>standard_io</c>, unless a file name is given, in which
+ case it defaults to <c>file</c>. If <c>type</c> is set to
+ <c>file</c>, the file name defaults to the same as the
+ handler id.</p>
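+            <p>
+            For example, a handler writing to a file could be added
+            like this (the handler id and file name are examples only):</p>
+            <code type="none">
+logger:add_handler(my_file_h, logger_std_h,
+                   #{config => #{type => file,
+                                 file => "log/debug.log",
+                                 modes => [raw, append, delayed_write]}}).</code>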
+ <p>
+ The potential incompatibility is that
+ <c>logger:get_config/0</c> and
+ <c>logger:get_handler_config/1</c> now returns the new
+ parameters, even if the configuration was set with the
+ old variant, e.g. <c>#{type=>{file,FileName}}</c>.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-15662</p>
+ </item>
+ <item>
+ <p>
+ The new configuration parameter <c>file_check</c> is
+ added to the Logger handler <c>logger_std_h</c>. This
+ parameter specifies how long (in milliseconds) the
+ handler may wait before checking if the log file still
+ exists and the inode is the same as when it was opened.</p>
+ <p>
+ The default value is 0, which means that this check is
+ done prior to each write operation. Setting a higher
+ number may improve performance, but adds the risk of
+            losing log events.</p>
+ <p>
+ Own Id: OTP-15663</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 6.2.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+            When setting the <c>recbuf</c> size of an inet socket, the
+            <c>buffer</c> is also automatically increased. Fix a bug
+ where the auto adjustment of inet buffer size would be
+ triggered even if an explicit inet buffer size had
+ already been set.</p>
+ <p>
+ Own Id: OTP-15651 Aux Id: ERIERL-304 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 6.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ A new function, <c>logger:update_handler_config/3</c> is
+ added, and the handler callback <c>changing_config</c>
+ now has a new argument, <c>SetOrUpdate</c>, which
+ indicates if the configuration change comes from
+ <c>set_handler_config/2,3</c> or
+ <c>update_handler_config/2,3</c>.</p>
+ <p>
+ This allows the handler to consistently merge the new
+ configuration with the old (if the change comes from
+ <c>update_handler_config/2,3</c>) or with the default (if
+ the change comes from <c>set_handler_config/2,3</c>).</p>
+ <p>
+ The built-in handlers <c>logger_std_h</c> and
+ <c>logger_disk_log_h</c> are updated accordingly. A bug
+ which could cause inconsistency between the handlers'
+ internal state and the stored configuration is also
+ corrected.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-15364</p>
+ </item>
+ <item>
+ <p>
+ Fix fallback when custom erl_epmd client does not
+ implement address_please.</p>
+ <p>
+ Own Id: OTP-15388 Aux Id: PR-1983 </p>
+ </item>
+ <item>
+ <p>
+ The logger ets table did not have the
+ <c>read_concurrency</c> option. This is now added.</p>
+ <p>
+ Own Id: OTP-15453 Aux Id: ERL-782 </p>
+ </item>
+ <item>
+ <p>
+ During system start, logger has a simple handler which
+ prints to stdout. After the kernel supervision is
+ started, this handler is removed and replaced by the
+ default handler. Due to a bug, logger earlier issued a
+ debug printout saying it received an unexpected message,
+ which was the EXIT message from the simple handler's
+ process. This is now corrected. The simple handler's
+ process now unlinks from the logger process before
+ terminating.</p>
+ <p>
+ Own Id: OTP-15466 Aux Id: ERL-788 </p>
+ </item>
+ <item>
+ <p>
+ The logger handler <c>logger_std_h</c> would not
+            re-create its log file if it was removed. Due to this it
+ could not be used with tools like 'logrotate'. This is
+ now corrected.</p>
+ <p>
+ Own Id: OTP-15469</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ A function <c>inet:getifaddrs/1</c> that takes a list
+ with a namespace option has been added, for platforms
+ that support that feature, for example Linux (only?).</p>
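+            <p>
+            A sketch of such a call (the namespace path is an example
+            only):</p>
+            <code type="none">
+{ok, IfList} = inet:getifaddrs([{netns, "/run/netns/example"}]).</code>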
+ <p>
+ Own Id: OTP-15121 Aux Id: ERIERL-189, PR-1974 </p>
+ </item>
+ <item>
+ <p>Added the <c>nopush</c> option for TCP sockets, which
+ corresponds to <c>TCP_NOPUSH</c> on *BSD and
+ <c>TCP_CORK</c> on Linux.</p>
+ <p>This is also used internally in <c>file:sendfile</c>
+ to reduce latency on subsequent send operations.</p>
+ <p>
+ Own Id: OTP-15357 Aux Id: ERL-698 </p>
+ </item>
+ <item>
+ <p>
+            Optimize handling of send_delay for TCP sockets to better
+ work with the new pollthread implementation introduced in
+ OTP-21.</p>
+ <p>
+ Own Id: OTP-15471 Aux Id: ERIERL-229 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 6.1.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fix bug causing net_kernel process crash on connection
+ attempt from node with name identical to local node.</p>
+ <p>
+ Own Id: OTP-15438 Aux Id: ERL-781 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 6.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ The values <c>all</c> and <c>none</c> are documented as
+            valid values for the Kernel configuration parameter
+ <c>logger_level</c>, but would cause a crash during node
+ start. This is now corrected.</p>
+ <p>
+ Own Id: OTP-15143</p>
+ </item>
+ <item>
+ <p>
+ Fix some potential buggy behavior in how ticks are sent
+            on inter-node distribution connections. A tick is now sent
+            to a c-node even if there is unsent buffered data, as
+ c-nodes need ticks in order to send reply ticks. The
+ amount of sent data was also calculated wrongly when
+ ticks were suppressed due to unsent buffered data.</p>
+ <p>
+ Own Id: OTP-15162 Aux Id: ERIERL-191 </p>
+ </item>
+ <item>
+ <p>
+            Non-semantic change in dist_util.erl to silence a dialyzer
+ warning.</p>
+ <p>
+ Own Id: OTP-15170</p>
+ </item>
+ <item>
+ <p>
+ Fixed <c>net_kernel:connect_node(node())</c> to return
+ <c>true</c> (and do nothing) as it always has before
+ OTP-21.0. Also documented this successful "self connect"
+ as the expected behavior.</p>
+ <p>
+ Own Id: OTP-15182 Aux Id: ERL-643 </p>
+ </item>
+ <item>
+ <p>
+ The single_line option on logger_formatter would in some
+ cases add an unwanted comma after the association arrows
+ in a map. This is now corrected.</p>
+ <p>
+ Own Id: OTP-15228</p>
+ </item>
+ <item>
+ <p>
+ Improved robustness of distribution connection setup. In
+ OTP-21.0 a truly asynchronous connection setup was
+            introduced. This is a further improvement on that work to
+            make the emulator more robust and able to recover
+            in cases where involved Erlang processes misbehave.</p>
+ <p>
+ Own Id: OTP-15297 Aux Id: OTP-15279, OTP-15280 </p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ A new macro, <c>?LOG(Level,...)</c>, is added. This is
+ equivalent to the existing <c>?LOG_&lt;LEVEL&gt;(...)</c>
+ macros.</p>
+ <p>
+ A new variant of Logger report callback is added, which
+ takes an extra argument containing options for size
+ limiting and line breaks. Module <c>proc_lib</c> in
+ <c>STDLIB</c> uses this for crash reports.</p>
+ <p>
+ Logger configuration is now checked a bit more for
+ errors.</p>
+ <p>
+ Own Id: OTP-15132</p>
+ </item>
+ <item>
+ <p>
+ The socket options <c>recvtos</c>, <c>recvttl</c>,
+ <c>recvtclass</c> and <c>pktoptions</c> have been
+ implemented in the socket modules. See the documentation
+ for the <c>gen_tcp</c>, <c>gen_udp</c> and <c>inet</c>
+ modules. Note that support for these in the runtime
+ system is platform dependent. Especially for
+            <c>pktoptions</c>, which is very Linux-specific and
+ obsoleted by the RFCs that defined it.</p>
+ <p>
+ Own Id: OTP-15145 Aux Id: ERIERL-187 </p>
+ </item>
+ <item>
+ <p>
+ Add <c>logger:set_application_level/2</c> for setting the
+ logger level of all modules in one application.</p>
+ <p>
+ Own Id: OTP-15146</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 6.0.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fixed bug in <c>net_kernel</c> that could cause an
+            emulator crash if certain connection attempts failed. The bug
+            has existed since kernel-6.0 (OTP-21.0).</p>
+ <p>
+ Own Id: OTP-15280 Aux Id: ERIERL-226, OTP-15279 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 6.0</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p> Clarify the documentation of <c>rpc:multicall/5</c>.
+ </p>
+ <p>
+ Own Id: OTP-10551</p>
+ </item>
+ <item>
+ <p>
+            When getting econnrefused from a server, the DNS resolver
+            retained an invalid socket, so lookup towards the next
+            server(s) also failed.</p>
+ <p>
+ Own Id: OTP-13133 Aux Id: PR-1557 </p>
+ </item>
+ <item>
+ <p>
+ No resolver backend returns V4Mapped IPv6 addresses any
+            more. This was inconsistent before; some did, some did
+ not. To facilitate working with such addresses a new
+ function <c>inet:ipv4_mapped_ipv6_address/1</c> has been
+ added.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-13761 Aux Id: ERL-503 </p>
+ </item>
+ <item>
+ <p>
+ The type specifications for <c>file:posix/0</c> and
+ <c>inet:posix/0</c> have been updated according to which
+ errors file and socket operations should be able to
+ return.</p>
+ <p>
+ Own Id: OTP-14019 Aux Id: ERL-550 </p>
+ </item>
+ <item>
+ <p>
+ Fix name resolving in IPv6 only environments when doing
+ the initial distributed connection.</p>
+ <p>
+ Own Id: OTP-14501</p>
+ </item>
+ <item>
+ <p> File operations used to accept <seealso
+ marker="kernel:file#type-name_all">filenames</seealso>
+ containing null characters (integer value zero). This
+ caused the name to be truncated and in some cases
+ arguments to primitive operations to be mixed up.
+ Filenames containing null characters inside the filename
+ are now <em>rejected</em> and will cause primitive file
+ operations to fail. </p> <p> Also environment variable
+ operations used to accept <seealso
+ marker="kernel:os#type-env_var_name">names</seealso> and
+ <seealso
+ marker="kernel:os#type-env_var_value">values</seealso> of
+ environment variables containing null characters (integer
+ value zero). This caused operations to silently produce
+ erroneous results. Environment variable names and values
+ containing null characters inside the name or value are
+ now <em>rejected</em> and will cause environment variable
+ operations to fail. </p> <p>Primitive environment
+ variable operations also used to accept the <c>$=</c>
+ character in environment variable names causing various
+ problems. <c>$=</c> characters in environment variable
+ names are now also <em>rejected</em>. </p> <p>Also
+ <seealso
+ marker="kernel:os#cmd/1"><c>os:cmd/1</c></seealso> now
+            rejects null characters inside its <seealso
+ marker="kernel:os#type-os_command">command</seealso>.
+ </p> <p><seealso
+ marker="erts:erlang#open_port/2"><c>erlang:open_port/2</c></seealso>
+ will also reject null characters inside the port name
+ from now on.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-14543 Aux Id: ERL-370 </p>
+ </item>
+ <item>
+ <p><c>os:putenv</c> and <c>os:getenv</c> no longer access
+ the process environment directly and instead work on a
+ thread-safe emulation. The only observable difference is
+ that it's <em>not</em> kept in sync with libc
+ <c>getenv(3)</c> / <c>putenv(3)</c>, so those who relied
+ on that behavior in drivers or NIFs will need to add
+ manual synchronization.</p> <p>On Windows this means that
+ you can no longer resolve DLL dependencies by modifying
+ the <c>PATH</c> just before loading the driver/NIF. To
+ make this less of a problem, the emulator now adds the
+ target DLL's folder to the DLL search path.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-14666</p>
+ </item>
+ <item>
+ <p>
+ Fixed connection tick toward primitive hidden nodes
+ (erl_interface) that could cause faulty tick timeout in
+ rare cases when payload data is sent to hidden node but
+ not received.</p>
+ <p>
+ Own Id: OTP-14681</p>
+ </item>
+ <item>
+ <p>
+            Make group react immediately on an EXIT signal from the shell
+            in, for example, ssh.</p>
+ <p>
+ Own Id: OTP-14991 Aux Id: PR1705 </p>
+ </item>
+ <item>
+ <p>
+ Calls to <c>gen_tcp:send/2</c> on closed sockets now
+            return <c>{error, closed}</c> instead of
+ <c>{error,enotconn}</c>.</p>
+ <p>
+ Own Id: OTP-15001</p>
+ </item>
+ <item>
+ <p>
+            The <c>included_applications</c> key is no longer
+            duplicated as an application environment variable. Earlier,
+ the included applications could be read both with
+ <c>application:get[_all]_env(...)</c> and
+ <c>application:get[_all]_key(...)</c> functions. Now, it
+ can only be read with
+ <c>application:get[_all]_key(...)</c>.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-15071</p>
+ </item>
+ <item>
+ <p>Owner and group changes through
+ <c>file:write_file_info</c>, <c>file:change_owner</c>,
+ and <c>file:change_group</c> will no longer report
+ success on permission errors.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-15118</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>A new logging API is added to Erlang/OTP, see the
+ <seealso
+ marker="kernel:logger"><c>logger(3)</c></seealso> manual
+ page, and section <seealso
+ marker="kernel:logger_chapter">Logging</seealso> in the
+ Kernel User's Guide.</p>
+ <p>Calls to <c>error_logger</c> are automatically
+ redirected to the new API, and legacy error logger event
+ handlers can still be used. It is, however, recommended
+ to use the Logger API directly when writing new code.</p>
+ <p>Notice the following potential incompatibilities:</p>
+            <list> <item><p>The Kernel configuration parameter
+ <c>error_logger</c> still works, but is overruled if the
+ default handler's output destination is configured with
+ Kernel configuration parameter <c>logger</c>.</p> <p>In
+ general, parameters for configuring error logger are
+ overwritten by new parameters for configuring
+ Logger.</p></item> <item><p>The concept of SASL error
+ logging is deprecated, meaning that by default the SASL
+ application does not affect which log events are
+ logged.</p> <p>By default, supervisor reports and crash
+ reports are logged by the default Logger handler started
+ by Kernel, and end up at the same destination (terminal
+            or file) as other standard log events from Erlang/OTP.</p>
+ <p>Progress reports are not logged by default, but can be
+ enabled by setting the primary log level to info, for
+ example with the Kernel configuration parameter
+ <c>logger_level</c>.</p> <p>To obtain backwards
+ compatibility with the SASL error logging functionality
+ from earlier releases, set Kernel configuration parameter
+ <c>logger_sasl_compatible</c> to <c>true</c>. This
+ prevents the default Logger handler from logging any
+ supervisor-, crash-, or progress reports. Instead, SASL
+ adds a separate Logger handler during application start,
+ which takes care of these log events. The SASL
+ configuration parameters <c>sasl_error_logger</c> and
+ <c>sasl_errlog_type</c> specify the destination (terminal
+ or file) and severity level to log for these
+ events.</p></item></list>
+ <p>
+ Since Logger is new in Erlang/OTP 21.0, we do reserve the
+ right to introduce changes to the Logger API and
+ functionality in patches following this release. These
+ changes might or might not be backwards compatible with
+ the initial version.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-13295</p>
+ </item>
+ <item>
+ <p>
+ The function <c>inet:i/0</c> has been documented.</p>
+ <p>
+ Own Id: OTP-13713 Aux Id: PR-1645 </p>
+ </item>
+ <item>
+ <p>
+ Typespecs for <c>netns</c> and <c>bind_to_device</c>
+ options have been added to <c>gen_tcp</c>, <c>gen_udp</c>
+ and <c>gen_sctp</c> functions.</p>
+ <p>
+ Own Id: OTP-14359 Aux Id: PR-1816 </p>
+ </item>
+ <item>
+ <p>
+ New functionality for implementation of alternative
+ carriers for the Erlang distribution has been introduced.
+ This mainly consists of support for usage of distribution
+ controller processes (previously only ports could be used
+ as distribution controllers). For more information see
+ <seealso marker="erts:alt_dist#distribution_module">ERTS
+ User's Guide ➜ How to implement an Alternative Carrier
+ for the Erlang Distribution ➜ Distribution
+ Module</seealso>.</p>
+ <p>
+ Own Id: OTP-14459</p>
+ </item>
+ <item>
+            <p><c>seq_trace</c> labels may now be any Erlang
+ term.</p>
+ <p>
+ Own Id: OTP-14899</p>
+ </item>
+ <item>
+ <p>
+ The SSL distribution protocol <c>-proto inet_tls</c> has
+ stopped setting the SSL option
+ <c>server_name_indication</c>. New verify funs for client
+            and server in <c>inet_tls_dist</c> have been added, not
+            documented yet, that check the node name if present in the peer
+            certificate. Their usage is also yet to be documented.</p>
+ <p>
+ Own Id: OTP-14969 Aux Id: OTP-14465, ERL-598 </p>
+ </item>
+ <item>
+ <p>
+ Changed timeout of <c>gen_server</c> calls to <c>auth</c>
+ server from default 5 seconds to <c>infinity</c>.</p>
+ <p>
+ Own Id: OTP-15009 Aux Id: ERL-601 </p>
+ </item>
+ <item>
+ <p>The callback module passed as <c>-epmd_module</c> to
+ erl has been expanded to be able to do name and port
+ resolving.</p> <p>Documentation has also been added in
+ the <seealso
+ marker="kernel:erl_epmd"><c>erl_epmd</c></seealso>
+ reference manual and ERTS User's Guide <seealso
+ marker="erts:alt_disco">How to Implement an Alternative
+ Service Discovery for Erlang Distribution</seealso>.</p>
+ <p>
+ Own Id: OTP-15086 Aux Id: PR-1694 </p>
+ </item>
+ <item>
+ <p>
+            Included config files specified with relative paths in
+            sys.config are now first searched for relative to the
+            directory of sys.config itself. If not found there, they are also
+            searched for relative to the current working directory.
+ The latter is for backwards compatibility.</p>
+ <p>
+ Own Id: OTP-15137 Aux Id: PR-1838 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.4.3.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+            Non-semantic change in dist_util.erl to silence a dialyzer
+ warning.</p>
+ <p>
+ Own Id: OTP-15170</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 5.4.3.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fix some potential buggy behavior in how ticks are sent
+            on inter-node distribution connections. A tick is now sent
+            to a c-node even if there is unsent buffered data, as
+            c-nodes need ticks in order to send reply ticks. The
+            amount of sent data was calculated wrongly when ticks
+            were suppressed due to unsent buffered data.</p>
+ <p>
+ Own Id: OTP-15162 Aux Id: ERIERL-191 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Kernel 5.4.3</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/kernel/doc/src/os.xml b/lib/kernel/doc/src/os.xml
index 7ce2f54542..0500e4cfb3 100644
--- a/lib/kernel/doc/src/os.xml
+++ b/lib/kernel/doc/src/os.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1997</year><year>2017</year>
+ <year>1997</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -28,7 +28,7 @@
<date></date>
<rev></rev>
</header>
- <module>os</module>
+ <module since="">os</module>
<modulesummary>Operating system-specific functions.</modulesummary>
<description>
<p>The functions in this module are operating system-specific.
@@ -36,11 +36,85 @@
only run on a specific platform. On the other hand, with careful
use, these functions can be of help in enabling a program to run on
most platforms.</p>
+
+ <note>
+ <p>
+ File operations used to accept filenames containing
+ null characters (integer value zero). This caused
+ the name to be truncated and in some cases arguments
+ to primitive operations to be mixed up. Filenames
+ containing null characters inside the filename
+ are now <em>rejected</em> and will cause primitive
+ file operations to fail.
+ </p>
+ <p>
+ Also environment variable operations used to accept
+ names and values of environment variables containing
+ null characters (integer value zero). This caused
+ operations to silently produce erroneous results.
+ Environment variable names and values containing
+ null characters inside the name or value are now
+ <em>rejected</em> and will cause environment variable
+ operations to fail.
+ </p>
+ </note>
</description>
<datatypes>
<datatype>
+ <name name="env_var_name"/>
+ <desc>
+ <p>A string containing valid characters on the specific
+ OS for environment variable names using
+ <seealso marker="file#native_name_encoding/0"><c>file:native_name_encoding()</c></seealso>
+ encoding. Note that specifically null characters (integer
+ value zero) and <c>$=</c> characters are not allowed.
+          However, note that not all invalid characters will
+          necessarily cause the primitive operations to fail, but they may instead
+ produce invalid results.
+ </p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="env_var_value"/>
+ <desc>
+ <p>A string containing valid characters on the specific
+ OS for environment variable values using
+ <seealso marker="file#native_name_encoding/0"><c>file:native_name_encoding()</c></seealso>
+ encoding. Note that specifically null characters (integer
+ value zero) are not allowed. However, note that not all
+          invalid characters will necessarily cause the primitive
+          operations to fail, but they may instead produce invalid results.
+ </p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="env_var_name_value"/>
+ <desc>
+ <p>
+          Assuming that environment variables have been correctly
+          set, a string containing valid characters on the specific
+          OS for environment variable names and values using
+          <seealso marker="file#native_name_encoding/0"><c>file:native_name_encoding()</c></seealso>
+          encoding. The first <c>$=</c> character appearing in
+          the string separates the environment variable name (on the
+          left) from the environment variable value (on the right).
+ </p>
+ </desc>
+ </datatype>
+ <datatype>
<name name="os_command"/>
+ <desc>
+        <p>All characters need to be valid characters on the
+ specific OS using
+ <seealso marker="file#native_name_encoding/0"><c>file:native_name_encoding()</c></seealso>
+ encoding. Note that specifically null characters (integer
+ value zero) are not allowed. However, note that not all
+          invalid characters will necessarily cause
+ <seealso marker="#cmd/1"><c>os:cmd/1</c></seealso>
+ to fail, but may instead produce invalid results.
+ </p>
+ </desc>
</datatype>
<datatype>
<name name="os_command_opts"/>
@@ -60,13 +134,18 @@
<funcs>
<func>
- <name name="cmd" arity="1"/>
- <name name="cmd" arity="2"/>
+ <name name="cmd" arity="1" since=""/>
+ <name name="cmd" arity="2" since="OTP 20.2.3"/>
<fsummary>Execute a command in a shell of the target OS.</fsummary>
<desc>
<p>Executes <c><anno>Command</anno></c> in a command shell of the
- target OS, captures the standard output of the command,
+ target OS, captures the standard output of the command,
and returns this result as a string.</p>
+      <warning><p>The previous implementation used to allow all characters
+      as long as they were integer values greater than or equal to zero.
+      This sometimes led to unwanted results, since null characters
+      (integer value zero) are often interpreted as string termination. The
+      current implementation rejects these.</p></warning>
<p><em>Examples:</em></p>
<code type="none">
LsOut = os:cmd("ls"), % on unix platform
@@ -94,8 +173,8 @@ DirOut = os:cmd("dir"), % on Win32 platform</code>
</func>
<func>
- <name name="find_executable" arity="1"/>
- <name name="find_executable" arity="2"/>
+ <name name="find_executable" arity="1" since=""/>
+ <name name="find_executable" arity="2" since=""/>
<fsummary>Absolute filename of a program.</fsummary>
<desc>
<p>These two functions look up an executable program, with the
@@ -111,7 +190,7 @@ DirOut = os:cmd("dir"), % on Win32 platform</code>
</func>
<func>
- <name name="getenv" arity="0"/>
+ <name name="getenv" arity="0" since=""/>
<fsummary>List all environment variables.</fsummary>
<desc>
<p>Returns a list of all environment variables.
@@ -126,7 +205,7 @@ DirOut = os:cmd("dir"), % on Win32 platform</code>
</func>
<func>
- <name name="getenv" arity="1"/>
+ <name name="getenv" arity="1" since=""/>
<fsummary>Get the value of an environment variable.</fsummary>
<desc>
<p>Returns the <c><anno>Value</anno></c> of the environment variable
@@ -141,7 +220,7 @@ DirOut = os:cmd("dir"), % on Win32 platform</code>
</func>
<func>
- <name name="getenv" arity="2"/>
+ <name name="getenv" arity="2" since="OTP 18.0"/>
<fsummary>Get the value of an environment variable.</fsummary>
<desc>
<p>Returns the <c><anno>Value</anno></c> of the environment variable
@@ -156,7 +235,7 @@ DirOut = os:cmd("dir"), % on Win32 platform</code>
</func>
<func>
- <name name="getpid" arity="0"/>
+ <name name="getpid" arity="0" since=""/>
<fsummary>Return the process identifier of the emulator
process.</fsummary>
<desc>
@@ -172,7 +251,7 @@ DirOut = os:cmd("dir"), % on Win32 platform</code>
</func>
<func>
- <name name="putenv" arity="2"/>
+ <name name="putenv" arity="2" since=""/>
<fsummary>Set a new value for an environment variable.</fsummary>
<desc>
<p>Sets a new <c><anno>Value</anno></c> for environment variable
@@ -185,11 +264,20 @@ DirOut = os:cmd("dir"), % on Win32 platform</code>
<p>On Unix platforms, the environment is set using UTF-8 encoding
if Unicode filename translation is in effect. On Windows, the
environment is set using wide character interfaces.</p>
+ <note>
+ <p>
+ <c><anno>VarName</anno></c> is not allowed to contain
+            an <c>$=</c> character. Previous implementations used
+            to let the <c>$=</c> character through, which
+            silently caused erroneous results. The current implementation
+            will instead throw a <c>badarg</c> exception.
+ </p>
+ </note>
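+        <p>A minimal illustration (the variable name and value are
+          chosen only for this example):</p>
+        <code type="none">
+true = os:putenv("MY_EXAMPLE_VAR", "some value"),
+"some value" = os:getenv("MY_EXAMPLE_VAR").</code>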
</desc>
</func>
<func>
- <name name="set_signal" arity="2"/>
+ <name name="set_signal" arity="2" since="OTP 20.0"/>
<fsummary>Enables or disables handling of OS signals.</fsummary>
<desc>
<p>Enables or disables OS signals.</p>
@@ -216,7 +304,7 @@ DirOut = os:cmd("dir"), % on Win32 platform</code>
</func>
<func>
- <name name="system_time" arity="0"/>
+ <name name="system_time" arity="0" since="OTP 18.0"/>
<fsummary>Current OS system time.</fsummary>
<desc>
<p>Returns the current
@@ -229,7 +317,7 @@ DirOut = os:cmd("dir"), % on Win32 platform</code>
</func>
<func>
- <name name="system_time" arity="1"/>
+ <name name="system_time" arity="1" since="OTP 18.0"/>
<fsummary>Current OS system time.</fsummary>
<desc>
<p>Returns the current
@@ -244,7 +332,7 @@ DirOut = os:cmd("dir"), % on Win32 platform</code>
</func>
<func>
- <name name="timestamp" arity="0"/>
+ <name name="timestamp" arity="0" since=""/>
<fsummary>Current OS system time on the <c>erlang:timestamp/0</c> format.</fsummary>
<type_desc variable="Timestamp">Timestamp = {MegaSecs, Secs, MicroSecs}</type_desc>
<desc>
@@ -285,7 +373,7 @@ calendar:now_to_universal_time(TS),
</func>
<func>
- <name name="perf_counter" arity="0"/>
+ <name name="perf_counter" arity="0" since="OTP 19.0"/>
<fsummary>Returns a performance counter</fsummary>
<desc>
<p>Returns the current performance counter value in <c>perf_counter</c>
@@ -295,7 +383,7 @@ calendar:now_to_universal_time(TS),
</desc>
</func>
<func>
- <name name="perf_counter" arity="1"/>
+ <name name="perf_counter" arity="1" since="OTP 19.0"/>
<fsummary>Returns a performance counter</fsummary>
<desc><p>Returns a performance counter that can be used as a very fast and
high resolution timestamp. This counter is read directly from the hardware or operating
@@ -309,7 +397,7 @@ calendar:now_to_universal_time(TS),
</desc>
</func>
<func>
- <name name="type" arity="0"/>
+ <name name="type" arity="0" since=""/>
<fsummary>Return the OS family and, in some cases, the OS name of the
current OS.</fsummary>
<desc>
@@ -329,7 +417,7 @@ calendar:now_to_universal_time(TS),
</func>
<func>
- <name name="unsetenv" arity="1"/>
+ <name name="unsetenv" arity="1" since="OTP R16B03"/>
<fsummary>Delete an environment variable.</fsummary>
<desc>
<p>Deletes the environment variable <c><anno>VarName</anno></c>.</p>
@@ -341,7 +429,7 @@ calendar:now_to_universal_time(TS),
</func>
<func>
- <name name="version" arity="0"/>
+ <name name="version" arity="0" since=""/>
<fsummary>Return the OS versions.</fsummary>
<desc>
<p>Returns the OS version.
diff --git a/lib/kernel/doc/src/part.xml b/lib/kernel/doc/src/part.xml
new file mode 100644
index 0000000000..fa7e92835f
--- /dev/null
+++ b/lib/kernel/doc/src/part.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE part SYSTEM "part.dtd">
+
+<part xmlns:xi="http://www.w3.org/2001/XInclude">
+ <header>
+ <copyright>
+ <year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Kernel User's Guide</title>
+ <prepared>OTP Team</prepared>
+ <docno></docno>
+ <date>2018-06-06</date>
+ <file>part.xml</file>
+ </header>
+ <description>
+ <p></p>
+ </description>
+ <xi:include href="introduction_chapter.xml"/>
+ <xi:include href="logger_chapter.xml"/>
+</part>
+
diff --git a/lib/kernel/doc/src/pg2.xml b/lib/kernel/doc/src/pg2.xml
index 0631b317b4..058d711756 100644
--- a/lib/kernel/doc/src/pg2.xml
+++ b/lib/kernel/doc/src/pg2.xml
@@ -32,7 +32,7 @@
<rev>A2</rev>
<file>pg2.xml</file>
</header>
- <module>pg2</module>
+ <module since="">pg2</module>
<modulesummary>Distributed named process groups.</modulesummary>
<description>
<p>This module implements process groups. Each message can be sent
@@ -66,7 +66,7 @@
<funcs>
<func>
- <name name="create" arity="1"/>
+ <name name="create" arity="1" since=""/>
<fsummary>Create a new, empty process group.</fsummary>
<desc>
<p>Creates a new, empty process group. The group is globally
@@ -75,7 +75,7 @@
</func>
<func>
- <name name="delete" arity="1"/>
+ <name name="delete" arity="1" since=""/>
<fsummary>Delete a process group.</fsummary>
<desc>
<p>Deletes a process group.</p>
@@ -83,7 +83,7 @@
</func>
<func>
- <name name="get_closest_pid" arity="1"/>
+ <name name="get_closest_pid" arity="1" since=""/>
<fsummary>Common dispatch function.</fsummary>
<desc>
<p>A useful dispatch function that can be used from
@@ -93,7 +93,7 @@
</func>
<func>
- <name name="get_local_members" arity="1"/>
+ <name name="get_local_members" arity="1" since=""/>
<fsummary>Return all local processes in a group.</fsummary>
<desc>
<p>Returns all processes running on the local node in the
@@ -104,7 +104,7 @@
</func>
<func>
- <name name="get_members" arity="1"/>
+ <name name="get_members" arity="1" since=""/>
<fsummary>Return all processes in a group.</fsummary>
<desc>
<p>Returns all processes in the group <c>Name</c>. This
@@ -114,7 +114,7 @@
</func>
<func>
- <name name="join" arity="2"/>
+ <name name="join" arity="2" since=""/>
<fsummary>Join a process to a group.</fsummary>
<desc>
<p>Joins the process <c>Pid</c> to the group <c>Name</c>.
@@ -124,7 +124,7 @@
</func>
<func>
- <name name="leave" arity="2"/>
+ <name name="leave" arity="2" since=""/>
<fsummary>Make a process leave a group.</fsummary>
<desc>
<p>Makes the process <c>Pid</c> leave the group <c>Name</c>.
@@ -134,8 +134,8 @@
</func>
<func>
- <name name="start" arity="0"/>
- <name name="start_link" arity="0"/>
+ <name name="start" arity="0" since=""/>
+ <name name="start_link" arity="0" since=""/>
<fsummary>Start the <c>pg2</c> server.</fsummary>
<desc>
<p>Starts the <c>pg2</c> server. Normally, the server does not need
@@ -149,7 +149,7 @@
</func>
<func>
- <name name="which_groups" arity="0"/>
+ <name name="which_groups" arity="0" since=""/>
<fsummary>Return a list of all known groups.</fsummary>
<desc>
<p>Returns a list of all known groups.</p>
diff --git a/lib/kernel/doc/src/ref_man.xml b/lib/kernel/doc/src/ref_man.xml
index 5cd77e0f6f..d3b947527f 100644
--- a/lib/kernel/doc/src/ref_man.xml
+++ b/lib/kernel/doc/src/ref_man.xml
@@ -4,7 +4,7 @@
<application xmlns:xi="http://www.w3.org/2001/XInclude">
<header>
<copyright>
- <year>1996</year><year>2016</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -32,12 +32,15 @@
</description>
<xi:include href="kernel_app.xml"/>
+ <xi:include href="app.xml"/>
<xi:include href="application.xml"/>
<xi:include href="auth.xml"/>
<xi:include href="code.xml"/>
+ <xi:include href="config.xml"/>
<xi:include href="disk_log.xml"/>
<xi:include href="erl_boot_server.xml"/>
<xi:include href="erl_ddll.xml"/>
+ <xi:include href="erl_epmd.xml"/>
<xi:include href="erl_prim_loader_stub.xml"/>
<xi:include href="erlang_stub.xml"/>
<xi:include href="error_handler.xml"/>
@@ -52,6 +55,11 @@
<xi:include href="inet.xml"/>
<xi:include href="inet_res.xml"/>
<xi:include href="init_stub.xml"/>
+ <xi:include href="logger.xml"/>
+ <xi:include href="logger_filters.xml"/>
+ <xi:include href="logger_formatter.xml"/>
+ <xi:include href="logger_std_h.xml"/>
+ <xi:include href="logger_disk_log_h.xml"/>
<xi:include href="net_adm.xml"/>
<xi:include href="net_kernel.xml"/>
<xi:include href="os.xml"/>
@@ -61,6 +69,4 @@
<xi:include href="user.xml"/>
<xi:include href="wrap_log_reader.xml"/>
<xi:include href="zlib_stub.xml"/>
- <xi:include href="app.xml"/>
- <xi:include href="config.xml"/>
</application>
diff --git a/lib/kernel/doc/src/rpc.xml b/lib/kernel/doc/src/rpc.xml
index adec2d9520..c55454506e 100644
--- a/lib/kernel/doc/src/rpc.xml
+++ b/lib/kernel/doc/src/rpc.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2016</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -28,7 +28,7 @@
<date>1996-09-10</date>
<rev>A</rev>
</header>
- <module>rpc</module>
+ <module since="">rpc</module>
<modulesummary>Remote Procedure Call services.</modulesummary>
<description>
<p>This module contains services similar to Remote
@@ -51,7 +51,7 @@
<funcs>
<func>
- <name name="abcast" arity="2"/>
+ <name name="abcast" arity="2" since=""/>
<fsummary>Broadcast a message asynchronously to a registered process on
all nodes.</fsummary>
<desc>
@@ -61,7 +61,7 @@
</func>
<func>
- <name name="abcast" arity="3"/>
+ <name name="abcast" arity="3" since=""/>
<fsummary>Broadcast a message asynchronously to a registered process on
specific nodes.</fsummary>
<desc>
@@ -72,7 +72,7 @@
</func>
<func>
- <name name="async_call" arity="4"/>
+ <name name="async_call" arity="4" since=""/>
<fsummary>Evaluate a function call on a node, asynchronous
version.</fsummary>
<desc>
@@ -98,7 +98,7 @@
</func>
<func>
- <name name="block_call" arity="4"/>
+ <name name="block_call" arity="4" since=""/>
<fsummary>Evaluate a function call on a node in the RPC server's
context.</fsummary>
<desc>
@@ -115,7 +115,7 @@
</func>
<func>
- <name name="block_call" arity="5"/>
+ <name name="block_call" arity="5" since=""/>
<fsummary>Evaluate a function call on a node in the RPC server's
context.</fsummary>
<desc>
@@ -127,7 +127,7 @@
</func>
<func>
- <name name="call" arity="4"/>
+ <name name="call" arity="4" since=""/>
<fsummary>Evaluate a function call on a node.</fsummary>
<desc>
<p>Evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>,
@@ -138,7 +138,7 @@
</func>
<func>
- <name name="call" arity="5"/>
+ <name name="call" arity="5" since=""/>
<fsummary>Evaluate a function call on a node.</fsummary>
<desc>
<p>Evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>,
@@ -158,7 +158,7 @@
</func>
<func>
- <name name="cast" arity="4"/>
+ <name name="cast" arity="4" since=""/>
<fsummary>Run a function on a node ignoring the result.</fsummary>
<desc>
<p>Evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>,
@@ -171,7 +171,7 @@
</func>
<func>
- <name name="eval_everywhere" arity="3"/>
+ <name name="eval_everywhere" arity="3" since=""/>
<fsummary>Run a function on all nodes, ignoring the result.</fsummary>
<desc>
<p>Equivalent to <c>eval_everywhere([node()|nodes()],
@@ -181,7 +181,7 @@
</func>
<func>
- <name name="eval_everywhere" arity="4"/>
+ <name name="eval_everywhere" arity="4" since=""/>
<fsummary>Run a function on specific nodes, ignoring the
result.</fsummary>
<desc>
@@ -192,7 +192,7 @@
</func>
<func>
- <name name="multi_server_call" arity="2"/>
+ <name name="multi_server_call" arity="2" since=""/>
<fsummary>Interact with the servers on a number of nodes.</fsummary>
<desc>
<p>Equivalent to <c>multi_server_call([node()|nodes()],
@@ -201,7 +201,7 @@
</func>
<func>
- <name name="multi_server_call" arity="3"/>
+ <name name="multi_server_call" arity="3" since=""/>
<fsummary>Interact with the servers on a number of nodes.</fsummary>
<desc>
<p>Can be used when interacting with servers called
@@ -217,14 +217,14 @@
<list type="bulleted">
<item>A list of the nodes that do not exist</item>
<item>A list of the nodes where the server does not exist</item>
- <item>A list of the nodes where the server terminatd before sending
+ <item>A list of the nodes where the server terminated before sending
any reply.</item>
</list>
</desc>
</func>
<func>
- <name name="multicall" arity="3"/>
+ <name name="multicall" arity="3" since=""/>
<fsummary>Evaluate a function call on a number of nodes.</fsummary>
<desc>
<p>Equivalent to <c>multicall([node()|nodes()], <anno>Module</anno>,
@@ -233,7 +233,7 @@
</func>
<func>
- <name name="multicall" arity="4" clause_i="1"/>
+ <name name="multicall" arity="4" clause_i="1" since=""/>
<fsummary>Evaluate a function call on a number of nodes.</fsummary>
<desc>
<p>Equivalent to <c>multicall(<anno>Nodes</anno>, <anno>Module</anno>,
@@ -242,7 +242,7 @@
</func>
<func>
- <name name="multicall" arity="4" clause_i="2"/>
+ <name name="multicall" arity="4" clause_i="2" since=""/>
<fsummary>Evaluate a function call on a number of nodes.</fsummary>
<desc>
<p>Equivalent to <c>multicall([node()|nodes()], <anno>Module</anno>,
@@ -252,7 +252,7 @@
</func>
<func>
- <name name="multicall" arity="5"/>
+ <name name="multicall" arity="5" since=""/>
<fsummary>Evaluate a function call on a number of nodes.</fsummary>
<desc>
<p>In contrast to an RPC, a multicall is an RPC that is sent
@@ -268,8 +268,9 @@
on the specified nodes and collects the answers. It returns
<c>{<anno>ResL</anno>, <anno>BadNodes</anno>}</c>, where
<c><anno>BadNodes</anno></c> is a list
- of the nodes that terminated or timed out during computation,
- and <c><anno>ResL</anno></c> is a list of the return values.
+ of the nodes that do not exist,
+ and <c><anno>ResL</anno></c> is a list of the return values,
+ or <c>{badrpc, <anno>Reason</anno>}</c> for failing calls.
<c><anno>Timeout</anno></c> is a time (integer) in milliseconds, or
<c>infinity</c>.</p>
<p>The following example is useful when new object code is to
@@ -287,7 +288,7 @@
</func>
<func>
- <name name="nb_yield" arity="1"/>
+ <name name="nb_yield" arity="1" since=""/>
<fsummary>Deliver the result of evaluating a function call on a node
(non-blocking).</fsummary>
<desc>
@@ -296,7 +297,7 @@
</func>
<func>
- <name name="nb_yield" arity="2"/>
+ <name name="nb_yield" arity="2" since=""/>
<fsummary>Deliver the result of evaluating a function call on a node
(non-blocking).</fsummary>
<desc>
@@ -314,7 +315,7 @@
</func>
<func>
- <name name="parallel_eval" arity="1"/>
+ <name name="parallel_eval" arity="1" since=""/>
<fsummary>Evaluate many function calls on all nodes in
parallel.</fsummary>
<desc>
@@ -327,7 +328,7 @@
</func>
<func>
- <name name="pinfo" arity="1"/>
+ <name name="pinfo" arity="1" since=""/>
<fsummary>Information about a process.</fsummary>
<desc>
<p>Location transparent version of the BIF
@@ -336,8 +337,8 @@
</func>
<func>
- <name name="pinfo" arity="2" clause_i="1"/>
- <name name="pinfo" arity="2" clause_i="2"/>
+ <name name="pinfo" arity="2" clause_i="1" since=""/>
+ <name name="pinfo" arity="2" clause_i="2" since=""/>
<fsummary>Information about a process.</fsummary>
<desc>
<p>Location transparent version of the BIF
@@ -346,8 +347,8 @@
</func>
<func>
- <name name="pmap" arity="3"/>
- <fsummary>Parallell evaluation of mapping a function over a
+ <name name="pmap" arity="3" since=""/>
+ <fsummary>Parallel evaluation of mapping a function over a
list.</fsummary>
<desc>
<p>Evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>,
@@ -359,7 +360,7 @@
</func>
<func>
- <name name="sbcast" arity="2"/>
+ <name name="sbcast" arity="2" since=""/>
<fsummary>Broadcast a message synchronously to a registered process on
all nodes.</fsummary>
<desc>
@@ -369,7 +370,7 @@
</func>
<func>
- <name name="sbcast" arity="3"/>
+ <name name="sbcast" arity="3" since=""/>
<fsummary>Broadcast a message synchronously to a registered process on
specific nodes.</fsummary>
<desc>
@@ -390,7 +391,7 @@
</func>
<func>
- <name name="server_call" arity="4"/>
+ <name name="server_call" arity="4" since=""/>
<fsummary>Interact with a server on a node.</fsummary>
<desc>
<p>Can be used when interacting with a server called
@@ -409,7 +410,7 @@
</func>
<func>
- <name name="yield" arity="1"/>
+ <name name="yield" arity="1" since=""/>
<fsummary>Deliver the result of evaluating a function call on a node
(blocking).</fsummary>
<desc>
diff --git a/lib/kernel/doc/src/seq_trace.xml b/lib/kernel/doc/src/seq_trace.xml
index 197851021f..aa29223dd0 100644
--- a/lib/kernel/doc/src/seq_trace.xml
+++ b/lib/kernel/doc/src/seq_trace.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1998</year><year>2017</year>
+ <year>1998</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -28,7 +28,7 @@
<date>1998-04-16</date>
<rev>A</rev>
</header>
- <module>seq_trace</module>
+ <module since="">seq_trace</module>
<modulesummary>Sequential tracing of messages.</modulesummary>
<description>
<p>Sequential tracing makes it possible to trace all messages
@@ -51,7 +51,7 @@
</datatypes>
<funcs>
<func>
- <name name="set_token" arity="1"/>
+ <name name="set_token" arity="1" since=""/>
<fsummary>Set the trace token</fsummary>
<desc>
<p>Sets the trace token for the calling process to <c><anno>Token</anno></c>.
@@ -71,7 +71,7 @@ seq_trace:set_token(OldToken), % activate the trace token again
</desc>
</func>
<func>
- <name name="set_token" arity="2"/>
+ <name name="set_token" arity="2" since=""/>
<fsummary>Set a component of the trace token</fsummary>
<type name="component"/>
<type name="flag"/>
@@ -80,13 +80,18 @@ seq_trace:set_token(OldToken), % activate the trace token again
<p>Sets the individual <c><anno>Component</anno></c> of the trace token to
<c><anno>Val</anno></c>. Returns the previous value of the component.</p>
<taglist>
- <tag><c>set_token(label, <anno>Integer</anno>)</c></tag>
+ <tag><c>set_token(label, <anno>Label</anno>)</c></tag>
<item>
- <p>The <c>label</c> component is an integer which
+ <p>The <c>label</c> component is a term which
identifies all events belonging to the same sequential
trace. If several sequential traces can be active
simultaneously, <c>label</c> is used to identify
the separate traces. Default is 0.</p>
+ <warning>
+ <p>Labels were restricted to small signed integers (28 bits)
+              prior to OTP 21. The trace token will be silently dropped if it
+ crosses over to a node that does not support the label.</p>
+ </warning>
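+          <p>For example, any term can now be used as label (the tuple
+            below is only an illustration):</p>
+          <code type="none">
+seq_trace:set_token(label, {http_request, 42}).</code>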
</item>
<tag><c>set_token(serial, SerialValue)</c></tag>
<item>
@@ -153,7 +158,7 @@ seq_trace:set_token(OldToken), % activate the trace token again
</desc>
</func>
<func>
- <name name="get_token" arity="0"/>
+ <name name="get_token" arity="0" since=""/>
<fsummary>Return the value of the trace token</fsummary>
<desc>
<p>Returns the value of the trace token for the calling process.
@@ -164,7 +169,7 @@ seq_trace:set_token(OldToken), % activate the trace token again
</desc>
</func>
<func>
- <name name="get_token" arity="1"/>
+ <name name="get_token" arity="1" since=""/>
<fsummary>Return the value of a trace token component</fsummary>
<type name="component"/>
<type name="flag"/>
@@ -177,7 +182,7 @@ seq_trace:set_token(OldToken), % activate the trace token again
</desc>
</func>
<func>
- <name name="print" arity="1"/>
+ <name name="print" arity="1" since=""/>
<fsummary>Put the Erlang term <c>TraceInfo</c>into the sequential trace output</fsummary>
<desc>
<p>Puts the Erlang term <c><anno>TraceInfo</anno></c> into the sequential
@@ -187,7 +192,7 @@ seq_trace:set_token(OldToken), % activate the trace token again
</desc>
</func>
<func>
- <name name="print" arity="2"/>
+ <name name="print" arity="2" since=""/>
<fsummary>Put the Erlang term <c>TraceInfo</c>into the sequential trace output</fsummary>
<desc>
<p>Same as <c>print/1</c> with the additional condition that
@@ -196,7 +201,7 @@ seq_trace:set_token(OldToken), % activate the trace token again
</desc>
</func>
<func>
- <name name="reset_trace" arity="0"/>
+ <name name="reset_trace" arity="0" since=""/>
<fsummary>Stop all sequential tracing on the local node</fsummary>
<desc>
<p>Sets the trace token to empty for all processes on the
@@ -208,7 +213,7 @@ seq_trace:set_token(OldToken), % activate the trace token again
</desc>
</func>
<func>
- <name name="set_system_tracer" arity="1"/>
+ <name name="set_system_tracer" arity="1" since=""/>
<fsummary>Set the system tracer</fsummary>
<type name="tracer"/>
<desc>
@@ -222,7 +227,7 @@ seq_trace:set_token(OldToken), % activate the trace token again
</desc>
</func>
<func>
- <name name="get_system_tracer" arity="0"/>
+ <name name="get_system_tracer" arity="0" since=""/>
<fsummary>Return the pid() or port() of the current system tracer.</fsummary>
<type name="tracer"/>
<desc>
diff --git a/lib/kernel/doc/src/specs.xml b/lib/kernel/doc/src/specs.xml
index 29d52f23bb..b8c25ca53b 100644
--- a/lib/kernel/doc/src/specs.xml
+++ b/lib/kernel/doc/src/specs.xml
@@ -6,6 +6,7 @@
<xi:include href="../specs/specs_disk_log.xml"/>
<xi:include href="../specs/specs_erl_boot_server.xml"/>
<xi:include href="../specs/specs_erl_ddll.xml"/>
+ <xi:include href="../specs/specs_erl_epmd.xml"/>
<xi:include href="../specs/specs_erl_prim_loader_stub.xml"/>
<xi:include href="../specs/specs_erlang_stub.xml"/>
<xi:include href="../specs/specs_error_handler.xml"/>
@@ -20,6 +21,11 @@
<xi:include href="../specs/specs_inet.xml"/>
<xi:include href="../specs/specs_inet_res.xml"/>
<xi:include href="../specs/specs_init_stub.xml"/>
+ <xi:include href="../specs/specs_logger.xml"/>
+ <xi:include href="../specs/specs_logger_filters.xml"/>
+ <xi:include href="../specs/specs_logger_formatter.xml"/>
+ <xi:include href="../specs/specs_logger_std_h.xml"/>
+ <xi:include href="../specs/specs_logger_disk_log_h.xml"/>
<xi:include href="../specs/specs_net_adm.xml"/>
<xi:include href="../specs/specs_net_kernel.xml"/>
<xi:include href="../specs/specs_os.xml"/>
diff --git a/lib/kernel/doc/src/wrap_log_reader.xml b/lib/kernel/doc/src/wrap_log_reader.xml
index 7fb9c1c023..5f37e7ec5f 100644
--- a/lib/kernel/doc/src/wrap_log_reader.xml
+++ b/lib/kernel/doc/src/wrap_log_reader.xml
@@ -32,7 +32,7 @@
<rev>A</rev>
<file>wrap_log_reader.sgml</file>
</header>
- <module>wrap_log_reader</module>
+ <module since="">wrap_log_reader</module>
<modulesummary>A service to read internally formatted wrap disk logs.
</modulesummary>
<description>
@@ -65,8 +65,8 @@
<funcs>
<func>
- <name name="chunk" arity="1"/>
- <name name="chunk" arity="2"/>
+ <name name="chunk" arity="1" since=""/>
+ <name name="chunk" arity="2" since=""/>
<fsummary>Read a chunk of objects written to a wrap log.</fsummary>
<type name="chunk_ret"/>
<desc>
@@ -105,7 +105,7 @@
</func>
<func>
- <name name="close" arity="1"/>
+ <name name="close" arity="1" since=""/>
<fsummary>Close a log.</fsummary>
<desc>
<p>Closes a log file properly.</p>
@@ -113,8 +113,8 @@
</func>
<func>
- <name name="open" arity="1"/>
- <name name="open" arity="2"/>
+ <name name="open" arity="1" since=""/>
+ <name name="open" arity="2" since=""/>
<fsummary>Open a log file.</fsummary>
<type name="open_ret"/>
<desc>
diff --git a/lib/kernel/examples/Makefile b/lib/kernel/examples/Makefile
index 26ec58f571..f86e662838 100644
--- a/lib/kernel/examples/Makefile
+++ b/lib/kernel/examples/Makefile
@@ -45,7 +45,7 @@ RELSYSDIR = $(RELEASE_PATH)/lib/kernel-$(KERNEL_VSN)/examples
# Pack and install the complete directory structure from
# here (CWD) and down, for all examples.
-EXAMPLES = uds_dist
+EXAMPLES = uds_dist gen_tcp_dist
release_spec:
$(INSTALL_DIR) "$(RELSYSDIR)"
diff --git a/lib/kernel/examples/gen_tcp_dist/Makefile b/lib/kernel/examples/gen_tcp_dist/Makefile
new file mode 100644
index 0000000000..65513a1729
--- /dev/null
+++ b/lib/kernel/examples/gen_tcp_dist/Makefile
@@ -0,0 +1,20 @@
+RM=rm -f
+CP=cp
+EBIN=ebin
+ERLC=erlc
+# Works if building in open source source tree
+KERNEL_INCLUDE=$(ERL_TOP)/lib/kernel/include
+ERLCFLAGS+= -W -I$(KERNEL_INCLUDE)
+
+MODULES=gen_tcp_dist
+
+TARGET_FILES=$(MODULES:%=$(EBIN)/%.beam)
+
+opt: $(TARGET_FILES)
+
+$(EBIN)/%.beam: src/%.erl
+ $(ERLC) $(ERLCFLAGS) -o$(EBIN) $<
+
+clean:
+ $(RM) $(TARGET_FILES)
+
diff --git a/lib/kernel/examples/gen_tcp_dist/ebin/.gitignore b/lib/kernel/examples/gen_tcp_dist/ebin/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/kernel/examples/gen_tcp_dist/ebin/.gitignore
diff --git a/lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl b/lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl
new file mode 100644
index 0000000000..98554ed805
--- /dev/null
+++ b/lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl
@@ -0,0 +1,781 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(gen_tcp_dist).
+
+%%
+%% This is an example of how to plug in an arbitrary distribution
+%% carrier for Erlang using distribution processes.
+%%
+%% This example uses gen_tcp for transportation of data, but
+%% you can use whatever underlying protocol you want as long
+%% as your implementation reliably delivers data chunks to the
+%% receiving VM in the order they were sent from the sending
+%% VM.
+%%
+%% This code is a rewrite of the lib/kernel/src/inet_tcp_dist.erl
+%% distribution implementation for TCP that is used by default. That
+%% implementation uses distribution ports instead of distribution
+%% processes and is more efficient compared to this implementation,
+%% since this implementation more or less just puts distribution
+%% processes in between the VM and the ports without any specific
+%% gain.
+%%
+
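+%% A usage sketch (paths and node names below are examples only):
+%% after compiling this module into ./ebin, a node can be started with
+%%
+%%   erl -pa ebin -proto_dist gen_tcp -sname example
+%%
+%% which makes the runtime system pick up gen_tcp_dist as its
+%% distribution module.
+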
+-export([listen/1, accept/1, accept_connection/5,
+ setup/5, close/1, select/1, is_node_name/1]).
+
+%% Optional
+-export([setopts/2, getopts/2]).
+
+%% internal exports
+
+-export([dist_cntrlr_setup/1, dist_cntrlr_input_setup/3,
+ dist_cntrlr_tick_handler/1]).
+
+-export([accept_loop/2,do_accept/6,do_setup/6]).
+
+-import(error_logger,[error_msg/2]).
+
+-include("net_address.hrl").
+
+-include("dist.hrl").
+-include("dist_util.hrl").
+
+%% ------------------------------------------------------------
+%% Select this protocol based on node name
+%% select(Node) => Bool
+%% ------------------------------------------------------------
+
+select(Node) ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [_, Host] ->
+ case inet:getaddr(Host, inet) of
+ {ok,_} -> true;
+ _ -> false
+ end;
+ _ -> false
+ end.
+
+%% ------------------------------------------------------------
+%% Create the listen socket, i.e. the port that this erlang
+%% node is accessible through.
+%% ------------------------------------------------------------
+
+listen(Name) ->
+ case do_listen([binary, {active, false}, {packet,2}, {reuseaddr, true}]) of
+ {ok, Socket} ->
+ TcpAddress = get_tcp_address(Socket),
+ {_,Port} = TcpAddress#net_address.address,
+ ErlEpmd = net_kernel:epmd_module(),
+ case ErlEpmd:register_node(Name, Port) of
+ {ok, Creation} ->
+ {ok, {Socket, TcpAddress, Creation}};
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+do_listen(Options) ->
+ {First,Last} = case application:get_env(kernel,inet_dist_listen_min) of
+ {ok,N} when is_integer(N) ->
+ case application:get_env(kernel,
+ inet_dist_listen_max) of
+ {ok,M} when is_integer(M) ->
+ {N,M};
+ _ ->
+ {N,N}
+ end;
+ _ ->
+ {0,0}
+ end,
+ do_listen(First, Last, listen_options([{backlog,128}|Options])).
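+
+%% The port range searched by do_listen/3 below comes from the kernel
+%% parameters inet_dist_listen_min and inet_dist_listen_max, e.g.
+%% (values for illustration only):
+%%
+%%   erl -kernel inet_dist_listen_min 9100 inet_dist_listen_max 9105
+%%
+%% With no range configured we fall back to {0,0}, i.e. port 0, which
+%% lets the OS pick any free port.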
+
+do_listen(First,Last,_) when First > Last ->
+ {error,eaddrinuse};
+do_listen(First,Last,Options) ->
+ case gen_tcp:listen(First, Options) of
+ {error, eaddrinuse} ->
+ do_listen(First+1,Last,Options);
+ Other ->
+ Other
+ end.
+
+listen_options(Opts0) ->
+ Opts1 =
+ case application:get_env(kernel, inet_dist_use_interface) of
+ {ok, Ip} ->
+ [{ip, Ip} | Opts0];
+ _ ->
+ Opts0
+ end,
+ case application:get_env(kernel, inet_dist_listen_options) of
+ {ok,ListenOpts} ->
+ ListenOpts ++ Opts1;
+ _ ->
+ Opts1
+ end.
+
+
+%% ------------------------------------------------------------
+%% Accepts new connection attempts from other Erlang nodes.
+%% ------------------------------------------------------------
+
+accept(Listen) ->
+ spawn_opt(?MODULE, accept_loop, [self(), Listen], [link, {priority, max}]).
+
+accept_loop(Kernel, Listen) ->
+ ?trace("~p~n",[{?MODULE, accept_loop, self()}]),
+ case gen_tcp:accept(Listen) of
+ {ok, Socket} ->
+ DistCtrl = spawn_dist_cntrlr(Socket),
+ ?trace("~p~n",[{?MODULE, accept_loop, accepted, Socket, DistCtrl, self()}]),
+ flush_controller(DistCtrl, Socket),
+ gen_tcp:controlling_process(Socket, DistCtrl),
+ flush_controller(DistCtrl, Socket),
+ Kernel ! {accept,self(),DistCtrl,inet,tcp},
+ receive
+ {Kernel, controller, Pid} ->
+ call_ctrlr(DistCtrl, {supervisor, Pid}),
+ Pid ! {self(), controller};
+ {Kernel, unsupported_protocol} ->
+ exit(unsupported_protocol)
+ end,
+ accept_loop(Kernel, Listen);
+ Error ->
+ exit(Error)
+ end.
+
+flush_controller(Pid, Socket) ->
+ receive
+ {tcp, Socket, Data} ->
+ Pid ! {tcp, Socket, Data},
+ flush_controller(Pid, Socket);
+ {tcp_closed, Socket} ->
+ Pid ! {tcp_closed, Socket},
+ flush_controller(Pid, Socket)
+ after 0 ->
+ ok
+ end.
+
+%% ------------------------------------------------------------
+%% Accepts a new connection attempt from another Erlang node.
+%% Performs the handshake with the other side.
+%% ------------------------------------------------------------
+
+accept_connection(AcceptPid, DistCtrl, MyNode, Allowed, SetupTime) ->
+ spawn_opt(?MODULE, do_accept,
+ [self(), AcceptPid, DistCtrl, MyNode, Allowed, SetupTime],
+ [link, {priority, max}]).
+
+do_accept(Kernel, AcceptPid, DistCtrl, MyNode, Allowed, SetupTime) ->
+ ?trace("~p~n",[{?MODULE, do_accept, self(), MyNode}]),
+ receive
+ {AcceptPid, controller} ->
+ Timer = dist_util:start_timer(SetupTime),
+ case check_ip(DistCtrl) of
+ true ->
+ HSData0 = hs_data_common(DistCtrl),
+ HSData = HSData0#hs_data{kernel_pid = Kernel,
+ this_node = MyNode,
+ socket = DistCtrl,
+ timer = Timer,
+ this_flags = 0,
+ allowed = Allowed},
+ dist_util:handshake_other_started(HSData);
+ {false,IP} ->
+ error_msg("** Connection attempt from "
+ "disallowed IP ~w ** ~n", [IP]),
+ ?shutdown(no_node)
+ end
+ end.
+
+%% we may not always want the nodelay behaviour
+%% for performance reasons
+
+nodelay() ->
+ case application:get_env(kernel, dist_nodelay) of
+ undefined ->
+ {nodelay, true};
+ {ok, true} ->
+ {nodelay, true};
+ {ok, false} ->
+ {nodelay, false};
+ _ ->
+ {nodelay, true}
+ end.
+
+%% ------------------------------------------------------------
+%% Setup a new connection to another Erlang node.
+%% Performs the handshake with the other side.
+%% ------------------------------------------------------------
+
+setup(Node, Type, MyNode, LongOrShortNames,SetupTime) ->
+ spawn_opt(?MODULE, do_setup,
+ [self(), Node, Type, MyNode, LongOrShortNames, SetupTime],
+ [link, {priority, max}]).
+
+do_setup(Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
+ ?trace("~p~n",[{?MODULE, do_setup, self(), Node}]),
+ [Name, Address] = splitnode(Node, LongOrShortNames),
+ case inet:getaddr(Address, inet) of
+ {ok, Ip} ->
+ Timer = dist_util:start_timer(SetupTime),
+ ErlEpmd = net_kernel:epmd_module(),
+ case ErlEpmd:port_please(Name, Ip) of
+ {port, TcpPort, Version} ->
+ ?trace("port_please(~p) -> version ~p~n",
+ [Node,Version]),
+ dist_util:reset_timer(Timer),
+ case
+ gen_tcp:connect(
+ Ip, TcpPort,
+ connect_options([binary, {active, false}, {packet, 2}]))
+ of
+ {ok, Socket} ->
+ DistCtrl = spawn_dist_cntrlr(Socket),
+ call_ctrlr(DistCtrl, {supervisor, self()}),
+ flush_controller(DistCtrl, Socket),
+ gen_tcp:controlling_process(Socket, DistCtrl),
+ flush_controller(DistCtrl, Socket),
+ HSData0 = hs_data_common(DistCtrl),
+ HSData = HSData0#hs_data{kernel_pid = Kernel,
+ other_node = Node,
+ this_node = MyNode,
+ socket = DistCtrl,
+ timer = Timer,
+ this_flags = 0,
+ other_version = Version,
+ request_type = Type},
+ dist_util:handshake_we_started(HSData);
+ _ ->
+ %% The other node may have closed since
+ %% port_please!
+ ?trace("other node (~p) "
+ "closed since port_please.~n",
+ [Node]),
+ ?shutdown(Node)
+ end;
+ _ ->
+ ?trace("port_please (~p) "
+ "failed.~n", [Node]),
+ ?shutdown(Node)
+ end;
+ _Other ->
+ ?trace("inet_getaddr(~p) "
+ "failed (~p).~n", [Node,_Other]),
+ ?shutdown(Node)
+ end.
+
+connect_options(Opts) ->
+ case application:get_env(kernel, inet_dist_connect_options) of
+ {ok,ConnectOpts} ->
+ ConnectOpts ++ Opts;
+ _ ->
+ Opts
+ end.
+
+%%
+%% Close a socket.
+%%
+close(Listen) ->
+ gen_tcp:close(Listen).
+
+
+%% If Node is illegal terminate the connection setup!!
+splitnode(Node, LongOrShortNames) ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [Name|Tail] when Tail =/= [] ->
+ Host = lists:append(Tail),
+ case split_node(Host, $., []) of
+ [_] when LongOrShortNames =:= longnames ->
+ case inet:parse_address(Host) of
+ {ok, _} ->
+ [Name, Host];
+ _ ->
+ error_msg("** System running to use "
+ "fully qualified "
+ "hostnames **~n"
+ "** Hostname ~ts is illegal **~n",
+ [Host]),
+ ?shutdown(Node)
+ end;
+ L when length(L) > 1, LongOrShortNames =:= shortnames ->
+ error_msg("** System NOT running to use fully qualified "
+ "hostnames **~n"
+ "** Hostname ~ts is illegal **~n",
+ [Host]),
+ ?shutdown(Node);
+ _ ->
+ [Name, Host]
+ end;
+ [_] ->
+ error_msg("** Nodename ~p illegal, no '@' character **~n",
+ [Node]),
+ ?shutdown(Node);
+ _ ->
+ error_msg("** Nodename ~p illegal **~n", [Node]),
+ ?shutdown(Node)
+ end.
+
+split_node([Chr|T], Chr, Ack) -> [lists:reverse(Ack)|split_node(T, Chr, [])];
+split_node([H|T], Chr, Ack) -> split_node(T, Chr, [H|Ack]);
+split_node([], _, Ack) -> [lists:reverse(Ack)].
+
+%% ------------------------------------------------------------
+%% Fetch local information about a Socket.
+%% ------------------------------------------------------------
+get_tcp_address(Socket) ->
+ {ok, Address} = inet:sockname(Socket),
+ {ok, Host} = inet:gethostname(),
+ #net_address {
+ address = Address,
+ host = Host,
+ protocol = tcp,
+ family = inet
+ }.
+
+%% ------------------------------------------------------------
+%% Only accept new connection attempts from nodes on our
+%% own LAN, if the check_ip environment parameter is true.
+%% ------------------------------------------------------------
+check_ip(DistCtrl) ->
+ case application:get_env(check_ip) of
+ {ok, true} ->
+ case get_ifs(DistCtrl) of
+ {ok, IFs, IP} ->
+ check_ip(IFs, IP);
+ _ ->
+ ?shutdown(no_node)
+ end;
+ _ ->
+ true
+ end.
+
+get_ifs(DistCtrl) ->
+ Socket = call_ctrlr(DistCtrl, socket),
+ case inet:peername(Socket) of
+ {ok, {IP, _}} ->
+ case inet:getif(Socket) of
+ {ok, IFs} -> {ok, IFs, IP};
+ Error -> Error
+ end;
+ Error ->
+ Error
+ end.
+
+check_ip([{OwnIP, _, Netmask}|IFs], PeerIP) ->
+ case {inet_tcp:mask(Netmask, PeerIP), inet_tcp:mask(Netmask, OwnIP)} of
+ {M, M} -> true;
+ _ -> check_ip(IFs, PeerIP)
+ end;
+check_ip([], PeerIP) ->
+ {false, PeerIP}.
+
+is_node_name(Node) when is_atom(Node) ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [_, _Host] -> true;
+ _ -> false
+ end;
+is_node_name(_Node) ->
+ false.
+
+hs_data_common(DistCtrl) ->
+ TickHandler = call_ctrlr(DistCtrl, tick_handler),
+ Socket = call_ctrlr(DistCtrl, socket),
+ #hs_data{f_send = send_fun(),
+ f_recv = recv_fun(),
+ f_setopts_pre_nodeup = setopts_pre_nodeup_fun(),
+ f_setopts_post_nodeup = setopts_post_nodeup_fun(),
+ f_getll = getll_fun(),
+ f_handshake_complete = handshake_complete_fun(),
+ f_address = address_fun(),
+ mf_setopts = setopts_fun(DistCtrl, Socket),
+ mf_getopts = getopts_fun(DistCtrl, Socket),
+ mf_getstat = getstat_fun(DistCtrl, Socket),
+ mf_tick = tick_fun(DistCtrl, TickHandler)}.
+
+%%% ------------------------------------------------------------
+%%% Distribution controller processes
+%%% ------------------------------------------------------------
+
+%%
+%% There will be five parties working together when the
+%% connection is up:
+%% - The gen_tcp socket. Providing a tcp/ip connection
+%% to the other node.
+%% - The output handler. It will dispatch all outgoing
+%% traffic from the VM to the gen_tcp socket. This
+%% process is registered as distribution controller
+%% for this channel with the VM.
+%% - The input handler. It will dispatch all incoming
+%% traffic from the gen_tcp socket to the VM. This
+%% process is also the socket owner and receives
+%% incoming traffic using active-N.
+%% - The tick handler. Dispatches asynchronous tick
+%% requests to the socket. It executes on max priority
+%% since it is important to get ticks through to the
+%% other end.
+%% - The channel supervisor (provided by dist_util). It
+%% monitors traffic. It issues tick requests to the tick
+%% handler when no outgoing traffic is seen and brings
+%% the connection down if no incoming traffic is seen.
+%% This process also executes on max priority.
+%%
+%% These parties are linked together so should one
+%% of them fail, all of them are terminated and the
+%% connection is taken down.
+%%
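+%% A rough sketch of the data flow once the connection is up, based
+%% on the code below:
+%%
+%%   VM --dist_ctrl_get_data--> output handler --gen_tcp:send--> socket
+%%   VM <--dist_ctrl_put_data-- input handler <--{tcp,Sock,Data}-- socket
+%%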
+
+%% In order to avoid issues with lingering signal binaries
+%% we enable off-heap message queue data as well as fullsweep
+%% after 0. The fullsweeps will be cheap since we have more
+%% or less no live data.
+-define(DIST_CNTRL_COMMON_SPAWN_OPTS,
+ [{message_queue_data, off_heap},
+ {fullsweep_after, 0}]).
+
+tick_fun(DistCtrl, TickHandler) ->
+ fun (Ctrl) when Ctrl == DistCtrl ->
+ TickHandler ! tick
+ end.
+
+getstat_fun(DistCtrl, Socket) ->
+ fun (Ctrl) when Ctrl == DistCtrl ->
+ case inet:getstat(Socket, [recv_cnt, send_cnt, send_pend]) of
+ {ok, Stat} ->
+ split_stat(Stat,0,0,0);
+ Error ->
+ Error
+ end
+ end.
+
+split_stat([{recv_cnt, R}|Stat], _, W, P) ->
+ split_stat(Stat, R, W, P);
+split_stat([{send_cnt, W}|Stat], R, _, P) ->
+ split_stat(Stat, R, W, P);
+split_stat([{send_pend, P}|Stat], R, W, _) ->
+ split_stat(Stat, R, W, P);
+split_stat([], R, W, P) ->
+ {ok, R, W, P}.
+
+setopts_fun(DistCtrl, Socket) ->
+ fun (Ctrl, Opts) when Ctrl == DistCtrl ->
+ setopts(Socket, Opts)
+ end.
+
+getopts_fun(DistCtrl, Socket) ->
+ fun (Ctrl, Opts) when Ctrl == DistCtrl ->
+ getopts(Socket, Opts)
+ end.
+
+setopts(S, Opts) ->
+ case [Opt || {K,_}=Opt <- Opts,
+ K =:= active orelse K =:= deliver orelse K =:= packet] of
+ [] -> inet:setopts(S,Opts);
+ Opts1 -> {error, {badopts,Opts1}}
+ end.
+
+getopts(S, Opts) ->
+ inet:getopts(S, Opts).
+
+send_fun() ->
+ fun (Ctrlr, Packet) ->
+ call_ctrlr(Ctrlr, {send, Packet})
+ end.
+
+recv_fun() ->
+ fun (Ctrlr, Length, Timeout) ->
+ case call_ctrlr(Ctrlr, {recv, Length, Timeout}) of
+ {ok, Bin} when is_binary(Bin) ->
+ {ok, binary_to_list(Bin)};
+ Other ->
+ Other
+ end
+ end.
+
+getll_fun() ->
+ fun (Ctrlr) ->
+ call_ctrlr(Ctrlr, getll)
+ end.
+
+address_fun() ->
+ fun (Ctrlr, Node) ->
+ case call_ctrlr(Ctrlr, {address, Node}) of
+ {error, no_node} -> %% No '@' or more than one '@' in node name.
+ ?shutdown(no_node);
+ Res ->
+ Res
+ end
+ end.
+
+setopts_pre_nodeup_fun() ->
+ fun (Ctrlr) ->
+ call_ctrlr(Ctrlr, pre_nodeup)
+ end.
+
+setopts_post_nodeup_fun() ->
+ fun (Ctrlr) ->
+ call_ctrlr(Ctrlr, post_nodeup)
+ end.
+
+handshake_complete_fun() ->
+ fun (Ctrlr, Node, DHandle) ->
+ call_ctrlr(Ctrlr, {handshake_complete, Node, DHandle})
+ end.
+
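+%% Synchronous request towards a distribution controller process: we
+%% send {Ref, self(), Msg} and wait for the matching {Ref, Result}
+%% reply, monitoring the controller so that we do not hang if it dies
+%% before replying.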
+call_ctrlr(Ctrlr, Msg) ->
+ Ref = erlang:monitor(process, Ctrlr),
+ Ctrlr ! {Ref, self(), Msg},
+ receive
+ {Ref, Res} ->
+ erlang:demonitor(Ref, [flush]),
+ Res;
+ {'DOWN', Ref, process, Ctrlr, Reason} ->
+ exit({dist_controller_exit, Reason})
+ end.
+
+%%
+%% The tick handler process writes a tick to the
+%% socket when it receives a 'tick' message from
+%% the connection supervisor.
+%%
+%% We are not allowed to block the connection
+%% supervisor when writing a tick and we also want
+%% the tick to go through even on a heavily
+%% loaded system. gen_tcp does not have a
+%% non-blocking send operation exposed in its API
+%% and we don't want to run the distribution
+%% controller under high priority. Therefore this
+%% separate process with max priority dispatches
+%% ticks.
+%%
+dist_cntrlr_tick_handler(Socket) ->
+ receive
+ tick ->
+ %% May block due to busy port...
+ sock_send(Socket, "");
+ _ ->
+ ok
+ end,
+ dist_cntrlr_tick_handler(Socket).
+
+spawn_dist_cntrlr(Socket) ->
+ spawn_opt(?MODULE, dist_cntrlr_setup, [Socket],
+ [{priority, max}] ++ ?DIST_CNTRL_COMMON_SPAWN_OPTS).
+
+dist_cntrlr_setup(Socket) ->
+ TickHandler = spawn_opt(?MODULE, dist_cntrlr_tick_handler,
+ [Socket],
+ [link, {priority, max}]
+ ++ ?DIST_CNTRL_COMMON_SPAWN_OPTS),
+ dist_cntrlr_setup_loop(Socket, TickHandler, undefined).
+
+%%
+%% During the handshake phase we loop in dist_cntrlr_setup_loop().
+%% When the connection is up we spawn an input handler and
+%% continue as output handler.
+%%
+dist_cntrlr_setup_loop(Socket, TickHandler, Sup) ->
+ receive
+ {tcp_closed, Socket} ->
+ exit(connection_closed);
+
+ {Ref, From, {supervisor, Pid}} ->
+ Res = link(Pid),
+ From ! {Ref, Res},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Pid);
+
+ {Ref, From, tick_handler} ->
+ From ! {Ref, TickHandler},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, socket} ->
+ From ! {Ref, Socket},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, {send, Packet}} ->
+ Res = gen_tcp:send(Socket, Packet),
+ From ! {Ref, Res},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, {recv, Length, Timeout}} ->
+ Res = gen_tcp:recv(Socket, Length, Timeout),
+ From ! {Ref, Res},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, getll} ->
+ From ! {Ref, {ok, self()}},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, {address, Node}} ->
+ Res = case inet:peername(Socket) of
+ {ok, Address} ->
+ case split_node(atom_to_list(Node), $@, []) of
+ [_,Host] ->
+ #net_address{address=Address,host=Host,
+ protocol=tcp, family=inet};
+ _ ->
+ {error, no_node}
+ end
+ end,
+ From ! {Ref, Res},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, pre_nodeup} ->
+ Res = inet:setopts(Socket,
+ [{active, false},
+ {packet, 4},
+ nodelay()]),
+ From ! {Ref, Res},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, post_nodeup} ->
+ Res = inet:setopts(Socket,
+ [{active, false},
+ {packet, 4},
+ nodelay()]),
+ From ! {Ref, Res},
+ dist_cntrlr_setup_loop(Socket, TickHandler, Sup);
+
+ {Ref, From, {handshake_complete, _Node, DHandle}} ->
+ From ! {Ref, ok},
+ %% Handshake complete! Begin dispatching traffic...
+
+ %% We use a separate process for dispatching input. This
+ %% is not necessary, but it enables parallel execution
+ %% of independent workloads at the same time as it
+ %% simplifies the implementation...
+ InputHandler = spawn_opt(?MODULE, dist_cntrlr_input_setup,
+ [DHandle, Socket, Sup],
+ [link] ++ ?DIST_CNTRL_COMMON_SPAWN_OPTS),
+
+ flush_controller(InputHandler, Socket),
+ gen_tcp:controlling_process(Socket, InputHandler),
+ flush_controller(InputHandler, Socket),
+
+ ok = erlang:dist_ctrl_input_handler(DHandle, InputHandler),
+
+ InputHandler ! DHandle,
+
+ %% From now on we execute on normal priority
+ process_flag(priority, normal),
+ erlang:dist_ctrl_get_data_notification(DHandle),
+ dist_cntrlr_output_loop(DHandle, Socket)
+ end.
+
+%% We use active 10 for good throughput while still
+%% maintaining back-pressure if the input controller
+%% isn't able to handle all incoming messages...
+-define(ACTIVE_INPUT, 10).
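+
+%% Note on dist_cntrlr_input_loop/3 below: N counts how many more
+%% {tcp, Socket, Data} messages the socket may deliver. When N drops
+%% to half of ?ACTIVE_INPUT we top up with {active, ?ACTIVE_INPUT - N}
+%% so that at most ?ACTIVE_INPUT messages are outstanding at any time.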
+
+dist_cntrlr_input_setup(DHandle, Socket, Sup) ->
+ link(Sup),
+ %% Ensure we don't try to put data before being registered
+ %% as input handler...
+ receive
+ DHandle ->
+ dist_cntrlr_input_loop(DHandle, Socket, 0)
+ end.
+
+dist_cntrlr_input_loop(DHandle, Socket, N) when N =< ?ACTIVE_INPUT/2 ->
+ inet:setopts(Socket, [{active, ?ACTIVE_INPUT - N}]),
+ dist_cntrlr_input_loop(DHandle, Socket, ?ACTIVE_INPUT);
+dist_cntrlr_input_loop(DHandle, Socket, N) ->
+ receive
+ {tcp_closed, Socket} ->
+ %% Connection to remote node terminated...
+ exit(connection_closed);
+
+ {tcp, Socket, Data} ->
+ %% Incoming data from remote node...
+ try erlang:dist_ctrl_put_data(DHandle, Data)
+ catch _ : _ -> death_row()
+ end,
+ dist_cntrlr_input_loop(DHandle, Socket, N-1);
+
+ _ ->
+ %% Ignore...
+ dist_cntrlr_input_loop(DHandle, Socket, N)
+ end.
+
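+%% Drain everything the VM currently has queued for this connection.
+%% When erlang:dist_ctrl_get_data/1 returns none we request a new
+%% dist_data notification and return to the output loop to wait for it.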
+dist_cntrlr_send_data(DHandle, Socket) ->
+ case erlang:dist_ctrl_get_data(DHandle) of
+ none ->
+ erlang:dist_ctrl_get_data_notification(DHandle);
+ Data ->
+ sock_send(Socket, Data),
+ dist_cntrlr_send_data(DHandle, Socket)
+ end.
+
+
+dist_cntrlr_output_loop(DHandle, Socket) ->
+ receive
+ dist_data ->
+ %% Outgoing data from this node...
+ try dist_cntrlr_send_data(DHandle, Socket)
+ catch _ : _ -> death_row()
+ end,
+ dist_cntrlr_output_loop(DHandle, Socket);
+
+ {send, From, Ref, Data} ->
+ %% This is for testing only!
+ %%
+ %% Needed by some OTP distribution
+ %% test suites...
+ sock_send(Socket, Data),
+ From ! {Ref, ok},
+ dist_cntrlr_output_loop(DHandle, Socket);
+
+ _ ->
+ %% Drop garbage message...
+ dist_cntrlr_output_loop(DHandle, Socket)
+
+ end.
+
+sock_send(Socket, Data) ->
+ try gen_tcp:send(Socket, Data) of
+ ok -> ok;
+ {error, Reason} -> death_row({send_error, Reason})
+ catch
+ Type : Reason -> death_row({send_error, {Type, Reason}})
+ end.
+
+death_row() ->
+ death_row(connection_closed).
+
+death_row(normal) ->
+ %% We do not want to exit with normal
+ %% exit reason since it won't bring down
+ %% linked processes...
+ death_row();
+death_row(Reason) ->
+ %% When the connection is on its way down operations
+ %% begin to fail. We catch the failures and call
+ %% this function waiting for termination. We should
+ %% be terminated by one of our links to the other
+ %% involved parties that began bringing the
+ %% connection down. By waiting for termination we
+ %% avoid altering the exit reason for the connection
+ %% teardown. We however limit the wait to 5 seconds
+ %% and bring down the connection ourselves if not
+ %% terminated...
+ receive after 5000 -> exit(Reason) end.
diff --git a/lib/kernel/include/dist.hrl b/lib/kernel/include/dist.hrl
index d6bccdf474..003852f1b0 100644
--- a/lib/kernel/include/dist.hrl
+++ b/lib/kernel/include/dist.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -40,3 +40,8 @@
-define(DFLAG_UTF8_ATOMS, 16#10000).
-define(DFLAG_MAP_TAG, 16#20000).
-define(DFLAG_BIG_CREATION, 16#40000).
+-define(DFLAG_SEND_SENDER, 16#80000).
+-define(DFLAG_BIG_SEQTRACE_LABELS, 16#100000).
+
+%% Also update dflag2str() in ../src/dist_util.erl
+%% when adding flags...
diff --git a/lib/kernel/include/dist_util.hrl b/lib/kernel/include/dist_util.hrl
index e3d2fe0eb6..56f775f060 100644
--- a/lib/kernel/include/dist_util.hrl
+++ b/lib/kernel/include/dist_util.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -29,9 +29,9 @@
-endif.
-ifdef(dist_trace).
--define(trace(Fmt,Args), io:format("~p ~p:~s",[erlang:timestamp(),node(),lists:flatten(io_lib:format(Fmt, Args))])).
+-define(trace(Fmt,Args), io:format("~p ~p:~s",[erlang:convert_time_unit(erlang:monotonic_time()-erlang:system_info(start_time), native, microsecond),node(),lists:flatten(io_lib:format(Fmt, Args))])).
% Use the one below for config-file (early boot) connection tracing
-%-define(trace(Fmt,Args), erlang:display([erlang:now(),node(),lists:flatten(io_lib:format(Fmt, Args))])).
+%-define(trace(Fmt,Args), erlang:display([erlang:convert_time_unit(erlang:monotonic_time()-erlang:system_info(start_time), native, microsecond),node(),lists:flatten(io_lib:format(Fmt, Args))])).
-define(trace_factor,8).
-else.
-define(trace(Fmt,Args), ok).
@@ -78,7 +78,13 @@
%% New in kernel-5.1 (OTP 19.1):
mf_setopts, %% netkernel:setopts on active connection
- mf_getopts %% netkernel:getopts on active connection
+ mf_getopts, %% netkernel:getopts on active connection
+
+ %% New in kernel-6.0 (OTP 21.0)
+ f_handshake_complete, %% Notify handshake complete
+ add_flags, %% dflags to add
+ reject_flags, %% dflags not to use (not all can be rejected)
+ require_flags %% dflags that are required
}).
diff --git a/lib/kernel/include/logger.hrl b/lib/kernel/include/logger.hrl
new file mode 100644
index 0000000000..b09977e0f2
--- /dev/null
+++ b/lib/kernel/include/logger.hrl
@@ -0,0 +1,53 @@
+-ifndef(LOGGER_HRL).
+-define(LOGGER_HRL,true).
+-define(LOG_EMERGENCY(A),?DO_LOG(emergency,[A])).
+-define(LOG_EMERGENCY(A,B),?DO_LOG(emergency,[A,B])).
+-define(LOG_EMERGENCY(A,B,C),?DO_LOG(emergency,[A,B,C])).
+
+-define(LOG_ALERT(A),?DO_LOG(alert,[A])).
+-define(LOG_ALERT(A,B),?DO_LOG(alert,[A,B])).
+-define(LOG_ALERT(A,B,C),?DO_LOG(alert,[A,B,C])).
+
+-define(LOG_CRITICAL(A),?DO_LOG(critical,[A])).
+-define(LOG_CRITICAL(A,B),?DO_LOG(critical,[A,B])).
+-define(LOG_CRITICAL(A,B,C),?DO_LOG(critical,[A,B,C])).
+
+-define(LOG_ERROR(A),?DO_LOG(error,[A])).
+-define(LOG_ERROR(A,B),?DO_LOG(error,[A,B])).
+-define(LOG_ERROR(A,B,C),?DO_LOG(error,[A,B,C])).
+
+-define(LOG_WARNING(A),?DO_LOG(warning,[A])).
+-define(LOG_WARNING(A,B),?DO_LOG(warning,[A,B])).
+-define(LOG_WARNING(A,B,C),?DO_LOG(warning,[A,B,C])).
+
+-define(LOG_NOTICE(A),?DO_LOG(notice,[A])).
+-define(LOG_NOTICE(A,B),?DO_LOG(notice,[A,B])).
+-define(LOG_NOTICE(A,B,C),?DO_LOG(notice,[A,B,C])).
+
+-define(LOG_INFO(A),?DO_LOG(info,[A])).
+-define(LOG_INFO(A,B),?DO_LOG(info,[A,B])).
+-define(LOG_INFO(A,B,C),?DO_LOG(info,[A,B,C])).
+
+-define(LOG_DEBUG(A),?DO_LOG(debug,[A])).
+-define(LOG_DEBUG(A,B),?DO_LOG(debug,[A,B])).
+-define(LOG_DEBUG(A,B,C),?DO_LOG(debug,[A,B,C])).
+
+-define(LOG(L,A),?DO_LOG(L,[A])).
+-define(LOG(L,A,B),?DO_LOG(L,[A,B])).
+-define(LOG(L,A,B,C),?DO_LOG(L,[A,B,C])).
+
+-define(LOCATION,#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+ line=>?LINE,
+ file=>?FILE}).
+
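+%% Example use from a module that includes this header (a sketch only;
+%% the format string, report and metadata below are made up):
+%%
+%%   ?LOG_ERROR("connection to ~p failed: ~p", [Node, Reason]),
+%%   ?LOG_INFO(#{event => connect, peer => Node}, #{domain => [my_app]}),
+%%
+%% Each macro attaches the calling module, function, arity, file and
+%% line as metadata via ?LOCATION before handing over to logger.
+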
+%%%-----------------------------------------------------------------
+%%% Internal, i.e. not intended for direct use in code - use above
+%%% macros instead!
+-define(DO_LOG(Level,Args),
+ case logger:allow(Level,?MODULE) of
+ true ->
+ apply(logger,macro_log,[?LOCATION,Level|Args]);
+ false ->
+ ok
+ end).
+-endif.
diff --git a/lib/kernel/src/Makefile b/lib/kernel/src/Makefile
index 5946620f0f..3d1506ea08 100644
--- a/lib/kernel/src/Makefile
+++ b/lib/kernel/src/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1996-2017. All Rights Reserved.
+# Copyright Ericsson AB 1996-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -106,8 +106,23 @@ MODULES = \
inet_sctp \
kernel \
kernel_config \
+ kernel_refc \
local_udp \
local_tcp \
+ logger \
+ logger_backend \
+ logger_config \
+ logger_handler_watcher \
+ logger_std_h \
+ logger_disk_log_h \
+ logger_h_common \
+ logger_filters \
+ logger_formatter \
+ logger_olp \
+ logger_proxy \
+ logger_server \
+ logger_simple_h \
+ logger_sup \
net \
net_adm \
net_kernel \
@@ -120,17 +135,25 @@ MODULES = \
user \
user_drv \
user_sup \
+ raw_file_io \
+ raw_file_io_compressed \
+ raw_file_io_inflate \
+ raw_file_io_deflate \
+ raw_file_io_delayed \
+ raw_file_io_list \
+ raw_file_io_raw \
wrap_log_reader
HRL_FILES= ../include/file.hrl ../include/inet.hrl ../include/inet_sctp.hrl \
../include/dist.hrl ../include/dist_util.hrl \
- ../include/net_address.hrl
+ ../include/net_address.hrl ../include/logger.hrl
INTERNAL_HRL_FILES= application_master.hrl disk_log.hrl \
- erl_epmd.hrl hipe_ext_format.hrl \
+ erl_epmd.hrl file_int.hrl hipe_ext_format.hrl \
inet_dns.hrl inet_res.hrl \
inet_boot.hrl inet_config.hrl inet_int.hrl \
- inet_dns_record_adts.hrl
+ inet_dns_record_adts.hrl \
+ logger_internal.hrl logger_olp.hrl logger_h_common.hrl
ERL_FILES= $(MODULES:%=%.erl)
@@ -215,7 +238,7 @@ release_docs_spec:
# Include dependencies -- list below added by Kostis Sagonas
-$(EBIN)/application_controller.beam: application_master.hrl
+$(EBIN)/application_controller.beam: application_master.hrl ../include/logger.hrl
$(EBIN)/application_master.beam: application_master.hrl
$(EBIN)/auth.beam: ../include/file.hrl
$(EBIN)/code.beam: ../include/file.hrl
@@ -226,7 +249,9 @@ $(EBIN)/disk_log_server.beam: disk_log.hrl
$(EBIN)/dist_util.beam: ../include/dist_util.hrl ../include/dist.hrl
$(EBIN)/erl_boot_server.beam: inet_boot.hrl
$(EBIN)/erl_epmd.beam: inet_int.hrl erl_epmd.hrl
-$(EBIN)/file.beam: ../include/file.hrl
+$(EBIN)/error_logger.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/file.beam: ../include/file.hrl file_int.hrl
+$(EBIN)/file_io_server.beam: ../include/file.hrl file_int.hrl
$(EBIN)/gen_tcp.beam: inet_int.hrl
$(EBIN)/gen_udp.beam: inet_int.hrl
$(EBIN)/gen_sctp.beam: ../include/inet_sctp.hrl
@@ -250,7 +275,26 @@ $(EBIN)/inet_udp.beam: inet_int.hrl
$(EBIN)/inet_sctp.beam: inet_int.hrl ../include/inet_sctp.hrl
$(EBIN)/local_udp.beam: inet_int.hrl
$(EBIN)/local_tcp.beam: inet_int.hrl
+$(EBIN)/logger.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_backend.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_config.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_disk_log_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl
+$(EBIN)/logger_filters.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_formatter.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_olp.beam: logger_olp.hrl logger_internal.hrl
+$(EBIN)/logger_proxy.beam: logger_internal.hrl
+$(EBIN)/logger_server.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_simple_h.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_std_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl
+$(EBIN)/logger_h_common.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl
$(EBIN)/net_kernel.beam: ../include/net_address.hrl
$(EBIN)/os.beam: ../include/file.hrl
$(EBIN)/ram_file.beam: ../include/file.hrl
$(EBIN)/wrap_log_reader.beam: disk_log.hrl ../include/file.hrl
+$(EBIN)/raw_file_io.beam: ../include/file.hrl file_int.hrl
+$(EBIN)/raw_file_io_compressed.beam: ../include/file.hrl file_int.hrl
+$(EBIN)/raw_file_io_inflate.beam: ../include/file.hrl file_int.hrl
+$(EBIN)/raw_file_io_deflate.beam: ../include/file.hrl file_int.hrl
+$(EBIN)/raw_file_io_delayed.beam: ../include/file.hrl file_int.hrl
+$(EBIN)/raw_file_io_list.beam: ../include/file.hrl file_int.hrl
+$(EBIN)/raw_file_io_raw.beam: ../include/file.hrl file_int.hrl
diff --git a/lib/kernel/src/application.erl b/lib/kernel/src/application.erl
index bc6be2f8f5..5c2e981e4b 100644
--- a/lib/kernel/src/application.erl
+++ b/lib/kernel/src/application.erl
@@ -25,7 +25,7 @@
which_applications/0, which_applications/1,
loaded_applications/0, permit/2]).
-export([ensure_started/1, ensure_started/2]).
--export([set_env/3, set_env/4, unset_env/2, unset_env/3]).
+-export([set_env/1, set_env/2, set_env/3, set_env/4, unset_env/2, unset_env/3]).
-export([get_env/1, get_env/2, get_env/3, get_all_env/0, get_all_env/1]).
-export([get_key/1, get_key/2, get_all_key/0, get_all_key/1]).
-export([get_application/0, get_application/1, info/0]).
@@ -279,6 +279,26 @@ loaded_applications() ->
info() ->
application_controller:info().
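+
+%% Example (a sketch; my_app and its pool_size parameter are made-up
+%% names used only for illustration):
+%%
+%%   ok = application:set_env([{kernel, [{logger_level, warning}]},
+%%                             {my_app, [{pool_size, 10}]}],
+%%                            [{persistent, true}]),
+%%
+%% sets the environment of several applications in one call.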
+-spec set_env(Config) -> 'ok' when
+ Config :: [{Application, Env}],
+ Application :: atom(),
+ Env :: [{Par :: atom(), Val :: term()}].
+
+set_env(Config) when is_list(Config) ->
+ set_env(Config, []).
+
+-spec set_env(Config, Opts) -> 'ok' when
+ Config :: [{Application, Env}],
+ Application :: atom(),
+ Env :: [{Par :: atom(), Val :: term()}],
+ Opts :: [{timeout, timeout()} | {persistent, boolean()}].
+
+set_env(Config, Opts) when is_list(Config), is_list(Opts) ->
+ case application_controller:set_env(Config, Opts) of
+ ok -> ok;
+ {error, Msg} -> erlang:error({badarg, Msg}, [Config, Opts])
+ end.
+
-spec set_env(Application, Par, Val) -> 'ok' when
Application :: atom(),
Par :: atom(),
diff --git a/lib/kernel/src/application_controller.erl b/lib/kernel/src/application_controller.erl
index 3b642f5873..9a8091fb2e 100644
--- a/lib/kernel/src/application_controller.erl
+++ b/lib/kernel/src/application_controller.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -26,7 +26,7 @@
control_application/1,
change_application_data/2, prep_config_change/0, config_change/1,
which_applications/0, which_applications/1,
- loaded_applications/0, info/0,
+ loaded_applications/0, info/0, set_env/2,
get_pid_env/2, get_env/2, get_pid_all_env/1, get_all_env/1,
get_pid_key/2, get_key/2, get_pid_all_key/1, get_all_key/1,
get_master/1, get_application/1, get_application_module/1,
@@ -44,6 +44,7 @@
keyfind/3, keydelete/3, keyreplace/4]).
-include("application_master.hrl").
+-include("logger.hrl").
-define(AC, ?MODULE). % Name of process
@@ -344,9 +345,6 @@ get_all_env(AppName) ->
map(fun([Key, Val]) -> {Key, Val} end,
ets:match(ac_tab, {{env, AppName, '$1'}, '$2'})).
-
-
-
get_pid_key(Master, Key) ->
case ets:match(ac_tab, {{application_master, '$1'}, Master}) of
[[AppName]] -> get_key(AppName, Key);
@@ -460,6 +458,15 @@ permit_application(ApplName, Flag) ->
{permit_application, ApplName, Flag},
infinity).
+set_env(Config, Opts) ->
+ case check_conf_data(Config) of
+ ok ->
+ Timeout = proplists:get_value(timeout, Opts, 5000),
+ gen_server:call(?AC, {set_env, Config, Opts}, Timeout);
+
+ {error, _} = Error ->
+ Error
+ end.
set_env(AppName, Key, Val) ->
gen_server:call(?AC, {set_env, AppName, Key, Val, []}).
@@ -527,19 +534,17 @@ check_conf_data([]) ->
check_conf_data(ConfData) when is_list(ConfData) ->
[Application | ConfDataRem] = ConfData,
case Application of
- {kernel, List} when is_list(List) ->
- case check_para_kernel(List) of
- ok ->
- check_conf_data(ConfDataRem);
- Error1 ->
- Error1
- end;
{AppName, List} when is_atom(AppName), is_list(List) ->
- case check_para(List, atom_to_list(AppName)) of
- ok ->
- check_conf_data(ConfDataRem);
- Error2 ->
- Error2
+ case lists:keymember(AppName, 1, ConfDataRem) of
+ true ->
+ ?LOG_WARNING("duplicate application config: " ++ atom_to_list(AppName));
+ false ->
+ ok
+ end,
+
+ case check_para(List, AppName) of
+ ok -> check_conf_data(ConfDataRem);
+ Error -> Error
end;
{AppName, List} when is_list(List) ->
ErrMsg = "application: "
@@ -552,36 +557,40 @@ check_conf_data(ConfData) when is_list(ConfData) ->
++ "; parameters must be a list",
{error, ErrMsg};
Else ->
- ErrMsg = "invalid application name: " ++
- lists:flatten(io_lib:format(" ~tp",[Else])),
+ ErrMsg = "invalid application config: "
+ ++ lists:flatten(io_lib:format("~tp",[Else])),
{error, ErrMsg}
end;
check_conf_data(_ConfData) ->
- {error, 'configuration must be a list ended by <dot><whitespace>'}.
-
+ {error, "configuration must be a list ended by <dot><whitespace>"}.
-%% Special check of distributed parameter for kernel
-check_para_kernel([]) ->
+
+check_para([], _AppName) ->
ok;
-check_para_kernel([{distributed, Apps} | ParaList]) when is_list(Apps) ->
- case check_distributed(Apps) of
- {error, _ErrorMsg} = Error ->
- Error;
- _ ->
- check_para_kernel(ParaList)
+check_para([{Para, Val} | ParaList], AppName) when is_atom(Para) ->
+ case lists:keymember(Para, 1, ParaList) of
+ true ->
+ ?LOG_WARNING("application: " ++ atom_to_list(AppName) ++
+ "; duplicate parameter: " ++ atom_to_list(Para));
+ false ->
+ ok
+ end,
+
+ case check_para_value(Para, Val, AppName) of
+ ok -> check_para(ParaList, AppName);
+ {error, _} = Error -> Error
end;
-check_para_kernel([{distributed, _Apps} | _ParaList]) ->
- {error, "application: kernel; erroneous parameter: distributed"};
-check_para_kernel([{Para, _Val} | ParaList]) when is_atom(Para) ->
- check_para_kernel(ParaList);
-check_para_kernel([{Para, _Val} | _ParaList]) ->
- {error, "application: kernel; invalid parameter: " ++
+check_para([{Para, _Val} | _ParaList], AppName) ->
+ {error, "application: " ++ atom_to_list(AppName) ++ "; invalid parameter name: " ++
lists:flatten(io_lib:format("~tp",[Para]))};
-check_para_kernel(Else) ->
- {error, "application: kernel; invalid parameter list: " ++
+check_para([Else | _ParaList], AppName) ->
+ {error, "application: " ++ atom_to_list(AppName) ++ "; invalid parameter: " ++
lists:flatten(io_lib:format("~tp",[Else]))}.
-
+check_para_value(distributed, Apps, kernel) -> check_distributed(Apps);
+check_para_value(_Para, _Val, _AppName) -> ok.
+
+%% Special check of distributed parameter for kernel
check_distributed([]) ->
ok;
check_distributed([{App, List} | Apps]) when is_atom(App), is_list(List) ->
@@ -594,18 +603,6 @@ check_distributed(_Else) ->
{error, "application: kernel; erroneous parameter: distributed"}.
-check_para([], _AppName) ->
- ok;
-check_para([{Para, _Val} | ParaList], AppName) when is_atom(Para) ->
- check_para(ParaList, AppName);
-check_para([{Para, _Val} | _ParaList], AppName) ->
- {error, "application: " ++ AppName ++ "; invalid parameter: " ++
- lists:flatten(io_lib:format("~tp",[Para]))};
-check_para([Else | _ParaList], AppName) ->
- {error, "application: " ++ AppName ++ "; invalid parameter: " ++
- lists:flatten(io_lib:format("~tp",[Else]))}.
-
-
-type calls() :: 'info' | 'prep_config_change' | 'which_applications'
| {'config_change' | 'control_application' |
'load_application' | 'start_type' | 'stop_application' |
@@ -862,6 +859,16 @@ handle_call(which_applications, _From, S) ->
end, S#state.running),
{reply, Reply, S};
+handle_call({set_env, Config, Opts}, _From, S) ->
+ _ = [add_env(AppName, Env) || {AppName, Env} <- Config],
+
+ case proplists:get_value(persistent, Opts, false) of
+ true ->
+ {reply, ok, S#state{conf_data = merge_env(S#state.conf_data, Config)}};
+ false ->
+ {reply, ok, S}
+ end;
+
handle_call({set_env, AppName, Key, Val, Opts}, _From, S) ->
ets:insert(ac_tab, {{env, AppName, Key}, Val}),
case proplists:get_value(persistent, Opts, false) of
@@ -1271,9 +1278,7 @@ load(S, {ApplData, ApplEnv, IncApps, Descr, Id, Vsn, Apps}) ->
NewEnv = merge_app_env(ApplEnv, ConfEnv),
CmdLineEnv = get_cmd_env(Name),
NewEnv2 = merge_app_env(NewEnv, CmdLineEnv),
- NewEnv3 = keyreplaceadd(included_applications, 1, NewEnv2,
- {included_applications, IncApps}),
- add_env(Name, NewEnv3),
+ add_env(Name, NewEnv2),
Appl = #appl{name = Name, descr = Descr, id = Id, vsn = Vsn,
appl_data = ApplData, inc_apps = IncApps, apps = Apps},
ets:insert(ac_tab, {{loaded, Name}, Appl}),
@@ -1291,7 +1296,7 @@ load(S, {ApplData, ApplEnv, IncApps, Descr, Id, Vsn, Apps}) ->
{ok, NewS}.
unload(AppName, S) ->
- {ok, IncApps} = get_env(AppName, included_applications),
+ {ok, IncApps} = get_key(AppName, included_applications),
del_env(AppName),
ets:delete(ac_tab, {loaded, AppName}),
foldl(fun(App, S1) ->
@@ -1546,9 +1551,8 @@ do_change_apps(Applications, Config, OldAppls) ->
%% Report errors, but do not terminate
%% (backwards compatible behaviour)
lists:foreach(fun({error, {SysFName, Line, Str}}) ->
- Str2 = lists:flatten(io_lib:format("~tp: ~w: ~ts~n",
- [SysFName, Line, Str])),
- error_logger:format(Str2, [])
+ ?LOG_ERROR("~tp: ~w: ~ts~n",[SysFName, Line, Str],
+ #{error_logger=>#{tag=>error}})
end,
Errors),
@@ -1583,13 +1587,9 @@ do_change_appl({ok, {ApplData, Env, IncApps, Descr, Id, Vsn, Apps}},
CmdLineEnv = get_cmd_env(AppName),
NewEnv2 = merge_app_env(NewEnv1, CmdLineEnv),
- %% included_apps is made into an env parameter as well
- NewEnv3 = keyreplaceadd(included_applications, 1, NewEnv2,
- {included_applications, IncApps}),
-
%% Update ets table with new application env
del_env(AppName),
- add_env(AppName, NewEnv3),
+ add_env(AppName, NewEnv2),
OldAppl#appl{appl_data=ApplData,
descr=Descr,
@@ -1631,8 +1631,9 @@ make_term(Str) ->
end.
handle_make_term_error(Mod, Reason, Str) ->
- error_logger:format("application_controller: ~ts: ~ts~n",
- [Mod:format_error(Reason), Str]),
+ ?LOG_ERROR("application_controller: ~ts: ~ts~n",
+ [Mod:format_error(Reason), Str],
+ #{error_logger=>#{tag=>error}}),
throw({error, {bad_environment_value, Str}}).
get_env_i(Name, #state{conf_data = ConfData}) when is_list(ConfData) ->
@@ -1819,8 +1820,9 @@ check_conf() ->
%% Therefore read and merge contents.
if
BFName =:= "sys" ->
+ DName = filename:dirname(FName),
{ok, SysEnv, Errors} =
- check_conf_sys(NewEnv),
+ check_conf_sys(NewEnv, [], [], DName),
%% Report first error, if any, and
%% terminate
@@ -1842,20 +1844,31 @@ check_conf() ->
end.
check_conf_sys(Env) ->
- check_conf_sys(Env, [], []).
+ check_conf_sys(Env, [], [], []).
-check_conf_sys([File|T], SysEnv, Errors) when is_list(File) ->
+check_conf_sys([File|T], SysEnv, Errors, DName) when is_list(File),is_list(DName) ->
BFName = filename:basename(File, ".config"),
FName = filename:join(filename:dirname(File), BFName ++ ".config"),
- case load_file(FName) of
+ LName = case filename:pathtype(FName) of
+ relative when (DName =/= []) ->
+ % Check if relative to the sys.config dir, otherwise use legacy mode,
+ % i.e. relative to cwd.
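+ % Example (hypothetical paths): with sys.config located in
+ % /etc/myapp, an "other.config" entry is first looked up as
+ % /etc/myapp/other.config, and only read relative to the current
+ % working directory if that file does not exist.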
+ RName = filename:join(DName, FName),
+ case erl_prim_loader:read_file_info(RName) of
+ {ok, _} -> RName ;
+ error -> FName
+ end;
+ _ -> FName
+ end,
+ case load_file(LName) of
{ok, NewEnv} ->
- check_conf_sys(T, merge_env(SysEnv, NewEnv), Errors);
+ check_conf_sys(T, merge_env(SysEnv, NewEnv), Errors, DName);
{error, {Line, _Mod, Str}} ->
- check_conf_sys(T, SysEnv, [{error, {FName, Line, Str}}|Errors])
+ check_conf_sys(T, SysEnv, [{error, {LName, Line, Str}}|Errors], DName)
end;
-check_conf_sys([Tuple|T], SysEnv, Errors) ->
- check_conf_sys(T, merge_env(SysEnv, [Tuple]), Errors);
-check_conf_sys([], SysEnv, Errors) ->
+check_conf_sys([Tuple|T], SysEnv, Errors, DName) ->
+ check_conf_sys(T, merge_env(SysEnv, [Tuple]), Errors, DName);
+check_conf_sys([], SysEnv, Errors, _) ->
{ok, SysEnv, lists:reverse(Errors)}.
load_file(File) ->
@@ -1913,19 +1926,25 @@ config_error() ->
"configuration file must contain ONE list ended by <dot>"}}.
%%-----------------------------------------------------------------
-%% Info messages sent to error_logger
+%% Info messages sent to logger
%%-----------------------------------------------------------------
info_started(Name, Node) ->
- Rep = [{application, Name},
- {started_at, Node}],
- error_logger:info_report(progress, Rep).
+ ?LOG_INFO(#{label=>{application_controller,progress},
+ report=>[{application, Name},
+ {started_at, Node}]},
+ #{domain=>[otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"PROGRESS REPORT"},
+ error_logger=>#{tag=>info_report,type=>progress}}).
info_exited(Name, Reason, Type) ->
- Rep = [{application, Name},
- {exited, Reason},
- {type, Type}],
- error_logger:info_report(Rep).
-
+ ?LOG_NOTICE(#{label=>{application_controller,exit},
+ report=>[{application, Name},
+ {exited, Reason},
+ {type, Type}]},
+ #{domain=>[otp],
+ report_cb=>fun logger:format_otp_report/1,
+ error_logger=>#{tag=>info_report,type=>std_info}}).
%%-----------------------------------------------------------------
%% Reply to all processes waiting this application to be started.
@@ -2012,5 +2031,5 @@ to_string(Term) ->
true ->
Term;
false ->
- lists:flatten(io_lib:format("~134217728p", [Term]))
+ lists:flatten(io_lib:format("~0p", [Term]))
end.
diff --git a/lib/kernel/src/application_master.erl b/lib/kernel/src/application_master.erl
index 5da2b0b06c..8697143dfb 100644
--- a/lib/kernel/src/application_master.erl
+++ b/lib/kernel/src/application_master.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -118,6 +118,10 @@ init(Parent, Starter, ApplData, Type) ->
link(Parent),
process_flag(trap_exit, true),
OldGleader = group_leader(),
+ %% We become the group leader, but forward all I/O to OldGleader.
+ %% This is just a way to identify processes that belong to the
+ %% application. Used for example to find ourselves from any
+ %% process, or, reciprocally, to kill them all when we terminate.
group_leader(self(), self()),
%% Insert ourselves as master for the process. This ensures that
%% the processes in the application can use get_env/1 at startup.
diff --git a/lib/kernel/src/auth.erl b/lib/kernel/src/auth.erl
index 40feee6bf0..4d18daf9e4 100644
--- a/lib/kernel/src/auth.erl
+++ b/lib/kernel/src/auth.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -107,7 +107,7 @@ get_cookie() ->
get_cookie(_Node) when node() =:= nonode@nohost ->
nocookie;
get_cookie(Node) ->
- gen_server:call(auth, {get_cookie, Node}).
+ gen_server:call(auth, {get_cookie, Node}, infinity).
-spec set_cookie(Cookie :: cookie()) -> 'true'.
@@ -119,12 +119,12 @@ set_cookie(Cookie) ->
set_cookie(_Node, _Cookie) when node() =:= nonode@nohost ->
erlang:error(distribution_not_started);
set_cookie(Node, Cookie) ->
- gen_server:call(auth, {set_cookie, Node, Cookie}).
+ gen_server:call(auth, {set_cookie, Node, Cookie}, infinity).
-spec sync_cookie() -> any().
sync_cookie() ->
- gen_server:call(auth, sync_cookie).
+ gen_server:call(auth, sync_cookie, infinity).
-spec print(Node :: node(), Format :: string(), Args :: [_]) -> 'ok'.
diff --git a/lib/kernel/src/code.erl b/lib/kernel/src/code.erl
index 9969021a6c..7faef93609 100644
--- a/lib/kernel/src/code.erl
+++ b/lib/kernel/src/code.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -149,8 +149,11 @@ load_file(Mod) when is_atom(Mod) ->
-spec ensure_loaded(Module) -> {module, Module} | {error, What} when
Module :: module(),
What :: embedded | badfile | nofile | on_load_failure.
-ensure_loaded(Mod) when is_atom(Mod) ->
- call({ensure_loaded,Mod}).
+ensure_loaded(Mod) when is_atom(Mod) ->
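+ %% Fast path: if the module is already loaded there is no need
+ %% for a round trip to the code server.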
+ case erlang:module_loaded(Mod) of
+ true -> {module, Mod};
+ false -> call({ensure_loaded,Mod})
+ end.
%% XXX File as an atom is allowed only for backwards compatibility.
-spec load_abs(Filename) -> load_ret() when
@@ -627,7 +630,7 @@ do_par_recv(N, Good, Bad) ->
call(Req) ->
code_server:call(Req).
--spec start_link() -> {'ok', pid()} | {'error', 'crash'}.
+-spec start_link() -> {'ok', pid()}.
start_link() ->
do_start().
diff --git a/lib/kernel/src/code_server.erl b/lib/kernel/src/code_server.erl
index f5a890cb95..68e1205301 100644
--- a/lib/kernel/src/code_server.erl
+++ b/lib/kernel/src/code_server.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -1434,14 +1434,26 @@ all_loaded(Db) ->
-spec error_msg(io:format(), [term()]) -> 'ok'.
error_msg(Format, Args) ->
- Msg = {notify,{error, group_leader(), {self(), Format, Args}}},
- error_logger ! Msg,
+ %% This is equivalent to calling logger:error/3, which we don't want to
+ %% do from code_server. We don't want to call logger:timestamp()
+ %% either.
+ logger ! {log,error,Format,Args,
+ #{pid=>self(),
+ gl=>group_leader(),
+ time=>os:system_time(microsecond),
+ error_logger=>#{tag=>error}}},
ok.
-spec info_msg(io:format(), [term()]) -> 'ok'.
info_msg(Format, Args) ->
- Msg = {notify,{info_msg, group_leader(), {self(), Format, Args}}},
- error_logger ! Msg,
+ %% This is equivalent to calling logger:info/3, which we don't want to
+ %% do from code_server. We don't want to call logger:timestamp()
+ %% either.
+ logger ! {log,info,Format,Args,
+ #{pid=>self(),
+ gl=>group_leader(),
+ time=>os:system_time(microsecond),
+ error_logger=>#{tag=>info_msg}}},
ok.
objfile_extension() ->
diff --git a/lib/kernel/src/disk_log_1.erl b/lib/kernel/src/disk_log_1.erl
index 93856aa7b3..41ef33c613 100644
--- a/lib/kernel/src/disk_log_1.erl
+++ b/lib/kernel/src/disk_log_1.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -630,7 +630,7 @@ is_head(Bin) when is_binary(Bin) ->
%% Writes MaxB bytes on each file.
%% Creates a file called Name.idx in the Dir. This
%% file contains the last written FileName as one byte, and
-%% follwing that, the sizes of each file (size 0 number of items).
+%% following that, the sizes of each file (size 0 number of items).
%% On startup, this file is read, and the next available
%% filename is used as first log file.
%% Reports can be browsed with Report Browser Tool (rb), or
diff --git a/lib/kernel/src/dist_util.erl b/lib/kernel/src/dist_util.erl
index b3507e5d13..ecc022b28d 100644
--- a/lib/kernel/src/dist_util.erl
+++ b/lib/kernel/src/dist_util.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -27,8 +27,10 @@
%%-compile(export_all).
-export([handshake_we_started/1, handshake_other_started/1,
+ strict_order_flags/0,
start_timer/1, setup_timer/2,
reset_timer/1, cancel_timer/1,
+ is_node_name/1, split_node/1, is_allowed/2,
shutdown/3, shutdown/4]).
-import(error_logger,[error_msg/2]).
@@ -74,22 +76,52 @@
ticked = 0
}).
-remove_flag(Flag, Flags) ->
- case Flags band Flag of
- 0 ->
- Flags;
- _ ->
- Flags - Flag
- end.
+dflag2str(?DFLAG_PUBLISHED) ->
+ "PUBLISHED";
+dflag2str(?DFLAG_ATOM_CACHE) ->
+ "ATOM_CACHE";
+dflag2str(?DFLAG_EXTENDED_REFERENCES) ->
+ "EXTENDED_REFERENCES";
+dflag2str(?DFLAG_DIST_MONITOR) ->
+ "DIST_MONITOR";
+dflag2str(?DFLAG_FUN_TAGS) ->
+ "FUN_TAGS";
+dflag2str(?DFLAG_DIST_MONITOR_NAME) ->
+ "DIST_MONITOR_NAME";
+dflag2str(?DFLAG_HIDDEN_ATOM_CACHE) ->
+ "HIDDEN_ATOM_CACHE";
+dflag2str(?DFLAG_NEW_FUN_TAGS) ->
+ "NEW_FUN_TAGS";
+dflag2str(?DFLAG_EXTENDED_PIDS_PORTS) ->
+ "EXTENDED_PIDS_PORTS";
+dflag2str(?DFLAG_EXPORT_PTR_TAG) ->
+ "EXPORT_PTR_TAG";
+dflag2str(?DFLAG_BIT_BINARIES) ->
+ "BIT_BINARIES";
+dflag2str(?DFLAG_NEW_FLOATS) ->
+ "NEW_FLOATS";
+dflag2str(?DFLAG_UNICODE_IO) ->
+ "UNICODE_IO";
+dflag2str(?DFLAG_DIST_HDR_ATOM_CACHE) ->
+ "DIST_HDR_ATOM_CACHE";
+dflag2str(?DFLAG_SMALL_ATOM_TAGS) ->
+ "SMALL_ATOM_TAGS";
+dflag2str(?DFLAG_UTF8_ATOMS) ->
+ "UTF8_ATOMS";
+dflag2str(?DFLAG_MAP_TAG) ->
+ "MAP_TAG";
+dflag2str(?DFLAG_BIG_CREATION) ->
+ "BIG_CREATION";
+dflag2str(?DFLAG_SEND_SENDER) ->
+ "SEND_SENDER";
+dflag2str(?DFLAG_BIG_SEQTRACE_LABELS) ->
+ "BIG_SEQTRACE_LABELS";
+dflag2str(_) ->
+ "UNKNOWN".
+
adjust_flags(ThisFlags, OtherFlags) ->
- case (?DFLAG_PUBLISHED band ThisFlags) band OtherFlags of
- 0 ->
- {remove_flag(?DFLAG_PUBLISHED, ThisFlags),
- remove_flag(?DFLAG_PUBLISHED, OtherFlags)};
- _ ->
- {ThisFlags, OtherFlags}
- end.
+ ThisFlags band OtherFlags.
publish_flag(hidden, _) ->
0;
@@ -101,38 +133,56 @@ publish_flag(_, OtherNode) ->
0
end.
-make_this_flags(RequestType, OtherNode) ->
- publish_flag(RequestType, OtherNode) bor
- %% The parenthesis below makes the compiler generate better code.
- (?DFLAG_EXPORT_PTR_TAG bor
- ?DFLAG_EXTENDED_PIDS_PORTS bor
- ?DFLAG_EXTENDED_REFERENCES bor
- ?DFLAG_DIST_MONITOR bor
- ?DFLAG_FUN_TAGS bor
- ?DFLAG_DIST_MONITOR_NAME bor
- ?DFLAG_HIDDEN_ATOM_CACHE bor
- ?DFLAG_NEW_FUN_TAGS bor
- ?DFLAG_BIT_BINARIES bor
- ?DFLAG_NEW_FLOATS bor
- ?DFLAG_UNICODE_IO bor
- ?DFLAG_DIST_HDR_ATOM_CACHE bor
- ?DFLAG_SMALL_ATOM_TAGS bor
- ?DFLAG_UTF8_ATOMS bor
- ?DFLAG_MAP_TAG bor
- ?DFLAG_BIG_CREATION).
-
-handshake_other_started(#hs_data{request_type=ReqType}=HSData0) ->
+
+%% Sync with dist.c
+-record(erts_dflags, {
+ default, % flags erts prefers
+ mandatory, % flags erts needs
+ addable, % flags local dist implementation is allowed to add
+ rejectable, % flags local dist implementation is allowed to reject
+ strict_order % flags for features needing strict order delivery
+}).
+
+-spec strict_order_flags() -> integer().
+strict_order_flags() ->
+ EDF = erts_internal:get_dflags(),
+ EDF#erts_dflags.strict_order.
+
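+%% Compute the distribution flags we present to the other node: start
+%% from the flags erts prefers, add the publish flag and any extra
+%% flags requested by the dist implementation, and finally clear the
+%% flags it wants to reject. AddFlags and RejectFlags are first
+%% validated against what erts reports as addable respectively
+%% rejectable.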
+make_this_flags(RequestType, AddFlags, RejectFlags, OtherNode,
+ #erts_dflags{}=EDF) ->
+ case RejectFlags band (bnot EDF#erts_dflags.rejectable) of
+ 0 -> ok;
+ Rerror -> exit({"Rejecting non rejectable flags", Rerror})
+ end,
+ case AddFlags band (bnot EDF#erts_dflags.addable) of
+ 0 -> ok;
+ Aerror -> exit({"Adding non addable flags", Aerror})
+ end,
+ Flgs0 = EDF#erts_dflags.default,
+ Flgs1 = Flgs0 bor publish_flag(RequestType, OtherNode),
+ Flgs2 = Flgs1 bor AddFlags,
+ Flgs2 band (bnot RejectFlags).
+
+handshake_other_started(#hs_data{request_type=ReqType,
+ add_flags=AddFlgs0,
+ reject_flags=RejFlgs0,
+ require_flags=ReqFlgs0}=HSData0) ->
+ AddFlgs = convert_flags(AddFlgs0),
+ RejFlgs = convert_flags(RejFlgs0),
+ ReqFlgs = convert_flags(ReqFlgs0),
{PreOtherFlags,Node,Version} = recv_name(HSData0),
- PreThisFlags = make_this_flags(ReqType, Node),
- {ThisFlags, OtherFlags} = adjust_flags(PreThisFlags,
- PreOtherFlags),
- HSData = HSData0#hs_data{this_flags=ThisFlags,
- other_flags=OtherFlags,
+ EDF = erts_internal:get_dflags(),
+ PreThisFlags = make_this_flags(ReqType, AddFlgs, RejFlgs, Node, EDF),
+ ChosenFlags = adjust_flags(PreThisFlags, PreOtherFlags),
+ HSData = HSData0#hs_data{this_flags=ChosenFlags,
+ other_flags=ChosenFlags,
other_version=Version,
other_node=Node,
- other_started=true},
- check_dflags(HSData),
- is_allowed(HSData),
+ other_started=true,
+ add_flags=AddFlgs,
+ reject_flags=RejFlgs,
+ require_flags=ReqFlgs},
+ check_dflags(HSData, EDF),
?debug({"MD5 connection from ~p (V~p)~n",
[Node, HSData#hs_data.other_version]}),
mark_pending(HSData),
@@ -150,38 +200,16 @@ handshake_other_started(OldHsData) when element(1,OldHsData) =:= hs_data ->
%%
-%% check if connecting node is allowed to connect
-%% with allow-node-scheme
-%%
-is_allowed(#hs_data{other_node = Node,
- allowed = Allowed} = HSData) ->
- case lists:member(Node, Allowed) of
- false when Allowed =/= [] ->
- send_status(HSData, not_allowed),
- error_msg("** Connection attempt from "
- "disallowed node ~w ** ~n", [Node]),
- ?shutdown2(Node, {is_allowed, not_allowed});
- _ -> true
- end.
-
-%%
-%% Check that both nodes can handle the same types of extended
-%% node containers. If they can not, abort the connection.
+%% Check mandatory flags...
%%
check_dflags(#hs_data{other_node = Node,
other_flags = OtherFlags,
- other_started = OtherStarted} = HSData) ->
-
- Mandatory = [{?DFLAG_EXTENDED_REFERENCES, "EXTENDED_REFERENCES"},
- {?DFLAG_EXTENDED_PIDS_PORTS, "EXTENDED_PIDS_PORTS"},
- {?DFLAG_UTF8_ATOMS, "UTF8_ATOMS"}],
- Missing = lists:filtermap(fun({Bit, Str}) ->
- case Bit band OtherFlags of
- Bit -> false;
- 0 -> {true, Str}
- end
- end,
- Mandatory),
+ other_started = OtherStarted,
+ require_flags = RequiredFlags} = HSData,
+ #erts_dflags{}=EDF) ->
+
+ Mandatory = (EDF#erts_dflags.mandatory bor RequiredFlags),
+ Missing = check_mandatory(Mandatory, OtherFlags, []),
case Missing of
[] ->
ok;
@@ -201,6 +229,21 @@ check_dflags(#hs_data{other_node = Node,
?shutdown2(Node, {check_dflags_failed, Missing})
end.
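+%% Walk the set bits of Mandatory one at a time (Mandatory band
+%% (Mandatory - 1) clears the lowest set bit) and collect the names of
+%% the flags that the other node did not offer. For example, with
+%% Mandatory = 2#1010 we test 2#0010 first and then 2#1000.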
+check_mandatory(0, _OtherFlags, Missing) ->
+ Missing;
+check_mandatory(Mandatory, OtherFlags, Missing) ->
+ Left = Mandatory band (Mandatory - 1), % clear lowest set bit
+ DFlag = Mandatory bxor Left, % only lowest set bit
+ NewMissing = case DFlag band OtherFlags of
+ 0 ->
+ %% Mandatory and missing...
+ [dflag2str(DFlag) | Missing];
+ _ ->
+ %% Mandatory and present...
+ Missing
+ end,
+ check_mandatory(Left, OtherFlags, NewMissing).
+
%% No nodedown will be sent if we fail before this process has
%% succeeded to mark the node as pending.
@@ -314,17 +357,27 @@ flush_down() ->
end.
handshake_we_started(#hs_data{request_type=ReqType,
- other_node=Node}=PreHSData) ->
- PreThisFlags = make_this_flags(ReqType, Node),
- HSData = PreHSData#hs_data{this_flags=PreThisFlags},
+ other_node=Node,
+ add_flags=AddFlgs0,
+ reject_flags=RejFlgs0,
+ require_flags=ReqFlgs0}=PreHSData) ->
+ AddFlgs = convert_flags(AddFlgs0),
+ RejFlgs = convert_flags(RejFlgs0),
+ ReqFlgs = convert_flags(ReqFlgs0),
+ EDF = erts_internal:get_dflags(),
+ PreThisFlags = make_this_flags(ReqType, AddFlgs, RejFlgs, Node, EDF),
+ HSData = PreHSData#hs_data{this_flags = PreThisFlags,
+ add_flags = AddFlgs,
+ reject_flags = RejFlgs,
+ require_flags = ReqFlgs},
send_name(HSData),
recv_status(HSData),
{PreOtherFlags,ChallengeA} = recv_challenge(HSData),
- {ThisFlags,OtherFlags} = adjust_flags(PreThisFlags, PreOtherFlags),
- NewHSData = HSData#hs_data{this_flags = ThisFlags,
- other_flags = OtherFlags,
+ ChosenFlags = adjust_flags(PreThisFlags, PreOtherFlags),
+ NewHSData = HSData#hs_data{this_flags = ChosenFlags,
+ other_flags = ChosenFlags,
other_started = false},
- check_dflags(NewHSData),
+ check_dflags(NewHSData, EDF),
MyChallenge = gen_challenge(),
{MyCookie,HisCookie} = get_cookies(Node),
send_challenge_reply(NewHSData,MyChallenge,
@@ -336,15 +389,16 @@ handshake_we_started(#hs_data{request_type=ReqType,
handshake_we_started(OldHsData) when element(1,OldHsData) =:= hs_data ->
handshake_we_started(convert_old_hsdata(OldHsData)).
-convert_old_hsdata({hs_data, KP, ON, TN, S, T, TF, A, OV, OF, OS, FS, FR,
- FS_PRE, FS_POST, FG, FA, MFT, MFG, RT}) ->
- #hs_data{
- kernel_pid = KP, other_node = ON, this_node = TN, socket = S, timer = T,
- this_flags = TF, allowed = A, other_version = OV, other_flags = OF,
- other_started = OS, f_send = FS, f_recv = FR, f_setopts_pre_nodeup = FS_PRE,
- f_setopts_post_nodeup = FS_POST, f_getll = FG, f_address = FA,
- mf_tick = MFT, mf_getstat = MFG, request_type = RT}.
+convert_old_hsdata(OldHsData) ->
+ OHSDL = tuple_to_list(OldHsData),
+ NoMissing = tuple_size(#hs_data{}) - tuple_size(OldHsData),
+ true = NoMissing > 0,
+ list_to_tuple(OHSDL ++ lists:duplicate(NoMissing, undefined)).
+convert_flags(Flags) when is_integer(Flags) ->
+ Flags;
+convert_flags(_Undefined) ->
+ 0.
%% --------------------------------------------------------------
%% The connection has been established.
@@ -359,15 +413,20 @@ connection(#hs_data{other_node = Node,
PType = publish_type(HSData#hs_data.other_flags),
case FPreNodeup(Socket) of
ok ->
- do_setnode(HSData), % Succeeds or exits the process.
+ DHandle = do_setnode(HSData), % Succeeds or exits the process.
Address = FAddress(Socket,Node),
mark_nodeup(HSData,Address),
case FPostNodeup(Socket) of
ok ->
+ case HSData#hs_data.f_handshake_complete of
+ undefined -> ok;
+ HsComplete -> HsComplete(Socket, Node, DHandle)
+ end,
con_loop({HSData#hs_data.kernel_pid,
Node,
Socket,
PType,
+ DHandle,
HSData#hs_data.mf_tick,
HSData#hs_data.mf_getstat,
HSData#hs_data.mf_setopts,
@@ -425,18 +484,16 @@ do_setnode(#hs_data{other_node = Node, socket = Socket,
[Node, Port, {publish_type(Flags),
'(', Flags, ')',
Version}]),
- case (catch
- erlang:setnode(Node, Port,
- {Flags, Version, '', ''})) of
- {'EXIT', {system_limit, _}} ->
+ try
+ erlang:setnode(Node, Port, {Flags, Version, '', ''})
+ catch
+ error:system_limit ->
error_msg("** Distribution system limit reached, "
"no table space left for node ~w ** ~n",
[Node]),
?shutdown(Node);
- {'EXIT', Other} ->
- exit(Other);
- _Else ->
- ok
+ error:Other:Stacktrace ->
+ exit({Other, Stacktrace})
end;
_ ->
error_msg("** Distribution connection error, "
@@ -468,7 +525,13 @@ mark_nodeup(#hs_data{kernel_pid = Kernel,
?shutdown(Node)
end.
-con_loop({Kernel, Node, Socket, Type, MFTick, MFGetstat, MFSetOpts, MFGetOpts}=ConData,
+getstat(DHandle, _Socket, undefined) ->
+ erlang:dist_get_stat(DHandle);
+getstat(_DHandle, Socket, MFGetstat) ->
+ MFGetstat(Socket).
+
+con_loop({Kernel, Node, Socket, Type, DHandle, MFTick, MFGetstat,
+ MFSetOpts, MFGetOpts}=ConData,
Tick) ->
receive
{tcp_closed, Socket} ->
@@ -476,15 +539,15 @@ con_loop({Kernel, Node, Socket, Type, MFTick, MFGetstat, MFSetOpts, MFGetOpts}=C
{Kernel, disconnect} ->
?shutdown2(Node, disconnected);
{Kernel, aux_tick} ->
- case MFGetstat(Socket) of
+ case getstat(DHandle, Socket, MFGetstat) of
{ok, _, _, PendWrite} ->
- send_tick(Socket, PendWrite, MFTick);
+ send_aux_tick(Type, Socket, PendWrite, MFTick);
_ ->
ignore_it
end,
con_loop(ConData, Tick);
{Kernel, tick} ->
- case send_tick(Socket, Tick, Type,
+ case send_tick(DHandle, Socket, Tick, Type,
MFTick, MFGetstat) of
{ok, NewTick} ->
con_loop(ConData, NewTick);
@@ -497,7 +560,7 @@ con_loop({Kernel, Node, Socket, Type, MFTick, MFGetstat, MFSetOpts, MFGetOpts}=C
?shutdown2(Node, send_net_tick_failed)
end;
{From, get_status} ->
- case MFGetstat(Socket) of
+ case getstat(DHandle, Socket, MFGetstat) of
{ok, Read, Write, _} ->
From ! {self(), get_status, {ok, Read, Write}},
con_loop(ConData, Tick);
@@ -564,33 +627,130 @@ send_challenge_ack(#hs_data{socket = Socket, f_send = FSend},
%% tcp_drv.c which used it to detect simultaneous connection
%% attempts).
%%
-recv_name(#hs_data{socket = Socket, f_recv = Recv}) ->
+recv_name(#hs_data{socket = Socket, f_recv = Recv} = HSData) ->
case Recv(Socket, 0, infinity) of
- {ok,Data} ->
- get_name(Data);
+ {ok,
+ [$n,VersionA, VersionB, Flag1, Flag2, Flag3, Flag4
+ | OtherNode] = Data} ->
+ case is_node_name(OtherNode) of
+ true ->
+ Flags = ?u32(Flag1, Flag2, Flag3, Flag4),
+ Version = ?u16(VersionA,VersionB),
+ is_allowed(HSData, Flags, OtherNode, Version);
+ false ->
+ ?shutdown(Data)
+ end;
_ ->
?shutdown(no_node)
end.
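A sketch of the message this clause decodes, for a hypothetical peer 'foo@bar' with version 5 and flags 16#7FF (node name and both values are examples only):

    Data    = [$n, 0,5, 16#00,16#00,16#07,16#FF | "foo@bar"],
    Version = ?u16(0, 5),                        %% = 5
    Flags   = ?u32(16#00, 16#00, 16#07, 16#FF),  %% = 16#7FF
    %% is_node_name("foo@bar") is true, so is_allowed/4 decides whether to proceed.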
-get_name([$n,VersionA, VersionB, Flag1, Flag2, Flag3, Flag4 | OtherNode] = Data) ->
- case is_valid_name(OtherNode) of
+is_node_name(OtherNodeName) ->
+ case string:split(OtherNodeName, "@", all) of
+ [Name,Host] ->
+ (not string:is_empty(Name))
+ andalso (not string:is_empty(Host));
+ _ ->
+ false
+ end.
+
+split_node(Node) ->
+ Split = string:split(listify(Node), "@", all),
+ case Split of
+ [Name,Host] ->
+ case string:is_empty(Name) of
+ true ->
+ Split;
+ false ->
+ case string:is_empty(Host) of
+ true ->
+ {name,Name};
+ false ->
+ {node,Name,Host}
+ end
+ end;
+ [Host] ->
+ case string:is_empty(Host) of
+ true ->
+ Split;
+ false ->
+ {host,Host}
+ end
+ end.
+
+%% Check whether the connecting node is allowed to connect,
+%% according to the allowed-nodes scheme. An empty allowed
+%% list allows all nodes.
+%%
+is_allowed(#hs_data{allowed = []}, Flags, Node, Version) ->
+ {Flags,list_to_atom(Node),Version};
+is_allowed(#hs_data{allowed = Allowed} = HSData, Flags, Node, Version) ->
+ case is_allowed(Node, Allowed) of
true ->
- {?u32(Flag1, Flag2, Flag3, Flag4), list_to_atom(OtherNode),
- ?u16(VersionA,VersionB)};
+ {Flags,list_to_atom(Node),Version};
false ->
- ?shutdown(Data)
- end;
-get_name(Data) ->
- ?shutdown(Data).
-
-is_valid_name(OtherNodeName) ->
- case string:lexemes(OtherNodeName,"@") of
- [_OtherNodeName,_OtherNodeHost] ->
- true;
- _else ->
- false
+ send_status(HSData#hs_data{other_node = Node}, not_allowed),
+ error_msg("** Connection attempt from "
+ "disallowed node ~s ** ~n", [Node]),
+ ?shutdown2(Node, {is_allowed, not_allowed})
+ end.
+
+%% The allowed list can contain node names, host names
+%% or names before '@', in atom or list form:
+%% [node@host.example.org, "host.example.org", "node@"].
+%% An empty allowed list (here) allows no nodes.
+%%
+%% Allow a node that matches any entry in the allowed list.
+%% Allowed entries may also be passed as the node to match
+%% (from outside this module); then the node has to be a valid name.
+%%
+is_allowed(_Node, []) ->
+ false;
+is_allowed(Node, [Node|_Allowed]) ->
+ %% Just an optimization
+ true;
+is_allowed(Node, [AllowedNode|Allowed]) ->
+ case split_node(AllowedNode) of
+ {node,AllowedName,AllowedHost} ->
+ %% Allowed node name
+ case split_node(Node) of
+ {node,AllowedName,AllowedHost} ->
+ true;
+ _ ->
+ is_allowed(Node, Allowed)
+ end;
+ {host,AllowedHost} ->
+ %% Allowed host name
+ case split_node(Node) of
+ {node,_,AllowedHost} ->
+ %% Matching Host part
+ true;
+ {host,AllowedHost} ->
+ %% Host matches Host
+ true;
+ _ ->
+ is_allowed(Node, Allowed)
+ end;
+ {name,AllowedName} ->
+ %% Allowed name before '@'
+ case split_node(Node) of
+ {node,AllowedName,_} ->
+ %% Matching Name part
+ true;
+ {name,AllowedName} ->
+ %% Name matches Name
+ true;
+ _ ->
+ is_allowed(Node, Allowed)
+ end;
+ _ ->
+ is_allowed(Node, Allowed)
end.
+listify(Atom) when is_atom(Atom) ->
+ atom_to_list(Atom);
+listify(Node) when is_list(Node) ->
+ Node.
+
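A few worked cases for split_node/1 and is_allowed/2; node and host names are examples only:

    split_node('node@host.example.org') %% -> {node,"node","host.example.org"}
    split_node("host.example.org")      %% -> {host,"host.example.org"}
    split_node("node@")                 %% -> {name,"node"}
    %% With Allowed = ["host.example.org"], the node 'node@host.example.org'
    %% matches the {host,...} entry and is accepted; 'node@elsewhere' is not.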
publish_type(Flags) ->
case Flags band ?DFLAG_PUBLISHED of
0 ->
@@ -731,51 +891,57 @@ send_status(#hs_data{socket = Socket, other_node = Node,
%% The detection time interval is thus, by default, 45s < DT < 75s
-%% A HIDDEN node is always (if not a pending write) ticked if
-%% we haven't read anything as a hidden node only ticks when it receives
-%% a TICK !!
+%% A HIDDEN node is always ticked if we haven't read anything,
+%% since a (primitive) hidden node only ticks when it receives a TICK !!
-send_tick(Socket, Tick, Type, MFTick, MFGetstat) ->
+send_tick(DHandle, Socket, Tick, Type, MFTick, MFGetstat) ->
#tick{tick = T0,
read = Read,
write = Write,
- ticked = Ticked} = Tick,
+ ticked = Ticked0} = Tick,
T = T0 + 1,
T1 = T rem 4,
- case MFGetstat(Socket) of
- {ok, Read, _, _} when Ticked =:= T ->
+ case getstat(DHandle, Socket, MFGetstat) of
+ {ok, Read, _, _} when Ticked0 =:= T ->
{error, not_responding};
- {ok, Read, W, Pend} when Type =:= hidden ->
- send_tick(Socket, Pend, MFTick),
- {ok, Tick#tick{write = W + 1,
- tick = T1}};
- {ok, Read, Write, Pend} ->
- send_tick(Socket, Pend, MFTick),
- {ok, Tick#tick{write = Write + 1,
- tick = T1}};
- {ok, R, Write, Pend} ->
- send_tick(Socket, Pend, MFTick),
- {ok, Tick#tick{write = Write + 1,
- read = R,
- tick = T1,
- ticked = T}};
- {ok, Read, W, _} ->
- {ok, Tick#tick{write = W,
- tick = T1}};
- {ok, R, W, _} ->
- {ok, Tick#tick{write = W,
- read = R,
- tick = T1,
- ticked = T}};
+
+ {ok, R, W1, Pend} ->
+ RDiff = R - Read,
+ W2 = case need_to_tick(Type, RDiff, W1-Write, Pend) of
+ true ->
+ MFTick(Socket),
+ W1 + 1;
+ false ->
+ W1
+ end,
+
+ Ticked1 = case RDiff of
+ 0 -> Ticked0;
+ _ -> T
+ end,
+
+ {ok, Tick#tick{write = W2,
+ tick = T1,
+ read = R,
+ ticked = Ticked1}};
+
Error ->
Error
end.
-send_tick(Socket, 0, MFTick) ->
- MFTick(Socket);
-send_tick(_, _Pend, _) ->
- %% Dont send tick if pending write.
- ok.
+need_to_tick(_, _, 0, 0) -> % nothing written and empty send queue
+ true;
+need_to_tick(_, _, 0, false) -> % nothing written and empty send queue
+ true;
+need_to_tick(hidden, 0, _, _) -> % nothing read from hidden
+ true;
+need_to_tick(_, _, _, _) ->
+ false.
+
+send_aux_tick(normal, _, Pend, _) when Pend /= false, Pend /= 0 ->
+    ok; %% Don't send tick if pending write.
+send_aux_tick(_Type, Socket, _Pend, MFTick) ->
+ MFTick(Socket).
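A sketch of the tick decision with illustrative counters (RDiff/WDiff are the read/write deltas since the last tick, Pend the pending output):

    need_to_tick(normal, _RDiff, 0, 0)     %% -> true,  nothing sent and queue empty
    need_to_tick(hidden, 0, _WDiff, _Pend) %% -> true,  nothing read from hidden peer
    need_to_tick(normal, 5, 3, 0)          %% -> false, traffic flowed both ways
    %% send_aux_tick/4 ticks a normal connection only when Pend is 0 or false,
    %% but always ticks other connection types.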
%% ------------------------------------------------------------
%% Connection setup timeout timer.
diff --git a/lib/kernel/src/erl_boot_server.erl b/lib/kernel/src/erl_boot_server.erl
index 2578b74428..4ac945ce01 100644
--- a/lib/kernel/src/erl_boot_server.erl
+++ b/lib/kernel/src/erl_boot_server.erl
@@ -252,9 +252,9 @@ handle_info({udp, U, IP, Port, Data}, S0) ->
"~w is not a valid address ** ~n", [IP]),
{noreply,S0};
{true,_,_} ->
- case catch string:substr(Data, 1, length(?EBOOT_REQUEST)) of
+ case catch string:slice(Data, 0, length(?EBOOT_REQUEST)) of
?EBOOT_REQUEST ->
- Vsn = string:substr(Data, length(?EBOOT_REQUEST)+1, length(Data)),
+ Vsn = string:slice(Data, length(?EBOOT_REQUEST), length(Data)),
error_logger:error_msg("** Illegal boot server connection attempt: "
"client version is ~s ** ~n", [Vsn]);
_ ->
diff --git a/lib/kernel/src/erl_epmd.erl b/lib/kernel/src/erl_epmd.erl
index 7bc9e2ede3..7a14e2635c 100644
--- a/lib/kernel/src/erl_epmd.erl
+++ b/lib/kernel/src/erl_epmd.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -29,10 +29,20 @@
-define(port_please_failure2(Term), noop).
-endif.
+-ifndef(erlang_daemon_port).
+-define(erlang_daemon_port, 4369).
+-endif.
+-ifndef(epmd_dist_high).
+-define(epmd_dist_high, 4370).
+-endif.
+-ifndef(epmd_dist_low).
+-define(epmd_dist_low, 4370).
+-endif.
+
%% External exports
-export([start/0, start_link/0, stop/0, port_please/2,
port_please/3, names/0, names/1,
- register_node/2, register_node/3, open/0, open/1, open/2]).
+ register_node/2, register_node/3, address_please/3, open/0, open/1, open/2]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
@@ -53,7 +63,7 @@
start() ->
gen_server:start({local, erl_epmd}, ?MODULE, [], []).
-
+-spec start_link() -> {ok, pid()} | ignore | {error,term()}.
start_link() ->
gen_server:start_link({local, erl_epmd}, ?MODULE, [], []).
@@ -66,9 +76,22 @@ stop() ->
%% return {port, P, Version} | noport
%%
+-spec port_please(Name, Host) -> {ok, Port, Version} | noport when
+ Name :: atom() | string(),
+ Host :: atom() | string() | inet:ip_address(),
+ Port :: non_neg_integer(),
+ Version :: non_neg_integer().
+
port_please(Node, Host) ->
port_please(Node, Host, infinity).
+-spec port_please(Name, Host, Timeout) -> {ok, Port, Version} | noport when
+ Name :: atom() | string(),
+ Host :: atom() | string() | inet:ip_address(),
+ Timeout :: non_neg_integer() | infinity,
+ Port :: non_neg_integer(),
+ Version :: non_neg_integer().
+
port_please(Node,HostName, Timeout) when is_atom(HostName) ->
port_please1(Node,atom_to_list(HostName), Timeout);
port_please(Node,HostName, Timeout) when is_list(HostName) ->
@@ -79,17 +102,34 @@ port_please(Node, EpmdAddr, Timeout) ->
port_please1(Node,HostName, Timeout) ->
- case inet:gethostbyname(HostName, inet, Timeout) of
+ Family = case inet_db:res_option(inet6) of
+ true ->
+ inet6;
+ false ->
+ inet
+ end,
+ case inet:gethostbyname(HostName, Family, Timeout) of
{ok,{hostent, _Name, _ , _Af, _Size, [EpmdAddr | _]}} ->
get_port(Node, EpmdAddr, Timeout);
Else ->
Else
end.
+-spec names() -> {ok, [{Name, Port}]} | {error, Reason} when
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Reason :: address | file:posix().
+
names() ->
{ok, H} = inet:gethostname(),
names(H).
+-spec names(Host) -> {ok, [{Name, Port}]} | {error, Reason} when
+ Host :: atom() | string() | inet:ip_address(),
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Reason :: address | file:posix().
+
names(HostName) when is_atom(HostName); is_list(HostName) ->
case inet:gethostbyname(HostName) of
{ok,{hostent, _Name, _ , _Af, _Size, [EpmdAddr | _]}} ->
@@ -100,9 +140,22 @@ names(HostName) when is_atom(HostName); is_list(HostName) ->
names(EpmdAddr) ->
get_names(EpmdAddr).
+-spec register_node(Name, Port) -> Result when
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Creation :: non_neg_integer(),
+ Result :: {ok, Creation} | {error, already_registered} | term().
register_node(Name, PortNo) ->
- register_node(Name, PortNo, inet).
+ register_node(Name, PortNo, inet).
+
+-spec register_node(Name, Port, Driver) -> Result when
+ Name :: string(),
+ Port :: non_neg_integer(),
+ Driver :: inet_tcp | inet6_tcp | inet | inet6,
+ Creation :: non_neg_integer(),
+ Result :: {ok, Creation} | {error, already_registered} | term().
+
register_node(Name, PortNo, inet_tcp) ->
register_node(Name, PortNo, inet);
register_node(Name, PortNo, inet6_tcp) ->
@@ -110,6 +163,17 @@ register_node(Name, PortNo, inet6_tcp) ->
register_node(Name, PortNo, Family) ->
gen_server:call(erl_epmd, {register, Name, PortNo, Family}, infinity).
+-spec address_please(Name, Host, AddressFamily) -> Success | {error, term()} when
+ Name :: string(),
+ Host :: string() | inet:ip_address(),
+ AddressFamily :: inet | inet6,
+ Port :: non_neg_integer(),
+ Version :: non_neg_integer(),
+ Success :: {ok, inet:ip_address()} | {ok, inet:ip_address(), Port, Version}.
+
+address_please(_Name, Host, AddressFamily) ->
+ inet:getaddr(Host, AddressFamily).
+
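A hypothetical call of the default implementation (host name and result are examples):

    erl_epmd:address_please("mynode", "localhost", inet)
    %% -> {ok,{127,0,0,1}}; an alternative epmd module may instead return
    %% {ok, Addr, Port, Version} and thereby skip the port lookup, as the
    %% spec above allows.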
%%%----------------------------------------------------------------------
%%% Callback functions from gen_server
%%%----------------------------------------------------------------------
diff --git a/lib/kernel/src/erl_reply.erl b/lib/kernel/src/erl_reply.erl
index e1e046cbb4..e1c4ffe839 100644
--- a/lib/kernel/src/erl_reply.erl
+++ b/lib/kernel/src/erl_reply.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -42,7 +42,7 @@ reply(_) ->
%% convert ip number to tuple
ip_string_to_tuple(Ip) ->
- [Ip1,Ip2,Ip3,Ip4] = string:tokens(Ip,"."),
+ [Ip1,Ip2,Ip3,Ip4] = string:lexemes(Ip,"."),
{list_to_integer(Ip1),
list_to_integer(Ip2),
list_to_integer(Ip3),
diff --git a/lib/kernel/src/erl_signal_handler.erl b/lib/kernel/src/erl_signal_handler.erl
index 22f235d4e4..5be905d8ae 100644
--- a/lib/kernel/src/erl_signal_handler.erl
+++ b/lib/kernel/src/erl_signal_handler.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -19,12 +19,21 @@
-module(erl_signal_handler).
-behaviour(gen_event).
--export([init/1, format_status/2,
+-export([start/0, init/1, format_status/2,
handle_event/2, handle_call/2, handle_info/2,
terminate/2, code_change/3]).
-record(state,{}).
+start() ->
+ %% add signal handler
+ case whereis(erl_signal_server) of
+ %% in case of minimal mode
+ undefined -> ok;
+ _ ->
+ gen_event:add_handler(erl_signal_server, erl_signal_handler, [])
+ end.
+
init(_Args) ->
{ok, #state{}}.
diff --git a/lib/kernel/src/error_handler.erl b/lib/kernel/src/error_handler.erl
index 59ca8e690d..a89ef83261 100644
--- a/lib/kernel/src/error_handler.erl
+++ b/lib/kernel/src/error_handler.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -106,8 +106,8 @@ crash(M, F, A) ->
crash(Tuple) ->
try erlang:error(undef)
catch
- error:undef ->
- Stk = [Tuple|tl(erlang:get_stacktrace())],
+ error:undef:Stacktrace ->
+ Stk = [Tuple|tl(Stacktrace)],
erlang:raise(error, undef, Stk)
end.
diff --git a/lib/kernel/src/error_logger.erl b/lib/kernel/src/error_logger.erl
index 585507c545..e324be5290 100644
--- a/lib/kernel/src/error_logger.erl
+++ b/lib/kernel/src/error_logger.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,22 +19,23 @@
%%
-module(error_logger).
--export([start/0,start_link/0,format/2,error_msg/1,error_msg/2,error_report/1,
+-include("logger_internal.hrl").
+
+-export([start/0,start_link/0,stop/0,
+ format/2,error_msg/1,error_msg/2,error_report/1,
error_report/2,info_report/1,info_report/2,warning_report/1,
warning_report/2,error_info/1,
info_msg/1,info_msg/2,warning_msg/1,warning_msg/2,
- logfile/1,tty/1,swap_handler/1,
+ logfile/1,tty/1,
add_report_handler/1,add_report_handler/2,
- delete_report_handler/1]).
+ delete_report_handler/1,
+ which_report_handlers/0]).
--export([init/1,
- handle_event/2, handle_call/2, handle_info/2,
- terminate/2]).
+%% logger callbacks
+-export([adding_handler/1, removing_handler/1, log/2]).
-export([get_format_depth/0, limit_term/1]).
--define(buffer_size, 10).
-
%%-----------------------------------------------------------------
%% Types used in this file
%%-----------------------------------------------------------------
@@ -43,8 +44,6 @@
| 'info' | 'info_msg' | 'info_report'
| 'warning_msg' | 'warning_report'.
--type state() :: {non_neg_integer(), non_neg_integer(), [term()]}.
-
%%% BIF
-export([warning_map/0]).
@@ -59,26 +58,164 @@ warning_map() ->
%%-----------------------------------------------------------------
--spec start() -> {'ok', pid()} | {'error', any()}.
+%%%-----------------------------------------------------------------
+%%% Start the event manager process under logger_sup, which is part of
+%%% the kernel application's supervision tree.
+-spec start() -> 'ok' | {'error', any()}.
start() ->
- case gen_event:start({local, error_logger}) of
- {ok, Pid} ->
- simple_logger(?buffer_size),
- {ok, Pid};
- Error -> Error
+ case whereis(?MODULE) of
+ undefined ->
+ ErrorLogger =
+ #{id => ?MODULE,
+ start => {?MODULE, start_link, []},
+ restart => transient,
+ shutdown => 2000,
+ type => worker,
+ modules => dynamic},
+ case supervisor:start_child(logger_sup, ErrorLogger) of
+ {ok,Pid} ->
+ ok = logger_handler_watcher:register_handler(?MODULE,Pid);
+ Error ->
+ Error
+ end;
+ _ ->
+ ok
end.
+%%%-----------------------------------------------------------------
+%%% Start callback specified in child specification to supervisor, see start/0
-spec start_link() -> {'ok', pid()} | {'error', any()}.
start_link() ->
- case gen_event:start_link({local, error_logger}) of
- {ok, Pid} ->
- simple_logger(?buffer_size),
- {ok, Pid};
- Error -> Error
+ gen_event:start_link({local, ?MODULE},
+ [{spawn_opt,[{message_queue_data, off_heap}]}]).
+
+%%%-----------------------------------------------------------------
+%%% Stop the event manager
+-spec stop() -> ok.
+stop() ->
+ case whereis(?MODULE) of
+ undefined ->
+ ok;
+ _Pid ->
+ _ = gen_event:stop(?MODULE,{shutdown,stopped},infinity),
+ _ = supervisor:delete_child(logger_sup,?MODULE),
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Callbacks for logger
+-spec adding_handler(logger:handler_config()) ->
+ {ok,logger:handler_config()} | {error,term()}.
+adding_handler(#{id:=?MODULE}=Config) ->
+ case start() of
+ ok ->
+ {ok,Config};
+ Error ->
+ Error
end.
+-spec removing_handler(logger:handler_config()) -> ok.
+removing_handler(#{id:=?MODULE}) ->
+ stop(),
+ ok.
+
+-spec log(logger:log_event(),logger:handler_config()) -> ok.
+log(#{level:=Level,msg:=Msg,meta:=Meta},_Config) ->
+ do_log(Level,Msg,Meta).
+
+do_log(Level,{report,Msg},#{?MODULE:=#{tag:=Tag,type:=Type}}=Meta) ->
+ %% From error_logger:*_report/1,2, or logger call which added
+ %% error_logger data to obtain backwards compatibility with
+ %% error_logger:*_report/1,2
+ Report =
+ case Msg of
+ #{label:=_,report:=R} -> R;
+ _ -> Msg
+ end,
+ notify(Level,Tag,Type,Report,Meta);
+do_log(Level,{report,Msg},#{?MODULE:=#{tag:=Tag}}=Meta) ->
+ {Format,Args} =
+ case Msg of
+ #{label:=_,format:=F,args:=A} ->
+ %% From error_logger:*_msg/1,2.
+ %% In order to be backwards compatible with handling
+ %% of faulty parameters to error_logger:*_msg/1,2,
+ %% don't use report_cb here.
+ {F,A};
+ _ ->
+ %% From logger call which added error_logger data to
+ %% obtain backwards compatibility with error_logger:*_msg/1,2
+ case maps:get(report_cb,Meta,fun logger:format_report/1) of
+ RCBFun when is_function(RCBFun,1) ->
+ try RCBFun(Msg) of
+ {F,A} when is_list(F), is_list(A) ->
+ {F,A};
+ Other ->
+ {"REPORT_CB ERROR: ~tp; Returned: ~tp",[Msg,Other]}
+ catch C:R ->
+ {"REPORT_CB CRASH: ~tp; Reason: ~tp",[Msg,{C,R}]}
+ end;
+ RCBFun when is_function(RCBFun,2) ->
+ try RCBFun(Msg,#{depth=>get_format_depth(),
+ chars_limit=>unlimited,
+ single_line=>false}) of
+ Chardata when ?IS_STRING(Chardata) ->
+ {"~ts",[Chardata]};
+ Other ->
+ {"REPORT_CB ERROR: ~tp; Returned: ~tp",[Msg,Other]}
+ catch C:R ->
+ {"REPORT_CB CRASH: ~tp; Reason: ~tp",[Msg,{C,R}]}
+ end
+ end
+ end,
+ notify(Level,Tag,Format,Args,Meta);
+do_log(Level,{Format,Args},#{?MODULE:=#{tag:=Tag}}=Meta)
+ when is_list(Format), is_list(Args) ->
+ %% From logger call which added error_logger data to obtain
+ %% backwards compatibility with error_logger:*_msg/1,2
+ notify(Level,Tag,Format,Args,Meta);
+do_log(_Level,_Msg,_Meta) ->
+ %% Ignore the rest - i.e. to get backwards compatibility with
+ %% error_logger, you must use the error_logger API for logging.
+    %% Some modules within OTP get around this by adding an
+    %% error_logger field to their metadata. This is done only to allow
+ %% complete backwards compatibility for log events originating
+ %% from within OTP, while still using the new logger interface.
+ ok.
+
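A minimal sketch of the two report_cb arities the clauses above accept; report contents and callback names are made up:

    RCB1 = fun(R)        -> {"queue: ~p", [maps:get(q, R)]} end,
    RCB2 = fun(R, _Conf) -> io_lib:format("queue: ~p", [maps:get(q, R)]) end,
    %% With report_cb => RCB1 in the metadata, do_log/3 forwards the returned
    %% {Format,Args}; with RCB2 it expects chardata and wraps it as {"~ts",[Chardata]}.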
+-spec notify(logger:level(), msg_tag(), any(), any(), map()) -> 'ok'.
+notify(Level,Tag0,FormatOrType0,ArgsOrReport,#{pid:=Pid0,gl:=GL,?MODULE:=My}) ->
+ {Tag,FormatOrType} = maybe_map_warnings(Level,Tag0,FormatOrType0),
+ Pid = case maps:get(emulator,My,false) of
+ true -> emulator;
+ _ -> Pid0
+ end,
+ gen_event:notify(?MODULE,{Tag,GL,{Pid,FormatOrType,ArgsOrReport}}).
+
+%% For backwards compatibility with really old event handlers, check
+%% the warning map and update tag and type.
+maybe_map_warnings(warning,Tag,FormatOrType) ->
+ case error_logger:warning_map() of
+ warning ->
+ {Tag,FormatOrType};
+ Level ->
+ {fix_warning_tag(Level,Tag),fix_warning_type(Level,FormatOrType)}
+ end;
+maybe_map_warnings(_,Tag,FormatOrType) ->
+ {Tag,FormatOrType}.
+
+fix_warning_tag(error,warning_msg) -> error;
+fix_warning_tag(error,warning_report) -> error_report;
+fix_warning_tag(info,warning_msg) -> info_msg;
+fix_warning_tag(info,warning_report) -> info_report;
+fix_warning_tag(_,Tag) -> Tag.
+
+fix_warning_type(error,std_warning) -> std_error;
+fix_warning_type(info,std_warning) -> std_info;
+fix_warning_type(_,Type) -> Type.
+
%%-----------------------------------------------------------------
%% These two simple old functions generate events tagged 'error'
%% Used for simple messages; error or information.
@@ -95,14 +232,18 @@ error_msg(Format) ->
Data :: list().
error_msg(Format, Args) ->
- notify({error, group_leader(), {self(), Format, Args}}).
+ logger:log(error,
+ #{label=>{?MODULE,error_msg},
+ format=>Format,
+ args=>Args},
+ meta(error)).
-spec format(Format, Data) -> 'ok' when
Format :: string(),
Data :: list().
format(Format, Args) ->
- notify({error, group_leader(), {self(), Format, Args}}).
+ error_msg(Format, Args).
%%-----------------------------------------------------------------
%% This functions should be used for error reports. Events
@@ -124,7 +265,10 @@ error_report(Report) ->
Report :: report().
error_report(Type, Report) ->
- notify({error_report, group_leader(), {self(), Type, Report}}).
+ logger:log(error,
+ #{label=>{?MODULE,error_report},
+ report=>Report},
+ meta(error_report,Type)).
%%-----------------------------------------------------------------
%% This function should be used for warning reports.
@@ -146,25 +290,10 @@ warning_report(Report) ->
Report :: report().
warning_report(Type, Report) ->
- {Tag, NType} = case error_logger:warning_map() of
- info ->
- if
- Type =:= std_warning ->
- {info_report, std_info};
- true ->
- {info_report, Type}
- end;
- warning ->
- {warning_report, Type};
- error ->
- if
- Type =:= std_warning ->
- {error_report, std_error};
- true ->
- {error_report, Type}
- end
- end,
- notify({Tag, group_leader(), {self(), NType, Report}}).
+ logger:log(warning,
+ #{label=>{?MODULE,warning_report},
+ report=>Report},
+ meta(warning_report,Type)).
%%-----------------------------------------------------------------
%% This function provides similar functions as error_msg for
@@ -183,15 +312,11 @@ warning_msg(Format) ->
Data :: list().
warning_msg(Format, Args) ->
- Tag = case error_logger:warning_map() of
- warning ->
- warning_msg;
- info ->
- info_msg;
- error ->
- error
- end,
- notify({Tag, group_leader(), {self(), Format, Args}}).
+ logger:log(warning,
+ #{label=>{?MODULE,warning_msg},
+ format=>Format,
+ args=>Args},
+ meta(warning_msg)).
%%-----------------------------------------------------------------
%% This function should be used for information reports. Events
@@ -210,7 +335,10 @@ info_report(Report) ->
Report :: report().
info_report(Type, Report) ->
- notify({info_report, group_leader(), {self(), Type, Report}}).
+ logger:log(notice,
+ #{label=>{?MODULE,info_report},
+ report=>Report},
+ meta(info_report,Type)).
%%-----------------------------------------------------------------
%% This function provides similar functions as error_msg for
@@ -228,7 +356,11 @@ info_msg(Format) ->
Data :: list().
info_msg(Format, Args) ->
- notify({info_msg, group_leader(), {self(), Format, Args}}).
+ logger:log(notice,
+ #{label=>{?MODULE,info_msg},
+ format=>Format,
+ args=>Args},
+ meta(info_msg)).
%%-----------------------------------------------------------------
%% Used by the init process. Events are tagged 'info'.
@@ -236,38 +368,75 @@ info_msg(Format, Args) ->
-spec error_info(Error :: any()) -> 'ok'.
+%% unused?
error_info(Error) ->
- notify({info, group_leader(), {self(), Error, []}}).
-
--spec notify({msg_tag(), pid(), {pid(), any(), any()}}) -> 'ok'.
-
-notify(Msg) ->
- gen_event:notify(error_logger, Msg).
-
--type swap_handler_type() :: 'false' | 'silent' | 'tty' | {'logfile', string()}.
--spec swap_handler(Type :: swap_handler_type()) -> any().
-
-swap_handler(tty) ->
- R = gen_event:swap_handler(error_logger, {error_logger, swap},
- {error_logger_tty_h, []}),
- ok = simple_logger(),
- R;
-swap_handler({logfile, File}) ->
- R = gen_event:swap_handler(error_logger, {error_logger, swap},
- {error_logger_file_h, File}),
- ok = simple_logger(),
- R;
-swap_handler(silent) ->
- _ = gen_event:delete_handler(error_logger, error_logger, delete),
- ok = simple_logger();
-swap_handler(false) ->
- ok. % keep primitive event handler as-is
+ {Format,Args} =
+ case string_p(Error) of
+ true -> {Error,[]};
+ false -> {"~p",[Error]}
+ end,
+ MyMeta = #{tag=>info,type=>Error},
+ logger:log(notice, Format, Args, #{?MODULE=>MyMeta,domain=>[Error]}).
+
+%%-----------------------------------------------------------------
+%% Create metadata
+meta(Tag) ->
+ meta(Tag,undefined).
+meta(Tag,Type) ->
+ meta(Tag,Type,#{report_cb=>fun report_to_format/1}).
+meta(Tag,undefined,Meta0) ->
+ Meta0#{?MODULE=>#{tag=>Tag}};
+meta(Tag,Type,Meta0) ->
+ maybe_add_domain(Tag,Type,Meta0#{?MODULE=>#{tag=>Tag,type=>Type}}).
+
+%% This is to prevent events of non-standard type from being printed
+%% with the standard logger. Similar to how error_logger_tty_h
+%% discards events of non-standard type.
+maybe_add_domain(error_report,std_error,Meta) -> Meta;
+maybe_add_domain(info_report,std_info,Meta) -> Meta;
+maybe_add_domain(warning_report,std_warning,Meta) -> Meta;
+maybe_add_domain(_,Type,Meta) -> Meta#{domain=>[Type]}.
+
+%% -----------------------------------------------------------------
+%% Report formatting - i.e. Term => {Format,Args}
+%% This was earlier done in the event handler (error_logger_tty_h, etc)
+%% -----------------------------------------------------------------
+report_to_format(#{label:={?MODULE,_},
+ report:=Report}) when is_map(Report) ->
+ %% logger:format_otp_report does maps:to_list, and for backwards
+ %% compatibility reasons we don't want that.
+ {"~tp\n",[Report]};
+report_to_format(#{label:={?MODULE,_},
+ format:=Format,
+ args:=Args}) ->
+ %% This is not efficient, but needed for backwards compatibility
+    %% when faulty arguments are given to the *_msg functions.
+ try io_lib:scan_format(Format,Args) of
+ _ -> {Format,Args}
+ catch _:_ ->
+ {"ERROR: ~tp - ~tp",[Format,Args]}
+ end;
+report_to_format(Term) ->
+ logger:format_otp_report(Term).
+
+string_p(List) when is_list(List) ->
+ string_p1(lists:flatten(List));
+string_p(_) ->
+ false.
+string_p1([]) ->
+ false;
+string_p1(FlatList) ->
+ io_lib:printable_list(FlatList).
+
+%% -----------------------------------------------------------------
+%% Stuff directly related to the event manager
+%% -----------------------------------------------------------------
-spec add_report_handler(Handler) -> any() when
Handler :: module().
add_report_handler(Module) when is_atom(Module) ->
- gen_event:add_handler(error_logger, Module, []).
+ add_report_handler(Module, []).
-spec add_report_handler(Handler, Args) -> Result when
Handler :: module(),
@@ -275,24 +444,37 @@ add_report_handler(Module) when is_atom(Module) ->
Result :: gen_event:add_handler_ret().
add_report_handler(Module, Args) when is_atom(Module) ->
- gen_event:add_handler(error_logger, Module, Args).
+ _ = logger:add_handler(?MODULE,?MODULE,#{level=>info,filter_default=>log}),
+ gen_event:add_handler(?MODULE, Module, Args).
-spec delete_report_handler(Handler) -> Result when
Handler :: module(),
Result :: gen_event:del_handler_ret().
delete_report_handler(Module) when is_atom(Module) ->
- gen_event:delete_handler(error_logger, Module, []).
-
-%% Start the lowest level error_logger handler with Buffer.
-
-simple_logger(Buffer_size) when is_integer(Buffer_size) ->
- gen_event:add_handler(error_logger, error_logger, Buffer_size).
-
-%% Start the lowest level error_logger handler without Buffer.
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ Return = gen_event:delete_handler(?MODULE, Module, []),
+ case gen_event:which_handlers(?MODULE) of
+ [] ->
+ %% Don't want a lot of logs here if it's not needed
+ _ = logger:remove_handler(?MODULE),
+ ok;
+ _ ->
+ ok
+ end,
+ Return;
+ _ ->
+ ok
+ end.
-simple_logger() ->
- gen_event:add_handler(error_logger, error_logger, []).
+which_report_handlers() ->
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ gen_event:which_handlers(?MODULE);
+ undefined ->
+ []
+ end.
%% Log all errors to File for all eternity
@@ -308,26 +490,35 @@ simple_logger() ->
FilenameReason :: no_log_file.
logfile({open, File}) ->
- case lists:member(error_logger_file_h,
- gen_event:which_handlers(error_logger)) of
+ case lists:member(error_logger_file_h,which_report_handlers()) of
true ->
{error, allready_have_logfile};
_ ->
- gen_event:add_handler(error_logger, error_logger_file_h, File)
+ add_report_handler(error_logger_file_h, File)
end;
logfile(close) ->
- case gen_event:delete_handler(error_logger, error_logger_file_h, normal) of
- {error,Reason} ->
- {error,Reason};
- _ ->
- ok
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ case gen_event:delete_handler(?MODULE, error_logger_file_h, normal) of
+ {error,Reason} ->
+ {error,Reason};
+ _ ->
+ ok
+ end;
+ _ ->
+ {error,module_not_found}
end;
logfile(filename) ->
- case gen_event:call(error_logger, error_logger_file_h, filename) of
- {error,_} ->
- {error, no_log_file};
- Val ->
- Val
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ case gen_event:call(?MODULE, error_logger_file_h, filename) of
+ {error,_} ->
+ {error, no_log_file};
+ Val ->
+ Val
+ end;
+ _ ->
+ {error, no_log_file}
end.
%% Possibly turn off all tty printouts, maybe we only want the errors
@@ -337,194 +528,38 @@ logfile(filename) ->
Flag :: boolean().
tty(true) ->
- Hs = gen_event:which_handlers(error_logger),
- case lists:member(error_logger_tty_h, Hs) of
- false ->
- gen_event:add_handler(error_logger, error_logger_tty_h, []);
- true ->
- ignore
- end,
+ _ = case lists:member(error_logger_tty_h, which_report_handlers()) of
+ false ->
+ case logger:get_handler_config(default) of
+ {ok,#{module:=logger_std_h,config:=#{type:=standard_io}}} ->
+ logger:remove_handler_filter(default,
+ error_logger_tty_false);
+ _ ->
+ logger:add_handler(error_logger_tty_true,logger_std_h,
+ #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS(
+ [otp]),
+ formatter=>{?DEFAULT_FORMATTER,
+ ?DEFAULT_FORMAT_CONFIG},
+ config=>#{type=>standard_io}})
+ end;
+ true ->
+ ok
+ end,
ok;
tty(false) ->
- gen_event:delete_handler(error_logger, error_logger_tty_h, []),
+ delete_report_handler(error_logger_tty_h),
+ _ = logger:remove_handler(error_logger_tty_true),
+ _ = case logger:get_handler_config(default) of
+ {ok,#{module:=logger_std_h,config:=#{type:=standard_io}}} ->
+ logger:add_handler_filter(default,error_logger_tty_false,
+ {fun(_,_) -> stop end, ok});
+ _ ->
+ ok
+ end,
ok.
-
-%%% ---------------------------------------------------
-%%% This is the default error_logger handler.
-%%% ---------------------------------------------------
-
--spec init(term()) -> {'ok', state() | []}.
-
-init(Max) when is_integer(Max) ->
- {ok, {Max, 0, []}};
-%% This one is called if someone took over from us, and now wants to
-%% go back.
-init({go_back, _PostState}) ->
- {ok, {?buffer_size, 0, []}};
-init(_) ->
- %% The error logger process may receive a huge amount of
- %% messages. Make sure that they are stored off heap to
- %% avoid exessive GCs.
- process_flag(message_queue_data, off_heap),
- {ok, []}.
-
--spec handle_event(term(), state()) -> {'ok', state()}.
-
-handle_event({Type, GL, Msg}, State) when node(GL) =/= node() ->
- gen_event:notify({error_logger, node(GL)},{Type, GL, Msg}),
- %% handle_event2({Type, GL, Msg}, State); %% Shall we do something
- {ok, State}; %% at this node too ???
-handle_event({info_report, _, {_, Type, _}}, State) when Type =/= std_info ->
- {ok, State}; %% Ignore other info reports here
-handle_event(Event, State) ->
- handle_event2(Event, State).
-
--spec handle_info(term(), state()) -> {'ok', state()}.
-
-handle_info({emulator, GL, Chars}, State) when node(GL) =/= node() ->
- {error_logger, node(GL)} ! {emulator, GL, add_node(Chars,self())},
- {ok, State};
-handle_info({emulator, GL, Chars}, State) ->
- handle_event2({emulator, GL, Chars}, State);
-handle_info(_, State) ->
- {ok, State}.
-
--spec handle_call(term(), state()) -> {'ok', {'error', 'bad_query'}, state()}.
-
-handle_call(_Query, State) -> {ok, {error, bad_query}, State}.
-
--spec terminate(term(), state()) -> {'error_logger', [term()]}.
-
-terminate(swap, {_, 0, Buff}) ->
- {error_logger, Buff};
-terminate(swap, {_, Lost, Buff}) ->
- Myevent = {info, group_leader(), {self(), {lost_messages, Lost}, []}},
- {error_logger, [tag_event(Myevent)|Buff]};
-terminate(_, _) ->
- {error_logger, []}.
-
-handle_event2(Event, {1, Lost, Buff}) ->
- display(tag_event(Event)),
- {ok, {1, Lost+1, Buff}};
-handle_event2(Event, {N, Lost, Buff}) ->
- Tagged = tag_event(Event),
- display(Tagged),
- {ok, {N-1, Lost, [Tagged|Buff]}};
-handle_event2(_, State) ->
- {ok, State}.
-
-tag_event(Event) ->
- {erlang:localtime(), Event}.
-
-display({Tag,{error,_,{_,Format,Args}}}) ->
- display2(Tag,Format,Args);
-display({Tag,{error_report,_,{_,Type,Report}}}) ->
- display2(Tag,Type,Report);
-display({Tag,{info_report,_,{_,Type,Report}}}) ->
- display2(Tag,Type,Report);
-display({Tag,{info,_,{_,Error,_}}}) ->
- display2(Tag,Error,[]);
-display({Tag,{info_msg,_,{_,Format,Args}}}) ->
- display2(Tag,Format,Args);
-display({Tag,{warning_report,_,{_,Type,Report}}}) ->
- display2(Tag,Type,Report);
-display({Tag,{warning_msg,_,{_,Format,Args}}}) ->
- display2(Tag,Format,Args);
-display({Tag,{emulator,_,Chars}}) ->
- display2(Tag,Chars,[]).
-
-add_node(X, Pid) when is_atom(X) ->
- add_node(atom_to_list(X), Pid);
-add_node(X, Pid) ->
- lists:concat([X,"** at node ",node(Pid)," **~n"]).
-
-%% Can't do io_lib:format
-
-display2({{_Y,_Mo,_D},{_H,_Mi,_S}} = Date, F, A) ->
- display_date(Date),
- display3(string_p(F), F, A).
-
-display_date({{Y,Mo,D},{H,Mi,S}}) ->
- erlang:display_string(
- integer_to_list(Y) ++ "-" ++
- two_digits(Mo) ++ "-" ++
- two_digits(D) ++ " " ++
- two_digits(H) ++ ":" ++
- two_digits(Mi) ++ ":" ++
- two_digits(S) ++ " ").
-
-two_digits(N) when 0 =< N, N =< 9 ->
- [$0, $0 + N];
-two_digits(N) ->
- integer_to_list(N).
-
-display3(true, F, A) ->
- %% Format string with arguments
- erlang:display_string(F ++ "\n"),
- [begin
- erlang:display_string("\t"),
- erlang:display(Arg)
- end || Arg <- A],
- ok;
-display3(false, Atom, A) when is_atom(Atom) ->
- %% The widest atom seems to be 'supervisor_report' at 17.
- ColumnWidth = 20,
- AtomString = atom_to_list(Atom),
- AtomLength = length(AtomString),
- Padding = lists:duplicate(ColumnWidth - AtomLength, $\s),
- erlang:display_string(AtomString ++ Padding),
- display4(A);
-display3(_, F, A) ->
- erlang:display({F, A}).
-
-display4([A, []]) ->
- %% Not sure why crash reports look like this.
- display4(A);
-display4(A = [_|_]) ->
- case lists:all(fun({Key,_Value}) -> is_atom(Key); (_) -> false end, A) of
- true ->
- erlang:display_string("\n"),
- lists:foreach(
- fun({Key, Value}) ->
- erlang:display_string(
- " " ++
- atom_to_list(Key) ++
- ": "),
- erlang:display(Value)
- end, A);
- false ->
- erlang:display(A)
- end;
-display4(A) ->
- erlang:display(A).
-
-string_p([]) ->
- false;
-string_p(Term) ->
- string_p1(Term).
-
-string_p1([H|T]) when is_integer(H), H >= $\040, H =< $\176 ->
- string_p1(T);
-string_p1([H|T]) when is_integer(H), H >= 16#A0, H < 16#D800;
- is_integer(H), H > 16#DFFF, H < 16#FFFE;
- is_integer(H), H > 16#FFFF, H =< 16#10FFFF ->
- string_p1(T);
-string_p1([$\n|T]) -> string_p1(T);
-string_p1([$\r|T]) -> string_p1(T);
-string_p1([$\t|T]) -> string_p1(T);
-string_p1([$\v|T]) -> string_p1(T);
-string_p1([$\b|T]) -> string_p1(T);
-string_p1([$\f|T]) -> string_p1(T);
-string_p1([$\e|T]) -> string_p1(T);
-string_p1([H|T]) when is_list(H) ->
- case string_p1(H) of
- true -> string_p1(T);
- _ -> false
- end;
-string_p1([]) -> true;
-string_p1(_) -> false.
-
+%%%-----------------------------------------------------------------
-spec limit_term(term()) -> term().
limit_term(Term) ->
@@ -539,6 +574,8 @@ get_format_depth() ->
case application:get_env(kernel, error_logger_format_depth) of
{ok, Depth} when is_integer(Depth) ->
max(10, Depth);
+ {ok, unlimited} ->
+ unlimited;
undefined ->
unlimited
end.
diff --git a/lib/kernel/src/erts_debug.erl b/lib/kernel/src/erts_debug.erl
index 9662f8fa90..1270de4144 100644
--- a/lib/kernel/src/erts_debug.erl
+++ b/lib/kernel/src/erts_debug.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,7 +21,7 @@
%% Low-level debugging support. EXPERIMENTAL!
--export([size/1,df/1,df/2,df/3,df/4,ic/1]).
+-export([size/1,df/1,df/2,df/3,dis_to_file/2,ic/1]).
%% This module contains the following *experimental* BIFs:
%% disassemble/1
@@ -32,11 +32,11 @@
%%% BIFs
-export([breakpoint/2, disassemble/1, display/1, dist_ext_to_term/2,
- dump_monitors/1, dump_links/1, flat_size/1,
- get_internal_state/1, instructions/0,
+ flat_size/1, get_internal_state/1, instructions/0,
map_info/1, same/2, set_internal_state/2,
size_shared/1, copy_shared/1, dirty_cpu/2, dirty_io/2, dirty/3,
- lcnt_control/1, lcnt_control/2, lcnt_collect/0, lcnt_clear/0]).
+ lcnt_control/1, lcnt_control/2, lcnt_collect/0, lcnt_clear/0,
+ lc_graph/0, lc_graph_to_dot/2, lc_graph_merge/2]).
-spec breakpoint(MFA, Flag) -> non_neg_integer() when
MFA :: {Module :: module(),
@@ -70,18 +70,6 @@ display(_) ->
dist_ext_to_term(_, _) ->
erlang:nif_error(undef).
--spec dump_monitors(Id) -> true when
- Id :: pid() | atom().
-
-dump_monitors(_) ->
- erlang:nif_error(undef).
-
--spec dump_links(Id) -> true when
- Id :: pid() | port() | atom().
-
-dump_links(_) ->
- erlang:nif_error(undef).
-
-spec flat_size(Term) -> non_neg_integer() when
Term :: term().
@@ -347,55 +335,52 @@ is_term_seen(_, []) -> false.
-spec df(module()) -> df_ret().
df(Mod) when is_atom(Mod) ->
- df(lists:concat([Mod, ".dis"]), Mod).
-
--spec df(module(), atom()) -> df_ret();
- (file:io_device() | file:filename(), module()) -> df_ret().
-
-df(Mod, Func) when is_atom(Mod), is_atom(Func) ->
- df(lists:concat([Mod, "_", Func, ".dis"]), Mod, Func);
-df(Name, Mod) when is_atom(Mod) ->
try Mod:module_info(functions) of
Fs0 when is_list(Fs0) ->
+ Name = lists:concat([Mod, ".dis"]),
Fs = [{Mod,Func,Arity} || {Func,Arity} <- Fs0],
dff(Name, Fs)
catch _:_ -> {undef,Mod}
end.
+-spec df(module(), atom()) -> df_ret().
--spec df(module(), atom(), arity()) -> df_ret();
- (file:io_device() | file:filename(), module(), atom()) -> df_ret().
-
-df(Mod, Func, Arity) when is_atom(Mod), is_atom(Func), is_integer(Arity) ->
- df(lists:concat([Mod, "_", Func, "_", Arity, ".dis"]), Mod, Func, Arity);
-df(Name, Mod, Func) when is_atom(Mod), is_atom(Func) ->
+df(Mod, Func) when is_atom(Mod), is_atom(Func) ->
try Mod:module_info(functions) of
Fs0 when is_list(Fs0) ->
+ Name = lists:concat([Mod, "_", Func, ".dis"]),
Fs = [{Mod,Func1,Arity} || {Func1,Arity} <- Fs0, Func1 =:= Func],
dff(Name, Fs)
catch _:_ -> {undef,Mod}
end.
--spec df(file:io_device() | file:filename(), module(), atom(), arity()) -> df_ret().
-df(Name, Mod, Func, Arity) when is_atom(Mod), is_atom(Func), is_integer(Arity) ->
+-spec df(module(), atom(), arity()) -> df_ret().
+
+df(Mod, Func, Arity) when is_atom(Mod), is_atom(Func) ->
try Mod:module_info(functions) of
Fs0 when is_list(Fs0) ->
+ Name = lists:concat([Mod, "_", Func, "_", Arity, ".dis"]),
Fs = [{Mod,Func1,Arity1} || {Func1,Arity1} <- Fs0,
Func1 =:= Func, Arity1 =:= Arity],
dff(Name, Fs)
catch _:_ -> {undef,Mod}
end.
-dff(File, Fs) when is_pid(File), is_list(Fs) ->
- lists:foreach(fun(Mfa) ->
- disassemble_function(File, Mfa),
- io:nl(File)
- end, Fs);
-dff(Name, Fs) when is_list(Name) ->
- case file:open(Name, [write]) of
+-spec dis_to_file(module(), file:filename()) -> df_ret().
+
+dis_to_file(Mod, Name) when is_atom(Mod) ->
+ try Mod:module_info(functions) of
+ Fs0 when is_list(Fs0) ->
+ Fs = [{Mod,Func,Arity} || {Func,Arity} <- Fs0],
+ dff(Name, Fs)
+ catch _:_ -> {undef,Mod}
+ end.
+
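Hypothetical usage of the new function (module and file name are examples):

    erts_debug:dis_to_file(lists, "lists.dis")
    %% writes the disassembly of every function in 'lists' to "lists.dis",
    %% whereas df/1,2,3 always derive the output file name themselves.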
+dff(Name, Fs) ->
+ case file:open(Name, [write,raw,delayed_write]) of
{ok,F} ->
try
- dff(F, Fs)
+ dff_1(F, Fs)
after
_ = file:close(F)
end;
@@ -403,12 +388,18 @@ dff(Name, Fs) when is_list(Name) ->
{error,{badopen,Reason}}
end.
+dff_1(File, Fs) ->
+ lists:foreach(fun(Mfa) ->
+ disassemble_function(File, Mfa),
+ file:write(File, "\n")
+ end, Fs).
+
disassemble_function(File, {_,_,_}=MFA) ->
cont_dis(File, erts_debug:disassemble(MFA), MFA).
cont_dis(_, false, _) -> ok;
cont_dis(File, {Addr,Str,MFA}, MFA) ->
- io:put_chars(File, binary_to_list(Str)),
+ ok = file:write(File, Str),
cont_dis(File, erts_debug:disassemble(Addr), MFA);
cont_dis(_, {_,_,_}, _) -> ok.
@@ -417,3 +408,90 @@ cont_dis(_, {_,_,_}, _) -> ok.
map_info(_) ->
erlang:nif_error(undef).
+
+%% Create file "lc_graph.<pid>" with all actual lock dependencies
+%% recorded so far by the VM.
+%% Needs debug VM or --enable-lock-checking config, returns 'notsup' otherwise.
+lc_graph() ->
+ erts_debug:set_internal_state(available_internal_state, true),
+ erts_debug:get_internal_state(lc_graph).
+
+%% Convert "lc_graph.<pid>" file to https://www.graphviz.org dot format.
+lc_graph_to_dot(OutFile, InFile) ->
+ {ok, [LL0]} = file:consult(InFile),
+
+ [{"NO LOCK",0} | LL] = LL0,
+ Map = maps:from_list([{Id, Name} || {Name, Id, _, _} <- LL]),
+
+ case file:open(OutFile, [exclusive]) of
+ {ok, Out} ->
+ ok = file:write(Out, "digraph G {\n"),
+
+ [dot_print_lock(Out, Lck, Map) || Lck <- LL],
+
+ ok = file:write(Out, "}\n"),
+ ok = file:close(Out);
+
+ {error,eexist} ->
+ {"File already exists", OutFile}
+ end.
+
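A possible workflow on a lock-checking VM (file names are examples only):

    erts_debug:lc_graph(),                                     %% dumps "lc_graph.<pid>"
    erts_debug:lc_graph_to_dot("locks.dot", "lc_graph.12345"),
    %% lc_graph_merge/2 (below) can first combine dumps from several runs;
    %% on a VM without lock checking, lc_graph() returns notsup instead.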
+dot_print_lock(Out, {_Name, Id, Lst, _}, Map) ->
+ [dot_print_edge(Out, From, Id, Map) || From <- Lst],
+ ok.
+
+dot_print_edge(_, 0, _, _) ->
+ ignore; % "NO LOCK"
+dot_print_edge(Out, From, To, Map) ->
+ io:format(Out, "~p -> ~p;\n", [maps:get(From,Map), maps:get(To,Map)]).
+
+
+%% Merge several "lc_graph" files into one file.
+lc_graph_merge(OutFile, InFiles) ->
+ LLs = lists:map(fun(InFile) ->
+ {ok, [LL]} = file:consult(InFile),
+ LL
+ end,
+ InFiles),
+
+ Res = lists:foldl(fun(A, B) -> lcg_merge(A, B) end,
+ hd(LLs),
+ tl(LLs)),
+ case file:open(OutFile, [exclusive]) of
+ {ok, Out} ->
+ try
+ lcg_print(Out, Res)
+ after
+ file:close(Out)
+ end,
+ ok;
+ {error, eexist} ->
+ {"File already exists", OutFile}
+ end.
+
+lcg_merge(A, B) ->
+ lists:zipwith(fun(LA, LB) -> lcg_merge_locks(LA, LB) end,
+ A, B).
+
+lcg_merge_locks(L, L) ->
+ L;
+lcg_merge_locks({Name, Id, DA, IA}, {Name, Id, DB, IB}) ->
+ Direct = lists:umerge(DA, DB),
+ Indirect = lists:umerge(IA, IB),
+ {Name, Id, Direct, Indirect -- Direct}.
+
+
+lcg_print(Out, LL) ->
+ io:format(Out, "[", []),
+ lcg_print_locks(Out, LL),
+ io:format(Out, "].\n", []),
+ ok.
+
+lcg_print_locks(Out, [{_,_}=NoLock | Rest]) ->
+ io:format(Out, "~p,\n", [NoLock]),
+ lcg_print_locks(Out, Rest);
+lcg_print_locks(Out, [LastLock]) ->
+ io:format(Out, "~w", [LastLock]);
+lcg_print_locks(Out, [Lock | Rest]) ->
+ io:format(Out, "~w,\n", [Lock]),
+ lcg_print_locks(Out, Rest).
diff --git a/lib/kernel/src/file.erl b/lib/kernel/src/file.erl
index 933f2d5f65..1d4e37196c 100644
--- a/lib/kernel/src/file.erl
+++ b/lib/kernel/src/file.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -69,10 +69,10 @@
%% Types that can be used from other modules -- alphabetically ordered.
-export_type([date_time/0, fd/0, file_info/0, filename/0, filename_all/0,
- io_device/0, name/0, name_all/0, posix/0]).
+ io_device/0, mode/0, name/0, name_all/0, posix/0]).
%%% Includes and defines
--include("file.hrl").
+-include("file_int.hrl").
-define(FILE_IO_SERVER_TABLE, file_io_servers).
@@ -101,14 +101,25 @@
-type deep_list() :: [char() | atom() | deep_list()].
-type name() :: string() | atom() | deep_list().
-type name_all() :: string() | atom() | deep_list() | (RawFilename :: binary()).
--type posix() :: 'eacces' | 'eagain' | 'ebadf' | 'ebusy' | 'edquot'
- | 'eexist' | 'efault' | 'efbig' | 'eintr' | 'einval'
- | 'eio' | 'eisdir' | 'eloop' | 'emfile' | 'emlink'
- | 'enametoolong'
- | 'enfile' | 'enodev' | 'enoent' | 'enomem' | 'enospc'
- | 'enotblk' | 'enotdir' | 'enotsup' | 'enxio' | 'eperm'
- | 'epipe' | 'erofs' | 'espipe' | 'esrch' | 'estale'
- | 'exdev'.
+-type posix() ::
+ 'eacces' | 'eagain' |
+ 'ebadf' | 'ebadmsg' | 'ebusy' |
+ 'edeadlk' | 'edeadlock' | 'edquot' |
+ 'eexist' |
+ 'efault' | 'efbig' | 'eftype' |
+ 'eintr' | 'einval' | 'eio' | 'eisdir' |
+ 'eloop' |
+ 'emfile' | 'emlink' | 'emultihop' |
+ 'enametoolong' | 'enfile' |
+ 'enobufs' | 'enodev' | 'enolck' | 'enolink' | 'enoent' |
+ 'enomem' | 'enospc' | 'enosr' | 'enostr' | 'enosys' |
+ 'enotblk' | 'enotdir' | 'enotsup' | 'enxio' |
+ 'eopnotsupp' | 'eoverflow' |
+ 'eperm' | 'epipe' |
+ 'erange' | 'erofs' |
+ 'espipe' | 'esrch' | 'estale' |
+ 'etxtbsy' |
+ 'exdev'.
-type date_time() :: calendar:datetime().
-type posix_file_advise() :: 'normal' | 'sequential' | 'random'
| 'no_reuse' | 'will_need' | 'dont_need'.
@@ -454,41 +465,23 @@ raw_write_file_info(Name, #file_info{} = Info) ->
Reason :: posix() | badarg | system_limit.
open(Item, ModeList) when is_list(ModeList) ->
- case lists:member(raw, ModeList) of
- %% Raw file, use ?PRIM_FILE to handle this file
- true ->
+ case {lists:member(raw, ModeList), lists:member(ram, ModeList)} of
+ {false, false} ->
+ %% File server file
Args = [file_name(Item) | ModeList],
case check_args(Args) of
ok ->
[FileName | _] = Args,
- %% We rely on the returned Handle (in {ok, Handle})
- %% being a pid() or a #file_descriptor{}
- ?PRIM_FILE:open(FileName, ModeList);
+ call(open, [FileName, ModeList]);
Error ->
Error
- end;
- false ->
- case lists:member(ram, ModeList) of
- %% RAM file, use ?RAM_FILE to handle this file
- true ->
- case check_args(ModeList) of
- ok ->
- ?RAM_FILE:open(Item, ModeList);
- Error ->
- Error
- end;
- %% File server file
- false ->
- Args = [file_name(Item) | ModeList],
- case check_args(Args) of
- ok ->
- [FileName | _] = Args,
- call(open, [FileName, ModeList]);
- Error ->
- Error
- end
- end
+ end;
+ {true, _Either} ->
+ raw_file_io:open(file_name(Item), ModeList);
+ {false, true} ->
+ ram_file:open(Item, ModeList)
end;
+
%% Old obsolete mode specification in atom or 2-tuple format
open(Item, Mode) ->
open(Item, mode_list(Mode)).
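A sketch of where open/2 now dispatches; file names and contents are examples:

    file:open("a.txt", [raw, read, binary])   %% -> raw_file_io:open/2
    file:open(<<"contents">>, [ram, read])    %% -> ram_file:open/2 (Item is the data)
    file:open("a.txt", [read])                %% -> file server, via call(open, ...)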
@@ -1254,15 +1247,18 @@ sendfile(File, _Sock, _Offet, _Bytes, _Opts) when is_pid(File) ->
sendfile(File, Sock, Offset, Bytes, []) ->
sendfile(File, Sock, Offset, Bytes, ?MAX_CHUNK_SIZE, [], [], []);
sendfile(File, Sock, Offset, Bytes, Opts) ->
- ChunkSize0 = proplists:get_value(chunk_size, Opts, ?MAX_CHUNK_SIZE),
- ChunkSize = if ChunkSize0 > ?MAX_CHUNK_SIZE ->
- ?MAX_CHUNK_SIZE;
- true -> ChunkSize0
- end,
- %% Support for headers, trailers and options has been removed because the
- %% Darwin and BSD API for using it does not play nice with
- %% non-blocking sockets. See unix_efile.c for more info.
- sendfile(File, Sock, Offset, Bytes, ChunkSize, [], [], Opts).
+ try proplists:get_value(chunk_size, Opts, ?MAX_CHUNK_SIZE) of
+ ChunkSize0 when is_integer(ChunkSize0) ->
+ ChunkSize = erlang:min(ChunkSize0, ?MAX_CHUNK_SIZE),
+ %% Support for headers, trailers and options has been removed
+ %% because the Darwin and BSD API for using it does not play nice
+ %% with non-blocking sockets. See unix_efile.c for more info.
+ sendfile(File, Sock, Offset, Bytes, ChunkSize, [], [], Opts);
+ _Other ->
+ {error, badarg}
+ catch
+ error:_ -> {error, badarg}
+ end.
%% sendfile/2
-spec sendfile(Filename, Socket) ->
@@ -1397,8 +1393,8 @@ eval_stream2({ok,Form,EndLine}, Fd, H, Last, E, Bs0) ->
try erl_eval:exprs(Form, Bs0) of
{value,V,Bs} ->
eval_stream(Fd, H, EndLine, {V}, E, Bs)
- catch Class:Reason ->
- Error = {EndLine,?MODULE,{Class,Reason,erlang:get_stacktrace()}},
+ catch Class:Reason:StackTrace ->
+ Error = {EndLine,?MODULE,{Class,Reason,StackTrace}},
eval_stream(Fd, H, EndLine, Last, [Error|E], Bs0)
end;
eval_stream2({error,What,EndLine}, Fd, H, Last, E, Bs) ->
diff --git a/lib/kernel/src/file_int.hrl b/lib/kernel/src/file_int.hrl
new file mode 100644
index 0000000000..bafc330c04
--- /dev/null
+++ b/lib/kernel/src/file_int.hrl
@@ -0,0 +1,33 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+%% Internal definitions for the 'file' module and friends.
+%%
+
+-ifndef(FILE_INTERNAL_HRL_).
+-define(FILE_INTERNAL_HRL_, 1).
+
+-include("file.hrl").
+
+-define(CALL_FD(Fd, Method, Args),
+ apply(Fd#file_descriptor.module, Method, [Fd | Args])).
+
+-endif.
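As a sketch, for a descriptor Fd whose module field is Mod, the macro call

    ?CALL_FD(Fd, read, [Size])

expands to

    apply(Fd#file_descriptor.module, read, [Fd, Size])    %% i.e. Mod:read(Fd, Size)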
diff --git a/lib/kernel/src/file_io_server.erl b/lib/kernel/src/file_io_server.erl
index deb7b315b1..34d5497a4a 100644
--- a/lib/kernel/src/file_io_server.erl
+++ b/lib/kernel/src/file_io_server.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2015. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -28,7 +28,8 @@
-record(state, {handle,owner,mref,buf,read_mode,unic}).
--define(PRIM_FILE, prim_file).
+-include("file_int.hrl").
+
-define(READ_SIZE_LIST, 128).
-define(READ_SIZE_BINARY, (8*1024)).
@@ -67,8 +68,9 @@ do_start(Spawn, Owner, FileName, ModeList) ->
erlang:dt_restore_tag(Utag),
%% process_flag(trap_exit, true),
case parse_options(ModeList) of
- {ReadMode, UnicodeMode, Opts} ->
- case ?PRIM_FILE:open(FileName, Opts) of
+ {ReadMode, UnicodeMode, Opts0} ->
+ Opts = maybe_add_read_ahead(ReadMode, Opts0),
+ case raw_file_io:open(FileName, [raw | Opts]) of
{error, Reason} = Error ->
Self ! {Ref, Error},
exit(Reason);
@@ -157,6 +159,24 @@ valid_enc({utf32,little}) ->
valid_enc(_Other) ->
{error,badarg}.
+%% Add a small read_ahead buffer if the file is opened for reading
+%% only in list mode and no read_ahead is already given.
+maybe_add_read_ahead(binary, Opts) ->
+ Opts;
+maybe_add_read_ahead(list, Opts) ->
+ P = fun(read_ahead) -> true;
+ ({read_ahead,_}) -> true;
+ (append) -> true;
+ (exclusive) -> true;
+ (write) -> true;
+ (_) -> false
+ end,
+ case lists:any(P, Opts) of
+ false ->
+ [{read_ahead, 4096}|Opts];
+ true ->
+ Opts
+ end.
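A few worked cases of the option handling above (option lists are examples):

    maybe_add_read_ahead(list, [read])                    %% -> [{read_ahead,4096}, read]
    maybe_add_read_ahead(list, [read, {read_ahead,1024}]) %% -> unchanged
    maybe_add_read_ahead(binary, [read])                  %% -> unchanged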
server_loop(#state{mref = Mref} = State) ->
receive
@@ -205,7 +225,7 @@ io_reply(From, ReplyAs, Reply) ->
file_request({advise,Offset,Length,Advise},
#state{handle=Handle}=State) ->
- case ?PRIM_FILE:advise(Handle, Offset, Length, Advise) of
+ case ?CALL_FD(Handle, advise, [Offset, Length, Advise]) of
{error,Reason}=Reply ->
{stop,Reason,Reply,State};
Reply ->
@@ -213,7 +233,7 @@ file_request({advise,Offset,Length,Advise},
end;
file_request({allocate, Offset, Length},
#state{handle = Handle} = State) ->
- Reply = ?PRIM_FILE:allocate(Handle, Offset, Length),
+ Reply = ?CALL_FD(Handle, allocate, [Offset, Length]),
{reply, Reply, State};
file_request({pread,At,Sz}, State)
when At =:= cur;
@@ -256,7 +276,7 @@ file_request({pwrite,At,Data},
end;
file_request(datasync,
#state{handle=Handle}=State) ->
- case ?PRIM_FILE:datasync(Handle) of
+ case ?CALL_FD(Handle, datasync, []) of
{error,Reason}=Reply ->
{stop,Reason,Reply,State};
Reply ->
@@ -264,7 +284,7 @@ file_request(datasync,
end;
file_request(sync,
#state{handle=Handle}=State) ->
- case ?PRIM_FILE:sync(Handle) of
+ case ?CALL_FD(Handle, sync, []) of
{error,Reason}=Reply ->
{stop,Reason,Reply,State};
Reply ->
@@ -272,7 +292,7 @@ file_request(sync,
end;
file_request(close,
#state{handle=Handle}=State) ->
- case ?PRIM_FILE:close(Handle) of
+ case ?CALL_FD(Handle, close, []) of
{error,Reason}=Reply ->
{stop,Reason,Reply,State#state{buf= <<>>}};
Reply ->
@@ -288,7 +308,7 @@ file_request({position,At},
end;
file_request(truncate,
#state{handle=Handle}=State) ->
- case ?PRIM_FILE:truncate(Handle) of
+ case ?CALL_FD(Handle, truncate, []) of
{error,Reason}=Reply ->
{stop,Reason,Reply,State#state{buf= <<>>}};
Reply ->
@@ -398,7 +418,7 @@ io_request_loop([Request|Tail],
%%
put_chars(Chars, latin1, #state{handle=Handle, unic=latin1}=State) ->
NewState = State#state{buf = <<>>},
- case ?PRIM_FILE:write(Handle, Chars) of
+ case ?CALL_FD(Handle, write, [Chars]) of
{error,Reason}=Reply ->
{stop,Reason,Reply,NewState};
Reply ->
@@ -408,7 +428,7 @@ put_chars(Chars, InEncoding, #state{handle=Handle, unic=OutEncoding}=State) ->
NewState = State#state{buf = <<>>},
case unicode:characters_to_binary(Chars,InEncoding,OutEncoding) of
Bin when is_binary(Bin) ->
- case ?PRIM_FILE:write(Handle, Bin) of
+ case ?CALL_FD(Handle, write, [Bin]) of
{error,Reason}=Reply ->
{stop,Reason,Reply,NewState};
Reply ->
@@ -422,7 +442,7 @@ put_chars(Chars, InEncoding, #state{handle=Handle, unic=OutEncoding}=State) ->
get_line(S, {<<>>, Cont}, OutEnc,
#state{handle=Handle, read_mode=Mode, unic=InEnc}=State) ->
- case ?PRIM_FILE:read(Handle, read_size(Mode)) of
+ case ?CALL_FD(Handle, read, [read_size(Mode)]) of
{ok,Bin} ->
get_line(S, convert_enc([Cont, Bin], InEnc, OutEnc), OutEnc, State);
eof ->
@@ -472,7 +492,7 @@ get_chars(N, OutEnc,#state{handle=Handle,buf=Buf,read_mode=ReadMode,unic=latin1}
BufSize = byte_size(Buf),
NeedSize = N-BufSize,
Size = erlang:max(NeedSize, ?READ_SIZE_BINARY),
- case ?PRIM_FILE:read(Handle, Size) of
+ case ?CALL_FD(Handle, read, [Size]) of
{ok, B} ->
if BufSize+byte_size(B) < N ->
std_reply(cat(Buf, B, ReadMode,latin1,OutEnc), State);
@@ -504,7 +524,7 @@ get_chars(N, OutEnc,#state{handle=Handle,buf=Buf,read_mode=ReadMode,unic=InEncod
%% Need more, Try to read 4*needed in bytes...
NeedSize = (N - BufCount) * 4,
Size = erlang:max(NeedSize, ?READ_SIZE_BINARY),
- case ?PRIM_FILE:read(Handle, Size) of
+ case ?CALL_FD(Handle, read, [Size]) of
{ok, B} ->
NewBuf = list_to_binary([Buf,B]),
{NewCount,NewSplit} = count_and_find(NewBuf,N,InEncoding),
@@ -544,7 +564,7 @@ get_chars(Mod, Func, XtraArg, OutEnc, #state{buf=Buf}=State) ->
get_chars_empty(Mod, Func, XtraArg, S, latin1,
#state{handle=Handle,read_mode=ReadMode, unic=latin1}=State) ->
- case ?PRIM_FILE:read(Handle, read_size(ReadMode)) of
+ case ?CALL_FD(Handle, read, [read_size(ReadMode)]) of
{ok,Bin} ->
get_chars_apply(Mod, Func, XtraArg, S, latin1, State, Bin);
eof ->
@@ -554,7 +574,7 @@ get_chars_empty(Mod, Func, XtraArg, S, latin1,
end;
get_chars_empty(Mod, Func, XtraArg, S, OutEnc,
#state{handle=Handle,read_mode=ReadMode}=State) ->
- case ?PRIM_FILE:read(Handle, read_size(ReadMode)) of
+ case ?CALL_FD(Handle, read, [read_size(ReadMode)]) of
{ok,Bin} ->
get_chars_apply(Mod, Func, XtraArg, S, OutEnc, State, Bin);
eof ->
@@ -564,7 +584,7 @@ get_chars_empty(Mod, Func, XtraArg, S, OutEnc,
end.
get_chars_notempty(Mod, Func, XtraArg, S, OutEnc,
#state{handle=Handle,read_mode=ReadMode,buf = B}=State) ->
- case ?PRIM_FILE:read(Handle, read_size(ReadMode)) of
+ case ?CALL_FD(Handle, read, [read_size(ReadMode)]) of
{ok,Bin} ->
get_chars_apply(Mod, Func, XtraArg, S, OutEnc, State, list_to_binary([B,Bin]));
eof ->
@@ -918,13 +938,10 @@ cbv({utf32,little},_) ->
%% Compensates ?PRIM_FILE:position/2 for the number of bytes
%% we have buffered
position(Handle, At, Buf) ->
- ?PRIM_FILE:position(
- Handle,
- case At of
- cur ->
- {cur, -byte_size(Buf)};
- {cur, Offs} ->
- {cur, Offs-byte_size(Buf)};
- _ ->
- At
- end).
+ SeekTo =
+ case At of
+ {cur, Offs} -> {cur, Offs-byte_size(Buf)};
+ cur -> {cur, -byte_size(Buf)};
+ _ -> At
+ end,
+ ?CALL_FD(Handle, position, [SeekTo]).
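A note on the read_ahead hunk above: a plain list-mode, read-only open now gets a 4096-byte read-ahead buffer automatically, unless the caller already asked for one. A minimal sketch of the effect (the file name is illustrative; maybe_add_read_ahead/2 itself is not exported):

    %% With the change above, these two opens end up buffered the same way:
    {ok, F1} = file:open("data.txt", [read]),
    {ok, F2} = file:open("data.txt", [read, {read_ahead, 4096}]),
    %% Binary mode and any write/append/exclusive open are left untouched:
    {ok, F3} = file:open("data.txt", [read, binary]).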
diff --git a/lib/kernel/src/file_server.erl b/lib/kernel/src/file_server.erl
index 6e8f64d932..29eaa23375 100644
--- a/lib/kernel/src/file_server.erl
+++ b/lib/kernel/src/file_server.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -63,7 +63,7 @@ stop() ->
%%% Callback functions from gen_server
%%%----------------------------------------------------------------------
--type state() :: port(). % Internal type
+-type state() :: term(). % Internal type
%%----------------------------------------------------------------------
%% Func: init/1
@@ -73,18 +73,12 @@ stop() ->
%% {stop, Reason}
%%----------------------------------------------------------------------
--spec init([]) -> {'ok', state()} | {'stop', term()}.
+-spec init([]) -> {'ok', state()}.
init([]) ->
process_flag(trap_exit, true),
- case ?PRIM_FILE:start() of
- {ok, Handle} ->
- ?FILE_IO_SERVER_TABLE =
- ets:new(?FILE_IO_SERVER_TABLE, [named_table]),
- {ok, Handle};
- {error, Reason} ->
- {stop, Reason}
- end.
+ ?FILE_IO_SERVER_TABLE = ets:new(?FILE_IO_SERVER_TABLE, [named_table]),
+ {ok, undefined}.
%%----------------------------------------------------------------------
%% Func: handle_call/3
@@ -101,7 +95,7 @@ init([]) ->
{'reply', 'eof' | 'ok' | {'error', term()} | {'ok', term()}, state()} |
{'stop', 'normal', 'stopped', state()}.
-handle_call({open, Name, ModeList}, {Pid, _Tag} = _From, Handle)
+handle_call({open, Name, ModeList}, {Pid, _Tag} = _From, State)
when is_list(ModeList) ->
Child = ?FILE_IO_SERVER:start_link(Pid, Name, ModeList),
case Child of
@@ -110,78 +104,78 @@ handle_call({open, Name, ModeList}, {Pid, _Tag} = _From, Handle)
_ ->
ok
end,
- {reply, Child, Handle};
+ {reply, Child, State};
-handle_call({open, _Name, _Mode}, _From, Handle) ->
- {reply, {error, einval}, Handle};
+handle_call({open, _Name, _Mode}, _From, State) ->
+ {reply, {error, einval}, State};
-handle_call({read_file, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:read_file(Name), Handle};
+handle_call({read_file, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:read_file(Name), State};
-handle_call({write_file, Name, Bin}, _From, Handle) ->
- {reply, ?PRIM_FILE:write_file(Name, Bin), Handle};
+handle_call({write_file, Name, Bin}, _From, State) ->
+ {reply, ?PRIM_FILE:write_file(Name, Bin), State};
-handle_call({set_cwd, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:set_cwd(Handle, Name), Handle};
+handle_call({set_cwd, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:set_cwd(Name), State};
-handle_call({delete, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:delete(Handle, Name), Handle};
+handle_call({delete, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:delete(Name), State};
-handle_call({rename, Fr, To}, _From, Handle) ->
- {reply, ?PRIM_FILE:rename(Handle, Fr, To), Handle};
+handle_call({rename, Fr, To}, _From, State) ->
+ {reply, ?PRIM_FILE:rename(Fr, To), State};
-handle_call({make_dir, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:make_dir(Handle, Name), Handle};
+handle_call({make_dir, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:make_dir(Name), State};
-handle_call({del_dir, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:del_dir(Handle, Name), Handle};
+handle_call({del_dir, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:del_dir(Name), State};
-handle_call({list_dir, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:list_dir(Handle, Name), Handle};
-handle_call({list_dir_all, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:list_dir_all(Handle, Name), Handle};
+handle_call({list_dir, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:list_dir(Name), State};
+handle_call({list_dir_all, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:list_dir_all(Name), State};
-handle_call(get_cwd, _From, Handle) ->
- {reply, ?PRIM_FILE:get_cwd(Handle), Handle};
-handle_call({get_cwd}, _From, Handle) ->
- {reply, ?PRIM_FILE:get_cwd(Handle), Handle};
-handle_call({get_cwd, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:get_cwd(Handle, Name), Handle};
+handle_call(get_cwd, _From, State) ->
+ {reply, ?PRIM_FILE:get_cwd(), State};
+handle_call({get_cwd}, _From, State) ->
+ {reply, ?PRIM_FILE:get_cwd(), State};
+handle_call({get_cwd, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:get_cwd(Name), State};
-handle_call({read_file_info, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:read_file_info(Handle, Name), Handle};
+handle_call({read_file_info, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:read_file_info(Name), State};
-handle_call({read_file_info, Name, Opts}, _From, Handle) ->
- {reply, ?PRIM_FILE:read_file_info(Handle, Name, Opts), Handle};
+handle_call({read_file_info, Name, Opts}, _From, State) ->
+ {reply, ?PRIM_FILE:read_file_info(Name, Opts), State};
-handle_call({altname, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:altname(Handle, Name), Handle};
+handle_call({altname, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:altname(Name), State};
-handle_call({write_file_info, Name, Info}, _From, Handle) ->
- {reply, ?PRIM_FILE:write_file_info(Handle, Name, Info), Handle};
+handle_call({write_file_info, Name, Info}, _From, State) ->
+ {reply, ?PRIM_FILE:write_file_info(Name, Info), State};
-handle_call({write_file_info, Name, Info, Opts}, _From, Handle) ->
- {reply, ?PRIM_FILE:write_file_info(Handle, Name, Info, Opts), Handle};
+handle_call({write_file_info, Name, Info, Opts}, _From, State) ->
+ {reply, ?PRIM_FILE:write_file_info(Name, Info, Opts), State};
-handle_call({read_link_info, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:read_link_info(Handle, Name), Handle};
+handle_call({read_link_info, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:read_link_info(Name), State};
-handle_call({read_link_info, Name, Opts}, _From, Handle) ->
- {reply, ?PRIM_FILE:read_link_info(Handle, Name, Opts), Handle};
+handle_call({read_link_info, Name, Opts}, _From, State) ->
+ {reply, ?PRIM_FILE:read_link_info(Name, Opts), State};
-handle_call({read_link, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:read_link(Handle, Name), Handle};
-handle_call({read_link_all, Name}, _From, Handle) ->
- {reply, ?PRIM_FILE:read_link_all(Handle, Name), Handle};
+handle_call({read_link, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:read_link(Name), State};
+handle_call({read_link_all, Name}, _From, State) ->
+ {reply, ?PRIM_FILE:read_link_all(Name), State};
-handle_call({make_link, Old, New}, _From, Handle) ->
- {reply, ?PRIM_FILE:make_link(Handle, Old, New), Handle};
+handle_call({make_link, Old, New}, _From, State) ->
+ {reply, ?PRIM_FILE:make_link(Old, New), State};
-handle_call({make_symlink, Old, New}, _From, Handle) ->
- {reply, ?PRIM_FILE:make_symlink(Handle, Old, New), Handle};
+handle_call({make_symlink, Old, New}, _From, State) ->
+ {reply, ?PRIM_FILE:make_symlink(Old, New), State};
handle_call({copy, SourceName, SourceOpts, DestName, DestOpts, Length},
- _From, Handle) ->
+ _From, State) ->
Reply =
case ?PRIM_FILE:open(SourceName, [read, binary | SourceOpts]) of
{ok, Source} ->
@@ -201,14 +195,14 @@ handle_call({copy, SourceName, SourceOpts, DestName, DestOpts, Length},
{error, _} = Error ->
Error
end,
- {reply, Reply, Handle};
+ {reply, Reply, State};
-handle_call(stop, _From, Handle) ->
- {stop, normal, stopped, Handle};
+handle_call(stop, _From, State) ->
+ {stop, normal, stopped, State};
-handle_call(Request, From, Handle) ->
+handle_call(Request, From, State) ->
error_logger:error_msg("handle_call(~tp, ~tp, _)", [Request, From]),
- {noreply, Handle}.
+ {noreply, State}.
%%----------------------------------------------------------------------
%% Func: handle_cast/2
@@ -231,16 +225,11 @@ handle_cast(Msg, State) ->
%%----------------------------------------------------------------------
-spec handle_info(term(), state()) ->
- {'noreply', state()} | {'stop', 'normal', state()}.
+ {'noreply', state()}.
-handle_info({'EXIT', Pid, _Reason}, Handle) when is_pid(Pid) ->
+handle_info({'EXIT', Pid, _Reason}, State) when is_pid(Pid) ->
ets:delete(?FILE_IO_SERVER_TABLE, Pid),
- {noreply, Handle};
-
-handle_info({'EXIT', Handle, _Reason}, Handle) ->
- error_logger:error_msg("Port controlling ~w terminated in ~w",
- [?FILE_SERVER, ?MODULE]),
- {stop, normal, Handle};
+ {noreply, State};
handle_info(Info, State) ->
error_logger:error_msg("handle_Info(~tp, _)", [Info]),
@@ -254,8 +243,8 @@ handle_info(Info, State) ->
-spec terminate(term(), state()) -> 'ok'.
-terminate(_Reason, Handle) ->
- ?PRIM_FILE:stop(Handle).
+terminate(_Reason, _State) ->
+ ok.
%%----------------------------------------------------------------------
%% Func: code_change/3
diff --git a/lib/kernel/src/gen_sctp.erl b/lib/kernel/src/gen_sctp.erl
index a6aa0edd15..d893d44079 100644
--- a/lib/kernel/src/gen_sctp.erl
+++ b/lib/kernel/src/gen_sctp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -66,7 +66,12 @@
{sctp_set_peer_primary_addr, #sctp_setpeerprim{}} |
{sctp_status, #sctp_status{}} |
{sndbuf, non_neg_integer()} |
- {tos, non_neg_integer()}.
+ {tos, non_neg_integer()} |
+ {tclass, non_neg_integer()} |
+ {ttl, non_neg_integer()} |
+ {recvtos, boolean()} |
+ {recvtclass, boolean()} |
+ {recvttl, boolean()}.
-type option_name() ::
active |
buffer |
@@ -97,7 +102,12 @@
sctp_set_peer_primary_addr |
sctp_status |
sndbuf |
- tos.
+ tos |
+ tclass |
+ ttl |
+ recvtos |
+ recvtclass |
+ recvttl.
-type sctp_socket() :: port().
-export_type([assoc_id/0, option/0, option_name/0, sctp_socket/0]).
@@ -118,6 +128,8 @@ open() ->
| inet:address_family()
| {port,Port}
| {type,SockType}
+ | {netns, file:filename_all()}
+ | {bind_to_device, binary()}
| option(),
IP :: inet:ip_address() | any | loopback,
Port :: inet:port_number(),
@@ -363,7 +375,7 @@ send(S, AssocChange, Stream, Data) ->
Socket :: sctp_socket(),
FromIP :: inet:ip_address(),
FromPort :: inet:port_number(),
- AncData :: [#sctp_sndrcvinfo{}],
+ AncData :: [#sctp_sndrcvinfo{} | inet:ancillary_data()],
Data :: binary() | string() | #sctp_sndrcvinfo{}
| #sctp_assoc_change{} | #sctp_paddr_change{}
| #sctp_adaptation_event{},
@@ -380,7 +392,7 @@ recv(S) ->
Timeout :: timeout(),
FromIP :: inet:ip_address(),
FromPort :: inet:port_number(),
- AncData :: [#sctp_sndrcvinfo{}],
+ AncData :: [#sctp_sndrcvinfo{} | inet:ancillary_data()],
Data :: binary() | string() | #sctp_sndrcvinfo{}
| #sctp_assoc_change{} | #sctp_paddr_change{}
| #sctp_adaptation_event{},
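The widened option and AncData types above can be exercised roughly as follows (a sketch; it assumes OS-level SCTP support and an established association delivering data, and which ancillary items arrive is platform dependent):

    {ok, S} = gen_sctp:open(0, [{recvtos, true}, {recvttl, true}]),
    {ok, {_FromIP, _FromPort, AncData, _Data}} = gen_sctp:recv(S, 5000),
    %% AncData may now contain {tos, Byte}, {tclass, Byte} or {ttl, Byte}
    %% tuples in addition to the usual #sctp_sndrcvinfo{} records.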
diff --git a/lib/kernel/src/gen_tcp.erl b/lib/kernel/src/gen_tcp.erl
index ac61dbc792..7f7833ec23 100644
--- a/lib/kernel/src/gen_tcp.erl
+++ b/lib/kernel/src/gen_tcp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -62,7 +62,14 @@
{show_econnreset, boolean()} |
{sndbuf, non_neg_integer()} |
{tos, non_neg_integer()} |
+ {tclass, non_neg_integer()} |
+ {ttl, non_neg_integer()} |
+ {recvtos, boolean()} |
+ {recvtclass, boolean()} |
+ {recvttl, boolean()} |
{ipv6_v6only, boolean()}.
+-type pktoptions_value() ::
+ {pktoptions, inet:ancillary_data()}.
-type option_name() ::
active |
buffer |
@@ -81,6 +88,7 @@
nodelay |
packet |
packet_size |
+ pktoptions |
priority |
{raw,
Protocol :: non_neg_integer(),
@@ -94,6 +102,12 @@
show_econnreset |
sndbuf |
tos |
+ tclass |
+ ttl |
+ recvtos |
+ recvtclass |
+ recvttl |
+ pktoptions |
ipv6_v6only.
-type connect_option() ::
{ip, inet:socket_address()} |
@@ -102,6 +116,8 @@
inet:address_family() |
{port, inet:port_number()} |
{tcp_module, module()} |
+ {netns, file:filename_all()} |
+ {bind_to_device, binary()} |
option().
-type listen_option() ::
{ip, inet:socket_address()} |
@@ -111,11 +127,13 @@
{port, inet:port_number()} |
{backlog, B :: non_neg_integer()} |
{tcp_module, module()} |
+ {netns, file:filename_all()} |
+ {bind_to_device, binary()} |
option().
-type socket() :: port().
-export_type([option/0, option_name/0, connect_option/0, listen_option/0,
- socket/0]).
+ socket/0, pktoptions_value/0]).
%%
%% Connect a socket
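The new pktoptions option name, together with the exported pktoptions_value() type, is read back through inet:getopts/2. A sketch (recvtos/pktoptions support is platform dependent; host and port are illustrative):

    {ok, S} = gen_tcp:connect("example.com", 80, [binary, {recvtos, true}]),
    {ok, [{pktoptions, AncData}]} = inet:getopts(S, [pktoptions]),
    %% AncData :: inet:ancillary_data(), for example [{tos, 0}]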
diff --git a/lib/kernel/src/gen_udp.erl b/lib/kernel/src/gen_udp.erl
index 3121544719..d6e8652e77 100644
--- a/lib/kernel/src/gen_udp.erl
+++ b/lib/kernel/src/gen_udp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -51,6 +51,11 @@
{reuseaddr, boolean()} |
{sndbuf, non_neg_integer()} |
{tos, non_neg_integer()} |
+ {tclass, non_neg_integer()} |
+ {ttl, non_neg_integer()} |
+ {recvtos, boolean()} |
+ {recvtclass, boolean()} |
+ {recvttl, boolean()} |
{ipv6_v6only, boolean()}.
-type option_name() ::
active |
@@ -76,6 +81,12 @@
reuseaddr |
sndbuf |
tos |
+ tclass |
+ ttl |
+ recvtos |
+ recvtclass |
+ recvttl |
+ pktoptions |
ipv6_v6only.
-type socket() :: port().
@@ -97,6 +108,8 @@ open(Port) ->
| {ifaddr, inet:socket_address()}
| inet:address_family()
| {port, inet:port_number()}
+ | {netns, file:filename_all()}
+ | {bind_to_device, binary()}
| option(),
Socket :: socket(),
Reason :: inet:posix().
@@ -145,11 +158,13 @@ send(S, Packet) when is_port(S) ->
end.
-spec recv(Socket, Length) ->
- {ok, {Address, Port, Packet}} | {error, Reason} when
+ {ok, RecvData} | {error, Reason} when
Socket :: socket(),
Length :: non_neg_integer(),
+ RecvData :: {Address, Port, Packet} | {Address, Port, AncData, Packet},
Address :: inet:ip_address() | inet:returned_non_ip_address(),
Port :: inet:port_number(),
+ AncData :: inet:ancillary_data(),
Packet :: string() | binary(),
Reason :: not_owner | inet:posix().
@@ -162,12 +177,14 @@ recv(S,Len) when is_port(S), is_integer(Len) ->
end.
-spec recv(Socket, Length, Timeout) ->
- {ok, {Address, Port, Packet}} | {error, Reason} when
+ {ok, RecvData} | {error, Reason} when
Socket :: socket(),
Length :: non_neg_integer(),
Timeout :: timeout(),
+ RecvData :: {Address, Port, Packet} | {Address, Port, AncData, Packet},
Address :: inet:ip_address() | inet:returned_non_ip_address(),
Port :: inet:port_number(),
+ AncData :: inet:ancillary_data(),
Packet :: string() | binary(),
Reason :: not_owner | inet:posix().
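With any of the new recv* options set, the widened recv/2,3 specs above mean the reply may grow a fourth, ancillary-data element. A loopback sketch (what actually arrives in AncData depends on the OS):

    {ok, S} = gen_udp:open(0, [binary, {active, false}, {recvtos, true}]),
    {ok, Port} = inet:port(S),
    ok = gen_udp:send(S, {127,0,0,1}, Port, <<"ping">>),
    {ok, {_Addr, _Port, AncData, <<"ping">>}} = gen_udp:recv(S, 0, 1000),
    %% AncData is for example [{tos,0}]; without any recv* option the old
    %% {Address, Port, Packet} 3-tuple is returned exactly as before.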
diff --git a/lib/kernel/src/group.erl b/lib/kernel/src/group.erl
index bf785959ff..5625ae6eb7 100644
--- a/lib/kernel/src/group.erl
+++ b/lib/kernel/src/group.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -114,7 +114,7 @@ server_loop(Drv, Shell, Buf0) ->
{io_request,From,ReplyAs,Req} when is_pid(From) ->
%% This io_request may cause a transition to a couple of
%% selective receive loops elsewhere in this module.
- Buf = io_request(Req, From, ReplyAs, Drv, Buf0),
+ Buf = io_request(Req, From, ReplyAs, Drv, Shell, Buf0),
server_loop(Drv, Shell, Buf);
{reply,{{From,ReplyAs},Reply}} ->
io_reply(From, ReplyAs, Reply),
@@ -135,7 +135,7 @@ server_loop(Drv, Shell, Buf0) ->
exit(R);
%% We want to throw away any term that we don't handle (standard
%% practice in receive loops), but not any {Drv,_} tuples which are
- %% handled in io_request/5.
+ %% handled in io_request/6.
NotDrvTuple when (not is_tuple(NotDrvTuple)) orelse
(tuple_size(NotDrvTuple) =/= 2) orelse
(element(1, NotDrvTuple) =/= Drv) ->
@@ -177,8 +177,8 @@ set_unicode_state(Drv,Bool) ->
end.
-io_request(Req, From, ReplyAs, Drv, Buf0) ->
- case io_request(Req, Drv, {From,ReplyAs}, Buf0) of
+io_request(Req, From, ReplyAs, Drv, Shell, Buf0) ->
+ case io_request(Req, Drv, Shell, {From,ReplyAs}, Buf0) of
{ok,Reply,Buf} ->
io_reply(From, ReplyAs, Reply),
Buf;
@@ -208,7 +208,7 @@ io_request(Req, From, ReplyAs, Drv, Buf0) ->
%%
%% These put requests have to be synchronous to the driver as otherwise
%% there is no guarantee that the data has actually been printed.
-io_request({put_chars,unicode,Chars}, Drv, From, Buf) ->
+io_request({put_chars,unicode,Chars}, Drv, _Shell, From, Buf) ->
case catch unicode:characters_to_binary(Chars,utf8) of
Binary when is_binary(Binary) ->
send_drv(Drv, {put_chars_sync, unicode, Binary, {From,ok}}),
@@ -216,7 +216,7 @@ io_request({put_chars,unicode,Chars}, Drv, From, Buf) ->
_ ->
{error,{error,{put_chars, unicode,Chars}},Buf}
end;
-io_request({put_chars,unicode,M,F,As}, Drv, From, Buf) ->
+io_request({put_chars,unicode,M,F,As}, Drv, _Shell, From, Buf) ->
case catch apply(M, F, As) of
Binary when is_binary(Binary) ->
send_drv(Drv, {put_chars_sync, unicode, Binary, {From,ok}}),
@@ -230,12 +230,12 @@ io_request({put_chars,unicode,M,F,As}, Drv, From, Buf) ->
{error,{error,F},Buf}
end
end;
-io_request({put_chars,latin1,Binary}, Drv, From, Buf) when is_binary(Binary) ->
+io_request({put_chars,latin1,Binary}, Drv, _Shell, From, Buf) when is_binary(Binary) ->
send_drv(Drv, {put_chars_sync, unicode,
unicode:characters_to_binary(Binary,latin1),
{From,ok}}),
{noreply,Buf};
-io_request({put_chars,latin1,Chars}, Drv, From, Buf) ->
+io_request({put_chars,latin1,Chars}, Drv, _Shell, From, Buf) ->
case catch unicode:characters_to_binary(Chars,latin1) of
Binary when is_binary(Binary) ->
send_drv(Drv, {put_chars_sync, unicode, Binary, {From,ok}}),
@@ -243,7 +243,7 @@ io_request({put_chars,latin1,Chars}, Drv, From, Buf) ->
_ ->
{error,{error,{put_chars,latin1,Chars}},Buf}
end;
-io_request({put_chars,latin1,M,F,As}, Drv, From, Buf) ->
+io_request({put_chars,latin1,M,F,As}, Drv, _Shell, From, Buf) ->
case catch apply(M, F, As) of
Binary when is_binary(Binary) ->
send_drv(Drv, {put_chars_sync, unicode,
@@ -260,30 +260,30 @@ io_request({put_chars,latin1,M,F,As}, Drv, From, Buf) ->
end
end;
-io_request({get_chars,Encoding,Prompt,N}, Drv, _From, Buf) ->
- get_chars(Prompt, io_lib, collect_chars, N, Drv, Buf, Encoding);
-io_request({get_line,Encoding,Prompt}, Drv, _From, Buf) ->
- get_chars(Prompt, io_lib, collect_line, [], Drv, Buf, Encoding);
-io_request({get_until,Encoding, Prompt,M,F,As}, Drv, _From, Buf) ->
- get_chars(Prompt, io_lib, get_until, {M,F,As}, Drv, Buf, Encoding);
-io_request({get_password,_Encoding},Drv,_From,Buf) ->
- get_password_chars(Drv, Buf);
-io_request({setopts,Opts}, Drv, _From, Buf) when is_list(Opts) ->
+io_request({get_chars,Encoding,Prompt,N}, Drv, Shell, _From, Buf) ->
+ get_chars_n(Prompt, io_lib, collect_chars, N, Drv, Shell, Buf, Encoding);
+io_request({get_line,Encoding,Prompt}, Drv, Shell, _From, Buf) ->
+ get_chars_line(Prompt, io_lib, collect_line, [], Drv, Shell, Buf, Encoding);
+io_request({get_until,Encoding, Prompt,M,F,As}, Drv, Shell, _From, Buf) ->
+ get_chars_line(Prompt, io_lib, get_until, {M,F,As}, Drv, Shell, Buf, Encoding);
+io_request({get_password,_Encoding},Drv,Shell,_From,Buf) ->
+ get_password_chars(Drv, Shell, Buf);
+io_request({setopts,Opts}, Drv, _Shell, _From, Buf) when is_list(Opts) ->
setopts(Opts, Drv, Buf);
-io_request(getopts, Drv, _From, Buf) ->
+io_request(getopts, Drv, _Shell, _From, Buf) ->
getopts(Drv, Buf);
-io_request({requests,Reqs}, Drv, From, Buf) ->
- io_requests(Reqs, {ok,ok,Buf}, From, Drv);
+io_request({requests,Reqs}, Drv, Shell, From, Buf) ->
+ io_requests(Reqs, {ok,ok,Buf}, From, Drv, Shell);
%% New in R12
-io_request({get_geometry,columns},Drv,_From,Buf) ->
+io_request({get_geometry,columns},Drv,_Shell,_From,Buf) ->
case get_tty_geometry(Drv) of
{W,_H} ->
{ok,W,Buf};
_ ->
{error,{error,enotsup},Buf}
end;
-io_request({get_geometry,rows},Drv,_From,Buf) ->
+io_request({get_geometry,rows},Drv,_Shell,_From,Buf) ->
case get_tty_geometry(Drv) of
{_W,H} ->
{ok,H,Buf};
@@ -292,40 +292,40 @@ io_request({get_geometry,rows},Drv,_From,Buf) ->
end;
%% BC with pre-R13
-io_request({put_chars,Chars}, Drv, From, Buf) ->
- io_request({put_chars,latin1,Chars}, Drv, From, Buf);
-io_request({put_chars,M,F,As}, Drv, From, Buf) ->
- io_request({put_chars,latin1,M,F,As}, Drv, From, Buf);
-io_request({get_chars,Prompt,N}, Drv, From, Buf) ->
- io_request({get_chars,latin1,Prompt,N}, Drv, From, Buf);
-io_request({get_line,Prompt}, Drv, From, Buf) ->
- io_request({get_line,latin1,Prompt}, Drv, From, Buf);
-io_request({get_until, Prompt,M,F,As}, Drv, From, Buf) ->
- io_request({get_until,latin1, Prompt,M,F,As}, Drv, From, Buf);
-io_request(get_password,Drv,From,Buf) ->
- io_request({get_password,latin1},Drv,From,Buf);
-
-
-
-io_request(_, _Drv, _From, Buf) ->
+io_request({put_chars,Chars}, Drv, Shell, From, Buf) ->
+ io_request({put_chars,latin1,Chars}, Drv, Shell, From, Buf);
+io_request({put_chars,M,F,As}, Drv, Shell, From, Buf) ->
+ io_request({put_chars,latin1,M,F,As}, Drv, Shell, From, Buf);
+io_request({get_chars,Prompt,N}, Drv, Shell, From, Buf) ->
+ io_request({get_chars,latin1,Prompt,N}, Drv, Shell, From, Buf);
+io_request({get_line,Prompt}, Drv, Shell, From, Buf) ->
+ io_request({get_line,latin1,Prompt}, Drv, Shell, From, Buf);
+io_request({get_until, Prompt,M,F,As}, Drv, Shell, From, Buf) ->
+ io_request({get_until,latin1, Prompt,M,F,As}, Drv, Shell, From, Buf);
+io_request(get_password,Drv,Shell,From,Buf) ->
+ io_request({get_password,latin1},Drv,Shell,From,Buf);
+
+
+
+io_request(_, _Drv, _Shell, _From, Buf) ->
{error,{error,request},Buf}.
-%% Status = io_requests(RequestList, PrevStat, From, Drv)
+%% Status = io_requests(RequestList, PrevStat, From, Drv, Shell)
%% Process a list of output requests as long as
%% the previous status is 'ok' or noreply.
%%
%% We use undefined as the From for all but the last request
%% in order to discard acknowledgements from those requests.
%%
-io_requests([R|Rs], {noreply,Buf}, From, Drv) ->
+io_requests([R|Rs], {noreply,Buf}, From, Drv, Shell) ->
ReqFrom = if Rs =:= [] -> From; true -> undefined end,
- io_requests(Rs, io_request(R, Drv, ReqFrom, Buf), From, Drv);
-io_requests([R|Rs], {ok,ok,Buf}, From, Drv) ->
+ io_requests(Rs, io_request(R, Drv, Shell, ReqFrom, Buf), From, Drv, Shell);
+io_requests([R|Rs], {ok,ok,Buf}, From, Drv, Shell) ->
ReqFrom = if Rs =:= [] -> From; true -> undefined end,
- io_requests(Rs, io_request(R, Drv, ReqFrom, Buf), From, Drv);
-io_requests([_|_], Error, _From, _Drv) ->
+ io_requests(Rs, io_request(R, Drv, Shell, ReqFrom, Buf), From, Drv, Shell);
+io_requests([_|_], Error, _From, _Drv, _Shell) ->
Error;
-io_requests([], Stat, _From, _) ->
+io_requests([], Stat, _From, _, _Shell) ->
Stat.
%% io_reply(From, ReplyAs, Reply)
@@ -333,7 +333,7 @@ io_requests([], Stat, _From, _) ->
%% The ACK contains the return value.
io_reply(undefined, _ReplyAs, _Reply) ->
- %% Ignore these replies as they are generated from io_requests/4.
+ %% Ignore these replies as they are generated from io_requests/5.
ok;
io_reply(From, ReplyAs, Reply) ->
From ! {io_reply,ReplyAs,Reply},
@@ -434,7 +434,7 @@ getopts(Drv,Buf) ->
{ok,[Exp,Echo,Bin,Uni],Buf}.
-%% get_chars(Prompt, Module, Function, XtraArgument, Drv, Buffer)
+%% get_chars_*(Prompt, Module, Function, XtraArgument, Drv, Buffer)
%% Gets characters from the input Drv until the applied function
%% returns {stop,Result,Rest}. Does not block output until input has been
%% received.
@@ -442,8 +442,8 @@ getopts(Drv,Buf) ->
%% {Result,NewSaveBuffer}
%% {error,What,NewSaveBuffer}
-get_password_chars(Drv,Buf) ->
- case get_password_line(Buf, Drv) of
+get_password_chars(Drv,Shell,Buf) ->
+ case get_password_line(Buf, Drv, Shell) of
{done, Line, Buf1} ->
{ok, Line, Buf1};
interrupted ->
@@ -452,36 +452,62 @@ get_password_chars(Drv,Buf) ->
{exit, terminated}
end.
-get_chars(Prompt, M, F, Xa, Drv, Buf, Encoding) ->
+get_chars_n(Prompt, M, F, Xa, Drv, Shell, Buf, Encoding) ->
Pbs = prompt_bytes(Prompt, Encoding),
- get_chars_loop(Pbs, M, F, Xa, Drv, Buf, start, Encoding).
+ case get(echo) of
+ true ->
+ get_chars_loop(Pbs, M, F, Xa, Drv, Shell, Buf, start, Encoding);
+ false ->
+ get_chars_n_loop(Pbs, M, F, Xa, Drv, Shell, Buf, start, Encoding)
+ end.
+
+get_chars_line(Prompt, M, F, Xa, Drv, Shell, Buf, Encoding) ->
+ Pbs = prompt_bytes(Prompt, Encoding),
+ get_chars_loop(Pbs, M, F, Xa, Drv, Shell, Buf, start, Encoding).
-get_chars_loop(Pbs, M, F, Xa, Drv, Buf0, State, Encoding) ->
- Result = case get(echo) of
+get_chars_loop(Pbs, M, F, Xa, Drv, Shell, Buf0, State, Encoding) ->
+ Result = case get(echo) of
true ->
- get_line(Buf0, Pbs, Drv, Encoding);
+ get_line(Buf0, Pbs, Drv, Shell, Encoding);
false ->
% get_line_echo_off only deals with lists
% and does not need encoding...
- get_line_echo_off(Buf0, Pbs, Drv)
+ get_line_echo_off(Buf0, Pbs, Drv, Shell)
end,
case Result of
- {done,Line,Buf1} ->
- get_chars_apply(Pbs, M, F, Xa, Drv, Buf1, State, Line, Encoding);
+ {done,Line,Buf} ->
+ get_chars_apply(Pbs, M, F, Xa, Drv, Shell, Buf, State, Line, Encoding);
interrupted ->
{error,{error,interrupted},[]};
terminated ->
{exit,terminated}
end.
-get_chars_apply(Pbs, M, F, Xa, Drv, Buf, State0, Line, Encoding) ->
+get_chars_apply(Pbs, M, F, Xa, Drv, Shell, Buf, State0, Line, Encoding) ->
case catch M:F(State0, cast(Line,get(read_mode), Encoding), Encoding, Xa) of
- {stop,Result,Rest} ->
- {ok,Result,append(Rest, Buf, Encoding)};
- {'EXIT',_} ->
- {error,{error,err_func(M, F, Xa)},[]};
- State1 ->
- get_chars_loop(Pbs, M, F, Xa, Drv, Buf, State1, Encoding)
+ {stop,Result,Rest} ->
+ {ok,Result,append(Rest, Buf, Encoding)};
+ {'EXIT',_} ->
+ {error,{error,err_func(M, F, Xa)},[]};
+ State1 ->
+ get_chars_loop(Pbs, M, F, Xa, Drv, Shell, Buf, State1, Encoding)
+ end.
+
+get_chars_n_loop(Pbs, M, F, Xa, Drv, Shell, Buf0, State, Encoding) ->
+ try M:F(State, cast(Buf0, get(read_mode), Encoding), Encoding, Xa) of
+ {stop,Result,Rest} ->
+ {ok, Result, Rest};
+ State1 ->
+ case get_chars_echo_off(Pbs, Drv, Shell) of
+ interrupted ->
+ {error,{error,interrupted},[]};
+ terminated ->
+ {exit,terminated};
+ Buf ->
+ get_chars_n_loop(Pbs, M, F, Xa, Drv, Shell, Buf, State1, Encoding)
+ end
+ catch _:_ ->
+ {error,{error,err_func(M, F, Xa)},[]}
end.
%% Convert error code to make it look as before
@@ -497,24 +523,24 @@ err_func(_, F, _) ->
%% {done,LineChars,RestChars}
%% interrupted
-get_line(Chars, Pbs, Drv, Encoding) ->
+get_line(Chars, Pbs, Drv, Shell, Encoding) ->
{more_chars,Cont,Rs} = edlin:start(Pbs),
send_drv_reqs(Drv, Rs),
- get_line1(edlin:edit_line(Chars, Cont), Drv, new_stack(get(line_buffer)),
+ get_line1(edlin:edit_line(Chars, Cont), Drv, Shell, new_stack(get(line_buffer)),
Encoding).
-get_line1({done,Line,Rest,Rs}, Drv, Ls, _Encoding) ->
+get_line1({done,Line,Rest,Rs}, Drv, _Shell, Ls, _Encoding) ->
send_drv_reqs(Drv, Rs),
save_line_buffer(Line, get_lines(Ls)),
{done,Line,Rest};
-get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding)
+get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Shell, Ls0, Encoding)
when ((Mode =:= none) and (Char =:= $\^P))
or ((Mode =:= meta_left_sq_bracket) and (Char =:= $A)) ->
send_drv_reqs(Drv, Rs),
case up_stack(save_line(Ls0, edlin:current_line(Cont))) of
{none,_Ls} ->
send_drv(Drv, beep),
- get_line1(edlin:edit_line(Cs, Cont), Drv, Ls0, Encoding);
+ get_line1(edlin:edit_line(Cs, Cont), Drv, Shell, Ls0, Encoding);
{Lcs,Ls} ->
send_drv_reqs(Drv, edlin:erase_line(Cont)),
{more_chars,Ncont,Nrs} = edlin:start(edlin:prompt(Cont)),
@@ -522,16 +548,17 @@ get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding)
get_line1(edlin:edit_line1(lists:sublist(Lcs, 1, length(Lcs)-1),
Ncont),
Drv,
+ Shell,
Ls, Encoding)
end;
-get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding)
+get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Shell, Ls0, Encoding)
when ((Mode =:= none) and (Char =:= $\^N))
or ((Mode =:= meta_left_sq_bracket) and (Char =:= $B)) ->
send_drv_reqs(Drv, Rs),
case down_stack(save_line(Ls0, edlin:current_line(Cont))) of
{none,_Ls} ->
send_drv(Drv, beep),
- get_line1(edlin:edit_line(Cs, Cont), Drv, Ls0, Encoding);
+ get_line1(edlin:edit_line(Cs, Cont), Drv, Shell, Ls0, Encoding);
{Lcs,Ls} ->
send_drv_reqs(Drv, edlin:erase_line(Cont)),
{more_chars,Ncont,Nrs} = edlin:start(edlin:prompt(Cont)),
@@ -539,6 +566,7 @@ get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding)
get_line1(edlin:edit_line1(lists:sublist(Lcs, 1, length(Lcs)-1),
Ncont),
Drv,
+ Shell,
Ls, Encoding)
end;
%% ^R = backward search, ^S = forward search.
@@ -551,7 +579,7 @@ get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding)
%% new modes: search, search_quit, search_found. These are added to
%% the regular ones (none, meta_left_sq_bracket) and handle special
%% cases of history search.
-get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls, Encoding)
+get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Shell, Ls, Encoding)
when ((Mode =:= none) and (Char =:= $\^R)) ->
send_drv_reqs(Drv, Rs),
%% drop current line, move to search mode. We store the current
@@ -561,8 +589,8 @@ get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls, Encoding)
Pbs = prompt_bytes("(search)`': ", Encoding),
{more_chars,Ncont,Nrs} = edlin:start(Pbs, search),
send_drv_reqs(Drv, Nrs),
- get_line1(edlin:edit_line1(Cs, Ncont), Drv, Ls, Encoding);
-get_line1({expand, Before, Cs0, Cont,Rs}, Drv, Ls0, Encoding) ->
+ get_line1(edlin:edit_line1(Cs, Ncont), Drv, Shell, Ls, Encoding);
+get_line1({expand, Before, Cs0, Cont,Rs}, Drv, Shell, Ls0, Encoding) ->
send_drv_reqs(Drv, Rs),
ExpandFun = get(expand_fun),
{Found, Add, Matches} = ExpandFun(Before),
@@ -577,37 +605,37 @@ get_line1({expand, Before, Cs0, Cont,Rs}, Drv, Ls0, Encoding) ->
send_drv(Drv, {put_chars, unicode, unicode:characters_to_binary(MatchStr,unicode)}),
[$\^L | Cs1]
end,
- get_line1(edlin:edit_line(Cs, Cont), Drv, Ls0, Encoding);
-get_line1({undefined,_Char,Cs,Cont,Rs}, Drv, Ls, Encoding) ->
+ get_line1(edlin:edit_line(Cs, Cont), Drv, Shell, Ls0, Encoding);
+get_line1({undefined,_Char,Cs,Cont,Rs}, Drv, Shell, Ls, Encoding) ->
send_drv_reqs(Drv, Rs),
send_drv(Drv, beep),
- get_line1(edlin:edit_line(Cs, Cont), Drv, Ls, Encoding);
+ get_line1(edlin:edit_line(Cs, Cont), Drv, Shell, Ls, Encoding);
%% The search item was found and accepted (new line entered on the exact
%% result found)
-get_line1({_What,Cont={line,_Prompt,_Chars,search_found},Rs}, Drv, Ls0, Encoding) ->
+get_line1({_What,Cont={line,_Prompt,_Chars,search_found},Rs}, Drv, Shell, Ls0, Encoding) ->
Line = edlin:current_line(Cont),
%% this may create duplicate entries.
Ls = save_line(new_stack(get_lines(Ls0)), Line),
- get_line1({done, Line, "", Rs}, Drv, Ls, Encoding);
+ get_line1({done, Line, "", Rs}, Drv, Shell, Ls, Encoding);
%% The search mode has been exited, but the user wants to remain in line
%% editing mode wherever that was, but editing the search result.
-get_line1({What,Cont={line,_Prompt,_Chars,search_quit},Rs}, Drv, Ls, Encoding) ->
+get_line1({What,Cont={line,_Prompt,_Chars,search_quit},Rs}, Drv, Shell, Ls, Encoding) ->
Line = edlin:current_chars(Cont),
%% Load back the old prompt with the correct line number.
case get(search_quit_prompt) of
undefined -> % should not happen. Fallback.
LsFallback = save_line(new_stack(get_lines(Ls)), Line),
- get_line1({done, "\n", Line, Rs}, Drv, LsFallback, Encoding);
+ get_line1({done, "\n", Line, Rs}, Drv, Shell, LsFallback, Encoding);
Prompt -> % redraw the line and keep going with the same stack position
NCont = {line,Prompt,{lists:reverse(Line),[]},none},
send_drv_reqs(Drv, Rs),
send_drv_reqs(Drv, edlin:erase_line(Cont)),
send_drv_reqs(Drv, edlin:redraw_line(NCont)),
- get_line1({What, NCont ,[]}, Drv, pad_stack(Ls), Encoding)
+ get_line1({What, NCont ,[]}, Drv, Shell, pad_stack(Ls), Encoding)
end;
%% Search mode is entered.
get_line1({What,{line,Prompt,{RevCmd0,_Aft},search},Rs},
- Drv, Ls0, Encoding) ->
+ Drv, Shell, Ls0, Encoding) ->
send_drv_reqs(Drv, Rs),
%% Figure out search direction. ^S and ^R are returned through edlin
%% whenever we received a search while being already in search mode.
@@ -629,61 +657,90 @@ get_line1({What,{line,Prompt,{RevCmd0,_Aft},search},Rs},
{Ls2, {RevCmd, "': "++Line}}
end,
Cont = {line,Prompt,NewStack,search},
- more_data(What, Cont, Drv, Ls, Encoding);
-get_line1({What,Cont0,Rs}, Drv, Ls, Encoding) ->
+ more_data(What, Cont, Drv, Shell, Ls, Encoding);
+get_line1({What,Cont0,Rs}, Drv, Shell, Ls, Encoding) ->
send_drv_reqs(Drv, Rs),
- more_data(What, Cont0, Drv, Ls, Encoding).
+ more_data(What, Cont0, Drv, Shell, Ls, Encoding).
-more_data(What, Cont0, Drv, Ls, Encoding) ->
+more_data(What, Cont0, Drv, Shell, Ls, Encoding) ->
receive
{Drv,{data,Cs}} ->
- get_line1(edlin:edit_line(Cs, Cont0), Drv, Ls, Encoding);
+ get_line1(edlin:edit_line(Cs, Cont0), Drv, Shell, Ls, Encoding);
{Drv,eof} ->
- get_line1(edlin:edit_line(eof, Cont0), Drv, Ls, Encoding);
+ get_line1(edlin:edit_line(eof, Cont0), Drv, Shell, Ls, Encoding);
{io_request,From,ReplyAs,Req} when is_pid(From) ->
{more_chars,Cont,_More} = edlin:edit_line([], Cont0),
send_drv_reqs(Drv, edlin:erase_line(Cont)),
- io_request(Req, From, ReplyAs, Drv, []), %WRONG!!!
+ io_request(Req, From, ReplyAs, Drv, Shell, []), %WRONG!!!
send_drv_reqs(Drv, edlin:redraw_line(Cont)),
- get_line1({more_chars,Cont,[]}, Drv, Ls, Encoding);
+ get_line1({more_chars,Cont,[]}, Drv, Shell, Ls, Encoding);
{reply,{{From,ReplyAs},Reply}} ->
%% We take care of replies from puts here as well
io_reply(From, ReplyAs, Reply),
- more_data(What, Cont0, Drv, Ls, Encoding);
+ more_data(What, Cont0, Drv, Shell, Ls, Encoding);
{'EXIT',Drv,interrupt} ->
interrupted;
{'EXIT',Drv,_} ->
- terminated
+ terminated;
+ {'EXIT',Shell,R} ->
+ exit(R)
after
get_line_timeout(What)->
- get_line1(edlin:edit_line([], Cont0), Drv, Ls, Encoding)
+ get_line1(edlin:edit_line([], Cont0), Drv, Shell, Ls, Encoding)
end.
-get_line_echo_off(Chars, Pbs, Drv) ->
+get_line_echo_off(Chars, Pbs, Drv, Shell) ->
send_drv_reqs(Drv, [{put_chars, unicode,Pbs}]),
- get_line_echo_off1(edit_line(Chars,[]), Drv).
+ get_line_echo_off1(edit_line(Chars,[]), Drv, Shell).
-get_line_echo_off1({Chars,[]}, Drv) ->
+get_line_echo_off1({Chars,[]}, Drv, Shell) ->
receive
{Drv,{data,Cs}} ->
- get_line_echo_off1(edit_line(Cs, Chars), Drv);
+ get_line_echo_off1(edit_line(Cs, Chars), Drv, Shell);
{Drv,eof} ->
- get_line_echo_off1(edit_line(eof, Chars), Drv);
+ get_line_echo_off1(edit_line(eof, Chars), Drv, Shell);
{io_request,From,ReplyAs,Req} when is_pid(From) ->
- io_request(Req, From, ReplyAs, Drv, []),
- get_line_echo_off1({Chars,[]}, Drv);
+ io_request(Req, From, ReplyAs, Drv, Shell, []),
+ get_line_echo_off1({Chars,[]}, Drv, Shell);
{reply,{{From,ReplyAs},Reply}} when From =/= undefined ->
%% We take care of replies from puts here as well
io_reply(From, ReplyAs, Reply),
- get_line_echo_off1({Chars,[]},Drv);
+ get_line_echo_off1({Chars,[]},Drv, Shell);
{'EXIT',Drv,interrupt} ->
interrupted;
{'EXIT',Drv,_} ->
- terminated
+ terminated;
+ {'EXIT',Shell,R} ->
+ exit(R)
end;
-get_line_echo_off1({Chars,Rest}, _Drv) ->
+get_line_echo_off1({Chars,Rest}, _Drv, _Shell) ->
{done,lists:reverse(Chars),case Rest of done -> []; _ -> Rest end}.
+get_chars_echo_off(Pbs, Drv, Shell) ->
+ send_drv_reqs(Drv, [{put_chars, unicode,Pbs}]),
+ get_chars_echo_off1(Drv, Shell).
+
+get_chars_echo_off1(Drv, Shell) ->
+ receive
+ {Drv, {data, Cs}} ->
+ Cs;
+ {Drv, eof} ->
+ eof;
+ {io_request,From,ReplyAs,Req} when is_pid(From) ->
+ io_request(Req, From, ReplyAs, Drv, Shell, []),
+ get_chars_echo_off1(Drv, Shell);
+ {reply,{{From,ReplyAs},Reply}} when From =/= undefined ->
+ %% We take care of replies from puts here as well
+ io_reply(From, ReplyAs, Reply),
+ get_chars_echo_off1(Drv, Shell);
+ {'EXIT',Drv,interrupt} ->
+ interrupted;
+ {'EXIT',Drv,_} ->
+ terminated;
+ {'EXIT',Shell,R} ->
+ exit(R)
+ end.
+
%% We support line editing for the ICANON mode except the following
%% line editing characters, which already have another meaning in
%% echo-on mode (See Advanced Programming in the Unix Environment, 2nd ed,
@@ -793,9 +850,9 @@ search_up_stack(Stack, Substr) ->
case up_stack(Stack) of
{none,NewStack} -> {none,NewStack};
{L, NewStack} ->
- case string:str(L, Substr) of
- 0 -> search_up_stack(NewStack, Substr);
- _ -> {string:strip(L,right,$\n), NewStack}
+ case string:find(L, Substr) of
+ nomatch -> search_up_stack(NewStack, Substr);
+ _ -> {string:trim(L, trailing, "$\n"), NewStack}
end
end.
@@ -803,39 +860,41 @@ search_down_stack(Stack, Substr) ->
case down_stack(Stack) of
{none,NewStack} -> {none,NewStack};
{L, NewStack} ->
- case string:str(L, Substr) of
- 0 -> search_down_stack(NewStack, Substr);
- _ -> {string:strip(L,right,$\n), NewStack}
+ case string:find(L, Substr) of
+ nomatch -> search_down_stack(NewStack, Substr);
+ _ -> {string:trim(L, trailing, "$\n"), NewStack}
end
end.
%% This is get_line without line editing (except for backspace) and
%% without echo.
-get_password_line(Chars, Drv) ->
- get_password1(edit_password(Chars,[]),Drv).
+get_password_line(Chars, Drv, Shell) ->
+ get_password1(edit_password(Chars,[]),Drv,Shell).
-get_password1({Chars,[]}, Drv) ->
+get_password1({Chars,[]}, Drv, Shell) ->
receive
{Drv,{data,Cs}} ->
- get_password1(edit_password(Cs,Chars),Drv);
+ get_password1(edit_password(Cs,Chars),Drv,Shell);
{io_request,From,ReplyAs,Req} when is_pid(From) ->
%send_drv_reqs(Drv, [{delete_chars, -length(Pbs)}]),
- io_request(Req, From, ReplyAs, Drv, []), %WRONG!!!
+ io_request(Req, From, ReplyAs, Drv, Shell, []), %WRONG!!!
%% I guess the reason the above line is wrong is that Buf is
%% set to []. But do we expect anything but plain output?
- get_password1({Chars, []}, Drv);
+ get_password1({Chars, []}, Drv, Shell);
{reply,{{From,ReplyAs},Reply}} ->
%% We take care of replies from puts here as well
io_reply(From, ReplyAs, Reply),
- get_password1({Chars, []},Drv);
+ get_password1({Chars, []},Drv, Shell);
{'EXIT',Drv,interrupt} ->
interrupted;
{'EXIT',Drv,_} ->
- terminated
+ terminated;
+ {'EXIT',Shell,R} ->
+ exit(R)
end;
-get_password1({Chars,Rest},Drv) ->
+get_password1({Chars,Rest},Drv,_Shell) ->
send_drv_reqs(Drv,[{put_chars, unicode, "\n"}]),
{done,lists:reverse(Chars),case Rest of done -> []; _ -> Rest end}.
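The search_up_stack/search_down_stack hunks above replace string:str/2 and string:strip/3 with string:find/2 and string:trim/3; the essential behavioural difference is the not-found value (illustrative results):

    nomatch = string:find("abc", "xyz"),   %% string:str/2 returned 0 here
    "bc"    = string:find("abc", "b"),     %% rest of the string from the match
    "abc"   = string:trim("abc\n", trailing, "\n").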
diff --git a/lib/kernel/src/hipe_unified_loader.erl b/lib/kernel/src/hipe_unified_loader.erl
index f8199fcf71..5704cc79c2 100644
--- a/lib/kernel/src/hipe_unified_loader.erl
+++ b/lib/kernel/src/hipe_unified_loader.erl
@@ -275,6 +275,7 @@ needs_trampolines(Architecture) ->
arm -> true;
powerpc -> true;
ppc64 -> true;
+ amd64 -> true;
_ -> false
end.
@@ -452,7 +453,7 @@ make_beam_stub(Mod, LoaderState, MD5, Beam, FunDefs, ClosuresToPatch) ->
%%========================================================================
%% Patching
%% @spec patch(refs(), BaseAddress::integer(), ConstAndZone::term(),
-%% FunDefs::term(), TrampolineMap::term()) -> 'ok'.
+%% FunDefs::term(), TrampolineMap::term()) -> 'ok'
%% @type refs()=[{RefType::integer(), Reflist::reflist()} | refs()]
%%
%% @type reflist()= [{Data::term(), Offsets::offests()}|reflist()]
diff --git a/lib/kernel/src/inet.erl b/lib/kernel/src/inet.erl
index dc20c21c77..9f22eb6aaa 100644
--- a/lib/kernel/src/inet.erl
+++ b/lib/kernel/src/inet.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -34,7 +34,8 @@
ip/1, stats/0, options/0,
pushf/3, popf/1, close/1, gethostname/0, gethostname/1,
parse_ipv4_address/1, parse_ipv6_address/1, parse_ipv4strict_address/1,
- parse_ipv6strict_address/1, parse_address/1, parse_strict_address/1, ntoa/1]).
+ parse_ipv6strict_address/1, parse_address/1, parse_strict_address/1,
+ ntoa/1, ipv4_mapped_ipv6_address/1]).
-export([connect_options/2, listen_options/2, udp_options/2, sctp_options/2]).
-export([udp_module/1, tcp_module/1, tcp_module/2, sctp_module/1]).
@@ -72,10 +73,10 @@
%% timer interface
-export([start_timer/1, timeout/1, timeout/2, stop_timer/1]).
--export_type([address_family/0, hostent/0, hostname/0, ip4_address/0,
+-export_type([address_family/0, socket_protocol/0, hostent/0, hostname/0, ip4_address/0,
ip6_address/0, ip_address/0, port_number/0,
local_address/0, socket_address/0, returned_non_ip_address/0,
- socket_setopt/0, socket_getopt/0,
+ socket_setopt/0, socket_getopt/0, ancillary_data/0,
posix/0, socket/0, stat_option/0]).
%% imports
-import(lists, [append/1, duplicate/2, filter/2, foldl/3]).
@@ -104,7 +105,20 @@
{local, binary()} |
{unspec, <<>>} |
{undefined, any()}.
--type posix() :: exbadport | exbadseq | file:posix().
+-type posix() ::
+ 'eaddrinuse' | 'eaddrnotavail' | 'eafnosupport' | 'ealready' |
+ 'econnaborted' | 'econnrefused' | 'econnreset' |
+ 'edestaddrreq' |
+ 'ehostdown' | 'ehostunreach' |
+ 'einprogress' | 'eisconn' |
+ 'emsgsize' |
+ 'enetdown' | 'enetunreach' |
+ 'enopkg' | 'enoprotoopt' | 'enotconn' | 'enotty' | 'enotsock' |
+ 'eproto' | 'eprotonosupport' | 'eprototype' |
+ 'esocktnosupport' |
+ 'etimedout' |
+ 'ewouldblock' |
+ 'exbadport' | 'exbadseq' | file:posix().
-type socket() :: port().
-type socket_setopt() ::
@@ -140,6 +154,15 @@
'running' | 'multicast' | 'loopback']} |
{'hwaddr', ether_address()}.
+-type getifaddrs_ifopts() ::
+ [Ifopt :: {flags, Flags :: [up | broadcast | loopback |
+ pointtopoint | running | multicast]} |
+ {addr, Addr :: ip_address()} |
+ {netmask, Netmask :: ip_address()} |
+ {broadaddr, Broadaddr :: ip_address()} |
+ {dstaddr, Dstaddr :: ip_address()} |
+ {hwaddr, Hwaddr :: [byte()]}].
+
-type address_family() :: 'inet' | 'inet6' | 'local'.
-type socket_protocol() :: 'tcp' | 'udp' | 'sctp'.
-type socket_type() :: 'stream' | 'dgram' | 'seqpacket'.
@@ -149,6 +172,9 @@
'recv_cnt' | 'recv_max' | 'recv_avg' | 'recv_oct' | 'recv_dvi' |
'send_cnt' | 'send_max' | 'send_avg' | 'send_oct' | 'send_pend'.
+-type ancillary_data() ::
+ [ {'tos', byte()} | {'tclass', byte()} | {'ttl', byte()} ].
+
%%% ---------------------------------
-spec get_rc() -> [{Par :: atom(), Val :: any()} |
@@ -288,7 +314,7 @@ setopts(Socket, Opts) ->
{'ok', OptionValues} | {'error', posix()} when
Socket :: socket(),
Options :: [socket_getopt()],
- OptionValues :: [socket_setopt()].
+ OptionValues :: [socket_setopt() | gen_tcp:pktoptions_value()].
getopts(Socket, Opts) ->
case prim_inet:getopts(Socket, Opts) of
@@ -304,32 +330,32 @@ getopts(Socket, Opts) ->
Other
end.
--spec getifaddrs(Socket :: socket()) ->
- {'ok', [string()]} | {'error', posix()}.
-
+-spec getifaddrs(
+ [Option :: {netns, Namespace :: file:filename_all()}]
+ | socket()) ->
+ {'ok', [{Ifname :: string(),
+ Ifopts :: getifaddrs_ifopts()}]}
+ | {'error', posix()}.
+getifaddrs(Opts) when is_list(Opts) ->
+ withsocket(fun(S) -> prim_inet:getifaddrs(S) end, Opts);
getifaddrs(Socket) ->
prim_inet:getifaddrs(Socket).
--spec getifaddrs() -> {ok, Iflist} | {error, posix()} when
- Iflist :: [{Ifname,[Ifopt]}],
- Ifname :: string(),
- Ifopt :: {flags,[Flag]} | {addr,Addr} | {netmask,Netmask}
- | {broadaddr,Broadaddr} | {dstaddr,Dstaddr}
- | {hwaddr,Hwaddr},
- Flag :: up | broadcast | loopback | pointtopoint
- | running | multicast,
- Addr :: ip_address(),
- Netmask :: ip_address(),
- Broadaddr :: ip_address(),
- Dstaddr :: ip_address(),
- Hwaddr :: [byte()].
-
+-spec getifaddrs() ->
+ {'ok', [{Ifname :: string(),
+ Ifopts :: getifaddrs_ifopts()}]}
+ | {'error', posix()}.
getifaddrs() ->
withsocket(fun(S) -> prim_inet:getifaddrs(S) end).
--spec getiflist(Socket :: socket()) ->
- {'ok', [string()]} | {'error', posix()}.
+-spec getiflist(
+ [Option :: {netns, Namespace :: file:filename_all()}]
+ | socket()) ->
+ {'ok', [string()]} | {'error', posix()}.
+
+getiflist(Opts) when is_list(Opts) ->
+ withsocket(fun(S) -> prim_inet:getiflist(S) end, Opts);
getiflist(Socket) ->
prim_inet:getiflist(Socket).
@@ -346,11 +372,19 @@ getiflist() ->
ifget(Socket, Name, Opts) ->
prim_inet:ifget(Socket, Name, Opts).
--spec ifget(Name :: string() | atom(), Opts :: [if_getopt()]) ->
+-spec ifget(
+ Name :: string() | atom(),
+ Opts :: [if_getopt() |
+ {netns, Namespace :: file:filename_all()}]) ->
{'ok', [if_getopt_result()]} | {'error', posix()}.
ifget(Name, Opts) ->
- withsocket(fun(S) -> prim_inet:ifget(S, Name, Opts) end).
+ {NSOpts,IFOpts} =
+ lists:partition(
+ fun ({netns,_}) -> true;
+ (_) -> false
+ end, Opts),
+ withsocket(fun(S) -> prim_inet:ifget(S, Name, IFOpts) end, NSOpts).
-spec ifset(Socket :: socket(),
Name :: string() | atom(),
@@ -360,11 +394,19 @@ ifget(Name, Opts) ->
ifset(Socket, Name, Opts) ->
prim_inet:ifset(Socket, Name, Opts).
--spec ifset(Name :: string() | atom(), Opts :: [if_setopt()]) ->
+-spec ifset(
+ Name :: string() | atom(),
+ Opts :: [if_setopt() |
+ {netns, Namespace :: file:filename_all()}]) ->
'ok' | {'error', posix()}.
ifset(Name, Opts) ->
- withsocket(fun(S) -> prim_inet:ifset(S, Name, Opts) end).
+ {NSOpts,IFOpts} =
+ lists:partition(
+ fun ({netns,_}) -> true;
+ (_) -> false
+ end, Opts),
+ withsocket(fun(S) -> prim_inet:ifset(S, Name, IFOpts) end, NSOpts).
-spec getif() ->
{'ok', [{ip_address(), ip_address() | 'undefined', ip_address()}]} |
@@ -374,10 +416,14 @@ getif() ->
withsocket(fun(S) -> getif(S) end).
%% backwards compatible getif
--spec getif(Socket :: socket()) ->
+-spec getif(
+ [Option :: {netns, Namespace :: file:filename_all()}]
+ | socket()) ->
{'ok', [{ip_address(), ip_address() | 'undefined', ip_address()}]} |
{'error', posix()}.
+getif(Opts) when is_list(Opts) ->
+ withsocket(fun(S) -> getif(S) end, Opts);
getif(Socket) ->
case prim_inet:getiflist(Socket) of
{ok, IfList} ->
@@ -398,7 +444,10 @@ getif(Socket) ->
end.
withsocket(Fun) ->
- case inet_udp:open(0,[]) of
+ withsocket(Fun, []).
+%%
+withsocket(Fun, Opts) ->
+ case inet_udp:open(0, Opts) of
{ok,Socket} ->
Res = Fun(Socket),
inet_udp:close(Socket),
@@ -675,6 +724,14 @@ parse_address(Addr) ->
parse_strict_address(Addr) ->
inet_parse:strict_address(Addr).
+-spec ipv4_mapped_ipv6_address(ip_address()) -> ip_address().
+ipv4_mapped_ipv6_address({D1,D2,D3,D4})
+ when (D1 bor D2 bor D3 bor D4) < 256 ->
+ {0,0,0,0,0,16#ffff,(D1 bsl 8) bor D2,(D3 bsl 8) bor D4};
+ipv4_mapped_ipv6_address({D1,D2,D3,D4,D5,D6,D7,D8})
+ when (D1 bor D2 bor D3 bor D4 bor D5 bor D6 bor D7 bor D8) < 65536 ->
+ {D7 bsr 8,D7 band 255,D8 bsr 8,D8 band 255}.
+
%% Return a list of available options
options() ->
[
@@ -700,6 +757,7 @@ stats() ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
connect_options() ->
[tos, tclass, priority, reuseaddr, keepalive, linger, sndbuf, recbuf, nodelay,
+ recvtos, recvtclass, ttl, recvttl,
header, active, packet, packet_size, buffer, mode, deliver, line_delimiter,
exit_on_close, high_watermark, low_watermark, high_msgq_watermark,
low_msgq_watermark, send_timeout, send_timeout_close, delay_send, raw,
@@ -768,6 +826,7 @@ con_add(Name, Val, #connect_opts{} = R, Opts, AllOpts) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
listen_options() ->
[tos, tclass, priority, reuseaddr, keepalive, linger, sndbuf, recbuf, nodelay,
+ recvtos, recvtclass, ttl, recvttl,
header, active, packet, buffer, mode, deliver, backlog, ipv6_v6only,
exit_on_close, high_watermark, low_watermark, high_msgq_watermark,
low_msgq_watermark, send_timeout, send_timeout_close, delay_send,
@@ -848,7 +907,7 @@ tcp_module_1(Opts, Address) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
udp_options() ->
[tos, tclass, priority, reuseaddr, sndbuf, recbuf, header, active, buffer, mode,
- deliver, ipv6_v6only,
+ recvtos, recvtclass, ttl, recvttl, deliver, ipv6_v6only,
broadcast, dontroute, multicast_if, multicast_ttl, multicast_loop,
add_membership, drop_membership, read_packets,raw,
high_msgq_watermark, low_msgq_watermark, bind_to_device].
@@ -918,8 +977,10 @@ udp_module(Opts) ->
% (*) passing of open FDs ("fdopen") is not supported.
sctp_options() ->
[ % The following are generic inet options supported for SCTP sockets:
- mode, active, buffer, tos, tclass, priority, dontroute, reuseaddr, linger, sndbuf,
- recbuf, ipv6_v6only, high_msgq_watermark, low_msgq_watermark,
+ mode, active, buffer, tos, tclass, ttl,
+ priority, dontroute, reuseaddr, linger,
+ recvtos, recvtclass, recvttl,
+ sndbuf, recbuf, ipv6_v6only, high_msgq_watermark, low_msgq_watermark,
bind_to_device,
% Other options are SCTP-specific (though they may be similar to their
@@ -1244,9 +1305,7 @@ gethostbyname_string(Name, Type)
inet ->
inet_parse:ipv4_address(Name);
inet6 ->
- %% XXX should we really translate IPv4 addresses here
- %% even if we do not know if this host can do IPv6?
- inet_parse:ipv6_address(Name)
+ inet_parse:ipv6strict_address(Name)
end of
{ok,IP} ->
{ok,make_hostent(Name, [IP], [], Type)};
@@ -1452,11 +1511,14 @@ fdopen(Fd, Addr, Port, Opts, Protocol, Family, Type, Module) ->
%% socket stat
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+-spec i() -> ok.
i() -> i(tcp), i(udp), i(sctp).
+-spec i(socket_protocol()) -> ok.
i(Proto) -> i(Proto, [port, module, recv, sent, owner,
local_address, foreign_address, state, type]).
+-spec i(socket_protocol(), [atom()]) -> ok.
i(tcp, Fs) ->
ii(tcp_sockets(), Fs, tcp);
i(udp, Fs) ->
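Two of the additions above can be tried directly: ipv4_mapped_ipv6_address/1 maps in both directions, and getifaddrs/1, like getiflist/1, getif/1, ifget/2 and ifset/2, now also accepts an option list, for example to inspect a Linux network namespace (a sketch; the namespace path is illustrative):

    {0,0,0,0,0,16#ffff,16#7f00,16#0001} =
        inet:ipv4_mapped_ipv6_address({127,0,0,1}),
    {127,0,0,1} =
        inet:ipv4_mapped_ipv6_address({0,0,0,0,0,16#ffff,16#7f00,16#0001}),
    {ok, IfAddrs} = inet:getifaddrs([{netns, "/run/netns/example"}]).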
diff --git a/lib/kernel/src/inet6_tcp.erl b/lib/kernel/src/inet6_tcp.erl
index a0d5d3df70..347b8b9a1b 100644
--- a/lib/kernel/src/inet6_tcp.erl
+++ b/lib/kernel/src/inet6_tcp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -167,7 +167,7 @@ listen(Port, Opts) ->
%% Accept
%%
accept(L) ->
- case prim_inet:accept(L) of
+ case prim_inet:accept(L, accept_family_opts()) of
{ok, S} ->
inet_db:register_socket(S, ?MODULE),
{ok,S};
@@ -175,13 +175,15 @@ accept(L) ->
end.
accept(L, Timeout) ->
- case prim_inet:accept(L, Timeout) of
+ case prim_inet:accept(L, Timeout, accept_family_opts()) of
{ok, S} ->
inet_db:register_socket(S, ?MODULE),
{ok,S};
Error -> Error
end.
+accept_family_opts() -> [tclass, recvtclass].
+
%%
%% Create a port/socket from a file descriptor
%%
diff --git a/lib/kernel/src/inet_config.erl b/lib/kernel/src/inet_config.erl
index 4bbc520449..9f76360b8b 100644
--- a/lib/kernel/src/inet_config.erl
+++ b/lib/kernel/src/inet_config.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -369,7 +369,7 @@ win32_load1(Reg,Type,HFileKey) ->
end.
win32_split_line(Line,nt) -> inet_parse:split_line(Line);
-win32_split_line(Line,windows) -> string:tokens(Line, ",").
+win32_split_line(Line,windows) -> string:lexemes(Line, ",").
win32_get_strings(Reg, Names) ->
win32_get_strings(Reg, Names, []).
diff --git a/lib/kernel/src/inet_db.erl b/lib/kernel/src/inet_db.erl
index 6cbb6ac2da..3f5a2ea5ee 100644
--- a/lib/kernel/src/inet_db.erl
+++ b/lib/kernel/src/inet_db.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2019. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -1223,7 +1223,10 @@ handle_set_file(Option, Fname, TagTm, TagInfo, ParseFun, From,
{ok, B, _} -> B;
_ -> <<>>
end;
- _ -> <<>>
+ _ ->
+ ets:insert(Db, {TagInfo, undefined}),
+ TimeZero = - (?RES_FILE_UPDATE_TM + 1), % Early enough
+ ets:insert(Db, {TagTm, TimeZero})
end,
handle_set_file(ParseFun, Bin, From, State);
false -> {reply,error,State}
diff --git a/lib/kernel/src/inet_dns.erl b/lib/kernel/src/inet_dns.erl
index d5f982cc51..f1f58bc872 100644
--- a/lib/kernel/src/inet_dns.erl
+++ b/lib/kernel/src/inet_dns.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -29,7 +29,7 @@
-export([decode/1, encode/1]).
--import(lists, [reverse/1, reverse/2, nthtail/2]).
+-import(lists, [reverse/1]).
-include("inet_int.hrl").
-include("inet_dns.hrl").
@@ -473,7 +473,7 @@ decode_data(<<Order:16,Preference:16,Data0/binary>>, _, ?S_NAPTR, Buffer) ->
{Data2,Services} = decode_string(Data1),
{Data,Regexp} = decode_characters(Data2, utf8),
Replacement = decode_domain(Data, Buffer),
- {Order,Preference,string:to_lower(Flags),string:to_lower(Services),
+ {Order,Preference,string:lowercase(Flags),string:lowercase(Services),
Regexp,Replacement};
%% ?S_OPT falls through to default
decode_data(Data, _, ?S_TXT, _) ->
diff --git a/lib/kernel/src/inet_hosts.erl b/lib/kernel/src/inet_hosts.erl
index 0bdf00ac30..fc653bf0d3 100644
--- a/lib/kernel/src/inet_hosts.erl
+++ b/lib/kernel/src/inet_hosts.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -72,9 +72,6 @@ gethostbyname(Name, Type, Byname, Byaddr) ->
gethostbyaddr({A,B,C,D}=IP) when ?ip(A,B,C,D) ->
gethostbyaddr(IP, inet);
-%% ipv4 only ipv6 address
-gethostbyaddr({0,0,0,0,0,16#ffff=F,G,H}) when ?ip6(0,0,0,0,0,F,G,H) ->
- gethostbyaddr({G bsr 8, G band 255, H bsr 8, H band 255});
gethostbyaddr({A,B,C,D,E,F,G,H}=IP) when ?ip6(A,B,C,D,E,F,G,H) ->
gethostbyaddr(IP, inet6);
gethostbyaddr(Addr) when is_list(Addr) ->
diff --git a/lib/kernel/src/inet_int.hrl b/lib/kernel/src/inet_int.hrl
index bc5b67f7bf..f6525d7261 100644
--- a/lib/kernel/src/inet_int.hrl
+++ b/lib/kernel/src/inet_int.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -100,6 +100,8 @@
-define(TCP_REQ_RECV, 42).
-define(TCP_REQ_UNRECV, 43).
-define(TCP_REQ_SHUTDOWN, 44).
+-define(TCP_REQ_SENDFILE, 45).
+
%% UDP and SCTP requests
-define(PACKET_REQ_RECV, 60).
%%-define(SCTP_REQ_LISTEN, 61). MERGED
@@ -155,6 +157,12 @@
-define(INET_LOPT_LINE_DELIM, 40).
-define(INET_OPT_TCLASS, 41).
-define(INET_OPT_BIND_TO_DEVICE, 42).
+-define(INET_OPT_RECVTOS, 43).
+-define(INET_OPT_RECVTCLASS, 44).
+-define(INET_OPT_PKTOPTIONS, 45).
+-define(INET_OPT_TTL, 46).
+-define(INET_OPT_RECVTTL, 47).
+-define(TCP_OPT_NOPUSH, 48).
% Specific SCTP options: separate range:
-define(SCTP_OPT_RTOINFO, 100).
-define(SCTP_OPT_ASSOCINFO, 101).
@@ -319,6 +327,12 @@
[((X) bsr 24) band 16#ff, ((X) bsr 16) band 16#ff,
((X) bsr 8) band 16#ff, (X) band 16#ff]).
+-define(int64(X),
+ [((X) bsr 56) band 16#ff, ((X) bsr 48) band 16#ff,
+ ((X) bsr 40) band 16#ff, ((X) bsr 32) band 16#ff,
+ ((X) bsr 24) band 16#ff, ((X) bsr 16) band 16#ff,
+ ((X) bsr 8) band 16#ff, (X) band 16#ff]).
+
-define(intAID(X), % For SCTP AssocID
?int32(X)).
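For reference, the new ?int64/1 macro mirrors ?int32/1 and serializes a 64-bit integer into eight big-endian bytes (presumably for the new TCP_REQ_SENDFILE request added above). A quick illustrative check of the expansion, not part of the patch:

    1> [(16#0102030405060708 bsr S) band 16#ff || S <- [56,48,40,32,24,16,8,0]].
    [1,2,3,4,5,6,7,8]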
diff --git a/lib/kernel/src/inet_parse.erl b/lib/kernel/src/inet_parse.erl
index 29804dc50b..e9685c6554 100644
--- a/lib/kernel/src/inet_parse.erl
+++ b/lib/kernel/src/inet_parse.erl
@@ -95,7 +95,7 @@ hosts(Fname,File) ->
%% interface with a %if suffix. These kind of
%% addresses maybe need to be gracefully handled
%% throughout inet* and inet_drv.
- case string:tokens(Address, "%") of
+ case string:lexemes(Address, "%") of
[Addr,_] ->
{ok,_} = address(Addr),
skip;
@@ -407,7 +407,7 @@ is_dom1([C | Cs]) when C >= $a, C =< $z -> is_dom_ldh(Cs);
is_dom1([C | Cs]) when C >= $A, C =< $Z -> is_dom_ldh(Cs);
is_dom1([C | Cs]) when C >= $0, C =< $9 ->
case is_dom_ldh(Cs) of
- true -> is_dom2(string:tokens([C | Cs],"."));
+ true -> is_dom2(string:lexemes([C | Cs],"."));
false -> false
end;
is_dom1(_) -> false.
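Both hunks above swap the old string:tokens/2 for string:lexemes/2, its replacement in the new string module. For the single-character separators used here the behaviour is the same; an illustrative shell check, not part of the patch:

    1> string:lexemes("fe80::1%eth0", "%").
    ["fe80::1","eth0"]
    2> string:lexemes("erlang.org", ".").
    ["erlang","org"]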
diff --git a/lib/kernel/src/inet_res.erl b/lib/kernel/src/inet_res.erl
index 90e49ddfdf..6454802b04 100644
--- a/lib/kernel/src/inet_res.erl
+++ b/lib/kernel/src/inet_res.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -349,9 +349,6 @@ gethostbyaddr_tm({A,B,C,D} = IP, Timer) when ?ip(A,B,C,D) ->
{ok, HEnt} -> {ok, HEnt};
_ -> res_gethostbyaddr(dn_in_addr_arpa(A,B,C,D), IP, Timer)
end;
-%% ipv4 only ipv6 address
-gethostbyaddr_tm({0,0,0,0,0,16#ffff,G,H},Timer) when is_integer(G+H) ->
- gethostbyaddr_tm({G div 256, G rem 256, H div 256, H rem 256},Timer);
gethostbyaddr_tm({A,B,C,D,E,F,G,H} = IP, Timer) when ?ip6(A,B,C,D,E,F,G,H) ->
inet_db:res_update_conf(),
case inet_db:gethostbyaddr(IP) of
@@ -431,28 +428,7 @@ gethostbyname(Name,Family,Timeout) ->
gethostbyname_tm(Name,inet,Timer) ->
getbyname_tm(Name,?S_A,Timer);
gethostbyname_tm(Name,inet6,Timer) ->
- case getbyname_tm(Name,?S_AAAA,Timer) of
- {ok,HEnt} -> {ok,HEnt};
- {error,nxdomain} ->
- case getbyname_tm(Name, ?S_A,Timer) of
- {ok, HEnt} ->
- %% rewrite to a ipv4 only ipv6 address
- {ok,
- HEnt#hostent {
- h_addrtype = inet6,
- h_length = 16,
- h_addr_list =
- lists:map(
- fun({A,B,C,D}) ->
- {0,0,0,0,0,16#ffff,A*256+B,C*256+D}
- end, HEnt#hostent.h_addr_list)
- }};
- Error ->
- Error
- end;
- Error ->
- Error
- end;
+ getbyname_tm(Name,?S_AAAA,Timer);
gethostbyname_tm(_Name, _Family, _Timer) ->
{error, einval}.
@@ -859,15 +835,17 @@ query_ns(S0, Id, Buffer, IP, Port, Timer, Retry, I,
{ok,S} ->
Timeout =
inet:timeout( (Tm * (1 bsl I)) div Retry, Timer),
- {S,
case query_udp(
S, Id, Buffer, IP, Port, Timeout, Verbose) of
{ok,#dns_rec{header=H}} when H#dns_header.tc ->
TcpTimeout = inet:timeout(Tm*5, Timer),
- query_tcp(
- TcpTimeout, Id, Buffer, IP, Port, Verbose);
- Reply -> Reply
- end};
+ {S, query_tcp(
+ TcpTimeout, Id, Buffer, IP, Port, Verbose)};
+ {error, econnrefused} = Err ->
+ ok = udp_close(S),
+ {#sock{}, Err};
+ Reply -> {S, Reply}
+ end;
Error ->
{S0,Error}
end
diff --git a/lib/kernel/src/inet_tcp.erl b/lib/kernel/src/inet_tcp.erl
index dac6b3119d..f1e3116856 100644
--- a/lib/kernel/src/inet_tcp.erl
+++ b/lib/kernel/src/inet_tcp.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -164,7 +164,7 @@ listen(Port, Opts) ->
%% Accept
%%
accept(L) ->
- case prim_inet:accept(L) of
+ case prim_inet:accept(L, accept_family_opts()) of
{ok, S} ->
inet_db:register_socket(S, ?MODULE),
{ok,S};
@@ -172,13 +172,15 @@ accept(L) ->
end.
accept(L, Timeout) ->
- case prim_inet:accept(L, Timeout) of
+ case prim_inet:accept(L, Timeout, accept_family_opts()) of
{ok, S} ->
inet_db:register_socket(S, ?MODULE),
{ok,S};
Error -> Error
end.
+accept_family_opts() -> [tos, ttl, recvtos, recvttl].
+
%%
%% Create a port/socket from a file descriptor
%%
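accept_family_opts/0 names the emulated socket options that prim_inet:accept/2,3 now copies from the listening socket to the accepted socket. A hedged sketch of the observable effect (platform permitting; option values illustrative):

    {ok, L} = gen_tcp:listen(0, [{recvtos, true}, {recvttl, true}]),
    {ok, Port} = inet:port(L),
    {ok, _C} = gen_tcp:connect("localhost", Port, []),
    {ok, S} = gen_tcp:accept(L),
    %% The accepted socket is expected to inherit the listener's values:
    {ok, [{recvtos, true}, {recvttl, true}]} = inet:getopts(S, [recvtos, recvttl]).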
diff --git a/lib/kernel/src/inet_tcp_dist.erl b/lib/kernel/src/inet_tcp_dist.erl
index e3fdb1bb22..c37212b0f9 100644
--- a/lib/kernel/src/inet_tcp_dist.erl
+++ b/lib/kernel/src/inet_tcp_dist.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -283,73 +283,22 @@ do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
?trace("~p~n",[{inet_tcp_dist,self(),setup,Node}]),
[Name, Address] = splitnode(Driver, Node, LongOrShortNames),
AddressFamily = Driver:family(),
- case inet:getaddr(Address, AddressFamily) of
+ ErlEpmd = net_kernel:epmd_module(),
+ {ARMod, ARFun} = get_address_resolver(ErlEpmd),
+ Timer = dist_util:start_timer(SetupTime),
+ case ARMod:ARFun(Name, Address, AddressFamily) of
+ {ok, Ip, TcpPort, Version} ->
+ ?trace("address_please(~p) -> version ~p~n",
+ [Node,Version]),
+ do_setup_connect(Driver, Kernel, Node, Address, AddressFamily,
+ Ip, TcpPort, Version, Type, MyNode, Timer);
{ok, Ip} ->
- Timer = dist_util:start_timer(SetupTime),
- ErlEpmd = net_kernel:epmd_module(),
case ErlEpmd:port_please(Name, Ip) of
{port, TcpPort, Version} ->
?trace("port_please(~p) -> version ~p~n",
[Node,Version]),
- dist_util:reset_timer(Timer),
- case
- Driver:connect(
- Ip, TcpPort,
- connect_options([{active, false}, {packet, 2}]))
- of
- {ok, Socket} ->
- HSData = #hs_data{
- kernel_pid = Kernel,
- other_node = Node,
- this_node = MyNode,
- socket = Socket,
- timer = Timer,
- this_flags = 0,
- other_version = Version,
- f_send = fun Driver:send/2,
- f_recv = fun Driver:recv/3,
- f_setopts_pre_nodeup =
- fun(S) ->
- inet:setopts
- (S,
- [{active, false},
- {packet, 4},
- nodelay()])
- end,
- f_setopts_post_nodeup =
- fun(S) ->
- inet:setopts
- (S,
- [{active, true},
- {deliver, port},
- {packet, 4},
- nodelay()])
- end,
-
- f_getll = fun inet:getll/1,
- f_address =
- fun(_,_) ->
- #net_address{
- address = {Ip,TcpPort},
- host = Address,
- protocol = tcp,
- family = AddressFamily}
- end,
- mf_tick = fun(S) -> ?MODULE:tick(Driver, S) end,
- mf_getstat = fun ?MODULE:getstat/1,
- request_type = Type,
- mf_setopts = fun ?MODULE:setopts/2,
- mf_getopts = fun ?MODULE:getopts/2
- },
- dist_util:handshake_we_started(HSData);
- _ ->
- %% Other Node may have closed since
- %% port_please !
- ?trace("other node (~p) "
- "closed since port_please.~n",
- [Node]),
- ?shutdown(Node)
- end;
+ do_setup_connect(Driver, Kernel, Node, Address, AddressFamily,
+ Ip, TcpPort, Version, Type, MyNode, Timer);
_ ->
?trace("port_please (~p) "
"failed.~n", [Node]),
@@ -361,6 +310,71 @@ do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) ->
?shutdown(Node)
end.
+%%
+%% Actual setup of connection
+%%
+do_setup_connect(Driver, Kernel, Node, Address, AddressFamily,
+ Ip, TcpPort, Version, Type, MyNode, Timer) ->
+ dist_util:reset_timer(Timer),
+ case
+ Driver:connect(
+ Ip, TcpPort,
+ connect_options([{active, false}, {packet, 2}]))
+ of
+ {ok, Socket} ->
+ HSData = #hs_data{
+ kernel_pid = Kernel,
+ other_node = Node,
+ this_node = MyNode,
+ socket = Socket,
+ timer = Timer,
+ this_flags = 0,
+ other_version = Version,
+ f_send = fun Driver:send/2,
+ f_recv = fun Driver:recv/3,
+ f_setopts_pre_nodeup =
+ fun(S) ->
+ inet:setopts
+ (S,
+ [{active, false},
+ {packet, 4},
+ nodelay()])
+ end,
+ f_setopts_post_nodeup =
+ fun(S) ->
+ inet:setopts
+ (S,
+ [{active, true},
+ {deliver, port},
+ {packet, 4},
+ nodelay()])
+ end,
+
+ f_getll = fun inet:getll/1,
+ f_address =
+ fun(_,_) ->
+ #net_address{
+ address = {Ip,TcpPort},
+ host = Address,
+ protocol = tcp,
+ family = AddressFamily}
+ end,
+ mf_tick = fun(S) -> ?MODULE:tick(Driver, S) end,
+ mf_getstat = fun ?MODULE:getstat/1,
+ request_type = Type,
+ mf_setopts = fun ?MODULE:setopts/2,
+ mf_getopts = fun ?MODULE:getopts/2
+ },
+ dist_util:handshake_we_started(HSData);
+ _ ->
+ %% Other Node may have closed since
+ %% discovery !
+ ?trace("other node (~p) "
+ "closed since discovery (port_please).~n",
+ [Node]),
+ ?shutdown(Node)
+ end.
+
connect_options(Opts) ->
case application:get_env(kernel, inet_dist_connect_options) of
{ok,ConnectOpts} ->
@@ -430,6 +444,16 @@ get_tcp_address(Driver, Socket) ->
}.
%% ------------------------------------------------------------
+%% Determine if EPMD module supports address resolving. Default
+%% is to use inet:getaddr/2.
+%% ------------------------------------------------------------
+get_address_resolver(EpmdModule) ->
+ case erlang:function_exported(EpmdModule, address_please, 3) of
+ true -> {EpmdModule, address_please};
+ _ -> {erl_epmd, address_please}
+ end.
+
+%% ------------------------------------------------------------
%% Do only accept new connection attempts from nodes at our
%% own LAN, if the check_ip environment parameter is true.
%% ------------------------------------------------------------
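get_address_resolver/1 lets an alternative epmd module combine host resolution and port lookup by exporting address_please/3; when it does not, resolution falls back to erl_epmd:address_please/3. A minimal sketch of such a callback module (the module name, port and version are invented for illustration):

    -module(my_epmd).
    -export([address_please/3]).

    %% Returning {ok, Ip, Port, Version} lets do_setup/7 above skip the
    %% separate port_please/2 step; returning {ok, Ip} keeps the old flow.
    address_please(_Name, Host, AddressFamily) ->
        case inet:getaddr(Host, AddressFamily) of
            {ok, Ip} -> {ok, Ip, 12345, 5};
            Error    -> Error
        end.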
diff --git a/lib/kernel/src/kernel.app.src b/lib/kernel/src/kernel.app.src
index 0eca6fef03..6bd5518a37 100644
--- a/lib/kernel/src/kernel.app.src
+++ b/lib/kernel/src/kernel.app.src
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -57,8 +57,23 @@
inet_tcp_dist,
kernel,
kernel_config,
+ kernel_refc,
local_tcp,
local_udp,
+ logger,
+ logger_backend,
+ logger_config,
+ logger_disk_log_h,
+ logger_filters,
+ logger_formatter,
+ logger_h_common,
+ logger_handler_watcher,
+ logger_olp,
+ logger_proxy,
+ logger_server,
+ logger_simple_h,
+ logger_std_h,
+ logger_sup,
net,
net_adm,
net_kernel,
@@ -88,6 +103,13 @@
inet_udp,
inet_sctp,
pg2,
+ raw_file_io,
+ raw_file_io_compressed,
+ raw_file_io_deflate,
+ raw_file_io_delayed,
+ raw_file_io_inflate,
+ raw_file_io_list,
+ raw_file_io_raw,
seq_trace,
standard_error,
wrap_log_reader]},
@@ -107,7 +129,11 @@
heart,
init,
kernel_config,
+ kernel_refc,
kernel_sup,
+ logger,
+ logger_handler_watcher,
+ logger_sup,
net_kernel,
net_sup,
rex,
@@ -118,8 +144,10 @@
inet_db,
pg2]},
{applications, []},
- {env, [{error_logger, tty}]},
+ {env, [{logger_level, notice},
+ {logger_sasl_compatible, false}
+ ]},
{mod, {kernel, []}},
- {runtime_dependencies, ["erts-9.3", "stdlib-3.4", "sasl-3.0"]}
+ {runtime_dependencies, ["erts-10.2.5", "stdlib-3.5", "sasl-3.0"]}
]
}.
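The default kernel environment changes from {error_logger, tty} to the new logger keys. A hedged sys.config sketch overriding the new defaults (values illustrative):

    [{kernel,
      [{logger_level, info},
       {logger_sasl_compatible, false}]}].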
diff --git a/lib/kernel/src/kernel.appup.src b/lib/kernel/src/kernel.appup.src
index 4ee497bbbd..aca3247c8f 100644
--- a/lib/kernel/src/kernel.appup.src
+++ b/lib/kernel/src/kernel.appup.src
@@ -16,11 +16,52 @@
%% limitations under the License.
%%
%% %CopyrightEnd%
+%%
+%% We allow upgrade from, and downgrade to all previous
+%% versions from the following OTP releases:
+%% - OTP 20
+%% - OTP 21
+%%
+%% We also allow upgrade from, and downgrade to all
+%% versions that have branched off from the above
+%% stated previous versions.
+%%
{"%VSN%",
- %% Up from - max one major revision back
- [{<<"5\\.[0-3](\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-19.*, OTP-20.0
- {<<"5\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-20.1+
- %% Down to - max one major revision back
- [{<<"5\\.[0-3](\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-19.*, OTP-20.0
- {<<"5\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-20.1+
-}.
+ [{<<"^5\\.3$">>,[restart_new_emulator]},
+ {<<"^5\\.3\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^5\\.3\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.2(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.3(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.0$">>,[restart_new_emulator]},
+ {<<"^6\\.0\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^6\\.0\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.1$">>,[restart_new_emulator]},
+ {<<"^6\\.1\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^6\\.1\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.2$">>,[restart_new_emulator]},
+ {<<"^6\\.2\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^6\\.2\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.3$">>,[restart_new_emulator]},
+ {<<"^6\\.3\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]}],
+ [{<<"^5\\.3$">>,[restart_new_emulator]},
+ {<<"^5\\.3\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^5\\.3\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.2(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.3(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.0$">>,[restart_new_emulator]},
+ {<<"^6\\.0\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^6\\.0\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.1$">>,[restart_new_emulator]},
+ {<<"^6\\.1\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^6\\.1\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.2$">>,[restart_new_emulator]},
+ {<<"^6\\.2\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^6\\.2\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.3$">>,[restart_new_emulator]},
+ {<<"^6\\.3\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]}]}.
diff --git a/lib/kernel/src/kernel.erl b/lib/kernel/src/kernel.erl
index cba57088ec..c68d04e279 100644
--- a/lib/kernel/src/kernel.erl
+++ b/lib/kernel/src/kernel.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -30,24 +30,13 @@
%%% Callback functions for the kernel application.
%%%-----------------------------------------------------------------
start(_, []) ->
+ %% Setup the logger and configure the kernel logger environment
+ ok = logger:internal_init_logger(),
case supervisor:start_link({local, kernel_sup}, kernel, []) of
{ok, Pid} ->
- %% add signal handler
- case whereis(erl_signal_server) of
- %% in case of minimal mode
- undefined -> ok;
- _ ->
- ok = gen_event:add_handler(erl_signal_server, erl_signal_handler, [])
- end,
- %% add error handler
- Type = get_error_logger_type(),
- case error_logger:swap_handler(Type) of
- ok -> {ok, Pid, []};
- Error ->
- %% Not necessary since the node will crash anyway:
- exit(Pid, shutdown),
- Error
- end;
+ ok = erl_signal_handler:start(),
+ ok = logger:add_handlers(kernel),
+ {ok, Pid, []};
Error -> Error
end.
@@ -62,16 +51,6 @@ config_change(Changed, New, Removed) ->
do_global_groups_change(Changed, New, Removed),
ok.
-get_error_logger_type() ->
- case application:get_env(kernel, error_logger) of
- {ok, tty} -> tty;
- {ok, {file, File}} when is_list(File) -> {logfile, File};
- {ok, false} -> false;
- {ok, silent} -> silent;
- undefined -> tty; % default value
- {ok, Bad} -> exit({bad_config, {kernel, {error_logger, Bad}}})
- end.
-
%%%-----------------------------------------------------------------
%%% The process structure in kernel is as shown in the figure.
%%%
@@ -111,6 +90,13 @@ init([]) ->
type => worker,
modules => [kernel_config]},
+ RefC = #{id => kernel_refc,
+ start => {kernel_refc, start_link, []},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [kernel_refc]},
+
Code = #{id => code_server,
start => {code, start_link, []},
restart => permanent,
@@ -146,9 +132,18 @@ init([]) ->
type => supervisor,
modules => [?MODULE]},
+
+ LoggerSup = #{id => logger_sup,
+ start => {logger_sup, start_link, []},
+ restart => permanent,
+ shutdown => infinity,
+ type => supervisor,
+ modules => [logger_sup]},
+
case init:get_argument(mode) of
{ok, [["minimal"]]} ->
- {ok, {SupFlags, [Code, File, StdError, User, Config, SafeSup]}};
+ {ok, {SupFlags,
+ [Code, File, StdError, User, LoggerSup, Config, RefC, SafeSup]}};
_ ->
Rpc = #{id => rex,
start => {rpc, start_link, []},
@@ -199,7 +194,7 @@ init([]) ->
{ok, {SupFlags,
[Code, Rpc, Global, InetDb | DistAC] ++
[NetSup, GlGroup, File, SigSrv,
- StdError, User, Config, SafeSup] ++ Timer}}
+ StdError, User, Config, RefC, SafeSup, LoggerSup] ++ Timer}}
end;
init(safe) ->
SupFlags = #{strategy => one_for_one,
diff --git a/lib/kernel/src/kernel_config.erl b/lib/kernel/src/kernel_config.erl
index 535083ef27..691a266c2d 100644
--- a/lib/kernel/src/kernel_config.erl
+++ b/lib/kernel/src/kernel_config.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -30,11 +30,8 @@
%%%-----------------------------------------------------------------
%%% This module implements a process that configures the kernel
%%% application.
-%%% Its purpose is that in the init phase add an error_logger
-%%% and when it dies (when the kernel application dies) deleting the
-%%% previously installed error_logger.
-%%% Also, this process waits for other nodes at startup, if
-%%% specified.
+%%% Its purpose is to wait, during the init phase, for other nodes at
+%%% startup, if specified.
%%%-----------------------------------------------------------------
start_link() -> gen_server:start_link(kernel_config, [], []).
diff --git a/lib/kernel/src/kernel_refc.erl b/lib/kernel/src/kernel_refc.erl
new file mode 100644
index 0000000000..8e04ff99d8
--- /dev/null
+++ b/lib/kernel/src/kernel_refc.erl
@@ -0,0 +1,139 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(kernel_refc).
+
+-behaviour(gen_server).
+
+%% External exports
+-export([start_link/0, scheduler_wall_time/1]).
+%% Internal exports
+-export([init/1, handle_info/2, terminate/2]).
+-export([handle_call/3, handle_cast/2, code_change/3]).
+
+%%%-----------------------------------------------------------------
+%%% This module implements a process that handles reference counters for
+%%% various erts or other kernel resources that need reference counting.
+%%%
+%%% Should not be documented nor used directly by user applications.
+%%%-----------------------------------------------------------------
+start_link() ->
+ gen_server:start_link({local,kernel_refc}, kernel_refc, [], []).
+
+-spec scheduler_wall_time(boolean()) -> boolean().
+scheduler_wall_time(Bool) ->
+ gen_server:call(kernel_refc, {scheduler_wall_time, self(), Bool}, infinity).
+
+%%-----------------------------------------------------------------
+%% Callback functions from gen_server
+%%-----------------------------------------------------------------
+
+-spec init([]) -> {'ok', map()}.
+
+init([]) ->
+ resource(scheduler_wall_time, false),
+ {ok, #{scheduler_wall_time=>#{}}}.
+
+-spec handle_call(term(), term(), State) -> {'reply', term(), State}.
+handle_call({What, Who, false}, _From, State) ->
+ {Reply, Cnt} = do_stop(What, maps:get(What, State), Who),
+ {reply, Reply, State#{What:=Cnt}};
+handle_call({What, Who, true}, _From, State) ->
+ {Reply, Cnt} = do_start(What, maps:get(What, State), Who),
+ {reply, Reply, State#{What:=Cnt}};
+handle_call(_, _From, State) ->
+ {reply, badarg, State}.
+
+-spec handle_cast(term(), State) -> {'noreply', State}.
+handle_cast(_, State) ->
+ {noreply, State}.
+
+-spec handle_info(term(), State) -> {'noreply', State}.
+handle_info({'DOWN', _Ref, process, Pid, _}, State) ->
+ Cleanup = fun(Resource, Cnts) ->
+ cleanup(Resource, Cnts, Pid)
+ end,
+ {noreply, maps:map(Cleanup, State)};
+handle_info(_, State) ->
+ {noreply, State}.
+
+-spec terminate(term(), term()) -> 'ok'.
+terminate(_Reason, _State) ->
+ ok.
+
+-spec code_change(term(), State, term()) -> {'ok', State}.
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%-----------------------------------------------------------------
+%% Internal functions
+%%-----------------------------------------------------------------
+
+do_start(Resource, Cnt, Pid) ->
+ case maps:get(Pid, Cnt, undefined) of
+ undefined ->
+ Ref = erlang:monitor(process, Pid),
+ case any(Cnt) of
+ true ->
+ {true, Cnt#{Pid=>{1, Ref}}};
+ false ->
+ resource(Resource, true),
+ {false, Cnt#{Pid=>{1, Ref}}}
+ end;
+ {N, Ref} ->
+ {true, Cnt#{Pid=>{N+1, Ref}}}
+ end.
+
+do_stop(Resource, Cnt0, Pid) ->
+ case maps:get(Pid, Cnt0, undefined) of
+ undefined ->
+ {any(Cnt0), Cnt0};
+ {1, Ref} ->
+ erlang:demonitor(Ref, [flush]),
+ Cnt = maps:remove(Pid, Cnt0),
+ case any(Cnt) of
+ true ->
+ {true, Cnt};
+ false ->
+ resource(Resource, false),
+ {true, Cnt}
+ end;
+ {N, Ref} ->
+ {true, Cnt0#{Pid=>{N-1, Ref}}}
+ end.
+
+cleanup(Resource, Cnt0, Pid) ->
+ case maps:is_key(Pid, Cnt0) of
+ true ->
+ Cnt = maps:remove(Pid, Cnt0),
+ case any(Cnt) of
+ true ->
+ Cnt;
+ false ->
+ resource(Resource, false),
+ Cnt
+ end;
+ false ->
+ Cnt0
+ end.
+
+any(Cnt) -> maps:size(Cnt) > 0.
+
+resource(scheduler_wall_time, Enable) ->
+ _ = erts_internal:scheduler_wall_time(Enable).
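For understanding only (as the module comment above says, user applications should not call this module directly): the server keeps one monitored counter per requesting process and toggles the underlying erts resource only on the first registration and the last deregistration. A sketch of the return values when a single process calls it repeatedly, derived from do_start/3 and do_stop/3 above:

    false = kernel_refc:scheduler_wall_time(true),  %% first user: resource switched on
    true  = kernel_refc:scheduler_wall_time(true),  %% already on, count bumped to 2
    true  = kernel_refc:scheduler_wall_time(false), %% count back down to 1
    true  = kernel_refc:scheduler_wall_time(false). %% last user gone: switched off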
diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl
new file mode 100644
index 0000000000..38bd2f481c
--- /dev/null
+++ b/lib/kernel/src/logger.erl
@@ -0,0 +1,1146 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger).
+
+%% Log interface
+-export([emergency/1,emergency/2,emergency/3,
+ alert/1,alert/2,alert/3,
+ critical/1,critical/2,critical/3,
+ error/1,error/2,error/3,
+ warning/1,warning/2,warning/3,
+ notice/1,notice/2,notice/3,
+ info/1,info/2,info/3,
+ debug/1,debug/2,debug/3]).
+-export([log/2,log/3,log/4]).
+
+%% Called by macro
+-export([allow/2,macro_log/3,macro_log/4,macro_log/5,add_default_metadata/1]).
+
+%% Configuration
+-export([add_handler/3, remove_handler/1,
+ add_primary_filter/2, add_handler_filter/3,
+ remove_primary_filter/1, remove_handler_filter/2,
+ set_module_level/2,
+ unset_module_level/1, unset_module_level/0,
+ set_application_level/2, unset_application_level/1,
+ get_module_level/0, get_module_level/1,
+ set_primary_config/1, set_primary_config/2,
+ set_handler_config/2, set_handler_config/3,
+ set_proxy_config/1,
+ update_primary_config/1,
+ update_handler_config/2, update_handler_config/3,
+ update_proxy_config/1,
+ update_formatter_config/2, update_formatter_config/3,
+ get_primary_config/0, get_handler_config/1,
+ get_handler_config/0, get_handler_ids/0, get_config/0,
+ get_proxy_config/0,
+ add_handlers/1]).
+
+%% Private configuration
+-export([internal_init_logger/0]).
+
+%% Misc
+-export([compare_levels/2]).
+-export([set_process_metadata/1, update_process_metadata/1,
+ unset_process_metadata/0, get_process_metadata/0]).
+-export([i/0, i/1]).
+-export([timestamp/0]).
+
+%% Basic report formatting
+-export([format_report/1, format_otp_report/1]).
+
+-export([internal_log/2,filter_stacktrace/2]).
+
+-include("logger_internal.hrl").
+-include("logger.hrl").
+
+%%%-----------------------------------------------------------------
+%%% Types
+-type log_event() :: #{level:=level(),
+ msg:={io:format(),[term()]} |
+ {report,report()} |
+ {string,unicode:chardata()},
+ meta:=metadata()}.
+-type level() :: emergency | alert | critical | error |
+ warning | notice | info | debug.
+-type report() :: map() | [{atom(),term()}].
+-type report_cb() :: fun((report()) -> {io:format(),[term()]}) |
+ fun((report(),report_cb_config()) -> unicode:chardata()).
+-type report_cb_config() :: #{depth := pos_integer() | unlimited,
+ chars_limit := pos_integer() | unlimited,
+ single_line := boolean()}.
+-type msg_fun() :: fun((term()) -> {io:format(),[term()]} |
+ report() |
+ unicode:chardata()).
+-type metadata() :: #{pid => pid(),
+ gl => pid(),
+ time => timestamp(),
+ mfa => {module(),atom(),non_neg_integer()},
+ file => file:filename(),
+ line => non_neg_integer(),
+ domain => [atom()],
+ report_cb => report_cb(),
+ atom() => term()}.
+-type location() :: #{mfa := {module(),atom(),non_neg_integer()},
+ file := file:filename(),
+ line := non_neg_integer()}.
+-type handler_id() :: atom().
+-type filter_id() :: atom().
+-type filter() :: {fun((log_event(),filter_arg()) ->
+ filter_return()),filter_arg()}.
+-type filter_arg() :: term().
+-type filter_return() :: stop | ignore | log_event().
+-type primary_config() :: #{level => level() | all | none,
+ filter_default => log | stop,
+ filters => [{filter_id(),filter()}]}.
+-type handler_config() :: #{id => handler_id(),
+ config => term(),
+ level => level() | all | none,
+ module => module(),
+ filter_default => log | stop,
+ filters => [{filter_id(),filter()}],
+ formatter => {module(),formatter_config()}}.
+-type timestamp() :: integer().
+-type formatter_config() :: #{atom() => term()}.
+
+-type config_handler() :: {handler, handler_id(), module(), handler_config()}.
+
+-type config_logger() :: [{handler,default,undefined} |
+ config_handler() |
+ {filters,log | stop,[{filter_id(),filter()}]} |
+ {module_level,level(),[module()]}].
+
+-type olp_config() :: #{sync_mode_qlen => non_neg_integer(),
+ drop_mode_qlen => pos_integer(),
+ flush_qlen => pos_integer(),
+ burst_limit_enable => boolean(),
+ burst_limit_max_count => pos_integer(),
+ burst_limit_window_time => pos_integer(),
+ overload_kill_enable => boolean(),
+ overload_kill_qlen => pos_integer(),
+ overload_kill_mem_size => pos_integer(),
+ overload_kill_restart_after =>
+ non_neg_integer() | infinity}.
+
+-export_type([log_event/0,
+ level/0,
+ report/0,
+ report_cb/0,
+ report_cb_config/0,
+ msg_fun/0,
+ metadata/0,
+ primary_config/0,
+ handler_config/0,
+ handler_id/0,
+ filter_id/0,
+ filter/0,
+ filter_arg/0,
+ filter_return/0,
+ config_handler/0,
+ formatter_config/0,
+ olp_config/0,
+ timestamp/0]).
+
+%%%-----------------------------------------------------------------
+%%% API
+emergency(X) ->
+ log(emergency,X).
+emergency(X,Y) ->
+ log(emergency,X,Y).
+emergency(X,Y,Z) ->
+ log(emergency,X,Y,Z).
+
+alert(X) ->
+ log(alert,X).
+alert(X,Y) ->
+ log(alert,X,Y).
+alert(X,Y,Z) ->
+ log(alert,X,Y,Z).
+
+critical(X) ->
+ log(critical,X).
+critical(X,Y) ->
+ log(critical,X,Y).
+critical(X,Y,Z) ->
+ log(critical,X,Y,Z).
+
+error(X) ->
+ log(error,X).
+error(X,Y) ->
+ log(error,X,Y).
+error(X,Y,Z) ->
+ log(error,X,Y,Z).
+
+warning(X) ->
+ log(warning,X).
+warning(X,Y) ->
+ log(warning,X,Y).
+warning(X,Y,Z) ->
+ log(warning,X,Y,Z).
+
+notice(X) ->
+ log(notice,X).
+notice(X,Y) ->
+ log(notice,X,Y).
+notice(X,Y,Z) ->
+ log(notice,X,Y,Z).
+
+info(X) ->
+ log(info,X).
+info(X,Y) ->
+ log(info,X,Y).
+info(X,Y,Z) ->
+ log(info,X,Y,Z).
+
+debug(X) ->
+ log(debug,X).
+debug(X,Y) ->
+ log(debug,X,Y).
+debug(X,Y,Z) ->
+ log(debug,X,Y,Z).
+
+-spec log(Level,StringOrReport) -> ok when
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report().
+log(Level, StringOrReport) ->
+ do_log(Level,StringOrReport,#{}).
+
+-spec log(Level,StringOrReport,Metadata) -> ok when
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report(),
+ Metadata :: metadata();
+ (Level,Format,Args) -> ok when
+ Level :: level(),
+ Format :: io:format(),
+ Args ::[term()];
+ (Level,Fun,FunArgs) -> ok when
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term().
+log(Level, StringOrReport, Metadata)
+ when is_map(Metadata), not is_function(StringOrReport) ->
+ do_log(Level,StringOrReport,Metadata);
+log(Level, FunOrFormat, Args) ->
+ do_log(Level,{FunOrFormat,Args},#{}).
+
+-spec log(Level,Format, Args, Metadata) -> ok when
+ Level :: level(),
+ Format :: io:format(),
+ Args :: [term()],
+ Metadata :: metadata();
+ (Level,Fun,FunArgs,Metadata) -> ok when
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term(),
+ Metadata :: metadata().
+log(Level, FunOrFormat, Args, Metadata) ->
+ do_log(Level,{FunOrFormat,Args},Metadata).
+
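The level helpers and log/2,3,4 above accept a format string with arguments, a report (map or key/value list), a plain string, or a lazy fun. A few illustrative calls (message contents invented):

    logger:error("no reply from ~p", [node()]),
    logger:notice(#{event => connection_up, peer => "db1"}),
    logger:info(fun(Id) -> {"lazy lookup for ~p", [Id]} end, some_id,
                #{domain => [my_app]}).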
+-spec allow(Level,Module) -> boolean() when
+ Level :: level(),
+ Module :: module().
+allow(Level,Module) when ?IS_LEVEL(Level), is_atom(Module) ->
+ logger_config:allow(?LOGGER_TABLE,Level,Module).
+
+
+-spec macro_log(Location,Level,StringOrReport) -> ok when
+ Location :: location(),
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report().
+macro_log(Location,Level,StringOrReport) ->
+ log_allowed(Location,Level,StringOrReport,#{}).
+
+-spec macro_log(Location,Level,StringOrReport,Meta) -> ok when
+ Location :: location(),
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report(),
+ Meta :: metadata();
+ (Location,Level,Format,Args) -> ok when
+ Location :: location(),
+ Level :: level(),
+ Format :: io:format(),
+ Args ::[term()];
+ (Location,Level,Fun,FunArgs) -> ok when
+ Location :: location(),
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term().
+macro_log(Location,Level,StringOrReport,Meta)
+ when is_map(Meta), not is_function(StringOrReport) ->
+ log_allowed(Location,Level,StringOrReport,Meta);
+macro_log(Location,Level,FunOrFormat,Args) ->
+ log_allowed(Location,Level,{FunOrFormat,Args},#{}).
+
+-spec macro_log(Location,Level,Format,Args,Meta) -> ok when
+ Location :: location(),
+ Level :: level(),
+ Format :: io:format(),
+ Args ::[term()],
+ Meta :: metadata();
+ (Location,Level,Fun,FunArgs,Meta) -> ok when
+ Location :: location(),
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term(),
+ Meta :: metadata().
+macro_log(Location,Level,FunOrFormat,Args,Meta) ->
+ log_allowed(Location,Level,{FunOrFormat,Args},Meta).
+
+-spec format_otp_report(Report) -> FormatArgs when
+ Report :: report(),
+ FormatArgs :: {io:format(),[term()]}.
+format_otp_report(#{label:=_,report:=Report}) ->
+ format_report(Report);
+format_otp_report(Report) ->
+ format_report(Report).
+
+-spec format_report(Report) -> FormatArgs when
+ Report :: report(),
+ FormatArgs :: {io:format(),[term()]}.
+format_report(Report) when is_map(Report) ->
+ format_report(maps:to_list(Report));
+format_report(Report) when is_list(Report) ->
+ case lists:flatten(Report) of
+ [] ->
+ {"~tp",[[]]};
+ FlatList ->
+ case string_p1(FlatList) of
+ true ->
+ {"~ts",[FlatList]};
+ false ->
+ format_term_list(Report,[],[])
+ end
+ end;
+format_report(Report) ->
+ {"~tp",[Report]}.
+
+format_term_list([{Tag,Data}|T],Format,Args) ->
+ PorS = case string_p(Data) of
+ true -> "s";
+ false -> "p"
+ end,
+ format_term_list(T,[" ~tp: ~t"++PorS|Format],[Data,Tag|Args]);
+format_term_list([Data|T],Format,Args) ->
+ format_term_list(T,[" ~tp"|Format],[Data|Args]);
+format_term_list([],Format,Args) ->
+ {lists:flatten(lists:join($\n,lists:reverse(Format))),lists:reverse(Args)}.
+
+string_p(List) when is_list(List) ->
+ string_p1(lists:flatten(List));
+string_p(_) ->
+ false.
+
+string_p1([]) ->
+ false;
+string_p1(FlatList) ->
+ io_lib:printable_unicode_list(FlatList).
+
+internal_log(Level,Term) when is_atom(Level) ->
+ erlang:display_string("Logger - "++ atom_to_list(Level) ++ ": "),
+ erlang:display(Term).
+
+-spec timestamp() -> timestamp().
+timestamp() ->
+ os:system_time(microsecond).
+
+%%%-----------------------------------------------------------------
+%%% Configuration
+-spec add_primary_filter(FilterId,Filter) -> ok | {error,term()} when
+ FilterId :: filter_id(),
+ Filter :: filter().
+add_primary_filter(FilterId,Filter) ->
+ logger_server:add_filter(primary,{FilterId,Filter}).
+
+-spec add_handler_filter(HandlerId,FilterId,Filter) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ FilterId :: filter_id(),
+ Filter :: filter().
+add_handler_filter(HandlerId,FilterId,Filter) ->
+ logger_server:add_filter(HandlerId,{FilterId,Filter}).
+
+
+-spec remove_primary_filter(FilterId) -> ok | {error,term()} when
+ FilterId :: filter_id().
+remove_primary_filter(FilterId) ->
+ logger_server:remove_filter(primary,FilterId).
+
+-spec remove_handler_filter(HandlerId,FilterId) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ FilterId :: filter_id().
+remove_handler_filter(HandlerId,FilterId) ->
+ logger_server:remove_filter(HandlerId,FilterId).
+
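A filter is a {Fun, Arg} pair applied to every log event at the primary or handler level. A hedged example installing and removing one of the predefined filters from logger_filters as a primary filter:

    %% Stop all progress reports before they reach any handler.
    ok = logger:add_primary_filter(stop_progress,
                                   {fun logger_filters:progress/2, stop}),
    %% ...and remove it again.
    ok = logger:remove_primary_filter(stop_progress).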
+-spec add_handler(HandlerId,Module,Config) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Module :: module(),
+ Config :: handler_config().
+add_handler(HandlerId,Module,Config) ->
+ logger_server:add_handler(HandlerId,Module,Config).
+
+-spec remove_handler(HandlerId) -> ok | {error,term()} when
+ HandlerId :: handler_id().
+remove_handler(HandlerId) ->
+ logger_server:remove_handler(HandlerId).
+
+-spec set_primary_config(level,Level) -> ok | {error,term()} when
+ Level :: level() | all | none;
+ (filter_default,FilterDefault) -> ok | {error,term()} when
+ FilterDefault :: log | stop;
+ (filters,Filters) -> ok | {error,term()} when
+ Filters :: [{filter_id(),filter()}].
+set_primary_config(Key,Value) ->
+ logger_server:set_config(primary,Key,Value).
+
+-spec set_primary_config(Config) -> ok | {error,term()} when
+ Config :: primary_config().
+set_primary_config(Config) ->
+ logger_server:set_config(primary,Config).
+
+
+-spec set_handler_config(HandlerId,level,Level) -> Return when
+ HandlerId :: handler_id(),
+ Level :: level() | all | none,
+ Return :: ok | {error,term()};
+ (HandlerId,filter_default,FilterDefault) -> Return when
+ HandlerId :: handler_id(),
+ FilterDefault :: log | stop,
+ Return :: ok | {error,term()};
+ (HandlerId,filters,Filters) -> Return when
+ HandlerId :: handler_id(),
+ Filters :: [{filter_id(),filter()}],
+ Return :: ok | {error,term()};
+ (HandlerId,formatter,Formatter) -> Return when
+ HandlerId :: handler_id(),
+ Formatter :: {module(), formatter_config()},
+ Return :: ok | {error,term()};
+ (HandlerId,config,Config) -> Return when
+ HandlerId :: handler_id(),
+ Config :: term(),
+ Return :: ok | {error,term()}.
+set_handler_config(HandlerId,Key,Value) ->
+ logger_server:set_config(HandlerId,Key,Value).
+
+-spec set_handler_config(HandlerId,Config) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Config :: handler_config().
+set_handler_config(HandlerId,Config) ->
+ logger_server:set_config(HandlerId,Config).
+
+-spec set_proxy_config(Config) -> ok | {error,term()} when
+ Config :: olp_config().
+set_proxy_config(Config) ->
+ logger_server:set_config(proxy,Config).
+
+-spec update_primary_config(Config) -> ok | {error,term()} when
+ Config :: primary_config().
+update_primary_config(Config) ->
+ logger_server:update_config(primary,Config).
+
+-spec update_handler_config(HandlerId,level,Level) -> Return when
+ HandlerId :: handler_id(),
+ Level :: level() | all | none,
+ Return :: ok | {error,term()};
+ (HandlerId,filter_default,FilterDefault) -> Return when
+ HandlerId :: handler_id(),
+ FilterDefault :: log | stop,
+ Return :: ok | {error,term()};
+ (HandlerId,filters,Filters) -> Return when
+ HandlerId :: handler_id(),
+ Filters :: [{filter_id(),filter()}],
+ Return :: ok | {error,term()};
+ (HandlerId,formatter,Formatter) -> Return when
+ HandlerId :: handler_id(),
+ Formatter :: {module(), formatter_config()},
+ Return :: ok | {error,term()};
+ (HandlerId,config,Config) -> Return when
+ HandlerId :: handler_id(),
+ Config :: term(),
+ Return :: ok | {error,term()}.
+update_handler_config(HandlerId,Key,Value) ->
+ logger_server:update_config(HandlerId,Key,Value).
+
+-spec update_handler_config(HandlerId,Config) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Config :: handler_config().
+update_handler_config(HandlerId,Config) ->
+ logger_server:update_config(HandlerId,Config).
+
+-spec update_proxy_config(Config) -> ok | {error,term()} when
+ Config :: olp_config().
+update_proxy_config(Config) ->
+ logger_server:update_config(proxy,Config).
+
+-spec get_primary_config() -> Config when
+ Config :: primary_config().
+get_primary_config() ->
+ {ok,Config} = logger_config:get(?LOGGER_TABLE,primary),
+ maps:remove(handlers,Config).
+
+-spec get_handler_config(HandlerId) -> {ok,Config} | {error,term()} when
+ HandlerId :: handler_id(),
+ Config :: handler_config().
+get_handler_config(HandlerId) ->
+ case logger_config:get(?LOGGER_TABLE,HandlerId) of
+ {ok,#{module:=Module}=Config} ->
+ {ok,try Module:filter_config(Config)
+ catch _:_ -> Config
+ end};
+ Error ->
+ Error
+ end.
+
+-spec get_handler_config() -> [Config] when
+ Config :: handler_config().
+get_handler_config() ->
+ [begin
+ {ok,Config} = get_handler_config(HandlerId),
+ Config
+ end || HandlerId <- get_handler_ids()].
+
+-spec get_handler_ids() -> [HandlerId] when
+ HandlerId :: handler_id().
+get_handler_ids() ->
+ {ok,#{handlers:=HandlerIds}} = logger_config:get(?LOGGER_TABLE,primary),
+ HandlerIds.
+
+-spec get_proxy_config() -> Config when
+ Config :: olp_config().
+get_proxy_config() ->
+ {ok,Config} = logger_config:get(?LOGGER_TABLE,proxy),
+ Config.
+
+-spec update_formatter_config(HandlerId,FormatterConfig) ->
+ ok | {error,term()} when
+ HandlerId :: handler_id(),
+ FormatterConfig :: formatter_config().
+update_formatter_config(HandlerId,FormatterConfig) ->
+ logger_server:update_formatter_config(HandlerId,FormatterConfig).
+
+-spec update_formatter_config(HandlerId,Key,Value) ->
+ ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Key :: atom(),
+ Value :: term().
+update_formatter_config(HandlerId,Key,Value) ->
+ logger_server:update_formatter_config(HandlerId,#{Key=>Value}).
+
+-spec set_module_level(Modules,Level) -> ok | {error,term()} when
+ Modules :: [module()] | module(),
+ Level :: level() | all | none.
+set_module_level(Module,Level) when is_atom(Module) ->
+ set_module_level([Module],Level);
+set_module_level(Modules,Level) ->
+ logger_server:set_module_level(Modules,Level).
+
+-spec unset_module_level(Modules) -> ok when
+ Modules :: [module()] | module().
+unset_module_level(Module) when is_atom(Module) ->
+ unset_module_level([Module]);
+unset_module_level(Modules) ->
+ logger_server:unset_module_level(Modules).
+
+-spec unset_module_level() -> ok.
+unset_module_level() ->
+ logger_server:unset_module_level().
+
+-spec set_application_level(Application,Level) -> ok | {error, not_loaded} when
+ Application :: atom(),
+ Level :: level() | all | none.
+set_application_level(App,Level) ->
+ case application:get_key(App, modules) of
+ {ok, Modules} ->
+ set_module_level(Modules, Level);
+ undefined ->
+ {error, {not_loaded, App}}
+ end.
+
+-spec unset_application_level(Application) -> ok | {error, not_loaded} when
+ Application :: atom().
+unset_application_level(App) ->
+ case application:get_key(App, modules) of
+ {ok, Modules} ->
+ unset_module_level(Modules);
+ undefined ->
+ {error, {not_loaded, App}}
+ end.
+
+-spec get_module_level(Modules) -> [{Module,Level}] when
+ Modules :: [Module] | Module,
+ Module :: module(),
+ Level :: level() | all | none.
+get_module_level(Module) when is_atom(Module) ->
+ get_module_level([Module]);
+get_module_level(Modules) when is_list(Modules) ->
+ [{M,L} || {M,L} <- get_module_level(),
+ lists:member(M,Modules)].
+
+-spec get_module_level() -> [{Module,Level}] when
+ Module :: module(),
+ Level :: level() | all | none.
+get_module_level() ->
+ logger_config:get_module_level(?LOGGER_TABLE).
+
+%%%-----------------------------------------------------------------
+%%% Misc
+-spec compare_levels(Level1,Level2) -> eq | gt | lt when
+ Level1 :: level(),
+ Level2 :: level().
+compare_levels(Level,Level) when ?IS_LEVEL(Level) ->
+ eq;
+compare_levels(Level1,Level2) when ?IS_LEVEL(Level1), ?IS_LEVEL(Level2) ->
+ Int1 = logger_config:level_to_int(Level1),
+ Int2 = logger_config:level_to_int(Level2),
+ if Int1 < Int2 -> gt;
+ true -> lt
+ end;
+compare_levels(Level1,Level2) ->
+ erlang:error(badarg,[Level1,Level2]).
+
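compare_levels/2 orders levels by severity (emergency is the most severe), so for example:

    gt = logger:compare_levels(error, info),
    lt = logger:compare_levels(info, error),
    eq = logger:compare_levels(notice, notice).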
+-spec set_process_metadata(Meta) -> ok when
+ Meta :: metadata().
+set_process_metadata(Meta) when is_map(Meta) ->
+ _ = put(?LOGGER_META_KEY,Meta),
+ ok;
+set_process_metadata(Meta) ->
+ erlang:error(badarg,[Meta]).
+
+-spec update_process_metadata(Meta) -> ok when
+ Meta :: metadata().
+update_process_metadata(Meta) when is_map(Meta) ->
+ case get_process_metadata() of
+ undefined ->
+ set_process_metadata(Meta);
+ Meta0 when is_map(Meta0) ->
+ set_process_metadata(maps:merge(Meta0,Meta)),
+ ok
+ end;
+update_process_metadata(Meta) ->
+ erlang:error(badarg,[Meta]).
+
+-spec get_process_metadata() -> Meta | undefined when
+ Meta :: metadata().
+get_process_metadata() ->
+ get(?LOGGER_META_KEY).
+
+-spec unset_process_metadata() -> ok.
+unset_process_metadata() ->
+ _ = erase(?LOGGER_META_KEY),
+ ok.
+
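Process metadata is attached to every event logged from the calling process and, per the priority comment in log_allowed/4 further down, is overridden by metadata given in the call itself. A small illustrative sequence:

    ok = logger:set_process_metadata(#{request_id => <<"abc-123">>}),
    ok = logger:update_process_metadata(#{user => "alice"}),
    logger:info("handling request"),          %% carries request_id and user
    ok = logger:unset_process_metadata().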
+-spec get_config() -> #{primary=>primary_config(),
+ handlers=>[handler_config()],
+ proxy=>olp_config(),
+ module_levels=>[{module(),level() | all | none}]}.
+get_config() ->
+ #{primary=>get_primary_config(),
+ handlers=>get_handler_config(),
+ proxy=>get_proxy_config(),
+ module_levels=>lists:keysort(1,get_module_level())}.
+
+-spec i() -> ok.
+i() ->
+ #{primary := Primary,
+ handlers := HandlerConfigs,
+ proxy := Proxy,
+ module_levels := Modules} = get_config(),
+ M = modifier(),
+ i_primary(Primary,M),
+ i_handlers(HandlerConfigs,M),
+ i_proxy(Proxy,M),
+ i_modules(Modules,M).
+
+-spec i(What) -> ok when
+ What :: primary | handlers | proxy | modules | handler_id().
+i(primary) ->
+ i_primary(get_primary_config(),modifier());
+i(handlers) ->
+ i_handlers(get_handler_config(),modifier());
+i(proxy) ->
+ i_proxy(get_proxy_config(),modifier());
+i(modules) ->
+ i_modules(get_module_level(),modifier());
+i(HandlerId) when is_atom(HandlerId) ->
+ case get_handler_config(HandlerId) of
+ {ok,HandlerConfig} ->
+ i_handlers([HandlerConfig],modifier());
+ Error ->
+ Error
+ end;
+i(What) ->
+ erlang:error(badarg,[What]).
+
+
+i_primary(#{level := Level,
+ filters := Filters,
+ filter_default := FilterDefault},
+ M) ->
+ io:format("Primary configuration: ~n",[]),
+ io:format(" Level: ~p~n",[Level]),
+ io:format(" Filter Default: ~p~n", [FilterDefault]),
+ io:format(" Filters: ~n", []),
+ print_filters(" ",Filters,M).
+
+i_handlers(HandlerConfigs,M) ->
+ io:format("Handler configuration: ~n", []),
+ print_handlers(HandlerConfigs,M).
+
+i_proxy(Proxy,M) ->
+ io:format("Proxy configuration: ~n", []),
+ print_custom(" ",Proxy,M).
+
+i_modules(Modules,M) ->
+ io:format("Level set per module: ~n", []),
+ print_module_levels(Modules,M).
+
+encoding() ->
+ case lists:keyfind(encoding, 1, io:getopts()) of
+ false -> latin1;
+ {encoding, Enc} -> Enc
+ end.
+
+modifier() ->
+ modifier(encoding()).
+
+modifier(latin1) -> "";
+modifier(_) -> "t".
+
+print_filters(Indent, {Id, {Fun, Arg}}, M) ->
+ io:format("~sId: ~"++M++"p~n"
+ "~s Fun: ~"++M++"p~n"
+ "~s Arg: ~"++M++"p~n",
+ [Indent, Id, Indent, Fun, Indent, Arg]);
+print_filters(Indent,[],_M) ->
+ io:format("~s(none)~n",[Indent]);
+print_filters(Indent,Filters,M) ->
+ [print_filters(Indent,Filter,M) || Filter <- Filters],
+ ok.
+
+print_handlers(#{id := Id,
+ module := Module,
+ level := Level,
+ filters := Filters, filter_default := FilterDefault,
+ formatter := {FormatterModule,FormatterConfig}} = Config, M) ->
+ io:format(" Id: ~"++M++"p~n"
+ " Module: ~p~n"
+ " Level: ~p~n"
+ " Formatter:~n"
+ " Module: ~p~n"
+ " Config:~n",
+ [Id, Module, Level, FormatterModule]),
+ print_custom(" ",FormatterConfig,M),
+ io:format(" Filter Default: ~p~n"
+ " Filters:~n",
+ [FilterDefault]),
+ print_filters(" ",Filters,M),
+ case maps:find(config,Config) of
+ {ok,HandlerConfig} ->
+ io:format(" Handler Config:~n"),
+ print_custom(" ",HandlerConfig,M);
+ error ->
+ ok
+ end,
+ MyKeys = [filter_default, filters, formatter, level, module, id, config],
+ case maps:without(MyKeys,Config) of
+ Empty when Empty==#{} ->
+ ok;
+ Unhandled ->
+ io:format(" Custom Config:~n"),
+ print_custom(" ",Unhandled,M)
+ end;
+print_handlers([], _M) ->
+ io:format(" (none)~n");
+print_handlers(HandlerConfigs, M) ->
+ [print_handlers(HandlerConfig, M) || HandlerConfig <- HandlerConfigs],
+ ok.
+
+print_custom(Indent, {Key, Value}, M) ->
+ io:format("~s~"++M++"p: ~"++M++"p~n",[Indent,Key,Value]);
+print_custom(Indent, Map, M) when is_map(Map) ->
+ print_custom(Indent,lists:keysort(1,maps:to_list(Map)), M);
+print_custom(Indent, List, M) when is_list(List), is_tuple(hd(List)) ->
+ [print_custom(Indent, X, M) || X <- List],
+ ok;
+print_custom(Indent, Value, M) ->
+ io:format("~s~"++M++"p~n",[Indent,Value]).
+
+print_module_levels({Module,Level},M) ->
+ io:format(" Module: ~"++M++"p~n"
+ " Level: ~p~n",
+ [Module,Level]);
+print_module_levels([],_M) ->
+ io:format(" (none)~n");
+print_module_levels(Modules,M) ->
+ [print_module_levels(Module,M) || Module <- Modules],
+ ok.
+
+-spec internal_init_logger() -> ok | {error,term()}.
+%% This function is responsible for the basic configuration of logger.
+%% It runs before add_handlers/1 because we want the logger settings to
+%% take effect before the kernel supervisor tree is started.
+internal_init_logger() ->
+ try
+ Env = get_logger_env(kernel),
+ check_logger_config(kernel,Env),
+ ok = logger:set_primary_config(level, get_logger_level()),
+ ok = logger:set_primary_config(filter_default,
+ get_primary_filter_default(Env)),
+
+ [case logger:add_primary_filter(Id, Filter) of
+ ok -> ok;
+ {error, Reason} -> throw(Reason)
+ end || {Id, Filter} <- get_primary_filters(Env)],
+
+ [case logger:set_module_level(Modules, Level) of
+ ok -> ok;
+ {error, Reason} -> throw(Reason)
+ end || {module_level, Level, Modules} <- Env],
+
+ case logger:set_handler_config(simple,filters,
+ get_default_handler_filters()) of
+ ok -> ok;
+ {error,{not_found,simple}} -> ok
+ end,
+
+ init_kernel_handlers(Env)
+ catch throw:Reason ->
+ ?LOG_ERROR("Invalid logger config: ~p", [Reason]),
+ {error, {bad_config, {kernel, Reason}}}
+ end.
+
+-spec init_kernel_handlers(config_logger()) -> ok | {error,term()}.
+%% Set up the kernel environment variables correctly.
+%% The actual handlers are started by a call to add_handlers/1.
+init_kernel_handlers(Env) ->
+ try
+ case get_logger_type(Env) of
+ {ok,silent} ->
+ ok = logger:remove_handler(simple);
+ {ok,false} ->
+ ok;
+ {ok,Type} ->
+ init_default_config(Type,Env)
+ end
+ catch throw:Reason ->
+ ?LOG_ERROR("Invalid default handler config: ~p", [Reason]),
+ {error, {bad_config, {kernel, Reason}}}
+ end.
+
+-spec add_handlers(Application) -> ok | {error,term()} when
+ Application :: atom();
+ (HandlerConfig) -> ok | {error,term()} when
+ HandlerConfig :: [config_handler()].
+%% This function is responsible for resolving the handler config
+%% and then starting the correct handlers. This is done after the
+%% kernel supervisor tree has been started as it needs the logger_sup.
+add_handlers(kernel) ->
+ Env = get_logger_env(kernel),
+ case get_proxy_opts(Env) of
+ undefined ->
+ add_handlers(kernel,Env);
+ Opts ->
+ case set_proxy_config(Opts) of
+ ok -> add_handlers(kernel,Env);
+ {error, Reason} -> {error,{bad_proxy_config,Reason}}
+ end
+ end;
+add_handlers(App) when is_atom(App) ->
+ add_handlers(App,get_logger_env(App));
+add_handlers(HandlerConfig) ->
+ add_handlers(application:get_application(),HandlerConfig).
+
+add_handlers(App,HandlerConfig) ->
+ try
+ check_logger_config(App,HandlerConfig),
+ DefaultAdded =
+ lists:foldl(
+ fun({handler, default = Id, Module, Config}, _)
+ when not is_map_key(filters, Config) ->
+ %% The default handler should have a couple of extra filters
+ %% set on it by default.
+ DefConfig = #{ filter_default => stop,
+ filters => get_default_handler_filters()},
+ setup_handler(Id, Module, maps:merge(DefConfig,Config)),
+ true;
+ ({handler, Id, Module, Config}, Default) ->
+ setup_handler(Id, Module, Config),
+ Default orelse Id == default;
+ (_,Default) -> Default
+ end, false, HandlerConfig),
+ %% If a default handler was added, we try to remove the simple handler.
+ %% If the simple handler still exists, it will replay its log events
+ %% to the handler(s) added in the fold above.
+ [case logger:remove_handler(simple) of
+ ok -> ok;
+ {error,{not_found,simple}} -> ok
+ end || DefaultAdded],
+ ok
+ catch throw:Reason0 ->
+ Reason =
+ case App of
+ undefined -> Reason0;
+ _ -> {App,Reason0}
+ end,
+ ?LOG_ERROR("Invalid logger handler config: ~p", [Reason]),
+ {error, {bad_config, {handler, Reason}}}
+ end.
+
+setup_handler(Id, Module, Config) ->
+ case logger:add_handler(Id, Module, Config) of
+ ok -> ok;
+ {error, Reason} -> throw(Reason)
+ end.
+
+check_logger_config(_,[]) ->
+ ok;
+check_logger_config(App,[{handler,_,_,_}|Env]) ->
+ check_logger_config(App,Env);
+check_logger_config(kernel,[{handler,default,undefined}|Env]) ->
+ check_logger_config(kernel,Env);
+check_logger_config(kernel,[{filters,_,_}|Env]) ->
+ check_logger_config(kernel,Env);
+check_logger_config(kernel,[{module_level,_,_}|Env]) ->
+ check_logger_config(kernel,Env);
+check_logger_config(kernel,[{proxy,_}|Env]) ->
+ check_logger_config(kernel,Env);
+check_logger_config(_,Bad) ->
+ throw(Bad).
+
+-spec get_logger_type(config_logger()) ->
+ {ok, standard_io | false | silent |
+ {file, file:name_all()} |
+ {file, file:name_all(), [file:mode()]}}.
+get_logger_type(Env) ->
+ case application:get_env(kernel, error_logger) of
+ {ok, tty} ->
+ {ok, standard_io};
+ {ok, {file, File}} when is_list(File) ->
+ {ok, {file, File}};
+ {ok, false} ->
+ {ok, false};
+ {ok, silent} ->
+ {ok, silent};
+ undefined ->
+ case lists:member({handler,default,undefined}, Env) of
+ true ->
+ {ok, false};
+ false ->
+ {ok, standard_io} % default value
+ end;
+ {ok, Bad} ->
+ throw({error_logger, Bad})
+ end.
+
+get_logger_level() ->
+ case application:get_env(kernel,logger_level,info) of
+ Level when ?IS_LEVEL(Level); Level=:=all; Level=:=none ->
+ Level;
+ Level ->
+ throw({logger_level, Level})
+ end.
+
+get_primary_filter_default(Env) ->
+ case lists:keyfind(filters,1,Env) of
+ {filters,Default,_} ->
+ Default;
+ false ->
+ log
+ end.
+
+get_primary_filters(Env) ->
+ case [F || F={filters,_,_} <- Env] of
+ [{filters,_,Filters}] ->
+ case lists:all(fun({_,_}) -> true; (_) -> false end,Filters) of
+ true -> Filters;
+ false -> throw({invalid_filters,Filters})
+ end;
+ [] -> [];
+ _ -> throw({multiple_filters,Env})
+ end.
+
+get_proxy_opts(Env) ->
+ case [P || P={proxy,_} <- Env] of
+ [{proxy,Opts}] -> Opts;
+ [] -> undefined;
+ _ -> throw({multiple_proxies,Env})
+ end.
+
+%% This function looks at the kernel logger environment
+%% and updates it so that the correct logger is configured
+init_default_config(Type,Env) when Type==standard_io;
+ Type==standard_error;
+ element(1,Type)==file ->
+ DefaultFormatter = #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}},
+ DefaultConfig = DefaultFormatter#{config=>#{type=>Type}},
+ NewLoggerEnv =
+ case lists:keyfind(default, 2, Env) of
+ {handler, default, logger_std_h, Config} ->
+ %% Only want to add the logger_std_h config
+ %% if not configured by user AND the default
+ %% handler is still the logger_std_h.
+ lists:keyreplace(default, 2, Env,
+ {handler, default, logger_std_h,
+ maps:merge(DefaultConfig,Config)});
+ {handler, default, Module,Config} ->
+ %% Add default formatter. The point of this
+ %% is to get the expected formatter config
+ %% for the default handler, since this
+ %% differs from the default values that
+ %% logger_formatter itself adds.
+ lists:keyreplace(default, 2, Env,
+ {handler, default, Module,
+ maps:merge(DefaultFormatter,Config)});
+ _ ->
+ %% Nothing has been configured, use default
+ [{handler, default, logger_std_h, DefaultConfig} | Env]
+ end,
+ application:set_env(kernel, logger, NewLoggerEnv, [{timeout,infinity}]).
+
+get_default_handler_filters() ->
+ case application:get_env(kernel, logger_sasl_compatible, false) of
+ true ->
+ ?DEFAULT_HANDLER_FILTERS([otp]);
+ false ->
+ ?DEFAULT_HANDLER_FILTERS([otp,sasl])
+ end.
+
+get_logger_env(App) ->
+ application:get_env(App, logger, []).
+
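add_handlers/1,2 reads an application's logger environment, a list of config_handler() tuples plus the kernel-only filters, module_level and proxy entries checked in check_logger_config/2 above. A hedged kernel sys.config sketch matching those shapes (file name and module invented):

    [{kernel,
      [{logger_level, debug},
       {logger,
        [{handler, default, logger_std_h,
          #{config => #{type => {file, "log/kernel.log"}}}},
         {module_level, debug, [my_app_worker]}]}]}].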
+%%%-----------------------------------------------------------------
+%%% Internal
+do_log(Level,Msg,#{mfa:={Module,_,_}}=Meta) ->
+ case logger_config:allow(?LOGGER_TABLE,Level,Module) of
+ true ->
+ log_allowed(#{},Level,Msg,Meta);
+ false ->
+ ok
+ end;
+do_log(Level,Msg,Meta) ->
+ case logger_config:allow(?LOGGER_TABLE,Level) of
+ true ->
+ log_allowed(#{},Level,Msg,Meta);
+ false ->
+ ok
+ end.
+
+-spec log_allowed(Location,Level,Msg,Meta) -> ok when
+ Location :: location() | #{},
+ Level :: level(),
+ Msg :: {msg_fun(),term()} |
+ {io:format(),[term()]} |
+ report() |
+ unicode:chardata(),
+ Meta :: metadata().
+log_allowed(Location,Level,{Fun,FunArgs},Meta) when is_function(Fun,1) ->
+ try Fun(FunArgs) of
+ Msg={Format,Args} when is_list(Format), is_list(Args) ->
+ log_allowed(Location,Level,Msg,Meta);
+ Report when ?IS_REPORT(Report) ->
+ log_allowed(Location,Level,Report,Meta);
+ String when ?IS_STRING(String) ->
+ log_allowed(Location,Level,String,Meta);
+ Other ->
+ log_allowed(Location,Level,
+ {"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{Fun,FunArgs},Other]},
+ Meta)
+ catch C:R ->
+ log_allowed(Location,Level,
+ {"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{Fun,FunArgs},{C,R}]},
+ Meta)
+ end;
+log_allowed(Location,Level,Msg,Meta0) when is_map(Meta0) ->
+ %% Metadata priorities are:
+ %% Location (added in API macros) - will be overwritten by process
+ %% metadata (set by set_process_metadata/1), which in turn will be
+ %% overwritten by the metadata given as argument in the log call
+ %% (function or macro).
+ Meta = add_default_metadata(
+ maps:merge(Location,maps:merge(proc_meta(),Meta0))),
+ case node(maps:get(gl,Meta)) of
+ Node when Node=/=node() ->
+ log_remote(Node,Level,Msg,Meta);
+ _ ->
+ ok
+ end,
+ do_log_allowed(Level,Msg,Meta,tid()).
+
+do_log_allowed(Level,{Format,Args}=Msg,Meta,Tid)
+ when ?IS_LEVEL(Level),
+ is_list(Format),
+ is_list(Args),
+ is_map(Meta) ->
+ logger_backend:log_allowed(#{level=>Level,msg=>Msg,meta=>Meta},Tid);
+do_log_allowed(Level,Report,Meta,Tid)
+ when ?IS_LEVEL(Level),
+ ?IS_REPORT(Report),
+ is_map(Meta) ->
+ logger_backend:log_allowed(#{level=>Level,msg=>{report,Report},meta=>Meta},
+ Tid);
+do_log_allowed(Level,String,Meta,Tid)
+ when ?IS_LEVEL(Level),
+ ?IS_STRING(String),
+ is_map(Meta) ->
+ logger_backend:log_allowed(#{level=>Level,msg=>{string,String},meta=>Meta},
+ Tid).
+tid() ->
+ ets:whereis(?LOGGER_TABLE).
+
+log_remote(Node,Level,{Format,Args},Meta) ->
+ log_remote(Node,{log,Level,Format,Args,Meta});
+log_remote(Node,Level,Msg,Meta) ->
+ log_remote(Node,{log,Level,Msg,Meta}).
+
+log_remote(Node,Request) ->
+ logger_proxy:log({remote,Node,Request}),
+ ok.
+
+add_default_metadata(Meta) ->
+ add_default_metadata([pid,gl,time],Meta).
+
+add_default_metadata([Key|Keys],Meta) ->
+ case maps:is_key(Key,Meta) of
+ true ->
+ add_default_metadata(Keys,Meta);
+ false ->
+ add_default_metadata(Keys,Meta#{Key=>default(Key)})
+ end;
+add_default_metadata([],Meta) ->
+ Meta.
+
+proc_meta() ->
+ case get_process_metadata() of
+ ProcMeta when is_map(ProcMeta) -> ProcMeta;
+ _ -> #{}
+ end.
+
+default(pid) -> self();
+default(gl) -> group_leader();
+default(time) -> timestamp().
+
+%% Remove everything up to and including this module from the stacktrace
+filter_stacktrace(Module,[{Module,_,_,_}|_]) ->
+ [];
+filter_stacktrace(Module,[H|T]) ->
+ [H|filter_stacktrace(Module,T)];
+filter_stacktrace(_,[]) ->
+ [].
diff --git a/lib/kernel/src/logger_backend.erl b/lib/kernel/src/logger_backend.erl
new file mode 100644
index 0000000000..432c671afd
--- /dev/null
+++ b/lib/kernel/src/logger_backend.erl
@@ -0,0 +1,133 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_backend).
+
+-export([log_allowed/2]).
+
+-include("logger_internal.hrl").
+
+-define(OWN_KEYS,[level,filters,filter_default,handlers]).
+
+%%%-----------------------------------------------------------------
+%%% The default logger backend
+log_allowed(Log, Tid) ->
+ {ok,Config} = logger_config:get(Tid,primary),
+ Filters = maps:get(filters,Config,[]),
+ case apply_filters(primary,Log,Filters,Config) of
+ stop ->
+ ok;
+ Log1 ->
+ Handlers = maps:get(handlers,Config,[]),
+ call_handlers(Log1,Handlers,Tid)
+ end,
+ ok.
+
+call_handlers(#{level:=Level}=Log,[Id|Handlers],Tid) ->
+ case logger_config:get(Tid,Id,Level) of
+ {ok,#{module:=Module}=Config} ->
+ Filters = maps:get(filters,Config,[]),
+ case apply_filters(Id,Log,Filters,Config) of
+ stop ->
+ ok;
+ Log1 ->
+ Config1 = maps:without(?OWN_KEYS,Config),
+ try Module:log(Log1,Config1)
+ catch C:R:S ->
+ case logger:remove_handler(Id) of
+ ok ->
+ logger:internal_log(
+ error,{removed_failing_handler,Id}),
+ ?LOG_INTERNAL(
+ debug,
+ [{logger,removed_failing_handler},
+ {handler,{Id,Module}},
+ {log_event,Log1},
+ {config,Config1},
+ {reason,{C,R,filter_stacktrace(S)}}]);
+ {error,{not_found,_}} ->
+ %% Probably already removed by another client
+ %% Don't report again
+ ok;
+ {error,Reason} ->
+ ?LOG_INTERNAL(
+ debug,
+ [{logger,remove_handler_failed},
+ {reason,Reason}])
+ end
+ end
+ end;
+ _ ->
+ ok
+ end,
+ call_handlers(Log,Handlers,Tid);
+call_handlers(_Log,[],_Tid) ->
+ ok.
+
+apply_filters(Owner,Log,Filters,Config) ->
+ case do_apply_filters(Owner,Log,Filters,ignore) of
+ stop ->
+ stop;
+ ignore ->
+ case maps:get(filter_default,Config) of
+ log ->
+ Log;
+ stop ->
+ stop
+ end;
+ Log1 ->
+ Log1
+ end.
+
+do_apply_filters(Owner,Log,[{_Id,{FilterFun,FilterArgs}}=Filter|Filters],State) ->
+ try FilterFun(Log,FilterArgs) of
+ stop ->
+ stop;
+ ignore ->
+ do_apply_filters(Owner,Log,Filters,State);
+ Log1=#{level:=Level,msg:=Msg,meta:=Meta}
+ when is_atom(Level), ?IS_MSG(Msg), is_map(Meta) ->
+ do_apply_filters(Owner,Log1,Filters,log);
+ Bad ->
+ handle_filter_failed(Filter,Owner,Log,{bad_return_value,Bad})
+ catch C:R:S ->
+ handle_filter_failed(Filter,Owner,Log,{C,R,filter_stacktrace(S)})
+ end;
+do_apply_filters(_Owner,_Log,[],ignore) ->
+ ignore;
+do_apply_filters(_Owner,Log,[],log) ->
+ Log.
+
+handle_filter_failed({Id,_}=Filter,Owner,Log,Reason) ->
+ case logger_server:remove_filter(Owner,Id) of
+ ok ->
+ logger:internal_log(error,{removed_failing_filter,Id}),
+ ?LOG_INTERNAL(debug,
+ [{logger,removed_failing_filter},
+ {filter,Filter},
+ {owner,Owner},
+ {log_event,Log},
+ {reason,Reason}]);
+ _ ->
+ ok
+ end,
+ ignore.
+
+filter_stacktrace(Stacktrace) ->
+ logger:filter_stacktrace(?MODULE,Stacktrace).
diff --git a/lib/kernel/src/logger_config.erl b/lib/kernel/src/logger_config.erl
new file mode 100644
index 0000000000..5024d20cfe
--- /dev/null
+++ b/lib/kernel/src/logger_config.erl
@@ -0,0 +1,160 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_config).
+
+-export([new/1,delete/2,
+ exist/2,
+ allow/2,allow/3,
+ get/2, get/3,
+ create/3, set/3,
+ set_module_level/3,unset_module_level/2,
+ get_module_level/1,cache_module_level/2,
+ level_to_int/1]).
+
+-include("logger_internal.hrl").
+
+new(Name) ->
+ _ = ets:new(Name,[set,protected,named_table,
+ {read_concurrency,true},
+ {write_concurrency,true}]),
+ ets:whereis(Name).
+
+delete(Tid,Id) ->
+ ets:delete(Tid,table_key(Id)).
+
+allow(Tid,Level,Module) ->
+ LevelInt = level_to_int(Level),
+ case ets:lookup(Tid,Module) of
+ [{Module,{ModLevel,cached}}] when is_integer(ModLevel),
+ LevelInt =< ModLevel ->
+ true;
+ [{Module,ModLevel}] when is_integer(ModLevel),
+ LevelInt =< ModLevel ->
+ true;
+ [] ->
+ logger_server:cache_module_level(Module),
+ allow(Tid,Level);
+ _ ->
+ false
+ end.
+
+allow(Tid,Level) ->
+ GlobalLevelInt = ets:lookup_element(Tid,?PRIMARY_KEY,2),
+ level_to_int(Level) =< GlobalLevelInt.
+
+exist(Tid,What) ->
+ ets:member(Tid,table_key(What)).
+
+get(Tid,What) ->
+ case ets:lookup(Tid,table_key(What)) of
+ [{_,_,Config}] ->
+ {ok,Config};
+ [{_,Config}] when What=:=proxy ->
+ {ok,Config};
+ [] ->
+ {error,{not_found,What}}
+ end.
+
+get(Tid,What,Level) ->
+ MS = [{{table_key(What),'$1','$2'},
+ [{'>=','$1',level_to_int(Level)}],
+ ['$2']}],
+ case ets:select(Tid,MS) of
+ [] -> error;
+ [Data] -> {ok,Data}
+ end.
+
+create(Tid,proxy,Config) ->
+ ets:insert(Tid,{table_key(proxy),Config});
+create(Tid,What,Config) ->
+ LevelInt = level_to_int(maps:get(level,Config)),
+ ets:insert(Tid,{table_key(What),LevelInt,Config}).
+
+set(Tid,proxy,Config) ->
+ ets:insert(Tid,{table_key(proxy),Config}),
+ ok;
+set(Tid,What,Config) ->
+ LevelInt = level_to_int(maps:get(level,Config)),
+ %% Should do this only if the level has actually changed. Possibly
+ %% overwrite instead of delete?
+ case What of
+ primary ->
+ _ = ets:select_delete(Tid,[{{'_',{'$1',cached}},
+ [{'=/=','$1',LevelInt}],
+ [true]}]),
+ ok;
+ _ ->
+ ok
+ end,
+ ets:update_element(Tid,table_key(What),[{2,LevelInt},{3,Config}]),
+ ok.
+
+set_module_level(Tid,Modules,Level) ->
+ LevelInt = level_to_int(Level),
+ [ets:insert(Tid,{Module,LevelInt}) || Module <- Modules],
+ ok.
+
+%% should possibly overwrite instead of delete?
+unset_module_level(Tid,all) ->
+ MS = [{{'$1','$2'},[{is_atom,'$1'},{is_integer,'$2'}],[true]}],
+ _ = ets:select_delete(Tid,MS),
+ ok;
+unset_module_level(Tid,Modules) ->
+ [ets:delete(Tid,Module) || Module <- Modules],
+ ok.
+
+get_module_level(Tid) ->
+ MS = [{{'$1','$2'},[{is_atom,'$1'},{is_integer,'$2'}],[{{'$1','$2'}}]}],
+ Modules = ets:select(Tid,MS),
+ lists:sort([{M,int_to_level(L)} || {M,L} <- Modules]).
+
+cache_module_level(Tid,Module) ->
+ GlobalLevelInt = ets:lookup_element(Tid,?PRIMARY_KEY,2),
+ ets:insert_new(Tid,{Module,{GlobalLevelInt,cached}}),
+ ok.
+
+level_to_int(none) -> ?LOG_NONE;
+level_to_int(emergency) -> ?EMERGENCY;
+level_to_int(alert) -> ?ALERT;
+level_to_int(critical) -> ?CRITICAL;
+level_to_int(error) -> ?ERROR;
+level_to_int(warning) -> ?WARNING;
+level_to_int(notice) -> ?NOTICE;
+level_to_int(info) -> ?INFO;
+level_to_int(debug) -> ?DEBUG;
+level_to_int(all) -> ?LOG_ALL.
+
+int_to_level(?LOG_NONE) -> none;
+int_to_level(?EMERGENCY) -> emergency;
+int_to_level(?ALERT) -> alert;
+int_to_level(?CRITICAL) -> critical;
+int_to_level(?ERROR) -> error;
+int_to_level(?WARNING) -> warning;
+int_to_level(?NOTICE) -> notice;
+int_to_level(?INFO) -> info;
+int_to_level(?DEBUG) -> debug;
+int_to_level(?LOG_ALL) -> all.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+
+table_key(proxy) -> ?PROXY_KEY;
+table_key(primary) -> ?PRIMARY_KEY;
+table_key(HandlerId) -> {?HANDLER_KEY,HandlerId}.
diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl
new file mode 100644
index 0000000000..47b39da900
--- /dev/null
+++ b/lib/kernel/src/logger_disk_log_h.erl
@@ -0,0 +1,252 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_disk_log_h).
+
+-include("logger.hrl").
+-include("logger_internal.hrl").
+-include("logger_h_common.hrl").
+
+%%% API
+-export([filesync/1]).
+
+%% logger_h_common callbacks
+-export([init/2, check_config/4, reset_state/2,
+ filesync/3, write/4, handle_info/3, terminate/3]).
+
+%% logger callbacks
+-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
+ filter_config/1]).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%%
+-spec filesync(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+filesync(Name) ->
+ logger_h_common:filesync(?MODULE,Name).
+
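+%% Example use (illustrative): assuming a handler added as
+%%   logger:add_handler(my_disk_log, logger_disk_log_h,
+%%                      #{config => #{file => "log/my.log"}}),
+%% a manual sync can be requested with
+%%   logger_disk_log_h:filesync(my_disk_log).
+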
+%%%===================================================================
+%%% logger callbacks
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Handler being added
+adding_handler(Config) ->
+ logger_h_common:adding_handler(Config).
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(SetOrUpdate, OldConfig, NewConfig) ->
+ logger_h_common:changing_config(SetOrUpdate, OldConfig, NewConfig).
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(Config) ->
+ logger_h_common:removing_handler(Config).
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(LogEvent, Config) -> ok when
+ LogEvent :: logger:log_event(),
+ Config :: logger:handler_config().
+
+log(LogEvent, Config) ->
+ logger_h_common:log(LogEvent, Config).
+
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+filter_config(Config) ->
+ logger_h_common:filter_config(Config).
+
+%%%===================================================================
+%%% logger_h_common callbacks
+%%%===================================================================
+init(Name, #{file:=File,type:=Type,max_no_bytes:=MNB,max_no_files:=MNF}) ->
+ case open_disk_log(Name, File, Type, MNB, MNF) of
+ ok ->
+ {ok,#{log_opts => #{file => File,
+ type => Type,
+ max_no_bytes => MNB,
+ max_no_files => MNF},
+ prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}};
+ Error ->
+ Error
+ end.
+
+check_config(Name,set,undefined,HConfig0) ->
+ HConfig=merge_default_logopts(Name,maps:merge(get_default_config(),HConfig0)),
+ check_config(HConfig);
+check_config(_Name,SetOrUpdate,OldHConfig,NewHConfig0) ->
+ WriteOnce = maps:with([type,file,max_no_files,max_no_bytes],OldHConfig),
+ Default =
+ case SetOrUpdate of
+ set ->
+ %% Do not reset write-once fields to defaults
+ maps:merge(get_default_config(),WriteOnce);
+ update ->
+ OldHConfig
+ end,
+
+ NewHConfig = maps:merge(Default,NewHConfig0),
+
+ %% Fail if write-once fields are changed
+ case maps:with([type,file,max_no_files,max_no_bytes],NewHConfig) of
+ WriteOnce ->
+ check_config(NewHConfig);
+ Other ->
+ {Old,New} = logger_server:diff_maps(WriteOnce,Other),
+ {error,{illegal_config_change,?MODULE,Old,New}}
+ end.
+
+check_config(HConfig) ->
+ case check_h_config(maps:to_list(HConfig)) of
+ ok ->
+ {ok,HConfig};
+ {error,{Key,Value}} ->
+ {error,{invalid_config,?MODULE,#{Key=>Value}}}
+ end.
+
+check_h_config([{file,File}|Config]) when is_list(File) ->
+ check_h_config(Config);
+check_h_config([{max_no_files,undefined}|Config]) ->
+ check_h_config(Config);
+check_h_config([{max_no_files,N}|Config]) when is_integer(N), N>0 ->
+ check_h_config(Config);
+check_h_config([{max_no_bytes,infinity}|Config]) ->
+ check_h_config(Config);
+check_h_config([{max_no_bytes,N}|Config]) when is_integer(N), N>0 ->
+ check_h_config(Config);
+check_h_config([{type,Type}|Config]) when Type==wrap; Type==halt ->
+ check_h_config(Config);
+check_h_config([Other | _]) ->
+ {error,Other};
+check_h_config([]) ->
+ ok.
+
+get_default_config() ->
+ #{}.
+
+merge_default_logopts(Name, HConfig) ->
+ Type = maps:get(type, HConfig, wrap),
+ {DefaultNoFiles,DefaultNoBytes} =
+ case Type of
+ halt -> {undefined,infinity};
+ _wrap -> {10,1048576}
+ end,
+ {ok,Dir} = file:get_cwd(),
+ Defaults = #{file => filename:join(Dir,Name),
+ max_no_files => DefaultNoFiles,
+ max_no_bytes => DefaultNoBytes,
+ type => Type},
+ maps:merge(Defaults, HConfig).
+
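+%% For example (assuming the handler id is 'disk_log_test' and the current
+%% working directory is "/tmp"), an empty user configuration for a wrap log
+%% ends up as:
+%%   #{file => "/tmp/disk_log_test", type => wrap,
+%%     max_no_files => 10, max_no_bytes => 1048576}
+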
+filesync(Name,_Mode,State) ->
+ Result = ?disk_log_sync(Name),
+ maybe_notify_error(Name, filesync, Result, prev_sync_result, State).
+
+write(Name, Mode, Bin, State) ->
+ Result = ?disk_log_write(Name, Mode, Bin),
+ maybe_notify_error(Name, log, Result, prev_log_result, State).
+
+reset_state(_Name, State) ->
+ State#{prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}.
+
+%% The disk log owner must handle status messages from disk_log.
+handle_info(Name, {disk_log, _Node, Log, Info={truncated,_NoLostItems}}, State) ->
+ maybe_notify_status(Name, Log, Info, prev_disk_log_info, State);
+handle_info(Name, {disk_log, _Node, Log, Info = {blocked_log,_Items}}, State) ->
+ maybe_notify_status(Name, Log, Info, prev_disk_log_info, State);
+handle_info(Name, {disk_log, _Node, Log, Info = full}, State) ->
+ maybe_notify_status(Name, Log, Info, prev_disk_log_info, State);
+handle_info(Name, {disk_log, _Node, Log, Info = {error_status,_Status}}, State) ->
+ maybe_notify_status(Name, Log, Info, prev_disk_log_info, State);
+handle_info(_, _, State) ->
+ State.
+
+terminate(Name, _Reason, _State) ->
+ _ = close_disk_log(Name, normal),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+open_disk_log(Name, File, Type, MaxNoBytes, MaxNoFiles) ->
+ case filelib:ensure_dir(File) of
+ ok ->
+ Size =
+ if Type == halt -> MaxNoBytes;
+ Type == wrap -> {MaxNoBytes,MaxNoFiles}
+ end,
+ Opts = [{name, Name},
+ {file, File},
+ {size, Size},
+ {type, Type},
+ {linkto, self()},
+ {repair, false},
+ {format, external},
+ {notify, true},
+ {quiet, true},
+ {mode, read_write}],
+ case disk_log:open(Opts) of
+ {ok,Name} ->
+ ok;
+ Error = {error,_Reason} ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+close_disk_log(Name, _) ->
+ _ = ?disk_log_sync(Name),
+ _ = disk_log:lclose(Name),
+ ok.
+
+disk_log_write(Name, sync, Bin) ->
+ disk_log:blog(Name, Bin);
+disk_log_write(Name, async, Bin) ->
+ disk_log:balog(Name, Bin).
+
+%%%-----------------------------------------------------------------
+%%% Print error messages, but don't repeat the same message
+maybe_notify_error(Name, Op, Result, Key, #{log_opts:=LogOpts}=State) ->
+ {Result,error_notify_new({Name, Op, LogOpts, Result}, Result, Key, State)}.
+
+maybe_notify_status(Name, Log, Info, Key, State) ->
+ error_notify_new({disk_log, Name, Log, Info}, Info, Key, State).
+
+error_notify_new(Term, What, Key, State) ->
+ error_notify_new(What, maps:get(Key,State), Term),
+ State#{Key => What}.
+
+error_notify_new(ok,_Prev,_Term) ->
+ ok;
+error_notify_new(Same,Same,_Term) ->
+ ok;
+error_notify_new(_New,_Prev,Term) ->
+ logger_h_common:error_notify(Term).
diff --git a/lib/kernel/src/logger_filters.erl b/lib/kernel/src/logger_filters.erl
new file mode 100644
index 0000000000..0664c598e1
--- /dev/null
+++ b/lib/kernel/src/logger_filters.erl
@@ -0,0 +1,127 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_filters).
+
+-export([domain/2,
+ level/2,
+ progress/2,
+ remote_gl/2]).
+
+-include("logger_internal.hrl").
+-define(IS_ACTION(A), (A==log orelse A==stop)).
+
+-spec domain(LogEvent,Extra) -> logger:filter_return() when
+ LogEvent :: logger:log_event(),
+ Extra :: {Action,Compare,MatchDomain},
+ Action :: log | stop,
+ Compare :: super | sub | equal | not_equal | undefined,
+ MatchDomain :: list(atom()).
+domain(#{meta:=Meta}=LogEvent,{Action,Compare,MatchDomain})
+ when ?IS_ACTION(Action) andalso
+ (Compare==super orelse
+ Compare==sub orelse
+ Compare==equal orelse
+ Compare==not_equal orelse
+ Compare==undefined) andalso
+ is_list(MatchDomain) ->
+ filter_domain(Compare,Meta,MatchDomain,on_match(Action,LogEvent));
+domain(LogEvent,Extra) ->
+ erlang:error(badarg,[LogEvent,Extra]).
+
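+%% Example (illustrative): stop all events whose domain begins with
+%% [otp,sasl], i.e. SASL reports, on the default handler:
+%%   logger:add_handler_filter(default, stop_sasl,
+%%       {fun logger_filters:domain/2, {stop, sub, [otp,sasl]}}).
+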
+-spec level(LogEvent,Extra) -> logger:filter_return() when
+ LogEvent :: logger:log_event(),
+ Extra :: {Action,Operator,MatchLevel},
+ Action :: log | stop,
+ Operator :: neq | eq | lt | gt | lteq | gteq,
+ MatchLevel :: logger:level().
+level(#{level:=L1}=LogEvent,{Action,Op,L2})
+ when ?IS_ACTION(Action) andalso
+ (Op==neq orelse
+ Op==eq orelse
+ Op==lt orelse
+ Op==gt orelse
+ Op==lteq orelse
+ Op==gteq) andalso
+ ?IS_LEVEL(L2) ->
+ filter_level(Op,L1,L2,on_match(Action,LogEvent));
+level(LogEvent,Extra) ->
+ erlang:error(badarg,[LogEvent,Extra]).
+
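+%% Example (illustrative): stop all events less severe than error, so that
+%% only error, critical, alert and emergency pass this filter:
+%%   logger:add_handler_filter(default, error_only,
+%%       {fun logger_filters:level/2, {stop, lt, error}}).
+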
+-spec progress(LogEvent,Extra) -> logger:filter_return() when
+ LogEvent :: logger:log_event(),
+ Extra :: log | stop.
+progress(LogEvent,Action) when ?IS_ACTION(Action) ->
+ filter_progress(LogEvent,on_match(Action,LogEvent));
+progress(LogEvent,Action) ->
+ erlang:error(badarg,[LogEvent,Action]).
+
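+%% Example (illustrative): drop supervisor and application progress reports:
+%%   logger:add_handler_filter(default, stop_progress,
+%%       {fun logger_filters:progress/2, stop}).
+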
+-spec remote_gl(LogEvent,Extra) -> logger:filter_return() when
+ LogEvent :: logger:log_event(),
+ Extra :: log | stop.
+remote_gl(LogEvent,Action) when ?IS_ACTION(Action) ->
+ filter_remote_gl(LogEvent,on_match(Action,LogEvent));
+remote_gl(LogEvent,Action) ->
+ erlang:error(badarg,[LogEvent,Action]).
+
+%%%-----------------------------------------------------------------
+%%% Internal
+filter_domain(super,#{domain:=Domain},MatchDomain,OnMatch) ->
+ is_prefix(Domain,MatchDomain,OnMatch);
+filter_domain(sub,#{domain:=Domain},MatchDomain,OnMatch) ->
+ is_prefix(MatchDomain,Domain,OnMatch);
+filter_domain(equal,#{domain:=Domain},Domain,OnMatch) ->
+ OnMatch;
+filter_domain(not_equal,#{domain:=Domain},MatchDomain,OnMatch)
+ when Domain=/=MatchDomain ->
+ OnMatch;
+filter_domain(Compare,Meta,_,OnMatch) ->
+ case maps:is_key(domain,Meta) of
+ false when Compare==undefined; Compare==not_equal -> OnMatch;
+ _ -> ignore
+ end.
+
+is_prefix(D1,D2,OnMatch) when is_list(D1), is_list(D2) ->
+ case lists:prefix(D1,D2) of
+ true -> OnMatch;
+ false -> ignore
+ end;
+is_prefix(_,_,_) ->
+ ignore.
+
+filter_level(Op,L1,L2,OnMatch) ->
+ case logger:compare_levels(L1,L2) of
+ eq when Op==eq; Op==lteq; Op==gteq -> OnMatch;
+ lt when Op==lt; Op==lteq; Op==neq -> OnMatch;
+ gt when Op==gt; Op==gteq; Op==neq -> OnMatch;
+ _ -> ignore
+ end.
+
+filter_progress(#{msg:={report,#{label:={_,progress}}}},OnMatch) ->
+ OnMatch;
+filter_progress(_,_) ->
+ ignore.
+
+filter_remote_gl(#{meta:=#{gl:=GL}},OnMatch) when node(GL)=/=node() ->
+ OnMatch;
+filter_remote_gl(_,_) ->
+ ignore.
+
+on_match(log,LogEvent) -> LogEvent;
+on_match(stop,_) -> stop.
diff --git a/lib/kernel/src/logger_formatter.erl b/lib/kernel/src/logger_formatter.erl
new file mode 100644
index 0000000000..8696adbd72
--- /dev/null
+++ b/lib/kernel/src/logger_formatter.erl
@@ -0,0 +1,563 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_formatter).
+
+-export([format/2]).
+-export([check_config/1]).
+
+-include("logger_internal.hrl").
+
+%%%-----------------------------------------------------------------
+%%% Types
+-type config() :: #{chars_limit => pos_integer() | unlimited,
+ depth => pos_integer() | unlimited,
+ legacy_header => boolean(),
+ max_size => pos_integer() | unlimited,
+ report_cb => logger:report_cb(),
+ single_line => boolean(),
+ template => template(),
+ time_designator => byte(),
+ time_offset => integer() | [byte()]}.
+-type template() :: [metakey() | {metakey(),template(),template()} | string()].
+-type metakey() :: atom() | [atom()].
+
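+%% Example of a config() map (illustrative values only):
+%%   #{single_line => false,
+%%     legacy_header => true,
+%%     template => [time," ",level,": ",msg,"\n"]}
+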
+%%%-----------------------------------------------------------------
+%%% API
+-spec format(LogEvent,Config) -> unicode:chardata() when
+ LogEvent :: logger:log_event(),
+ Config :: config().
+format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0)
+ when is_map(Config0) ->
+ Config = add_default_config(Config0),
+ Meta1 = maybe_add_legacy_header(Level,Meta,Config),
+ Template = maps:get(template,Config),
+ {BT,AT0} = lists:splitwith(fun(msg) -> false; (_) -> true end, Template),
+ {DoMsg,AT} =
+ case AT0 of
+ [msg|Rest] -> {true,Rest};
+ _ -> {false,AT0}
+ end,
+ B = do_format(Level,Meta1,BT,Config),
+ A = do_format(Level,Meta1,AT,Config),
+ MsgStr =
+ if DoMsg ->
+ Config1 =
+ case maps:get(chars_limit,Config) of
+ unlimited ->
+ Config;
+ Size0 ->
+ Size =
+ case Size0 - io_lib:chars_length([B,A]) of
+ S when S>=0 -> S;
+ _ -> 0
+ end,
+ Config#{chars_limit=>Size}
+ end,
+ MsgStr0 = format_msg(Msg0,Meta1,Config1),
+ case maps:get(single_line,Config) of
+ true ->
+ %% Trim leading and trailing whitespaces, and replace
+ %% newlines with ", "
+ T = lists:reverse(
+ trim(
+ lists:reverse(
+ trim(MsgStr0,false)),true)),
+ re:replace(T,",?\r?\n\s*",", ",
+ [{return,list},global,unicode]);
+ _false ->
+ MsgStr0
+ end;
+ true ->
+ ""
+ end,
+ truncate(B,MsgStr,A,maps:get(max_size,Config)).
+
+trim([H|T],Rev) when H==$\s; H==$\r; H==$\n ->
+ trim(T,Rev);
+trim([H|T],false) when is_list(H) ->
+ case trim(H,false) of
+ [] ->
+ trim(T,false);
+ TrimmedH ->
+ [TrimmedH|T]
+ end;
+trim([H|T],true) when is_list(H) ->
+ case trim(lists:reverse(H),true) of
+ [] ->
+ trim(T,true);
+ TrimmedH ->
+ [lists:reverse(TrimmedH)|T]
+ end;
+trim(String,_) ->
+ String.
+
+do_format(Level,Data,[level|Format],Config) ->
+ [to_string(level,Level,Config)|do_format(Level,Data,Format,Config)];
+do_format(Level,Data,[{Key,IfExist,Else}|Format],Config) ->
+ String =
+ case value(Key,Data) of
+ {ok,Value} -> do_format(Level,Data#{Key=>Value},IfExist,Config);
+ error -> do_format(Level,Data,Else,Config)
+ end,
+ [String|do_format(Level,Data,Format,Config)];
+do_format(Level,Data,[Key|Format],Config)
+ when is_atom(Key) orelse
+ (is_list(Key) andalso is_atom(hd(Key))) ->
+ String =
+ case value(Key,Data) of
+ {ok,Value} -> to_string(Key,Value,Config);
+ error -> ""
+ end,
+ [String|do_format(Level,Data,Format,Config)];
+do_format(Level,Data,[Str|Format],Config) ->
+ [Str|do_format(Level,Data,Format,Config)];
+do_format(_Level,_Data,[],_Config) ->
+ [].
+
+value(Key,Meta) when is_map_key(Key,Meta) ->
+ {ok,maps:get(Key,Meta)};
+value([Key|Keys],Meta) when is_map_key(Key,Meta) ->
+ value(Keys,maps:get(Key,Meta));
+value([],Value) ->
+ {ok,Value};
+value(_,_) ->
+ error.
+
+to_string(time,Time,Config) ->
+ format_time(Time,Config);
+to_string(mfa,MFA,Config) ->
+ format_mfa(MFA,Config);
+to_string(_,Value,Config) ->
+ to_string(Value,Config).
+
+to_string(X,_) when is_atom(X) ->
+ atom_to_list(X);
+to_string(X,_) when is_integer(X) ->
+ integer_to_list(X);
+to_string(X,_) when is_pid(X) ->
+ pid_to_list(X);
+to_string(X,_) when is_reference(X) ->
+ ref_to_list(X);
+to_string(X,Config) when is_list(X) ->
+ case printable_list(lists:flatten(X)) of
+ true -> X;
+ _ -> io_lib:format(p(Config),[X])
+ end;
+to_string(X,Config) ->
+ io_lib:format(p(Config),[X]).
+
+printable_list([]) ->
+ false;
+printable_list(X) ->
+ io_lib:printable_list(X).
+
+format_msg({string,Chardata},Meta,Config) ->
+ format_msg({"~ts",[Chardata]},Meta,Config);
+format_msg({report,_}=Msg,Meta,#{report_cb:=Fun}=Config)
+ when is_function(Fun,1); is_function(Fun,2) ->
+ format_msg(Msg,Meta#{report_cb=>Fun},maps:remove(report_cb,Config));
+format_msg({report,Report},#{report_cb:=Fun}=Meta,Config) when is_function(Fun,1) ->
+ try Fun(Report) of
+ {Format,Args} when is_list(Format), is_list(Args) ->
+ format_msg({Format,Args},maps:remove(report_cb,Meta),Config);
+ Other ->
+ P = p(Config),
+ format_msg({"REPORT_CB/1 ERROR: "++P++"; Returned: "++P,
+ [Report,Other]},Meta,Config)
+ catch C:R:S ->
+ P = p(Config),
+ format_msg({"REPORT_CB/1 CRASH: "++P++"; Reason: "++P,
+ [Report,{C,R,logger:filter_stacktrace(?MODULE,S)}]},
+ Meta,Config)
+ end;
+format_msg({report,Report},#{report_cb:=Fun}=Meta,Config) when is_function(Fun,2) ->
+ try Fun(Report,maps:with([depth,chars_limit,single_line],Config)) of
+ Chardata when ?IS_STRING(Chardata) ->
+ try chardata_to_list(Chardata) % already size limited by report_cb
+ catch _:_ ->
+ P = p(Config),
+ format_msg({"REPORT_CB/2 ERROR: "++P++"; Returned: "++P,
+ [Report,Chardata]},Meta,Config)
+ end;
+ Other ->
+ P = p(Config),
+ format_msg({"REPORT_CB/2 ERROR: "++P++"; Returned: "++P,
+ [Report,Other]},Meta,Config)
+ catch C:R:S ->
+ P = p(Config),
+ format_msg({"REPORT_CB/2 CRASH: "++P++"; Reason: "++P,
+ [Report,{C,R,logger:filter_stacktrace(?MODULE,S)}]},
+ Meta,Config)
+ end;
+format_msg({report,Report},Meta,Config) ->
+ format_msg({report,Report},
+ Meta#{report_cb=>fun logger:format_report/1},
+ Config);
+format_msg(Msg,_Meta,#{depth:=Depth,chars_limit:=CharsLimit,
+ single_line:=Single}) ->
+ Opts = chars_limit_to_opts(CharsLimit),
+ format_msg(Msg, Depth, Opts, Single).
+
+chars_limit_to_opts(unlimited) -> [];
+chars_limit_to_opts(CharsLimit) -> [{chars_limit,CharsLimit}].
+
+format_msg({Format0,Args},Depth,Opts,Single) ->
+ try
+ Format1 = io_lib:scan_format(Format0, Args),
+ Format = reformat(Format1, Depth, Single),
+ io_lib:build_text(Format,Opts)
+ catch C:R:S ->
+ P = p(Single),
+ FormatError = "FORMAT ERROR: "++P++" - "++P,
+ case Format0 of
+ FormatError ->
+ %% already been here - avoid failing cyclically
+ erlang:raise(C,R,S);
+ _ ->
+ format_msg({FormatError,[Format0,Args]},Depth,Opts,Single)
+ end
+ end.
+
+reformat(Format,unlimited,false) ->
+ Format;
+reformat([#{control_char:=C}=M|T], Depth, true) when C =:= $p ->
+ [limit_depth(M#{width => 0}, Depth)|reformat(T, Depth, true)];
+reformat([#{control_char:=C}=M|T], Depth, true) when C =:= $P ->
+ [M#{width => 0}|reformat(T, Depth, true)];
+reformat([#{control_char:=C}=M|T], Depth, Single) when C =:= $p; C =:= $w ->
+ [limit_depth(M, Depth)|reformat(T, Depth, Single)];
+reformat([H|T], Depth, Single) ->
+ [H|reformat(T, Depth, Single)];
+reformat([], _, _) ->
+ [].
+
+limit_depth(M0, unlimited) ->
+ M0;
+limit_depth(#{control_char:=C0, args:=Args}=M0, Depth) ->
+ C = C0 - ($a - $A), %To uppercase.
+ M0#{control_char:=C,args:=Args++[Depth]}.
+
+chardata_to_list(Chardata) ->
+ case unicode:characters_to_list(Chardata,unicode) of
+ List when is_list(List) ->
+ List;
+ Error ->
+ throw(Error)
+ end.
+
+truncate(B,Msg,A,unlimited) ->
+ [B,Msg,A];
+truncate(B,Msg,A,Size) ->
+ String = [B,Msg,A],
+ Length = io_lib:chars_length(String),
+ if Length>Size ->
+ {Last,FlatString} =
+ case A of
+ [] ->
+ case Msg of
+ [] ->
+ {get_last(B),lists:flatten(B)};
+ _ ->
+ {get_last(Msg),lists:flatten([B,Msg])}
+ end;
+ _ ->
+ {get_last(A),lists:flatten(String)}
+ end,
+ case Last of
+ $\n->
+ lists:sublist(FlatString,1,Size-4)++"...\n";
+ _ ->
+ lists:sublist(FlatString,1,Size-3)++"..."
+ end;
+ true ->
+ String
+ end.
+
+get_last(L) ->
+ get_first(lists:reverse(L)).
+
+get_first([]) ->
+ error;
+get_first([C|_]) when is_integer(C) ->
+ C;
+get_first([L|Rest]) when is_list(L) ->
+ case get_last(L) of
+ error -> get_first(Rest);
+ First -> First
+ end.
+
+%% SysTime is the system time in microseconds
+format_time(SysTime,#{time_offset:=Offset,time_designator:=Des})
+ when is_integer(SysTime) ->
+ calendar:system_time_to_rfc3339(SysTime,[{unit,microsecond},
+ {offset,Offset},
+ {time_designator,Des}]).
+
+%% SysTime is the system time in microseconds
+timestamp_to_datetimemicro(SysTime,Config) when is_integer(SysTime) ->
+ Micro = SysTime rem 1000000,
+ Sec = SysTime div 1000000,
+ UniversalTime = erlang:posixtime_to_universaltime(Sec),
+ {{Date,Time},UtcStr} =
+ case offset_to_utc(maps:get(time_offset,Config)) of
+ true -> {UniversalTime,"UTC "};
+ _ -> {erlang:universaltime_to_localtime(UniversalTime),""}
+ end,
+ {Date,Time,Micro,UtcStr}.
+
+format_mfa({M,F,A},_) when is_atom(M), is_atom(F), is_integer(A) ->
+ atom_to_list(M)++":"++atom_to_list(F)++"/"++integer_to_list(A);
+format_mfa({M,F,A},Config) when is_atom(M), is_atom(F), is_list(A) ->
+ format_mfa({M,F,length(A)},Config);
+format_mfa(MFA,Config) ->
+ to_string(MFA,Config).
+
+maybe_add_legacy_header(Level,
+ #{time:=Timestamp}=Meta,
+ #{legacy_header:=true}=Config) ->
+ #{title:=Title}=MyMeta = add_legacy_title(Level,Meta,Config),
+ {{Y,Mo,D},{H,Mi,S},Micro,UtcStr} =
+ timestamp_to_datetimemicro(Timestamp,Config),
+ Header =
+ io_lib:format("=~ts==== ~w-~s-~4w::~2..0w:~2..0w:~2..0w.~6..0w ~s===",
+ [Title,D,month(Mo),Y,H,Mi,S,Micro,UtcStr]),
+ Meta#{?MODULE=>MyMeta#{header=>Header}};
+maybe_add_legacy_header(_,Meta,_) ->
+ Meta.
+
+add_legacy_title(_Level,#{?MODULE:=#{title:=_}=MyMeta},_) ->
+ MyMeta;
+add_legacy_title(Level,Meta,Config) ->
+ case maps:get(?MODULE,Meta,#{}) of
+ #{title:=_}=MyMeta ->
+ MyMeta;
+ MyMeta ->
+ TitleLevel =
+ case (Level=:=notice andalso maps:find(error_logger,Meta)) of
+ {ok,_} ->
+ maps:get(error_logger_notice_header,Config);
+ _ ->
+ Level
+ end,
+ Title = string:uppercase(atom_to_list(TitleLevel)) ++ " REPORT",
+ MyMeta#{title=>Title}
+ end.
+
+month(1) -> "Jan";
+month(2) -> "Feb";
+month(3) -> "Mar";
+month(4) -> "Apr";
+month(5) -> "May";
+month(6) -> "Jun";
+month(7) -> "Jul";
+month(8) -> "Aug";
+month(9) -> "Sep";
+month(10) -> "Oct";
+month(11) -> "Nov";
+month(12) -> "Dec".
+
+%% Ensure that all valid configuration parameters exist in the final
+%% configuration map
+add_default_config(Config0) ->
+ Default =
+ #{chars_limit=>unlimited,
+ error_logger_notice_header=>info,
+ legacy_header=>false,
+ single_line=>true,
+ time_designator=>$T},
+ MaxSize = get_max_size(maps:get(max_size,Config0,undefined)),
+ Depth = get_depth(maps:get(depth,Config0,undefined)),
+ Offset = get_offset(maps:get(time_offset,Config0,undefined)),
+ add_default_template(maps:merge(Default,Config0#{max_size=>MaxSize,
+ depth=>Depth,
+ time_offset=>Offset})).
+
+add_default_template(#{template:=_}=Config) ->
+ Config;
+add_default_template(Config) ->
+ Config#{template=>default_template(Config)}.
+
+default_template(#{legacy_header:=true}) ->
+ ?DEFAULT_FORMAT_TEMPLATE_HEADER;
+default_template(#{single_line:=true}) ->
+ ?DEFAULT_FORMAT_TEMPLATE_SINGLE;
+default_template(_) ->
+ ?DEFAULT_FORMAT_TEMPLATE.
+
+get_max_size(undefined) ->
+ unlimited;
+get_max_size(S) ->
+ max(10,S).
+
+get_depth(undefined) ->
+ error_logger:get_format_depth();
+get_depth(S) ->
+ max(5,S).
+
+get_offset(undefined) ->
+ utc_to_offset(get_utc_config());
+get_offset(Offset) ->
+ Offset.
+
+utc_to_offset(true) ->
+ "Z";
+utc_to_offset(false) ->
+ "".
+
+get_utc_config() ->
+ %% The SASL utc_log setting overrides the stdlib setting, in order
+ %% to get uniform timestamps in log messages
+ case application:get_env(sasl, utc_log) of
+ {ok, Val} when is_boolean(Val) -> Val;
+ _ ->
+ case application:get_env(stdlib, utc_log) of
+ {ok, Val} when is_boolean(Val) -> Val;
+ _ -> false
+ end
+ end.
+
+offset_to_utc(Z) when Z=:=0; Z=:="z"; Z=:="Z" ->
+ true;
+offset_to_utc([$+|Tz]) ->
+ case io_lib:fread("~d:~d", Tz) of
+ {ok, [0, 0], []} ->
+ true;
+ _ ->
+ false
+ end;
+offset_to_utc(_) ->
+ false.
+
+-spec check_config(Config) -> ok | {error,term()} when
+ Config :: config().
+check_config(Config) when is_map(Config) ->
+ do_check_config(maps:to_list(Config));
+check_config(Config) ->
+ {error,{invalid_formatter_config,?MODULE,Config}}.
+
+do_check_config([{Type,L}|Config]) when Type == chars_limit;
+ Type == depth;
+ Type == max_size ->
+ case check_limit(L) of
+ ok -> do_check_config(Config);
+ error -> {error,{invalid_formatter_config,?MODULE,{Type,L}}}
+ end;
+do_check_config([{single_line,SL}|Config]) when is_boolean(SL) ->
+ do_check_config(Config);
+do_check_config([{legacy_header,LH}|Config]) when is_boolean(LH) ->
+ do_check_config(Config);
+do_check_config([{error_logger_notice_header,ELNH}|Config]) when ELNH == info;
+ ELNH == notice ->
+ do_check_config(Config);
+do_check_config([{report_cb,RCB}|Config]) when is_function(RCB,1);
+ is_function(RCB,2) ->
+ do_check_config(Config);
+do_check_config([{template,T}|Config]) ->
+ case check_template(T) of
+ ok -> do_check_config(Config);
+ error -> {error,{invalid_formatter_template,?MODULE,T}}
+ end;
+do_check_config([{time_offset,Offset}|Config]) ->
+ case check_offset(Offset) of
+ ok ->
+ do_check_config(Config);
+ error ->
+ {error,{invalid_formatter_config,?MODULE,{time_offset,Offset}}}
+ end;
+do_check_config([{time_designator,Char}|Config]) when Char>=0, Char=<255 ->
+ case io_lib:printable_latin1_list([Char]) of
+ true ->
+ do_check_config(Config);
+ false ->
+ {error,{invalid_formatter_config,?MODULE,{time_designator,Char}}}
+ end;
+do_check_config([C|_]) ->
+ {error,{invalid_formatter_config,?MODULE,C}};
+do_check_config([]) ->
+ ok.
+
+check_limit(L) when is_integer(L), L>0 ->
+ ok;
+check_limit(unlimited) ->
+ ok;
+check_limit(_) ->
+ error.
+
+check_template([Key|T]) when is_atom(Key) ->
+ check_template(T);
+check_template([Key|T]) when is_list(Key), is_atom(hd(Key)) ->
+ case lists:all(fun(X) when is_atom(X) -> true;
+ (_) -> false
+ end,
+ Key) of
+ true ->
+ check_template(T);
+ false ->
+ error
+ end;
+check_template([{Key,IfExist,Else}|T])
+ when is_atom(Key) orelse
+ (is_list(Key) andalso is_atom(hd(Key))) ->
+ case check_template(IfExist) of
+ ok ->
+ case check_template(Else) of
+ ok ->
+ check_template(T);
+ error ->
+ error
+ end;
+ error ->
+ error
+ end;
+check_template([Str|T]) when is_list(Str) ->
+ case io_lib:printable_unicode_list(Str) of
+ true -> check_template(T);
+ false -> error
+ end;
+check_template([]) ->
+ ok;
+check_template(_) ->
+ error.
+
+check_offset(I) when is_integer(I) ->
+ ok;
+check_offset(Tz) when Tz=:=""; Tz=:="Z"; Tz=:="z" ->
+ ok;
+check_offset([Sign|Tz]) when Sign=:=$+; Sign=:=$- ->
+ check_timezone(Tz);
+check_offset(_) ->
+ error.
+
+check_timezone(Tz) ->
+ try io_lib:fread("~d:~d", Tz) of
+ {ok, [_, _], []} ->
+ ok;
+ _ ->
+ error
+ catch _:_ ->
+ error
+ end.
+
+p(#{single_line:=Single}) ->
+ p(Single);
+p(true) ->
+ "~0tp";
+p(false) ->
+ "~tp".
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl
new file mode 100644
index 0000000000..16946ff97c
--- /dev/null
+++ b/lib/kernel/src/logger_h_common.erl
@@ -0,0 +1,463 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_h_common).
+-behaviour(gen_server).
+
+-include("logger_h_common.hrl").
+-include("logger_internal.hrl").
+
+%% API
+-export([filesync/2]).
+
+%% logger_olp callbacks
+-export([init/1, handle_load/2, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3, notify/2, reset_state/1]).
+
+%% logger callbacks
+-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
+ filter_config/1]).
+
+%% Library functions for handlers
+-export([error_notify/1]).
+
+-define(OLP_KEYS,[sync_mode_qlen,
+ drop_mode_qlen,
+ flush_qlen,
+ burst_limit_enable,
+ burst_limit_max_count,
+ burst_limit_window_time,
+ overload_kill_enable,
+ overload_kill_qlen,
+ overload_kill_mem_size,
+ overload_kill_restart_after]).
+
+-define(COMMON_KEYS,[filesync_repeat_interval]).
+
+-define(READ_ONLY_KEYS,[olp]).
+
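+%% Illustrative handler config (values assumed): the 'config' map given to
+%% logger:add_handler/3 may mix handler-specific, olp and common keys, e.g.
+%%   #{file => "log/app.log",                % handler-specific (disk_log)
+%%     sync_mode_qlen => 100,                % olp key
+%%     filesync_repeat_interval => 10000}    % common key
+%% adding_handler/1 below splits these groups before validating them.
+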
+%%%-----------------------------------------------------------------
+%%% API
+
+%% This function is called from the handler modules' filesync/1 API
+%% functions (e.g. logger_disk_log_h:filesync/1)
+filesync(Module, Name) ->
+ call(Module, Name, filesync).
+
+%%%-----------------------------------------------------------------
+%%% Handler being added
+adding_handler(#{id:=Name,module:=Module}=Config) ->
+ HConfig0 = maps:get(config, Config, #{}),
+ HandlerConfig0 = maps:without(?OLP_KEYS++?COMMON_KEYS,HConfig0),
+ case Module:check_config(Name,set,undefined,HandlerConfig0) of
+ {ok,HandlerConfig} ->
+ ModifiedCommon = maps:with(?COMMON_KEYS,HandlerConfig),
+ CommonConfig0 = maps:with(?COMMON_KEYS,HConfig0),
+ CommonConfig = maps:merge(
+ maps:merge(get_default_config(), CommonConfig0),
+ ModifiedCommon),
+ case check_config(CommonConfig) of
+ ok ->
+ HConfig = maps:merge(CommonConfig,HandlerConfig),
+ OlpOpts = maps:with(?OLP_KEYS,HConfig0),
+ start(OlpOpts, Config#{config => HConfig});
+ {error,Faulty} ->
+ {error,{invalid_config,Module,Faulty}}
+ end;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(#{id:=Name, module:=Module, config:=#{olp:=Olp}}) ->
+ case whereis(?name_to_reg_name(Module,Name)) of
+ undefined ->
+ ok;
+ _Pid ->
+ %% We don't want to do supervisor:terminate_child here
+ %% since we need to distinguish this explicit stop from a
+ %% system termination in order to avoid circular attempts
+ %% at removing the handler (implying deadlocks and
+ %% timeouts).
+ %% And we don't need to do supervisor:delete_child, since
+ %% the restart type is temporary, which means that the
+ %% child specification is automatically removed from the
+ %% supervisor when the process dies.
+ _ = logger_olp:stop(Olp),
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(SetOrUpdate,
+ #{id:=Name,config:=OldHConfig,module:=Module},
+ NewConfig0) ->
+ NewHConfig0 = maps:get(config, NewConfig0, #{}),
+ NoHandlerKeys = ?OLP_KEYS++?COMMON_KEYS++?READ_ONLY_KEYS,
+ OldHandlerConfig = maps:without(NoHandlerKeys,OldHConfig),
+ NewHandlerConfig0 = maps:without(NoHandlerKeys,NewHConfig0),
+ case Module:check_config(Name, SetOrUpdate,
+ OldHandlerConfig,NewHandlerConfig0) of
+ {ok, NewHandlerConfig} ->
+ ModifiedCommon = maps:with(?COMMON_KEYS,NewHandlerConfig),
+ NewCommonConfig0 = maps:with(?COMMON_KEYS,NewHConfig0),
+ OldCommonConfig = maps:with(?COMMON_KEYS,OldHConfig),
+ CommonDefault =
+ case SetOrUpdate of
+ set ->
+ get_default_config();
+ update ->
+ OldCommonConfig
+ end,
+ NewCommonConfig = maps:merge(
+ maps:merge(CommonDefault,NewCommonConfig0),
+ ModifiedCommon),
+ case check_config(NewCommonConfig) of
+ ok ->
+ OlpDefault =
+ case SetOrUpdate of
+ set ->
+ logger_olp:get_default_opts();
+ update ->
+ maps:with(?OLP_KEYS,OldHConfig)
+ end,
+ Olp = maps:get(olp,OldHConfig),
+ NewOlpOpts = maps:merge(OlpDefault,
+ maps:with(?OLP_KEYS,NewHConfig0)),
+ case logger_olp:set_opts(Olp,NewOlpOpts) of
+ ok ->
+ logger_olp:cast(Olp, {config_changed,
+ NewCommonConfig,
+ NewHandlerConfig}),
+ ReadOnly = maps:with(?READ_ONLY_KEYS,OldHConfig),
+ NewHConfig =
+ maps:merge(
+ maps:merge(
+ maps:merge(NewCommonConfig,NewHandlerConfig),
+ ReadOnly),
+ NewOlpOpts),
+ NewConfig = NewConfig0#{config=>NewHConfig},
+ {ok,NewConfig};
+ Error ->
+ Error
+ end;
+ {error,Faulty} ->
+ {error,{invalid_config,Module,Faulty}}
+ end;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(LogEvent, Config) -> ok when
+ LogEvent :: logger:log_event(),
+ Config :: logger:handler_config().
+
+log(LogEvent, Config = #{config := #{olp:=Olp}}) ->
+ %% if the handler has crashed, we must drop this event
+ %% and hope the handler restarts so we can try again
+ true = is_process_alive(logger_olp:get_pid(Olp)),
+ Bin = log_to_binary(LogEvent, Config),
+ logger_olp:load(Olp,Bin).
+
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+filter_config(#{config:=HConfig}=Config) ->
+ Config#{config=>maps:without(?READ_ONLY_KEYS,HConfig)}.
+
+%%%-----------------------------------------------------------------
+%%% Start the handler process
+%%%
+%%% The process must always exist if the handler is registered with
+%%% logger (and must not exist if the handler is not registered).
+%%%
+%%% The handler process is linked to logger_sup, which is part of the
+%%% kernel application's supervision tree.
+start(OlpOpts0, #{id := Name, module:=Module, config:=HConfig} = Config0) ->
+ RegName = ?name_to_reg_name(Module,Name),
+ ChildSpec =
+ #{id => Name,
+ start => {logger_olp, start_link, [RegName,?MODULE,
+ Config0, OlpOpts0]},
+ restart => temporary,
+ shutdown => 2000,
+ type => worker,
+ modules => [?MODULE]},
+ case supervisor:start_child(logger_sup, ChildSpec) of
+ {ok,Pid,Olp} ->
+ ok = logger_handler_watcher:register_handler(Name,Pid),
+ OlpOpts = logger_olp:get_opts(Olp),
+ {ok,Config0#{config=>(maps:merge(HConfig,OlpOpts))#{olp=>Olp}}};
+ {error,{Reason,Ch}} when is_tuple(Ch), element(1,Ch)==child ->
+ {error,Reason};
+ Error ->
+ Error
+ end.
+
+%%%===================================================================
+%%% logger_olp callbacks
+%%%===================================================================
+
+init(#{id := Name, module := Module, config := HConfig}) ->
+ process_flag(trap_exit, true),
+
+ ?init_test_hooks(),
+
+ case Module:init(Name, HConfig) of
+ {ok,HState} ->
+ %% Storing common config in state to avoid copying
+ %% (sending) the config data for each log message
+ CommonConfig = maps:with(?COMMON_KEYS,HConfig),
+ State = CommonConfig#{id => Name,
+ module => Module,
+ ctrl_sync_count =>
+ ?CONTROLLER_SYNC_INTERVAL,
+ last_op => sync,
+ handler_state => HState},
+ State1 = set_repeated_filesync(State),
+ {ok,State1};
+ Error ->
+ Error
+ end.
+
+%% This is the log event.
+handle_load(Bin, #{id:=Name,
+ module:=Module,
+ handler_state:=HandlerState,
+ ctrl_sync_count := CtrlSync}=State) ->
+ if CtrlSync==0 ->
+ {_,HS1} = Module:write(Name, sync, Bin, HandlerState),
+ State#{handler_state => HS1,
+ ctrl_sync_count => ?CONTROLLER_SYNC_INTERVAL,
+ last_op=>write};
+ true ->
+ {_,HS1} = Module:write(Name, async, Bin, HandlerState),
+ State#{handler_state => HS1,
+ ctrl_sync_count => CtrlSync-1,
+ last_op=>write}
+ end.
+
+handle_call(filesync, _From, State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState}) ->
+ {Result,HandlerState1} = Module:filesync(Name,sync,HandlerState),
+ {reply, Result, State#{handler_state=>HandlerState1, last_op=>sync}}.
+
+%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this
+%% clause gets called repeatedly by the handler. In order to
+%% guarantee that a filesync *always* happens after the last log
+%% event, the repeat operation must be active!
+handle_cast(repeated_filesync,State = #{filesync_repeat_interval := no_repeat}) ->
+ %% This clause handles a race condition which may occur when
+ %% config changes filesync_repeat_interval from an integer value
+ %% to no_repeat.
+ {noreply,State};
+handle_cast(repeated_filesync,
+ State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState,
+ last_op := LastOp}) ->
+ State1 =
+ if LastOp == sync ->
+ State;
+ true ->
+ {_,HS} = Module:filesync(Name, async, HandlerState),
+ State#{handler_state => HS, last_op => sync}
+ end,
+ {noreply,set_repeated_filesync(State1)};
+handle_cast({config_changed, CommonConfig, HConfig},
+ State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState,
+ filesync_repeat_interval := OldFSyncInt}) ->
+ State1 =
+ case maps:get(filesync_repeat_interval,CommonConfig) of
+ OldFSyncInt ->
+ State;
+ FSyncInt ->
+ set_repeated_filesync(
+ cancel_repeated_filesync(
+ State#{filesync_repeat_interval=>FSyncInt}))
+ end,
+ HS = try Module:config_changed(Name, HConfig, HandlerState)
+ catch error:undef -> HandlerState
+ end,
+ {noreply, State1#{handler_state => HS}}.
+
+handle_info(Info, #{id := Name, module := Module,
+ handler_state := HandlerState} = State) ->
+ {noreply,State#{handler_state => Module:handle_info(Name,Info,HandlerState)}}.
+
+terminate(overloaded=Reason, #{id:=Name}=State) ->
+ _ = log_handler_info(Name,"Handler ~p overloaded and stopping",[Name],State),
+ do_terminate(Reason,State),
+ ConfigResult = logger:get_handler_config(Name),
+ case ConfigResult of
+ {ok,#{module:=Module}=HConfig0} ->
+ spawn(fun() -> logger:remove_handler(Name) end),
+ HConfig = try Module:filter_config(HConfig0)
+ catch _:_ -> HConfig0
+ end,
+ {ok,fun() -> logger:add_handler(Name,Module,HConfig) end};
+ Error ->
+ error_notify({Name,restart_impossible,Error}),
+ Error
+ end;
+terminate(Reason, State) ->
+ do_terminate(Reason, State).
+
+do_terminate(Reason, State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState}) ->
+ _ = cancel_repeated_filesync(State),
+ _ = Module:terminate(Name, Reason, HandlerState),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+reset_state(#{id:=Name, module:=Module, handler_state:=HandlerState} = State) ->
+ State#{handler_state=>Module:reset_state(Name, HandlerState)}.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+call(Module, Name, Op) when is_atom(Name) ->
+ case logger_olp:call(?name_to_reg_name(Module,Name), Op) of
+ {error,busy} -> {error,handler_busy};
+ Other -> Other
+ end;
+call(_, Name, Op) ->
+ {error,{badarg,{Op,[Name]}}}.
+
+notify({mode_change,Mode0,Mode1},#{id:=Name}=State) ->
+ log_handler_info(Name,"Handler ~p switched from ~p to ~p mode",
+ [Name,Mode0,Mode1], State);
+notify({flushed,Flushed},#{id:=Name}=State) ->
+ log_handler_info(Name, "Handler ~p flushed ~w log events",
+ [Name,Flushed], State);
+notify(restart,#{id:=Name}=State) ->
+ log_handler_info(Name, "Handler ~p restarted", [Name], State);
+notify(idle,#{id:=Name,module:=Module,handler_state:=HandlerState}=State) ->
+ {_,HS} = Module:filesync(Name,async,HandlerState),
+ State#{handler_state=>HS, last_op=>sync}.
+
+log_handler_info(Name, Format, Args, #{module:=Module,
+ handler_state:=HandlerState}=State) ->
+ Config =
+ case logger:get_handler_config(Name) of
+ {ok,Conf} -> Conf;
+ _ -> #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}
+ end,
+ Meta = #{time=>logger:timestamp()},
+ Bin = log_to_binary(#{level => notice,
+ msg => {Format,Args},
+ meta => Meta}, Config),
+ {_,HS} = Module:write(Name, async, Bin, HandlerState),
+ State#{handler_state=>HS, last_op=>write}.
+
+%%%-----------------------------------------------------------------
+%%% Convert log data on any form to binary
+-spec log_to_binary(LogEvent,Config) -> LogString when
+ LogEvent :: logger:log_event(),
+ Config :: logger:handler_config(),
+ LogString :: binary().
+log_to_binary(#{msg:={report,_},meta:=#{report_cb:=_}}=Log,Config) ->
+ do_log_to_binary(Log,Config);
+log_to_binary(#{msg:={report,_},meta:=Meta}=Log,Config) ->
+ DefaultReportCb = fun logger:format_otp_report/1,
+ do_log_to_binary(Log#{meta=>Meta#{report_cb=>DefaultReportCb}},Config);
+log_to_binary(Log,Config) ->
+ do_log_to_binary(Log,Config).
+
+do_log_to_binary(Log,Config) ->
+ {Formatter,FormatterConfig} =
+ maps:get(formatter,Config,{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}),
+ String = try_format(Log,Formatter,FormatterConfig),
+ try string_to_binary(String)
+ catch C2:R2:S2 ->
+ ?LOG_INTERNAL(debug,[{formatter_error,Formatter},
+ {config,FormatterConfig},
+ {log_event,Log},
+ {bad_return_value,String},
+ {catched,{C2,R2,S2}}]),
+ <<"FORMATTER ERROR: bad return value">>
+ end.
+
+try_format(Log,Formatter,FormatterConfig) ->
+ try Formatter:format(Log,FormatterConfig)
+ catch
+ C:R:S ->
+ ?LOG_INTERNAL(debug,[{formatter_crashed,Formatter},
+ {config,FormatterConfig},
+ {log_event,Log},
+ {reason,
+ {C,R,logger:filter_stacktrace(?MODULE,S)}}]),
+ case {?DEFAULT_FORMATTER,#{}} of
+ {Formatter,FormatterConfig} ->
+ "DEFAULT FORMATTER CRASHED";
+ {DefaultFormatter,DefaultConfig} ->
+ try_format(Log#{msg=>{"FORMATTER CRASH: ~tp",
+ [maps:get(msg,Log)]}},
+ DefaultFormatter,DefaultConfig)
+ end
+ end.
+
+string_to_binary(String) ->
+ case unicode:characters_to_binary(String) of
+ Binary when is_binary(Binary) ->
+ Binary;
+ Error ->
+ throw(Error)
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Check that the configuration term is valid
+check_config(Config) when is_map(Config) ->
+ check_common_config(maps:to_list(Config)).
+
+check_common_config([{filesync_repeat_interval,NorA}|Config])
+ when is_integer(NorA); NorA == no_repeat ->
+ check_common_config(Config);
+check_common_config([{Key,Value}|_]) ->
+ {error,#{Key=>Value}};
+check_common_config([]) ->
+ ok.
+
+get_default_config() ->
+ #{filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
+
+set_repeated_filesync(#{filesync_repeat_interval:=FSyncInt} = State)
+ when is_integer(FSyncInt) ->
+ {ok,TRef} = timer:apply_after(FSyncInt, gen_server, cast,
+ [self(),repeated_filesync]),
+ State#{rep_sync_tref=>TRef};
+set_repeated_filesync(State) ->
+ State.
+
+cancel_repeated_filesync(State) ->
+ case maps:take(rep_sync_tref,State) of
+ {TRef,State1} ->
+ _ = timer:cancel(TRef),
+ State1;
+ error ->
+ State
+ end.
+error_notify(Term) ->
+ ?internal_log(error, Term).
diff --git a/lib/kernel/src/logger_h_common.hrl b/lib/kernel/src/logger_h_common.hrl
new file mode 100644
index 0000000000..004a61d9d9
--- /dev/null
+++ b/lib/kernel/src/logger_h_common.hrl
@@ -0,0 +1,127 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2015. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%% The handler sends asynchronous write requests to the process
+%% controlling the i/o device, but one request out of every
+%% CONTROLLER_SYNC_INTERVAL is made synchronous, so that the i/o
+%% device process doesn't get overloaded. This gives the handler
+%% time to keep up with its mailbox in overload situations, even
+%% if the i/o is slow.
+-define(CONTROLLER_SYNC_INTERVAL, 20).
+%% The handler will not perform a file sync operation if the
+%% mailbox size is greater than this number. This is to ensure
+%% the handler process doesn't get overloaded while waiting for
+%% an expensive file sync operation to finish.
+-define(FILESYNC_OK_QLEN, 2).
+%% Do a file/disk_log sync operation every integer() millisec
+%% (if necessary) or set to 'no_repeat' to only do file sync when
+%% the handler is idle. Note that file sync is not guaranteed to
+%% happen automatically if this operation is disabled.
+-define(FILESYNC_REPEAT_INTERVAL, 5000).
+%%-define(FILESYNC_REPEAT_INTERVAL, no_repeat).
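+%% The interval can also be changed per handler at runtime (illustrative):
+%%   logger:update_handler_config(default,
+%%       #{config => #{filesync_repeat_interval => no_repeat}}).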
+
+%% Default disk log option values
+-define(DISK_LOG_TYPE, wrap).
+-define(DISK_LOG_MAX_NO_FILES, 10).
+-define(DISK_LOG_MAX_NO_BYTES, 1048576).
+
+%%%-----------------------------------------------------------------
+%%% Utility macros
+
+-define(name_to_reg_name(MODULE,Name),
+ list_to_atom(lists:concat([MODULE,"_",Name]))).
+
+%%%-----------------------------------------------------------------
+%%% The test hook macros make it possible to observe and manipulate
+%%% internal handler functionality. When enabled, these macros will
+%%% slow down execution and therefore should not be included in code
+%%% to be officially released.
+
+%%-define(TEST_HOOKS, true).
+-ifdef(TEST_HOOKS).
+ -define(TEST_HOOKS_TAB, logger_h_test_hooks).
+
+ -define(init_test_hooks(),
+ _ = case ets:whereis(?TEST_HOOKS_TAB) of
+ undefined -> ets:new(?TEST_HOOKS_TAB, [named_table,public]);
+ _ -> ok
+ end,
+ ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}),
+ ets:insert(?TEST_HOOKS_TAB, {file_write,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_write,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})).
+
+ -define(set_internal_log(MOD_FUNC),
+ ets:insert(?TEST_HOOKS_TAB, {internal_log,MOD_FUNC})).
+
+ -define(set_result(OPERATION, RESULT),
+ ets:insert(?TEST_HOOKS_TAB, {OPERATION,RESULT})).
+
+ -define(set_defaults(),
+ ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}),
+ ets:insert(?TEST_HOOKS_TAB, {file_write,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_write,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})).
+
+ -define(internal_log(TYPE, TERM),
+ try ets:lookup(?TEST_HOOKS_TAB, internal_log) of
+ [{_,{LMOD,LFUNC}}] -> apply(LMOD, LFUNC, [TYPE,TERM]);
+ _ -> logger:internal_log(TYPE, TERM)
+ catch _:_ -> logger:internal_log(TYPE, TERM) end).
+
+ -define(file_write(DEVICE, DATA),
+ try ets:lookup(?TEST_HOOKS_TAB, file_write) of
+ [{_,ok}] -> file:write(DEVICE, DATA);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> file:write(DEVICE, DATA) end).
+
+ -define(file_datasync(DEVICE),
+ try ets:lookup(?TEST_HOOKS_TAB, file_datasync) of
+ [{_,ok}] -> file:datasync(DEVICE);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> file:datasync(DEVICE) end).
+
+ -define(disk_log_write(LOG, MODE, DATA),
+ try ets:lookup(?TEST_HOOKS_TAB, disk_log_write) of
+ [{_,ok}] -> disk_log_write(LOG, MODE, DATA);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> disk_log_write(LOG, MODE, DATA) end).
+
+ -define(disk_log_sync(LOG),
+ try ets:lookup(?TEST_HOOKS_TAB, disk_log_sync) of
+ [{_,ok}] -> disk_log:sync(LOG);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> disk_log:sync(LOG) end).
+
+
+-else. % DEFAULTS!
+ -define(TEST_HOOKS_TAB, undefined).
+ -define(init_test_hooks(), ok).
+ -define(set_internal_log(_MOD_FUNC), ok).
+ -define(set_result(_OPERATION, _RESULT), ok).
+ -define(set_defaults(), ok).
+ -define(internal_log(TYPE, TERM), logger:internal_log(TYPE, TERM)).
+ -define(file_write(DEVICE, DATA), file:write(DEVICE, DATA)).
+ -define(file_datasync(DEVICE), file:datasync(DEVICE)).
+ -define(disk_log_write(LOG, MODE, DATA), disk_log_write(LOG, MODE, DATA)).
+ -define(disk_log_sync(LOG), disk_log:sync(LOG)).
+-endif.
diff --git a/lib/kernel/src/logger_handler_watcher.erl b/lib/kernel/src/logger_handler_watcher.erl
new file mode 100644
index 0000000000..b75c74c643
--- /dev/null
+++ b/lib/kernel/src/logger_handler_watcher.erl
@@ -0,0 +1,113 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_handler_watcher).
+
+-behaviour(gen_server).
+
+%% API
+-export([start_link/0]).
+-export([register_handler/2]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]).
+
+-define(SERVER, ?MODULE).
+
+-record(state, {handlers}).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+-spec start_link() -> {ok, Pid :: pid()} |
+ {error, Error :: {already_started, pid()}} |
+ {error, Error :: term()} |
+ ignore.
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+-spec register_handler(Id::logger:handler_id(),Pid::pid()) -> ok.
+register_handler(Id,Pid) ->
+ gen_server:call(?SERVER,{register,Id,Pid}).
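+
+%% Sketch of typical use: a handler process registers itself from its own
+%% start/init code (my_handler_id is whatever id the handler was added
+%% under), e.g.:
+%%
+%%   ok = logger_handler_watcher:register_handler(my_handler_id, self()).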
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+-spec init(Args :: term()) -> {ok, State :: term()} |
+ {ok, State :: term(), Timeout :: timeout()} |
+ {ok, State :: term(), hibernate} |
+ {stop, Reason :: term()} |
+ ignore.
+init([]) ->
+ process_flag(trap_exit, true),
+ {ok, #state{handlers=[]}}.
+
+-spec handle_call(Request :: term(), From :: {pid(), term()}, State :: term()) ->
+ {reply, Reply :: term(), NewState :: term()} |
+ {reply, Reply :: term(), NewState :: term(), Timeout :: timeout()} |
+ {reply, Reply :: term(), NewState :: term(), hibernate} |
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), Timeout :: timeout()} |
+ {noreply, NewState :: term(), hibernate} |
+ {stop, Reason :: term(), Reply :: term(), NewState :: term()} |
+ {stop, Reason :: term(), NewState :: term()}.
+handle_call({register,Id,Pid}, _From, #state{handlers=Hs}=State) ->
+ Ref = erlang:monitor(process,Pid),
+ Hs1 = lists:keystore(Id,1,Hs,{Id,Ref}),
+ {reply, ok, State#state{handlers=Hs1}}.
+
+-spec handle_cast(Request :: term(), State :: term()) ->
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), Timeout :: timeout()} |
+ {noreply, NewState :: term(), hibernate} |
+ {stop, Reason :: term(), NewState :: term()}.
+handle_cast(_Request, State) ->
+ {noreply, State}.
+
+-spec handle_info(Info :: timeout() | term(), State :: term()) ->
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), Timeout :: timeout()} |
+ {noreply, NewState :: term(), hibernate} |
+ {stop, Reason :: normal | term(), NewState :: term()}.
+handle_info({'DOWN',Ref,process,_,shutdown}, #state{handlers=Hs}=State) ->
+ case lists:keytake(Ref,2,Hs) of
+ {value,{Id,Ref},Hs1} ->
+ %% Probably terminated by supervisor. Remove the handler to avoid
+ %% error printouts due to failing handler.
+ _ = case logger:get_handler_config(Id) of
+ {ok,_} ->
+ logger:remove_handler(Id);
+ _ ->
+ ok
+ end,
+ {noreply,State#state{handlers=Hs1}};
+ false ->
+ {noreply, State}
+ end;
+handle_info({'DOWN',Ref,process,_,_OtherReason}, #state{handlers=Hs}=State) ->
+ {noreply,State#state{handlers=lists:keydelete(Ref,2,Hs)}};
+handle_info(_Other,State) ->
+ {noreply,State}.
+
+-spec terminate(Reason :: normal | shutdown | {shutdown, term()} | term(),
+ State :: term()) -> any().
+terminate(_Reason, _State) ->
+ ok.
diff --git a/lib/kernel/src/logger_internal.hrl b/lib/kernel/src/logger_internal.hrl
new file mode 100644
index 0000000000..e53922e5d3
--- /dev/null
+++ b/lib/kernel/src/logger_internal.hrl
@@ -0,0 +1,104 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-include_lib("kernel/include/logger.hrl").
+-define(LOGGER_TABLE,logger).
+-define(PROXY_KEY,'$proxy_config$').
+-define(PRIMARY_KEY,'$primary_config$').
+-define(HANDLER_KEY,'$handler_config$').
+-define(LOGGER_META_KEY,'$logger_metadata$').
+-define(STANDARD_HANDLER, default).
+-define(DEFAULT_HANDLER_FILTERS,?DEFAULT_HANDLER_FILTERS([otp])).
+-define(DEFAULT_HANDLER_FILTERS(Domain),
+ [{remote_gl,{fun logger_filters:remote_gl/2,stop}},
+ {domain,{fun logger_filters:domain/2,{log,super,Domain}}},
+ {no_domain,{fun logger_filters:domain/2,{log,undefined,[]}}}]).
+-define(DEFAULT_FORMATTER,logger_formatter).
+-define(DEFAULT_FORMAT_CONFIG,#{legacy_header=>true,
+ single_line=>false}).
+-define(DEFAULT_FORMAT_TEMPLATE_HEADER,
+ [[logger_formatter,header],"\n",msg,"\n"]).
+-define(DEFAULT_FORMAT_TEMPLATE_SINGLE,
+ [time," ",level,": ",msg,"\n"]).
+-define(DEFAULT_FORMAT_TEMPLATE,
+ [time," ",level,":\n",msg,"\n"]).
+
+-define(DEFAULT_LOGGER_CALL_TIMEOUT, infinity).
+
+-define(LOG_INTERNAL(Level,Report),?DO_LOG_INTERNAL(Level,[Report])).
+-define(LOG_INTERNAL(Level,Format,Args),?DO_LOG_INTERNAL(Level,[Format,Args])).
+-define(DO_LOG_INTERNAL(Level,Data),
+ case logger:allow(Level,?MODULE) of
+ true ->
+ %% Spawn this to avoid deadlocks
+ _ = spawn(logger,macro_log,[?LOCATION,Level|Data]++
+ [logger:add_default_metadata(#{})]),
+ ok;
+ false ->
+ ok
+ end).
+
+%%%-----------------------------------------------------------------
+%%% Levels
+%%% Using same as syslog
+-define(LEVELS,[none,
+ emergency,
+ alert,
+ critical,
+ error,
+ warning,
+ notice,
+ info,
+ debug,
+ all]).
+-define(LOG_NONE,-1).
+-define(EMERGENCY,0).
+-define(ALERT,1).
+-define(CRITICAL,2).
+-define(ERROR,3).
+-define(WARNING,4).
+-define(NOTICE,5).
+-define(INFO,6).
+-define(DEBUG,7).
+-define(LOG_ALL,10).
+
+-define(IS_LEVEL(L),
+ (L=:=emergency orelse
+ L=:=alert orelse
+ L=:=critical orelse
+ L=:=error orelse
+ L=:=warning orelse
+ L=:=notice orelse
+ L=:=info orelse
+ L=:=debug)).
+
+-define(IS_MSG(Msg),
+ ((is_tuple(Msg) andalso tuple_size(Msg)==2)
+ andalso
+ (is_list(element(1,Msg)) andalso is_list(element(2,Msg)))
+ orelse
+ (element(1,Msg)==report andalso ?IS_REPORT(element(2,Msg)))
+ orelse
+ (element(1,Msg)==string andalso ?IS_STRING(element(2,Msg))))).
+
+-define(IS_REPORT(Report),
+ (is_map(Report) orelse (is_list(Report) andalso is_tuple(hd(Report))))).
+
+-define(IS_STRING(String),
+ (is_list(String) orelse is_binary(String))).
diff --git a/lib/kernel/src/logger_olp.erl b/lib/kernel/src/logger_olp.erl
new file mode 100644
index 0000000000..8365383fe2
--- /dev/null
+++ b/lib/kernel/src/logger_olp.erl
@@ -0,0 +1,627 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_olp).
+-behaviour(gen_server).
+
+-include("logger_olp.hrl").
+-include("logger_internal.hrl").
+
+%% API
+-export([start_link/4, load/2, info/1, reset/1, stop/1, restart/1,
+ set_opts/2, get_opts/1, get_default_opts/0, get_pid/1,
+ call/2, cast/2, get_ref/0, get_ref/1]).
+
+%% gen_server and proc_lib callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+-define(OPT_KEYS,[sync_mode_qlen,
+ drop_mode_qlen,
+ flush_qlen,
+ burst_limit_enable,
+ burst_limit_max_count,
+ burst_limit_window_time,
+ overload_kill_enable,
+ overload_kill_qlen,
+ overload_kill_mem_size,
+ overload_kill_restart_after]).
+
+-export_type([olp_ref/0, options/0]).
+
+-opaque olp_ref() :: {atom(),pid(),ets:tid()}.
+
+-type options() :: logger:olp_config().
+
+%%%-----------------------------------------------------------------
+%%% API
+
+-spec start_link(Name,Module,Args,Options) -> {ok,Pid,Olp} | {error,Reason} when
+ Name :: atom(),
+ Module :: module(),
+ Args :: term(),
+ Options :: options(),
+ Pid :: pid(),
+ Olp :: olp_ref(),
+ Reason :: term().
+start_link(Name,Module,Args,Options0) when is_map(Options0) ->
+ Options = maps:merge(get_default_opts(),Options0),
+ case check_opts(Options) of
+ ok ->
+ proc_lib:start_link(?MODULE,init,[[Name,Module,Args,Options]]);
+ Error ->
+ Error
+ end.
+
+-spec load(Olp, Msg) -> ok when
+ Olp :: olp_ref(),
+ Msg :: term().
+load({_Name,Pid,ModeRef},Msg) ->
+    %% If the process is getting overloaded, the message is sent
+    %% synchronously instead of asynchronously (which slows down a
+    %% client that is causing a lot of load). If the process is choked,
+    %% drop mode is set and no message is sent.
+ try ?get_mode(ModeRef) of
+ async ->
+ gen_server:cast(Pid, {'$olp_load',Msg});
+ sync ->
+ case call(Pid, {'$olp_load',Msg}) of
+ ok ->
+ ok;
+ _Other ->
+ %% dropped or {error,busy}
+ ?observe(_Name,{dropped,1}),
+ ok
+ end;
+ drop ->
+ ?observe(_Name,{dropped,1})
+ catch
+ %% if the ETS table doesn't exist (maybe because of a
+ %% process restart), we can only drop the event
+ _:_ -> ?observe(_Name,{dropped,1})
+ end,
+ ok.
+
+-spec info(Olp) -> map() | {error, busy} when
+ Olp :: atom() | pid() | olp_ref().
+info(Olp) ->
+ call(Olp, info).
+
+-spec reset(Olp) -> ok | {error, busy} when
+ Olp :: atom() | pid() | olp_ref().
+reset(Olp) ->
+ call(Olp, reset).
+
+-spec stop(Olp) -> ok when
+ Olp :: atom() | pid() | olp_ref().
+stop({_Name,Pid,_ModRef}) ->
+ stop(Pid);
+stop(Pid) ->
+ _ = gen_server:call(Pid, stop),
+ ok.
+
+-spec set_opts(Olp, Opts) -> ok | {error,term()} | {error, busy} when
+ Olp :: atom() | pid() | olp_ref(),
+ Opts :: options().
+set_opts(Olp, Opts) ->
+ call(Olp, {set_opts,Opts}).
+
+-spec get_opts(Olp) -> options() | {error, busy} when
+ Olp :: atom() | pid() | olp_ref().
+get_opts(Olp) ->
+ call(Olp, get_opts).
+
+-spec get_default_opts() -> options().
+get_default_opts() ->
+ #{sync_mode_qlen => ?SYNC_MODE_QLEN,
+ drop_mode_qlen => ?DROP_MODE_QLEN,
+ flush_qlen => ?FLUSH_QLEN,
+ burst_limit_enable => ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count => ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time => ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable => ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER}.
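+
+%% Sketch of how an owner could start an olp process with some defaults
+%% overridden (my_olp and my_olp_cb are hypothetical names; my_olp_cb
+%% would implement at least init/1 and handle_load/2, and LogEvent is
+%% whatever term that callback expects):
+%%
+%%   Opts = (logger_olp:get_default_opts())#{drop_mode_qlen => 500},
+%%   {ok, _Pid, OlpRef} = logger_olp:start_link(my_olp, my_olp_cb, [], Opts),
+%%   ok = logger_olp:load(OlpRef, LogEvent).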
+
+-spec restart(fun(() -> any())) -> ok.
+restart(Fun) ->
+ Result =
+ try Fun()
+ catch C:R:S ->
+ {error,{restart_failed,Fun,C,R,S}}
+ end,
+ ?LOG_INTERNAL(debug,[{logger_olp,restart},
+ {result,Result}]),
+ ok.
+
+-spec get_ref() -> olp_ref().
+get_ref() ->
+ get(olp_ref).
+
+-spec get_ref(PidOrName) -> olp_ref() | {error, busy} when
+ PidOrName :: pid() | atom().
+get_ref(PidOrName) ->
+ call(PidOrName,get_ref).
+
+-spec get_pid(olp_ref()) -> pid().
+get_pid({_Name,Pid,_ModeRef}) ->
+ Pid.
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([Name,Module,Args,Options]) ->
+ register(Name, self()),
+ process_flag(message_queue_data, off_heap),
+
+ ?start_observation(Name),
+
+ try ets:new(Name, [public]) of
+ ModeRef ->
+ OlpRef = {Name,self(),ModeRef},
+ put(olp_ref,OlpRef),
+ try Module:init(Args) of
+ {ok,CBState} ->
+ ?set_mode(ModeRef, async),
+ T0 = ?timestamp(),
+ proc_lib:init_ack({ok,self(),OlpRef}),
+ %% Storing options in state to avoid copying
+ %% (sending) the option data with each message
+ State0 = ?merge_with_stats(
+ Options#{id => Name,
+ idle=> true,
+ module => Module,
+ mode_ref => ModeRef,
+ mode => async,
+ last_qlen => 0,
+ last_load_ts => T0,
+ burst_win_ts => T0,
+ burst_msg_count => 0,
+ cb_state => CBState}),
+ State = reset_restart_flag(State0),
+ gen_server:enter_loop(?MODULE, [], State);
+ Error ->
+ _ = ets:delete(ModeRef),
+ unregister(Name),
+ proc_lib:init_ack(Error)
+ catch
+ _:Error ->
+ _ = ets:delete(ModeRef),
+ unregister(Name),
+ proc_lib:init_ack(Error)
+ end
+ catch
+ _:Error ->
+ unregister(Name),
+ proc_lib:init_ack(Error)
+ end.
+
+%% This is the synchronous load event.
+handle_call({'$olp_load', Msg}, _From, State) ->
+ {Result,State1} = do_load(Msg, call, State#{idle=>false}),
+ %% Result == ok | dropped
+ reply_return(Result,State1);
+
+handle_call(get_ref,_From,#{id:=Name,mode_ref:=ModeRef}=State) ->
+ reply_return({Name,self(),ModeRef},State);
+
+handle_call({set_opts,Opts0},_From,State) ->
+ Opts = maps:merge(maps:with(?OPT_KEYS,State),Opts0),
+ case check_opts(Opts) of
+ ok ->
+ reply_return(ok, maps:merge(State,Opts));
+ Error ->
+ reply_return(Error, State)
+ end;
+
+handle_call(get_opts,_From,State) ->
+ reply_return(maps:with(?OPT_KEYS,State), State);
+
+handle_call(info, _From, State) ->
+ reply_return(State, State);
+
+handle_call(reset, _From, #{module:=Module,cb_state:=CBState}=State) ->
+ State1 = ?merge_with_stats(State),
+ CBState1 = try_callback_call(Module,reset_state,[CBState],CBState),
+ reply_return(ok, State1#{idle => true,
+ last_qlen => 0,
+ last_load_ts => ?timestamp(),
+ cb_state => CBState1});
+
+handle_call(stop, _From, State) ->
+ {stop, {shutdown,stopped}, ok, State};
+
+handle_call(Msg, From, #{module:=Module,cb_state:=CBState}=State) ->
+ case try_callback_call(Module,handle_call,[Msg, From, CBState]) of
+ {reply,Reply,CBState1} ->
+ reply_return(Reply,State#{cb_state=>CBState1});
+ {noreply,CBState1} ->
+ noreply_return(State#{cb_state=>CBState1});
+ {stop, Reason, Reply, CBState1} ->
+ {stop, Reason, Reply, State#{cb_state=>CBState1}};
+ {stop, Reason, CBState1} ->
+ {stop, Reason, State#{cb_state=>CBState1}}
+ end.
+
+%% This is the asynchronous load event.
+handle_cast({'$olp_load', Msg}, State) ->
+ {_Result,State1} = do_load(Msg, cast, State#{idle=>false}),
+ noreply_return(State1);
+
+handle_cast(Msg, #{module:=Module, cb_state:=CBState} = State) ->
+ case try_callback_call(Module,handle_cast,[Msg, CBState]) of
+ {noreply,CBState1} ->
+ noreply_return(State#{cb_state=>CBState1});
+ {stop, Reason, CBState1} ->
+ {stop, Reason, State#{cb_state=>CBState1}}
+ end.
+
+handle_info(timeout, #{mode_ref:=_ModeRef, mode:=Mode} = State) ->
+ State1 = notify(idle,State),
+ State2 = maybe_notify_mode_change(async,State1),
+ {noreply, State2#{idle => true,
+ mode => ?change_mode(_ModeRef, Mode, async),
+ burst_msg_count => 0}};
+handle_info(Msg, #{module := Module, cb_state := CBState} = State) ->
+ case try_callback_call(Module,handle_info,[Msg, CBState]) of
+ {noreply,CBState1} ->
+ noreply_return(State#{cb_state=>CBState1});
+ {stop, Reason, CBState1} ->
+ {stop, Reason, State#{cb_state=>CBState1}};
+ {load,CBState1} ->
+ {_,State1} = do_load(Msg, cast, State#{idle=>false,
+ cb_state=>CBState1}),
+ noreply_return(State1)
+ end.
+
+terminate({shutdown,{overloaded,_QLen,_Mem}},
+ #{id:=Name, module := Module, cb_state := CBState,
+ overload_kill_restart_after := RestartAfter} = State) ->
+ %% We're terminating because of an overload situation (see
+ %% kill_if_choked/3).
+ unregister(Name), %%!!!! to avoid error printout of callback crashed on stop
+ case try_callback_call(Module,terminate,[overloaded,CBState],ok) of
+ {ok,Fun} when is_function(Fun,0), is_integer(RestartAfter) ->
+ set_restart_flag(State),
+ _ = timer:apply_after(RestartAfter,?MODULE,restart,[Fun]),
+ ok;
+ _ ->
+ ok
+ end;
+terminate(Reason, #{id:=Name, module:=Module, cb_state:=CBState}) ->
+ _ = try_callback_call(Module,terminate,[Reason,CBState],ok),
+ unregister(Name),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+-spec call(Olp, term()) -> term() | {error,busy} when
+ Olp :: atom() | pid() | olp_ref().
+call({_Name, Pid, _ModeRef},Msg) ->
+ call(Pid, Msg);
+call(Server, Msg) ->
+ try
+ gen_server:call(Server, Msg)
+ catch
+ _:{timeout,_} -> {error,busy}
+ end.
+
+-spec cast(olp_ref(),term()) -> ok.
+cast({_Name, Pid, _ModeRef},Msg) ->
+ gen_server:cast(Pid, Msg).
+
+%% check for overload between every event (and set Mode to async,
+%% sync or drop accordingly), but never flush the whole mailbox
+%% before LogWindowSize events have been handled
+do_load(Msg, CallOrCast, State) ->
+ T1 = ?timestamp(),
+ State1 = ?update_time(T1,State),
+
+ %% check if the process is getting overloaded, or if it's
+ %% recovering from overload (the check must be done for each
+ %% event to react quickly to large bursts of events and
+ %% to ensure that the handler can never end up in drop mode
+ %% with an empty mailbox, which would stop operation)
+ {Mode1,QLen,Mem,State2} = check_load(State1),
+
+ %% kill the handler if it can't keep up with the load
+ kill_if_choked(QLen, Mem, State2),
+
+ if Mode1 == flush ->
+ flush(T1, State2);
+ true ->
+ handle_load(Mode1, T1, Msg, CallOrCast, State2)
+ end.
+
+%% this function is called by do_load/3 after an overload check
+%% has been performed, where QLen > FlushQLen
+flush(T1, State=#{id := _Name, mode := Mode, last_load_ts := _T0, mode_ref := ModeRef}) ->
+ %% flush load messages in the mailbox (a limited number in order
+ %% to not cause long delays)
+ NewFlushed = flush_load(?FLUSH_MAX_N),
+
+ %% write info in log about flushed messages
+ State1=notify({flushed,NewFlushed},State),
+
+ %% because of the receive loop when flushing messages, the
+ %% handler will be scheduled out often and the mailbox could
+ %% grow very large, so we'd better check the queue again here
+ {_,QLen1} = process_info(self(), message_queue_len),
+ ?observe(_Name,{max_qlen,QLen1}),
+
+ %% Add 1 for the current log event
+ ?observe(_Name,{flushed,NewFlushed+1}),
+
+ State2 = ?update_max_time(?diff_time(T1,_T0),State1),
+ State3 = ?update_max_qlen(QLen1,State2),
+ State4 = maybe_notify_mode_change(async,State3),
+ {dropped,?update_other(flushed,FLUSHED,NewFlushed,
+ State4#{mode => ?change_mode(ModeRef,Mode,async),
+ last_qlen => QLen1,
+ last_load_ts => T1})}.
+
+%% this function is called to actually handle the message
+handle_load(Mode, T1, Msg, _CallOrCast,
+ State = #{id := _Name,
+ module := Module,
+ cb_state := CBState,
+ last_qlen := LastQLen,
+ last_load_ts := _T0}) ->
+ %% check if we need to limit the number of writes
+ %% during a burst of log events
+ {DoWrite,State1} = limit_burst(State),
+
+ {Result,LastQLen1,CBState1} =
+ if DoWrite ->
+ ?observe(_Name,{_CallOrCast,1}),
+ CBS = try_callback_call(Module,handle_load,[Msg,CBState]),
+ {ok,element(2, process_info(self(), message_queue_len)),CBS};
+ true ->
+ ?observe(_Name,{flushed,1}),
+ {dropped,LastQLen,CBState}
+ end,
+ State2 = State1#{cb_state=>CBState1},
+
+ State3 = State2#{mode => Mode},
+ State4 = ?update_calls_or_casts(_CallOrCast,1,State3),
+ State5 = ?update_max_qlen(LastQLen1,State4),
+ State6 =
+ ?update_max_time(?diff_time(T1,_T0),
+ State5#{last_qlen := LastQLen1,
+ last_load_ts => T1}),
+ State7 = case Result of
+ ok ->
+ S = ?update_freq(T1,State6),
+ ?update_other(writes,WRITES,1,S);
+ _ ->
+ State6
+ end,
+ {Result,State7}.
+
+
+%%%-----------------------------------------------------------------
+%%% Check that the options are valid
+check_opts(Options) when is_map(Options) ->
+ case do_check_opts(maps:to_list(Options)) of
+ ok ->
+ case overload_levels_ok(Options) of
+ true ->
+ ok;
+ false ->
+ Faulty = maps:with([sync_mode_qlen,
+ drop_mode_qlen,
+ flush_qlen],Options),
+ {error,{invalid_olp_levels,Faulty}}
+ end;
+ {error,Key,Value} ->
+ {error,{invalid_olp_config,#{Key=>Value}}}
+ end.
+
+do_check_opts([{sync_mode_qlen,N}|Options]) when is_integer(N) ->
+ do_check_opts(Options);
+do_check_opts([{drop_mode_qlen,N}|Options]) when is_integer(N) ->
+ do_check_opts(Options);
+do_check_opts([{flush_qlen,N}|Options]) when is_integer(N) ->
+ do_check_opts(Options);
+do_check_opts([{burst_limit_enable,Bool}|Options]) when is_boolean(Bool) ->
+ do_check_opts(Options);
+do_check_opts([{burst_limit_max_count,N}|Options]) when is_integer(N) ->
+ do_check_opts(Options);
+do_check_opts([{burst_limit_window_time,N}|Options]) when is_integer(N) ->
+ do_check_opts(Options);
+do_check_opts([{overload_kill_enable,Bool}|Options]) when is_boolean(Bool) ->
+ do_check_opts(Options);
+do_check_opts([{overload_kill_qlen,N}|Options]) when is_integer(N) ->
+ do_check_opts(Options);
+do_check_opts([{overload_kill_mem_size,N}|Options]) when is_integer(N) ->
+ do_check_opts(Options);
+do_check_opts([{overload_kill_restart_after,NorA}|Options])
+ when is_integer(NorA); NorA == infinity ->
+ do_check_opts(Options);
+do_check_opts([{Key,Value}|_]) ->
+ {error,Key,Value};
+do_check_opts([]) ->
+ ok.
+
+set_restart_flag(#{id := Name, module := Module}) ->
+ Flag = list_to_atom(lists:concat([Module,"_",Name,"_restarting"])),
+ spawn(fun() ->
+ register(Flag, self()),
+ timer:sleep(infinity)
+ end),
+ ok.
+
+reset_restart_flag(#{id := Name, module := Module} = State) ->
+ Flag = list_to_atom(lists:concat([Module,"_",Name,"_restarting"])),
+ case whereis(Flag) of
+ undefined ->
+ State;
+ Pid ->
+ exit(Pid, kill),
+ notify(restart,State)
+ end.
+
+check_load(State = #{id:=_Name, mode_ref := ModeRef, mode := Mode,
+ sync_mode_qlen := SyncModeQLen,
+ drop_mode_qlen := DropModeQLen,
+ flush_qlen := FlushQLen}) ->
+ {_,Mem} = process_info(self(), memory),
+ ?observe(_Name,{max_mem,Mem}),
+ {_,QLen} = process_info(self(), message_queue_len),
+ ?observe(_Name,{max_qlen,QLen}),
+ %% When the handler process gets scheduled in, it's impossible
+ %% to predict the QLen. We could jump "up" arbitrarily from say
+ %% async to sync, async to drop, sync to flush, etc. However, when
+ %% the handler process manages the log events (without flushing),
+ %% one after the other, we will move "down" from drop to sync and
+ %% from sync to async. This way we don't risk getting stuck in
+ %% drop or sync mode with an empty mailbox.
+ {Mode1,_NewDrops,_NewFlushes} =
+ if
+ QLen >= FlushQLen ->
+ {flush, 0,1};
+ QLen >= DropModeQLen ->
+ %% Note that drop mode will force load messages to
+ %% be dropped on the client side (never sent to
+ %% the olp process).
+ IncDrops = if Mode == drop -> 0; true -> 1 end,
+ {?change_mode(ModeRef, Mode, drop), IncDrops,0};
+ QLen >= SyncModeQLen ->
+ {?change_mode(ModeRef, Mode, sync), 0,0};
+ true ->
+ {?change_mode(ModeRef, Mode, async), 0,0}
+ end,
+ State1 = ?update_other(drops,DROPS,_NewDrops,State),
+ State2 = ?update_max_qlen(QLen,State1),
+ State3 = ?update_max_mem(Mem,State2),
+ State4 = maybe_notify_mode_change(Mode1,State3),
+ {Mode1, QLen, Mem,
+ ?update_other(flushes,FLUSHES,_NewFlushes,
+ State4#{last_qlen => QLen})}.
+
+limit_burst(#{burst_limit_enable := false}=State) ->
+ {true,State};
+limit_burst(#{burst_win_ts := BurstWinT0,
+ burst_msg_count := BurstMsgCount,
+ burst_limit_window_time := BurstLimitWinTime,
+ burst_limit_max_count := BurstLimitMaxCnt} = State) ->
+ if (BurstMsgCount >= BurstLimitMaxCnt) ->
+ %% the limit for allowed messages has been reached
+ BurstWinT1 = ?timestamp(),
+ case ?diff_time(BurstWinT1,BurstWinT0) of
+ BurstCheckTime when BurstCheckTime < (BurstLimitWinTime*1000) ->
+ %% we're still within the burst time frame
+ {false,?update_other(burst_drops,BURSTS,1,State)};
+ _BurstCheckTime ->
+ %% burst time frame passed, reset counters
+ {true,State#{burst_win_ts => BurstWinT1,
+ burst_msg_count => 0}}
+ end;
+ true ->
+ %% the limit for allowed messages not yet reached
+ {true,State#{burst_win_ts => BurstWinT0,
+ burst_msg_count => BurstMsgCount+1}}
+ end.
+
+kill_if_choked(QLen, Mem, #{overload_kill_enable := KillIfOL,
+ overload_kill_qlen := OLKillQLen,
+ overload_kill_mem_size := OLKillMem}) ->
+ if KillIfOL andalso
+ ((QLen > OLKillQLen) orelse (Mem > OLKillMem)) ->
+ exit({shutdown,{overloaded,QLen,Mem}});
+ true ->
+ ok
+ end.
+
+flush_load(Limit) ->
+ process_flag(priority, high),
+ Flushed = flush_load(0, Limit),
+ process_flag(priority, normal),
+ Flushed.
+
+flush_load(Limit, Limit) ->
+ Limit;
+flush_load(N, Limit) ->
+ %% flush log events but leave other events, such as info, reset
+ %% and stop, so that these have a chance to be processed even
+ %% under heavy load
+ receive
+ {'$gen_cast',{'$olp_load',_}} ->
+ flush_load(N+1, Limit);
+ {'$gen_call',{Pid,MRef},{'$olp_load',_}} ->
+ Pid ! {MRef, dropped},
+ flush_load(N+1, Limit);
+ {log,_,_,_,_} ->
+ flush_load(N+1, Limit);
+ {log,_,_,_} ->
+ flush_load(N+1, Limit)
+ after
+ 0 -> N
+ end.
+
+overload_levels_ok(Options) ->
+ SMQL = maps:get(sync_mode_qlen, Options, ?SYNC_MODE_QLEN),
+ DMQL = maps:get(drop_mode_qlen, Options, ?DROP_MODE_QLEN),
+ FQL = maps:get(flush_qlen, Options, ?FLUSH_QLEN),
+ (DMQL > 1) andalso (SMQL =< DMQL) andalso (DMQL =< FQL).
+
+maybe_notify_mode_change(drop,#{mode:=Mode0}=State)
+ when Mode0=/=drop ->
+ notify({mode_change,Mode0,drop},State);
+maybe_notify_mode_change(Mode1,#{mode:=drop}=State)
+ when Mode1==async; Mode1==sync ->
+ notify({mode_change,drop,Mode1},State);
+maybe_notify_mode_change(_,State) ->
+ State.
+
+notify(Note,#{module:=Module,cb_state:=CBState}=State) ->
+ CBState1 = try_callback_call(Module,notify,[Note,CBState],CBState),
+ State#{cb_state=>CBState1}.
+
+try_callback_call(Module, Function, Args) ->
+ try_callback_call(Module, Function, Args, '$no_default_return').
+
+try_callback_call(Module, Function, Args, DefRet) ->
+ try apply(Module, Function, Args)
+ catch
+ throw:R -> R;
+ error:undef:S when DefRet=/='$no_default_return' ->
+ case S of
+ [{Module,Function,Args,_}|_] ->
+ DefRet;
+ _ ->
+ erlang:raise(error,undef,S)
+ end
+ end.
+
+noreply_return(#{idle:=true}=State) ->
+ {noreply,State};
+noreply_return(#{idle:=false}=State) ->
+ {noreply,State,?IDLE_DETECT_TIME}.
+
+reply_return(Reply,#{idle:=true}=State) ->
+ {reply,Reply,State};
+reply_return(Reply,#{idle:=false}=State) ->
+ {reply,Reply,State,?IDLE_DETECT_TIME}.
diff --git a/lib/kernel/src/logger_olp.hrl b/lib/kernel/src/logger_olp.hrl
new file mode 100644
index 0000000000..d68b5c048d
--- /dev/null
+++ b/lib/kernel/src/logger_olp.hrl
@@ -0,0 +1,185 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 1997-2015. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%-----------------------------------------------------------------
+%%% Overload protection configuration
+
+%%! *** NOTE ***
+%%! It's important that:
+%%! SYNC_MODE_QLEN =< DROP_MODE_QLEN =< FLUSH_QLEN
+%%! and that DROP_MODE_QLEN >= 2.
+%%! Otherwise the process could end up in drop mode with no new
+%%! log requests to process. This would cause all future requests
+%%! to be dropped (no switch to async mode would ever take place).
+
+%% This specifies the message_queue_len value where the log
+%% requests switch from asynchronous casts to synchronous calls.
+-define(SYNC_MODE_QLEN, 10).
+%% Above this message_queue_len, log requests will be dropped,
+%% i.e. no log requests get sent to the process.
+-define(DROP_MODE_QLEN, 200).
+%% Above this message_queue_len, the process will flush its mailbox
+%% and only leave this number of messages in it.
+-define(FLUSH_QLEN, 1000).
+
+%% Never flush more than this number of messages in one go, or the
+%% process will be unresponsive for seconds (keep this number as large
+%% as possible or the mailbox could grow large).
+-define(FLUSH_MAX_N, 5000).
+
+%% BURST_LIMIT_MAX_COUNT is the max number of log requests allowed
+%% to be written within a BURST_LIMIT_WINDOW_TIME time frame.
+-define(BURST_LIMIT_ENABLE, true).
+-define(BURST_LIMIT_MAX_COUNT, 500).
+-define(BURST_LIMIT_WINDOW_TIME, 1000).
+
+%% This enables/disables the feature to automatically terminate the
+%% process if it gets too loaded (and can't keep up).
+-define(OVERLOAD_KILL_ENABLE, false).
+%% If the message_queue_len goes above this size even after
+%% flushing has been performed, the process is terminated.
+-define(OVERLOAD_KILL_QLEN, 20000).
+%% If the memory usage exceeds this level, the process is terminated.
+-define(OVERLOAD_KILL_MEM_SIZE, 3000000).
+
+%% This is the default time to wait before restarting and accepting
+%% new requests. The value 'infinity' disables restarts.
+-define(OVERLOAD_KILL_RESTART_AFTER, 5000).
+
+%% This is the time in milliseconds after the last load message is
+%% received that we notify the callback about being idle.
+-define(IDLE_DETECT_TIME, 100).
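+
+%% Sketch: the QLEN, BURST and KILL values above correspond to the
+%% overload protection keys in a handler's public configuration, so they
+%% can be tuned per handler, e.g. for the standard 'default' handler:
+%%
+%%   logger:update_handler_config(default, config,
+%%       #{sync_mode_qlen => 10,
+%%         drop_mode_qlen => 200,
+%%         flush_qlen => 1000,
+%%         burst_limit_enable => true,
+%%         overload_kill_enable => false}).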
+
+%%%-----------------------------------------------------------------
+%%% Overload protection macros
+
+-define(timestamp(), erlang:monotonic_time(microsecond)).
+
+-define(get_mode(Tid),
+ case ets:lookup(Tid, mode) of
+ [{mode,M}] -> M;
+ _ -> async
+ end).
+
+-define(set_mode(Tid, M),
+ begin ets:insert(Tid, {mode,M}), M end).
+
+-define(change_mode(Tid, M0, M1),
+ if M0 == M1 ->
+ M0;
+ true ->
+ ets:insert(Tid, {mode,M1}),
+ M1
+ end).
+
+-define(max(X1, X2),
+ if
+ X2 == undefined -> X1;
+ X2 > X1 -> X2;
+ true -> X1
+ end).
+
+-define(diff_time(OS_T1, OS_T0), OS_T1-OS_T0).
+
+%%%-----------------------------------------------------------------
+%%% These macros enable statistics counters in the state of the
+%%% process, which is useful for analysing the overload protection
+%%% behaviour. These counters should not be included in code to be
+%%% officially released (as some counters will grow very large over
+%%% time).
+
+%% -define(SAVE_STATS, true).
+-ifdef(SAVE_STATS).
+ -define(merge_with_stats(STATE),
+ begin
+ TIME = ?timestamp(),
+ STATE#{start => TIME, time => {TIME,0},
+ flushes => 0, flushed => 0, drops => 0,
+ burst_drops => 0, casts => 0, calls => 0,
+ writes => 0, max_qlen => 0, max_time => 0,
+ max_mem => 0, freq => {TIME,0,0}} end).
+
+ -define(update_max_qlen(QLEN, STATE),
+ begin #{max_qlen := QLEN0} = STATE,
+ STATE#{max_qlen => ?max(QLEN0,QLEN)} end).
+
+ -define(update_max_mem(MEM, STATE),
+ begin #{max_mem := MEM0} = STATE,
+ STATE#{max_mem => ?max(MEM0,MEM)} end).
+
+ -define(update_calls_or_casts(CALL_OR_CAST, INC, STATE),
+ case CALL_OR_CAST of
+ cast ->
+ #{casts := CASTS0} = STATE,
+ STATE#{casts => CASTS0+INC};
+ call ->
+ #{calls := CALLS0} = STATE,
+ STATE#{calls => CALLS0+INC}
+ end).
+
+ -define(update_max_time(TIME, STATE),
+ begin #{max_time := TIME0} = STATE,
+ STATE#{max_time => ?max(TIME0,TIME)} end).
+
+ -define(update_other(OTHER, VAR, INCVAL, STATE),
+ begin #{OTHER := VAR} = STATE,
+ STATE#{OTHER => VAR+INCVAL} end).
+
+ -define(update_freq(TIME,STATE),
+ begin
+ case STATE of
+ #{freq := {START, 49, _}} ->
+ STATE#{freq => {TIME, 0, trunc(1000000*50/(?diff_time(TIME,START)))}};
+ #{freq := {START, N, FREQ}} ->
+ STATE#{freq => {START, N+1, FREQ}}
+ end end).
+
+ -define(update_time(TIME,STATE),
+ begin #{start := START} = STATE,
+ STATE#{time => {TIME,trunc((?diff_time(TIME,START))/1000000)}} end).
+
+-else. % DEFAULT!
+ -define(merge_with_stats(STATE), STATE).
+ -define(update_max_qlen(_QLEN, STATE), STATE).
+ -define(update_max_mem(_MEM, STATE), STATE).
+ -define(update_calls_or_casts(_CALL_OR_CAST, _INC, STATE), STATE).
+ -define(update_max_time(_TIME, STATE), STATE).
+ -define(update_other(_OTHER, _VAR, _INCVAL, STATE), STATE).
+ -define(update_freq(_TIME, STATE), STATE).
+ -define(update_time(_TIME, STATE), STATE).
+-endif.
+
+%%%-----------------------------------------------------------------
+%%% These macros enable callbacks that make it possible to analyse the
+%%% overload protection behaviour from outside the process (including
+%%% dropped requests on the client side). An external callback module
+%%% (?OBSERVER_MOD) is required which is not part of the kernel
+%%% application. For this reason, these callbacks should not be
+%%% included in code to be officially released.
+
+%%-define(OBSERVER_MOD, logger_test).
+-ifdef(OBSERVER_MOD).
+  -define(start_observation(NAME), ?OBSERVER_MOD:start_observation(NAME)).
+  -define(observe(NAME,EVENT), ?OBSERVER_MOD:observe(NAME,EVENT)).
+
+-else. % DEFAULT!
+ -define(start_observation(_NAME), ok).
+ -define(observe(_NAME,_EVENT), ok).
+-endif.
diff --git a/lib/kernel/src/logger_proxy.erl b/lib/kernel/src/logger_proxy.erl
new file mode 100644
index 0000000000..24b293805c
--- /dev/null
+++ b/lib/kernel/src/logger_proxy.erl
@@ -0,0 +1,165 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_proxy).
+
+%% API
+-export([start_link/0, restart/0, log/1, child_spec/0, get_default_config/0]).
+
+%% logger_olp callbacks
+-export([init/1, handle_load/2, handle_info/2, terminate/2,
+ notify/2]).
+
+-include("logger_internal.hrl").
+
+-define(SERVER,?MODULE).
+
+%%%-----------------------------------------------------------------
+%%% API
+-spec log(RemoteLog) -> ok when
+ RemoteLog :: {remote,node(),LogEvent},
+ LogEvent :: {log,Level,Format,Args,Meta} |
+ {log,Level,StringOrReport,Meta},
+ Level :: logger:level(),
+ Format :: io:format(),
+ Args :: list(term()),
+ StringOrReport :: unicode:chardata() | logger:report(),
+ Meta :: logger:metadata().
+log(RemoteLog) ->
+ Olp = persistent_term:get(?MODULE),
+ case logger_olp:get_pid(Olp) =:= self() of
+ true ->
+ %% This happens when the log event comes from the
+ %% emulator, and the group leader is on a remote node.
+ _ = handle_load(RemoteLog, no_state),
+ ok;
+ false ->
+ logger_olp:load(Olp, RemoteLog)
+ end.
+
+%% Called by supervisor
+-spec start_link() -> {ok,pid(),logger_olp:olp_ref()} | {error,term()}.
+start_link() ->
+    %% Notice that sync_mode is only used when logging to a remote node,
+    %% i.e. when the log/1 API function is called.
+ %%
+ %% When receiving log events from the emulator or from a remote
+ %% node, the log event is sent as a message to this process, and
+ %% thus received directly in handle_info/2. This means that the
+ %% mode (async/sync/drop) is not read before the message is
+ %% sent. Thus sync mode is never entered, and drop mode is
+ %% implemented by setting the system_logger flag to undefined (see
+ %% notify/2)
+ %%
+ %% Burst limit is disabled, since this is only a proxy and we
+ %% don't want to limit bursts twice (here and in the handler).
+ logger_olp:start_link(?SERVER,?MODULE,[],logger:get_proxy_config()).
+
+%% Fun used for restarting this process after it has been killed due
+%% to overload (must set overload_kill_enable=>true in opts)
+restart() ->
+ case supervisor:start_child(logger_sup, child_spec()) of
+ {ok,_Pid,Olp} ->
+ {ok,Olp};
+ {error,{Reason,Ch}} when is_tuple(Ch), element(1,Ch)==child ->
+ {error,Reason};
+ Error ->
+ Error
+ end.
+
+%% Called internally and by logger_sup
+child_spec() ->
+ Name = ?SERVER,
+ #{id => Name,
+ start => {?MODULE, start_link, []},
+ restart => temporary,
+ shutdown => 2000,
+ type => worker,
+ modules => [?MODULE]}.
+
+get_default_config() ->
+ OlpDefault = logger_olp:get_default_opts(),
+ OlpDefault#{sync_mode_qlen=>500,
+ drop_mode_qlen=>1000,
+ flush_qlen=>5000,
+ burst_limit_enable=>false}.
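+
+%% Sketch: the proxy configuration can also be changed at runtime; the
+%% request goes through logger_server and ends up in logger_olp:set_opts/2,
+%% e.g.:
+%%
+%%   ok = logger_server:set_config(proxy, #{sync_mode_qlen => 500,
+%%                                          drop_mode_qlen => 1000}).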
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([]) ->
+ process_flag(trap_exit, true),
+ _ = erlang:system_flag(system_logger,self()),
+ persistent_term:put(?MODULE,logger_olp:get_ref()),
+ {ok,no_state}.
+
+%% Log event to send to the node where the group leader of its client resides
+handle_load({remote,Node,Log},State) ->
+ %% If the connection is overloaded (send_nosuspend returns false),
+ %% we drop the message.
+ _ = erlang:send_nosuspend({?SERVER,Node},Log),
+ State;
+%% Log event to log on this node
+handle_load({log,Level,Format,Args,Meta},State) ->
+ try_log([Level,Format,Args,Meta]),
+ State;
+handle_load({log,Level,Report,Meta},State) ->
+ try_log([Level,Report,Meta]),
+ State.
+
+%% Log event sent to this process, e.g. from the emulator - it is really a load event
+handle_info(Log,State) when is_tuple(Log), element(1,Log)==log ->
+ {load,State}.
+
+terminate(overloaded, _State) ->
+ _ = erlang:system_flag(system_logger,undefined),
+ {ok,fun ?MODULE:restart/0};
+terminate(_Reason, _State) ->
+ _ = erlang:system_flag(system_logger,whereis(logger)),
+ ok.
+
+notify({mode_change,Mode0,Mode1},State) ->
+ _ = if Mode1=:=drop -> % entering drop mode
+ erlang:system_flag(system_logger,undefined);
+ Mode0=:=drop -> % leaving drop mode
+ erlang:system_flag(system_logger,self());
+ true ->
+ ok
+ end,
+ ?LOG_INTERNAL(notice,"~w switched from ~w to ~w mode",[?MODULE,Mode0,Mode1]),
+ State;
+notify({flushed,Flushed},State) ->
+ ?LOG_INTERNAL(notice, "~w flushed ~w log events",[?MODULE,Flushed]),
+ State;
+notify(restart,State) ->
+ ?LOG_INTERNAL(notice, "~w restarted", [?MODULE]),
+ State;
+notify(_Note,State) ->
+ State.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+try_log(Args) ->
+ try apply(logger,log,Args)
+ catch C:R:S ->
+ ?LOG_INTERNAL(debug,[{?MODULE,log_failed},
+ {log,Args},
+ {reason,{C,R,S}}])
+ end.
diff --git a/lib/kernel/src/logger_server.erl b/lib/kernel/src/logger_server.erl
new file mode 100644
index 0000000000..722246e82c
--- /dev/null
+++ b/lib/kernel/src/logger_server.erl
@@ -0,0 +1,617 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_server).
+
+-behaviour(gen_server).
+
+%% API
+-export([start_link/0, add_handler/3, remove_handler/1,
+ add_filter/2, remove_filter/2,
+ set_module_level/2, unset_module_level/0,
+ unset_module_level/1, cache_module_level/1,
+ set_config/2, set_config/3,
+ update_config/2, update_config/3,
+ update_formatter_config/2]).
+
+%% Helper
+-export([diff_maps/2]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2]).
+
+-include("logger_internal.hrl").
+
+-define(SERVER, logger).
+-define(LOGGER_SERVER_TAG, '$logger_cb_process').
+
+-record(state, {tid, async_req, async_req_queue, remote_logger}).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+add_handler(Id,Module,Config0) ->
+ try {check_id(Id),check_mod(Module)} of
+ {ok,ok} ->
+ case sanity_check(Id,Config0) of
+ ok ->
+ Default = default_config(Id,Module),
+ Config = maps:merge(Default,Config0),
+ call({add_handler,Id,Module,Config});
+ Error ->
+ Error
+ end
+ catch throw:Error ->
+ {error,Error}
+ end.
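+
+%% Normally reached via the public logger:add_handler/3, e.g. (sketch):
+%%
+%%   ok = logger:add_handler(my_h, logger_std_h, #{level => info}).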
+
+remove_handler(HandlerId) ->
+ call({remove_handler,HandlerId}).
+
+add_filter(Owner,Filter) ->
+ case sanity_check(Owner,filters,[Filter]) of
+ ok -> call({add_filter,Owner,Filter});
+ Error -> Error
+ end.
+
+remove_filter(Owner,FilterId) ->
+ call({remove_filter,Owner,FilterId}).
+
+set_module_level(Modules,Level) when is_list(Modules) ->
+ case lists:all(fun(M) -> is_atom(M) end,Modules) of
+ true ->
+ case sanity_check(primary,level,Level) of
+ ok -> call({set_module_level,Modules,Level});
+ Error -> Error
+ end;
+ false ->
+ {error,{not_a_list_of_modules,Modules}}
+ end;
+set_module_level(Modules,_) ->
+ {error,{not_a_list_of_modules,Modules}}.
+
+unset_module_level() ->
+ call({unset_module_level,all}).
+
+unset_module_level(Modules) when is_list(Modules) ->
+ case lists:all(fun(M) -> is_atom(M) end,Modules) of
+ true ->
+ call({unset_module_level,Modules});
+ false ->
+ {error,{not_a_list_of_modules,Modules}}
+ end;
+unset_module_level(Modules) ->
+ {error,{not_a_list_of_modules,Modules}}.
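+
+%% Corresponding public calls (sketch):
+%%
+%%   ok = logger:set_module_level([my_module], debug),
+%%   ok = logger:unset_module_level([my_module]).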
+
+cache_module_level(Module) ->
+ gen_server:cast(?SERVER,{cache_module_level,Module}).
+
+set_config(Owner,Key,Value) ->
+ case sanity_check(Owner,Key,Value) of
+ ok ->
+ call({change_config,set,Owner,Key,Value});
+ Error ->
+ Error
+ end.
+
+set_config(Owner,Config) ->
+ case sanity_check(Owner,Config) of
+ ok ->
+ call({change_config,set,Owner,Config});
+ Error ->
+ Error
+ end.
+
+update_config(Owner,Key,Value) ->
+ case sanity_check(Owner,Key,Value) of
+ ok ->
+ call({change_config,update,Owner,Key,Value});
+ Error ->
+ Error
+ end.
+
+update_config(Owner, Config) ->
+ case sanity_check(Owner,Config) of
+ ok ->
+ call({change_config,update,Owner,Config});
+ Error ->
+ Error
+ end.
+
+update_formatter_config(HandlerId, FormatterConfig)
+ when is_map(FormatterConfig) ->
+ call({update_formatter_config,HandlerId,FormatterConfig});
+update_formatter_config(_HandlerId, FormatterConfig) ->
+ {error,{invalid_formatter_config,FormatterConfig}}.
+
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([]) ->
+ process_flag(trap_exit, true),
+ put(?LOGGER_SERVER_TAG,true),
+ Tid = logger_config:new(?LOGGER_TABLE),
+ %% Store initial proxy config. logger_proxy reads config from here at startup.
+ logger_config:create(Tid,proxy,logger_proxy:get_default_config()),
+ PrimaryConfig = maps:merge(default_config(primary),
+ #{handlers=>[simple]}),
+ logger_config:create(Tid,primary,PrimaryConfig),
+ SimpleConfig0 = maps:merge(default_config(simple,logger_simple_h),
+ #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS}),
+ %% If this fails, then the node should crash
+ {ok,SimpleConfig} = logger_simple_h:adding_handler(SimpleConfig0),
+ logger_config:create(Tid,simple,SimpleConfig),
+ {ok, #state{tid=Tid, async_req_queue = queue:new()}}.
+
+handle_call({add_handler,Id,Module,HConfig}, From, #state{tid=Tid}=State) ->
+ case logger_config:exist(Tid,Id) of
+ true ->
+ {reply,{error,{already_exist,Id}},State};
+ false ->
+ call_h_async(
+ fun() ->
+ %% inform the handler
+ call_h(Module,adding_handler,[HConfig],{ok,HConfig})
+ end,
+ fun({ok,HConfig1}) ->
+                      %% We know that call_h would have loaded the module
+                      %% if it existed, so it is safe to call
+                      %% erlang:function_exported/3 here to find out
+                      %% whether this is a valid handler.
+ case erlang:function_exported(Module, log, 2) of
+ true ->
+ logger_config:create(Tid,Id,HConfig1),
+ {ok,Config} = logger_config:get(Tid,primary),
+ Handlers = maps:get(handlers,Config,[]),
+ logger_config:set(Tid,primary,
+ Config#{handlers=>[Id|Handlers]});
+ false ->
+ {error,{invalid_handler,
+ {function_not_exported,
+ {Module,log,2}}}}
+ end;
+ ({error,HReason}) ->
+ {error,{handler_not_added,HReason}}
+ end,From,State)
+ end;
+handle_call({remove_handler,HandlerId}, From, #state{tid=Tid}=State) ->
+ case logger_config:get(Tid,HandlerId) of
+ {ok,#{module:=Module}=HConfig} ->
+ {ok,Config} = logger_config:get(Tid,primary),
+ Handlers0 = maps:get(handlers,Config,[]),
+ Handlers = lists:delete(HandlerId,Handlers0),
+ call_h_async(
+ fun() ->
+ %% inform the handler
+ call_h(Module,removing_handler,[HConfig],ok)
+ end,
+ fun(_Res) ->
+ logger_config:set(Tid,primary,Config#{handlers=>Handlers}),
+ logger_config:delete(Tid,HandlerId),
+ ok
+ end,From,State);
+ _ ->
+ {reply,{error,{not_found,HandlerId}},State}
+ end;
+handle_call({add_filter,Id,Filter}, _From,#state{tid=Tid}=State) ->
+ Reply = do_add_filter(Tid,Id,Filter),
+ {reply,Reply,State};
+handle_call({remove_filter,Id,FilterId}, _From, #state{tid=Tid}=State) ->
+ Reply = do_remove_filter(Tid,Id,FilterId),
+ {reply,Reply,State};
+handle_call({change_config,SetOrUpd,proxy,Config0},_From,#state{tid=Tid}=State) ->
+ Default =
+ case SetOrUpd of
+ set ->
+ logger_proxy:get_default_config();
+ update ->
+ {ok,OldConfig} = logger_config:get(Tid,proxy),
+ OldConfig
+ end,
+ Config = maps:merge(Default,Config0),
+ Reply =
+ case logger_olp:set_opts(logger_proxy,Config) of
+ ok ->
+ logger_config:set(Tid,proxy,Config);
+ Error ->
+ Error
+ end,
+ {reply,Reply,State};
+handle_call({change_config,SetOrUpd,primary,Config0}, _From,
+ #state{tid=Tid}=State) ->
+ {ok,#{handlers:=Handlers}=OldConfig} = logger_config:get(Tid,primary),
+ Default =
+ case SetOrUpd of
+ set -> default_config(primary);
+ update -> OldConfig
+ end,
+ Config = maps:merge(Default,Config0),
+ Reply = logger_config:set(Tid,primary,Config#{handlers=>Handlers}),
+ {reply,Reply,State};
+handle_call({change_config,_SetOrUpd,primary,Key,Value}, _From,
+ #state{tid=Tid}=State) ->
+ {ok,OldConfig} = logger_config:get(Tid,primary),
+ Reply = logger_config:set(Tid,primary,OldConfig#{Key=>Value}),
+ {reply,Reply,State};
+handle_call({change_config,SetOrUpd,HandlerId,Config0}, From,
+ #state{tid=Tid}=State) ->
+ case logger_config:get(Tid,HandlerId) of
+ {ok,#{module:=Module}=OldConfig} ->
+ Default =
+ case SetOrUpd of
+ set -> default_config(HandlerId,Module);
+ update -> OldConfig
+ end,
+ Config = maps:merge(Default,Config0),
+ case check_config_change(OldConfig,Config) of
+ ok ->
+ call_h_async(
+ fun() ->
+ call_h(Module,changing_config,
+ [SetOrUpd,OldConfig,Config],
+ {ok,Config})
+ end,
+ fun({ok,Config1}) ->
+ logger_config:set(Tid,HandlerId,Config1);
+ (Error) ->
+ Error
+ end,From,State);
+ Error ->
+ {reply,Error,State}
+ end;
+ _ ->
+ {reply,{error,{not_found,HandlerId}},State}
+ end;
+handle_call({change_config,SetOrUpd,HandlerId,Key,Value}, From,
+ #state{tid=Tid}=State) ->
+ case logger_config:get(Tid,HandlerId) of
+ {ok,#{module:=Module}=OldConfig} ->
+ Config = OldConfig#{Key=>Value},
+ case check_config_change(OldConfig,Config) of
+ ok ->
+ call_h_async(
+ fun() ->
+ call_h(Module,changing_config,
+ [SetOrUpd,OldConfig,Config],
+ {ok,Config})
+ end,
+ fun({ok,Config1}) ->
+ logger_config:set(Tid,HandlerId,Config1);
+ (Error) ->
+ Error
+ end,From,State);
+ Error ->
+ {reply,Error,State}
+ end;
+ _ ->
+ {reply,{error,{not_found,HandlerId}},State}
+ end;
+handle_call({update_formatter_config,HandlerId,NewFConfig},_From,
+ #state{tid=Tid}=State) ->
+ Reply =
+ case logger_config:get(Tid,HandlerId) of
+ {ok,#{formatter:={FMod,OldFConfig}}=Config} ->
+ try
+ FConfig = maps:merge(OldFConfig,NewFConfig),
+ check_formatter({FMod,FConfig}),
+ logger_config:set(Tid,HandlerId,
+ Config#{formatter=>{FMod,FConfig}})
+ catch throw:Reason -> {error,Reason}
+ end;
+ _ ->
+ {error,{not_found,HandlerId}}
+ end,
+ {reply,Reply,State};
+handle_call({set_module_level,Modules,Level}, _From, #state{tid=Tid}=State) ->
+ Reply = logger_config:set_module_level(Tid,Modules,Level),
+ {reply,Reply,State};
+handle_call({unset_module_level,Modules}, _From, #state{tid=Tid}=State) ->
+ Reply = logger_config:unset_module_level(Tid,Modules),
+ {reply,Reply,State}.
+
+handle_cast({async_req_reply,_Ref,_Reply} = Reply,State) ->
+ call_h_reply(Reply,State);
+handle_cast({cache_module_level,Module}, #state{tid=Tid}=State) ->
+ logger_config:cache_module_level(Tid,Module),
+ {noreply, State}.
+
+%% Interface for those who can't call the API - e.g. the emulator, or
+%% places related to code loading.
+%%
+%% This can also be log events from remote nodes which are sent from
+%% logger.erl when the group leader of the client process is not on
+%% the same node as the client process itself.
+handle_info({log,Level,Format,Args,Meta}, State) ->
+ logger:log(Level,Format,Args,Meta),
+ {noreply, State};
+handle_info({log,Level,Report,Meta}, State) ->
+ logger:log(Level,Report,Meta),
+ {noreply, State};
+handle_info({Ref,_Reply},State) when is_reference(Ref) ->
+ %% Assuming this is a timed-out gen_server reply - ignoring
+ {noreply, State};
+handle_info({'DOWN',_Ref,_Proc,_Pid,_Reason} = Down,State) ->
+ call_h_reply(Down,State);
+handle_info(Unexpected,State) when element(1,Unexpected) == 'EXIT' ->
+    %% The simple handler will send an 'EXIT' message when it is replaced.
+    %% We may as well ignore all 'EXIT' messages that we get.
+ ?LOG_INTERNAL(debug,
+ [{logger,got_unexpected_message},
+ {process,?SERVER},
+ {message,Unexpected}]),
+ {noreply,State};
+handle_info(Unexpected,State) ->
+ ?LOG_INTERNAL(info,
+ [{logger,got_unexpected_message},
+ {process,?SERVER},
+ {message,Unexpected}]),
+ {noreply,State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+call(Request) when is_tuple(Request) ->
+ Action = element(1,Request),
+ case get(?LOGGER_SERVER_TAG) of
+ true when
+ Action == add_handler; Action == remove_handler;
+ Action == add_filter; Action == remove_filter;
+ Action == change_config ->
+ {error,{attempting_syncronous_call_to_self,Request}};
+ _ ->
+ gen_server:call(?SERVER,Request,?DEFAULT_LOGGER_CALL_TIMEOUT)
+ end.
+
+
+do_add_filter(Tid,Id,{FId,_} = Filter) ->
+ case logger_config:get(Tid,Id) of
+ {ok,Config} ->
+ Filters = maps:get(filters,Config,[]),
+ case lists:keymember(FId,1,Filters) of
+ true ->
+ {error,{already_exist,FId}};
+ false ->
+ logger_config:set(Tid,Id,Config#{filters=>[Filter|Filters]})
+ end;
+ Error ->
+ Error
+ end.
+
+do_remove_filter(Tid,Id,FilterId) ->
+ case logger_config:get(Tid,Id) of
+ {ok,Config} ->
+ Filters0 = maps:get(filters,Config,[]),
+ case lists:keytake(FilterId,1,Filters0) of
+ {value,_,Filters} ->
+ logger_config:set(Tid,Id,Config#{filters=>Filters});
+ false ->
+ {error,{not_found,FilterId}}
+ end;
+ Error ->
+ Error
+ end.
+
+default_config(primary) ->
+ #{level=>notice,
+ filters=>[],
+ filter_default=>log};
+default_config(Id) ->
+ #{id=>Id,
+ level=>all,
+ filters=>[],
+ filter_default=>log,
+ formatter=>{?DEFAULT_FORMATTER,#{}}}.
+default_config(Id,Module) ->
+ (default_config(Id))#{module=>Module}.
+
+sanity_check(Owner,Key,Value) ->
+ sanity_check_1(Owner,[{Key,Value}]).
+
+sanity_check(Owner,Config) when is_map(Config) ->
+ sanity_check_1(Owner,maps:to_list(Config));
+sanity_check(_,Config) ->
+ {error,{invalid_config,Config}}.
+
+sanity_check_1(proxy,_Config) ->
+ ok; % Details are checked by logger_olp:set_opts/2
+sanity_check_1(Owner,Config) when is_list(Config) ->
+ try
+ Type = get_type(Owner),
+ check_config(Type,Config)
+ catch throw:Error -> {error,Error}
+ end.
+
+get_type(primary) ->
+ primary;
+get_type(Id) ->
+ check_id(Id),
+ handler.
+
+check_config(Owner,[{level,Level}|Config]) ->
+ check_level(Level),
+ check_config(Owner,Config);
+check_config(Owner,[{filters,Filters}|Config]) ->
+ check_filters(Filters),
+ check_config(Owner,Config);
+check_config(Owner,[{filter_default,FD}|Config]) ->
+ check_filter_default(FD),
+ check_config(Owner,Config);
+check_config(handler,[{formatter,Formatter}|Config]) ->
+ check_formatter(Formatter),
+ check_config(handler,Config);
+check_config(primary,[C|_]) ->
+ throw({invalid_primary_config,C});
+check_config(handler,[{_,_}|Config]) ->
+ %% Arbitrary config elements are allowed for handlers
+ check_config(handler,Config);
+check_config(_,[]) ->
+ ok.
+
+check_id(Id) when is_atom(Id) ->
+ ok;
+check_id(Id) ->
+ throw({invalid_id,Id}).
+
+check_mod(Mod) when is_atom(Mod) ->
+ ok;
+check_mod(Mod) ->
+ throw({invalid_module,Mod}).
+
+check_level(Level) ->
+ case lists:member(Level,?LEVELS) of
+ true ->
+ ok;
+ false ->
+ throw({invalid_level,Level})
+ end.
+
+check_filters([{Id,{Fun,_Args}}|Filters]) when is_atom(Id), is_function(Fun,2) ->
+ check_filters(Filters);
+check_filters([Filter|_]) ->
+ throw({invalid_filter,Filter});
+check_filters([]) ->
+ ok;
+check_filters(Filters) ->
+ throw({invalid_filters,Filters}).
+
+check_filter_default(FD) when FD==stop; FD==log ->
+ ok;
+check_filter_default(FD) ->
+ throw({invalid_filter_default,FD}).
+
+check_formatter({Mod,Config}) ->
+ check_mod(Mod),
+ try Mod:check_config(Config) of
+ ok -> ok;
+ {error,Error} -> throw(Error)
+ catch
+ C:R:S ->
+ case {C,R,S} of
+ {error,undef,[{Mod,check_config,[Config],_}|_]} ->
+ ok;
+ _ ->
+ throw({callback_crashed,
+ {C,R,logger:filter_stacktrace(?MODULE,S)}})
+ end
+ end;
+check_formatter(Formatter) ->
+ throw({invalid_formatter,Formatter}).
+
+%% When changing configuration for a handler, the id and module fields
+%% can not be changed.
+check_config_change(#{id:=Id,module:=Module},#{id:=Id,module:=Module}) ->
+ ok;
+check_config_change(OldConfig,NewConfig) ->
+ {Old,New} = logger_server:diff_maps(maps:with([id,module],OldConfig),
+ maps:with([id,module],NewConfig)),
+ {error,{illegal_config_change,Old,New}}.
+
+call_h(Module, Function, Args, DefRet) ->
+ %% Not calling code:ensure_loaded + erlang:function_exported here,
+ %% since in some rare terminal cases, the code_server might not
+ %% exist and we'll get a deadlock in removing the handler.
+ try apply(Module, Function, Args)
+ catch
+ C:R:S ->
+ case {C,R,S} of
+ {error,undef,[{Module,Function=changing_config,Args,_}|_]}
+ when length(Args)=:=3 ->
+ %% Backwards compatible call, if changing_config/3
+ %% did not exist.
+ call_h(Module, Function, tl(Args), DefRet);
+ {error,undef,[{Module,Function,Args,_}|_]} ->
+ DefRet;
+ _ ->
+ ST = logger:filter_stacktrace(?MODULE,S),
+ ?LOG_INTERNAL(error,
+ [{logger,callback_crashed},
+ {process,?SERVER},
+ {reason,{C,R,ST}}]),
+ {error,{callback_crashed,{C,R,ST}}}
+ end
+ end.
+
+%% There are all sorts of API functions that can cause deadlocks if called
+%% from the handler callbacks. So we spawn a process that does the request
+%% for the logger_server. There are still APIs that will cause problems,
+%% namely logger:add_handler.
+call_h_async(AsyncFun,PostFun,From,#state{ async_req = undefined } = State) ->
+ Parent = self(),
+ {Pid, Ref} = spawn_monitor(
+ fun() ->
+ put(?LOGGER_SERVER_TAG,true),
+ receive Ref -> Ref end,
+ gen_server:cast(Parent, {async_req_reply, Ref, AsyncFun()})
+ end),
+ Pid ! Ref,
+ {noreply,State#state{ async_req = {Ref,PostFun,From} }};
+call_h_async(AsyncFun,PostFun,From,#state{ async_req_queue = Q } = State) ->
+ {noreply,State#state{ async_req_queue = queue:in({AsyncFun,PostFun,From},Q) }}.
+
+call_h_reply({async_req_reply,Ref,Reply},
+ #state{ async_req = {Ref,PostFun,From}, async_req_queue = Q} = State) ->
+ erlang:demonitor(Ref,[flush]),
+ _ = gen_server:reply(From, PostFun(Reply)),
+ {Value,NewQ} = queue:out(Q),
+ NewState = State#state{ async_req = undefined,
+ async_req_queue = NewQ },
+ case Value of
+ {value,{AsyncFun,NPostFun,NFrom}} ->
+ call_h_async(AsyncFun,NPostFun,NFrom,NewState);
+ empty ->
+ {noreply,NewState}
+ end;
+call_h_reply({'DOWN',Ref,_Proc,Pid,Reason}, #state{ async_req = {Ref,_PostFun,_From}} = State) ->
+ %% This clause should only be triggered if someone explicitly sends an exit signal
+ %% to the spawned process. It is only here to make sure that the logger_server does
+ %% not deadlock if that happens.
+ ?LOG_INTERNAL(error,
+ [{logger,process_exited},
+ {process,Pid},
+ {reason,Reason}]),
+ call_h_reply(
+ {async_req_reply,Ref,{error,{logger_process_exited,Pid,Reason}}},
+ State);
+call_h_reply(Unexpected,State) ->
+ ?LOG_INTERNAL(info,
+ [{logger,got_unexpected_message},
+ {process,?SERVER},
+ {message,Unexpected}]),
+ {noreply,State}.
+
+%% Return two maps containing only the fields that differ.
+diff_maps(M1,M2) ->
+ diffs(lists:sort(maps:to_list(M1)),lists:sort(maps:to_list(M2)),#{},#{}).
+
+diffs([H|T1],[H|T2],D1,D2) ->
+ diffs(T1,T2,D1,D2);
+diffs([{K,V1}|T1],[{K,V2}|T2],D1,D2) ->
+ diffs(T1,T2,D1#{K=>V1},D2#{K=>V2});
+diffs([],[],D1,D2) ->
+ {D1,D2}.
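A quick illustration of the helper above (hypothetical shell call, shown only for the return shape; diff_maps/2 is an internal function):

    1> logger_server:diff_maps(#{id => h1, level => info}, #{id => h1, level => debug}).
    {#{level => info},#{level => debug}}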
diff --git a/lib/kernel/src/logger_simple_h.erl b/lib/kernel/src/logger_simple_h.erl
new file mode 100644
index 0000000000..a0d51dba25
--- /dev/null
+++ b/lib/kernel/src/logger_simple_h.erl
@@ -0,0 +1,215 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_simple_h).
+
+-export([adding_handler/1, removing_handler/1, log/2]).
+
+%% This module implements a simple handler for logger. It is the
+%% default used during system start.
+
+%%%-----------------------------------------------------------------
+%%% Logger callback
+
+adding_handler(#{id:=simple}=Config) ->
+ Me = self(),
+ case whereis(?MODULE) of
+ undefined ->
+ {Pid,Ref} = spawn_opt(fun() -> init(Me) end,
+ [link,monitor,{message_queue_data,off_heap}]),
+ receive
+ {'DOWN',Ref,process,Pid,Reason} ->
+ {error,Reason};
+ {Pid,started} ->
+ erlang:demonitor(Ref),
+ {ok,Config}
+ end;
+ _ ->
+ {error,{handler_process_name_already_exists,?MODULE}}
+ end.
+
+removing_handler(#{id:=simple}) ->
+ case whereis(?MODULE) of
+ undefined ->
+ ok;
+ Pid ->
+ Ref = erlang:monitor(process,Pid),
+ Pid ! stop,
+ receive {'DOWN',Ref,process,Pid,_} ->
+ ok
+ end
+ end.
+
+log(#{meta:=#{error_logger:=#{tag:=info_report,type:=Type}}},_Config)
+ when Type=/=std_info ->
+ %% Skip info reports that are not 'std_info' (see the simple logger
+ %% in error_logger).
+ ok;
+log(#{msg:=_,meta:=#{time:=_}}=Log,_Config) ->
+ _ = case whereis(?MODULE) of
+ undefined ->
+ %% Is the node on the way down? Real emergency?
+ %% Log directly from client just to get it out
+ do_log(
+ #{level=>error,
+ msg=>{report,{error,simple_handler_process_dead}},
+ meta=>#{time=>logger:timestamp()}}),
+ do_log(Log);
+ _ ->
+ ?MODULE ! {log,Log}
+ end,
+ ok;
+log(_,_) ->
+ %% Unexpected log.
+ %% We don't want to crash the simple logger, so ignore this.
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Process
+init(Starter) ->
+ register(?MODULE,self()),
+ Starter ! {self(),started},
+ loop(#{buffer_size=>10,dropped=>0,buffer=>[]}).
+
+loop(Buffer) ->
+ receive
+ stop ->
+ %% We replay the buffered log events if there is
+ %% a default handler installed when the simple
+ %% handler is removed.
+ case logger:get_handler_config(default) of
+ {ok, _} ->
+ replay_buffer(Buffer);
+ _ ->
+ ok
+ end,
+ %% Before stopping, we unlink the logger process to avoid
+ %% an unexpected EXIT message
+ unlink(whereis(logger)),
+ ok;
+ {log,#{msg:=_,meta:=#{time:=_}}=Log} ->
+ do_log(Log),
+ loop(update_buffer(Buffer,Log));
+ _ ->
+ %% Unexpected message - flush it!
+ loop(Buffer)
+ end.
+
+update_buffer(#{buffer_size:=0,dropped:=D}=Buffer,_Log) ->
+ Buffer#{dropped=>D+1};
+update_buffer(#{buffer_size:=S,buffer:=B}=Buffer,Log) ->
+ Buffer#{buffer_size=>S-1,buffer=>[Log|B]}.
+
+replay_buffer(#{ dropped := D, buffer := Buffer }) ->
+ lists:foreach(
+ fun F(#{msg := {Tag, Msg}} = L) when Tag =:= string; Tag =:= report ->
+ F(L#{ msg := Msg });
+ F(#{ level := Level, msg := Msg, meta := MD}) ->
+ logger:log(Level, Msg, MD)
+ end, lists:reverse(Buffer, drop_msg(D))).
+
+drop_msg(0) ->
+ [];
+drop_msg(N) ->
+ [#{level=>info,
+ msg=>{"Simple handler buffer full, dropped ~w messages",[N]},
+ meta=>#{time=>logger:timestamp()}}].
+
+%%%-----------------------------------------------------------------
+%%% Internal
+
+%% Can't do io_lib:format
+
+do_log(#{msg:={report,Report},
+ meta:=#{time:=T,error_logger:=#{type:=Type}}}) ->
+ display_date(T),
+ display_report(Type,Report);
+do_log(#{msg:=Msg,meta:=#{time:=T}}) ->
+ display_date(T),
+ display(Msg).
+
+display_date(Timestamp) when is_integer(Timestamp) ->
+ Micro = Timestamp rem 1000000,
+ Sec = Timestamp div 1000000,
+ {{Y,Mo,D},{H,Mi,S}} = erlang:universaltime_to_localtime(
+ erlang:posixtime_to_universaltime(Sec)),
+ erlang:display_string(
+ integer_to_list(Y) ++ "-" ++
+ pad(Mo,2) ++ "-" ++
+ pad(D,2) ++ " " ++
+ pad(H,2) ++ ":" ++
+ pad(Mi,2) ++ ":" ++
+ pad(S,2) ++ "." ++
+ pad(Micro,6) ++ " ").
+
+pad(Int,Size) when is_integer(Int) ->
+ pad(integer_to_list(Int),Size);
+pad(Str,Size) when length(Str)==Size ->
+ Str;
+pad(Str,Size) ->
+ pad([$0|Str],Size).
+
+display({string,Chardata}) ->
+ try unicode:characters_to_list(Chardata) of
+ String -> erlang:display_string(String), erlang:display_string("\n")
+ catch _:_ -> erlang:display(Chardata)
+ end;
+display({report,Report}) when is_map(Report) ->
+ display_report(maps:to_list(Report));
+display({report,Report}) ->
+ display_report(Report);
+display({F, A}) when is_list(F), is_list(A) ->
+ erlang:display_string(F ++ "\n"),
+ [begin
+ erlang:display_string("\t"),
+ erlang:display(Arg)
+ end || Arg <- A],
+ ok.
+
+display_report(Atom, A) when is_atom(Atom) ->
+ %% The widest atom seems to be 'supervisor_report' at 17.
+ ColumnWidth = 20,
+ AtomString = atom_to_list(Atom),
+ AtomLength = length(AtomString),
+ Padding = lists:duplicate(ColumnWidth - AtomLength, $\s),
+ erlang:display_string(AtomString ++ Padding),
+ display_report(A);
+display_report(F, A) ->
+ erlang:display({F, A}).
+
+display_report([A, []]) ->
+ %% Special case for crash reports when process has no links
+ display_report(A);
+display_report(A = [_|_]) ->
+ case lists:all(fun({Key,_Value}) -> is_atom(Key); (_) -> false end, A) of
+ true ->
+ erlang:display_string("\n"),
+ lists:foreach(
+ fun({Key, Value}) ->
+ erlang:display_string(
+ " " ++
+ atom_to_list(Key) ++
+ ": "),
+ erlang:display(Value)
+ end, A);
+ false ->
+ erlang:display(A)
+ end;
+display_report(A) ->
+ erlang:display(A).
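A hedged sketch (not part of the patch) of how the simple handler is expected to be replaced once a real default handler is available; the buffered events are then replayed by the 'stop' clause above:

    ok = logger:add_handler(default, logger_std_h, #{config => #{type => standard_io}}),
    ok = logger:remove_handler(simple).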
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
new file mode 100644
index 0000000000..c8f1acfca4
--- /dev/null
+++ b/lib/kernel/src/logger_std_h.erl
@@ -0,0 +1,643 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_std_h).
+
+-include("logger.hrl").
+-include("logger_internal.hrl").
+-include("logger_h_common.hrl").
+
+-include_lib("kernel/include/file.hrl").
+
+%% API
+-export([filesync/1]).
+
+%% logger_h_common callbacks
+-export([init/2, check_config/4, config_changed/3, reset_state/2,
+ filesync/3, write/4, handle_info/3, terminate/3]).
+
+%% logger callbacks
+-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
+ filter_config/1]).
+
+-define(DEFAULT_CALL_TIMEOUT, 5000).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%%
+-spec filesync(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+filesync(Name) ->
+ logger_h_common:filesync(?MODULE,Name).
+
+%%%===================================================================
+%%% logger callbacks - just forward to logger_h_common
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Handler being added
+-spec adding_handler(Config) -> {ok,Config} | {error,Reason} when
+ Config :: logger:handler_config(),
+ Reason :: term().
+
+adding_handler(Config) ->
+ logger_h_common:adding_handler(Config).
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+-spec changing_config(SetOrUpdate, OldConfig, NewConfig) ->
+ {ok,Config} | {error,Reason} when
+ SetOrUpdate :: set | update,
+ OldConfig :: logger:handler_config(),
+ NewConfig :: logger:handler_config(),
+ Config :: logger:handler_config(),
+ Reason :: term().
+
+changing_config(SetOrUpdate, OldConfig, NewConfig) ->
+ logger_h_common:changing_config(SetOrUpdate, OldConfig, NewConfig).
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+-spec removing_handler(Config) -> ok when
+ Config :: logger:handler_config().
+
+removing_handler(Config) ->
+ logger_h_common:removing_handler(Config).
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(LogEvent, Config) -> ok when
+ LogEvent :: logger:log_event(),
+ Config :: logger:handler_config().
+
+log(LogEvent, Config) ->
+ logger_h_common:log(LogEvent, Config).
+
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+-spec filter_config(Config) -> Config when
+ Config :: logger:handler_config().
+
+filter_config(Config) ->
+ logger_h_common:filter_config(Config).
+
+%%%===================================================================
+%%% logger_h_common callbacks
+%%%===================================================================
+init(Name, Config) ->
+ MyConfig = maps:with([type,file,modes,file_check,max_no_bytes,
+ max_no_files,compress_on_rotate],Config),
+ case file_ctrl_start(Name, MyConfig) of
+ {ok,FileCtrlPid} ->
+ {ok,MyConfig#{file_ctrl_pid=>FileCtrlPid}};
+ Error ->
+ Error
+ end.
+
+check_config(Name,set,undefined,NewHConfig) ->
+ check_h_config(merge_default_config(Name,normalize_config(NewHConfig)));
+check_config(Name,SetOrUpdate,OldHConfig,NewHConfig0) ->
+ WriteOnce = maps:with([type,file,modes],OldHConfig),
+ Default =
+ case SetOrUpdate of
+ set ->
+ %% Do not reset write-once fields to defaults
+ merge_default_config(Name,WriteOnce);
+ update ->
+ OldHConfig
+ end,
+
+ NewHConfig = maps:merge(Default, normalize_config(NewHConfig0)),
+
+ %% Fail if write-once fields are changed
+ case maps:with([type,file,modes],NewHConfig) of
+ WriteOnce ->
+ check_h_config(NewHConfig);
+ Other ->
+ {error,{illegal_config_change,?MODULE,WriteOnce,Other}}
+ end.
+
+check_h_config(HConfig) ->
+ case check_h_config(maps:get(type,HConfig),maps:to_list(HConfig)) of
+ ok ->
+ {ok,fix_file_opts(HConfig)};
+ {error,{Key,Value}} ->
+ {error,{invalid_config,?MODULE,#{Key=>Value}}}
+ end.
+
+check_h_config(Type,[{type,Type} | Config]) when Type =:= standard_io;
+ Type =:= standard_error;
+ Type =:= file ->
+ check_h_config(Type,Config);
+check_h_config(file,[{file,File} | Config]) when is_list(File) ->
+ check_h_config(file,Config);
+check_h_config(file,[{modes,Modes} | Config]) when is_list(Modes) ->
+ check_h_config(file,Config);
+check_h_config(file,[{max_no_bytes,Size} | Config])
+ when (is_integer(Size) andalso Size>0) orelse Size=:=infinity ->
+ check_h_config(file,Config);
+check_h_config(file,[{max_no_files,Num} | Config]) when is_integer(Num), Num>=0 ->
+ check_h_config(file,Config);
+check_h_config(file,[{compress_on_rotate,Bool} | Config]) when is_boolean(Bool) ->
+ check_h_config(file,Config);
+check_h_config(file,[{file_check,FileCheck} | Config])
+ when is_integer(FileCheck), FileCheck>=0 ->
+ check_h_config(file,Config);
+check_h_config(_Type,[Other | _]) ->
+ {error,Other};
+check_h_config(_Type,[]) ->
+ ok.
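For illustration, a handler configuration that would pass the checks above; the handler id my_file_h and the values are arbitrary examples, not taken from the patch (type is implied as 'file' by the presence of the file key):

    logger:add_handler(my_file_h, logger_std_h,
                       #{config => #{file => "log/my.log",
                                     max_no_bytes => 10485760,
                                     max_no_files => 5,
                                     compress_on_rotate => true}}).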
+
+normalize_config(#{type:={file,File}}=HConfig) ->
+ HConfig#{type=>file,file=>File};
+normalize_config(#{type:={file,File,Modes}}=HConfig) ->
+ HConfig#{type=>file,file=>File,modes=>Modes};
+normalize_config(HConfig) ->
+ HConfig.
+
+merge_default_config(Name,#{type:=Type}=HConfig) ->
+ merge_default_config(Name,Type,HConfig);
+merge_default_config(Name,#{file:=_}=HConfig) ->
+ merge_default_config(Name,file,HConfig);
+merge_default_config(Name,HConfig) ->
+ merge_default_config(Name,standard_io,HConfig).
+
+merge_default_config(Name,Type,HConfig) ->
+ maps:merge(get_default_config(Name,Type),HConfig).
+
+get_default_config(Name,file) ->
+ #{type => file,
+ file => atom_to_list(Name),
+ modes => [raw,append],
+ file_check => 0,
+ max_no_bytes => infinity,
+ max_no_files => 0,
+ compress_on_rotate => false};
+get_default_config(_Name,Type) ->
+ #{type => Type}.
+
+fix_file_opts(#{modes:=Modes}=HConfig) ->
+ HConfig#{modes=>fix_modes(Modes)};
+fix_file_opts(HConfig) ->
+ HConfig#{filesync_repeat_interval=>no_repeat}.
+
+fix_modes(Modes) ->
+ %% Ensure write|append|exclusive
+ Modes1 =
+ case [M || M <- Modes,
+ lists:member(M,[write,append,exclusive])] of
+ [] -> [append|Modes];
+ _ -> Modes
+ end,
+ %% Ensure raw
+ Modes2 =
+ case lists:member(raw,Modes) of
+ false -> [raw|Modes1];
+ true -> Modes1
+ end,
+ %% Ensure delayed_write
+ case lists:partition(fun(delayed_write) -> true;
+ ({delayed_write,_,_}) -> true;
+ (_) -> false
+ end, Modes2) of
+ {[],_} ->
+ [delayed_write|Modes2];
+ _ ->
+ Modes2
+ end.
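Illustration of the effect (not part of the patch):

    %% fix_modes([write]) -> [delayed_write,raw,write]
    %% fix_modes([])      -> [delayed_write,raw,append]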
+
+config_changed(_Name,
+ #{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress},
+ #{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress}=State) ->
+ State;
+config_changed(_Name,
+ #{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress},
+ #{file_ctrl_pid := FileCtrlPid} = State) ->
+ FileCtrlPid ! {update_config,#{file_check=>FileCheck,
+ max_no_bytes=>Size,
+ max_no_files=>Count,
+ compress_on_rotate=>Compress}},
+ State#{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress};
+config_changed(_Name,_NewHConfig,State) ->
+ State.
+
+filesync(_Name, SyncAsync, #{file_ctrl_pid := FileCtrlPid} = State) ->
+ Result = file_ctrl_filesync(SyncAsync, FileCtrlPid),
+ {Result,State}.
+
+write(_Name, SyncAsync, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
+ Result = file_write(SyncAsync, FileCtrlPid, Bin),
+ {Result,State}.
+
+reset_state(_Name, State) ->
+ State.
+
+handle_info(_Name, {'EXIT',Pid,Why}, #{file_ctrl_pid := Pid}=State) ->
+ %% file_ctrl_pid died, file error, terminate handler
+ exit({error,{write_failed,maps:with([type,file,modes],State),Why}});
+handle_info(_, _, State) ->
+ State.
+
+terminate(_Name, _Reason, #{file_ctrl_pid:=FWPid}) ->
+ case is_process_alive(FWPid) of
+ true ->
+ unlink(FWPid),
+ _ = file_ctrl_stop(FWPid),
+ MRef = erlang:monitor(process, FWPid),
+ receive
+ {'DOWN',MRef,_,_,_} ->
+ ok
+ after
+ ?DEFAULT_CALL_TIMEOUT ->
+ exit(FWPid, kill),
+ ok
+ end;
+ false ->
+ ok
+ end.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%%
+open_log_file(HandlerName,#{type:=file,
+ file:=FileName,
+ modes:=Modes,
+ file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress}) ->
+ try
+ case filelib:ensure_dir(FileName) of
+ ok ->
+ case file:open(FileName, Modes) of
+ {ok, Fd} ->
+ {ok,#file_info{inode=INode}} =
+ file:read_file_info(FileName,[raw]),
+ UpdateModes = [append | Modes--[write,append,exclusive]],
+ State0 = #{handler_name=>HandlerName,
+ file_name=>FileName,
+ modes=>UpdateModes,
+ file_check=>FileCheck,
+ fd=>Fd,
+ inode=>INode,
+ last_check=>timestamp(),
+ synced=>false,
+ write_res=>ok,
+ sync_res=>ok},
+ State = update_rotation({Size,Count,Compress},State0),
+ {ok,State};
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end
+ catch
+ _:Reason -> {error,Reason}
+ end.
+
+close_log_file(#{fd:=Fd}) ->
+ _ = file:datasync(Fd),
+ _ = file:close(Fd),
+ ok;
+close_log_file(_) ->
+ ok.
+
+
+
+%%%-----------------------------------------------------------------
+%%% File control process
+
+file_ctrl_start(HandlerName, HConfig) ->
+ Starter = self(),
+ FileCtrlPid =
+ spawn_link(fun() ->
+ file_ctrl_init(HandlerName, HConfig, Starter)
+ end),
+ receive
+ {FileCtrlPid,ok} ->
+ {ok,FileCtrlPid};
+ {FileCtrlPid,Error} ->
+ Error
+ after
+ ?DEFAULT_CALL_TIMEOUT ->
+ {error,file_ctrl_process_not_started}
+ end.
+
+file_ctrl_stop(Pid) ->
+ Pid ! stop.
+
+file_write(async, Pid, Bin) ->
+ Pid ! {log,Bin},
+ ok;
+file_write(sync, Pid, Bin) ->
+ file_ctrl_call(Pid, {log,Bin}).
+
+file_ctrl_filesync(async, Pid) ->
+ Pid ! filesync,
+ ok;
+file_ctrl_filesync(sync, Pid) ->
+ file_ctrl_call(Pid, filesync).
+
+file_ctrl_call(Pid, Msg) ->
+ MRef = monitor(process, Pid),
+ Pid ! {Msg,{self(),MRef}},
+ receive
+ {MRef,Result} ->
+ demonitor(MRef, [flush]),
+ Result;
+ {'DOWN',MRef,_Type,_Object,Reason} ->
+ {error,Reason}
+ after
+ ?DEFAULT_CALL_TIMEOUT ->
+ {error,{no_response,Pid}}
+ end.
+
+file_ctrl_init(HandlerName,
+ #{type:=file,
+ file:=FileName} = HConfig,
+ Starter) ->
+ process_flag(message_queue_data, off_heap),
+ case open_log_file(HandlerName,HConfig) of
+ {ok,State} ->
+ Starter ! {self(),ok},
+ file_ctrl_loop(State);
+ {error,Reason} ->
+ Starter ! {self(),{error,{open_failed,FileName,Reason}}}
+ end;
+file_ctrl_init(HandlerName, #{type:=StdDev}, Starter) ->
+ Starter ! {self(),ok},
+ file_ctrl_loop(#{handler_name=>HandlerName,dev=>StdDev}).
+
+file_ctrl_loop(State) ->
+ receive
+ %% asynchronous event
+ {log,Bin} ->
+ State1 = write_to_dev(Bin,State),
+ file_ctrl_loop(State1);
+
+ %% synchronous event
+ {{log,Bin},{From,MRef}} ->
+ State1 = ensure_file(State),
+ State2 = write_to_dev(Bin,State1),
+ From ! {MRef,ok},
+ file_ctrl_loop(State2);
+
+ filesync ->
+ State1 = sync_dev(State),
+ file_ctrl_loop(State1);
+
+ {filesync,{From,MRef}} ->
+ State1 = ensure_file(State),
+ State2 = sync_dev(State1),
+ From ! {MRef,ok},
+ file_ctrl_loop(State2);
+
+ {update_config,#{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress}} ->
+ State1 = update_rotation({Size,Count,Compress},State),
+ file_ctrl_loop(State1#{file_check=>FileCheck});
+
+ stop ->
+ close_log_file(State),
+ stopped
+ end.
+
+maybe_ensure_file(#{file_check:=0}=State) ->
+ ensure_file(State);
+maybe_ensure_file(#{last_check:=T0,file_check:=CheckInt}=State)
+ when is_integer(CheckInt) ->
+ T = timestamp(),
+ if T-T0 > CheckInt -> ensure_file(State);
+ true -> State
+ end;
+maybe_ensure_file(State) ->
+ State.
+
+%% In order to play well with tools like logrotate, we need to be able
+%% to re-create the file if it has disappeared (e.g. if rotated by
+%% logrotate)
+ensure_file(#{fd:=Fd0,inode:=INode0,file_name:=FileName,modes:=Modes}=State) ->
+ case file:read_file_info(FileName,[raw]) of
+ {ok,#file_info{inode=INode0}} ->
+ State#{last_check=>timestamp()};
+ _ ->
+ close_log_file(Fd0),
+ case file:open(FileName,Modes) of
+ {ok,Fd} ->
+ {ok,#file_info{inode=INode}} =
+ file:read_file_info(FileName,[raw]),
+ State#{fd=>Fd,inode=>INode,
+ last_check=>timestamp(),
+ synced=>true,sync_res=>ok};
+ Error ->
+ exit({could_not_reopen_file,Error})
+ end
+ end;
+ensure_file(State) ->
+ State.
+
+write_to_dev(Bin,#{dev:=DevName}=State) ->
+ io:put_chars(DevName, Bin),
+ State;
+write_to_dev(Bin, State) ->
+ State1 = #{fd:=Fd} = maybe_ensure_file(State),
+ Result = ?file_write(Fd, Bin),
+ State2 = maybe_rotate_file(Bin,State1),
+ maybe_notify_error(write,Result,State2),
+ State2#{synced=>false,write_res=>Result}.
+
+sync_dev(#{synced:=false}=State) ->
+ State1 = #{fd:=Fd} = maybe_ensure_file(State),
+ Result = ?file_datasync(Fd),
+ maybe_notify_error(filesync,Result,State1),
+ State1#{synced=>true,sync_res=>Result};
+sync_dev(State) ->
+ State.
+
+update_rotation({infinity,_,_},State) ->
+ maybe_remove_archives(0,State),
+ maps:remove(rotation,State);
+update_rotation({Size,Count,Compress},#{file_name:=FileName} = State) ->
+ maybe_remove_archives(Count,State),
+ {ok,#file_info{size=CurrSize}} = file:read_file_info(FileName,[raw]),
+ State1 = State#{rotation=>#{size=>Size,
+ count=>Count,
+ compress=>Compress,
+ curr_size=>CurrSize}},
+ maybe_update_compress(0,State1),
+ maybe_rotate_file(0,State1).
+
+maybe_remove_archives(Count,#{file_name:=FileName}=State) ->
+ Archive = rot_file_name(FileName,Count,false),
+ CompressedArchive = rot_file_name(FileName,Count,true),
+ case {file:read_file_info(Archive,[raw]),
+ file:read_file_info(CompressedArchive,[raw])} of
+ {{error,enoent},{error,enoent}} ->
+ ok;
+ _ ->
+ _ = file:delete(Archive),
+ _ = file:delete(CompressedArchive),
+ maybe_remove_archives(Count+1,State)
+ end.
+
+maybe_update_compress(Count,#{rotation:=#{count:=Count}}) ->
+ ok;
+maybe_update_compress(N,#{file_name:=FileName,
+ rotation:=#{compress:=Compress}}=State) ->
+ Archive = rot_file_name(FileName,N,not Compress),
+ case file:read_file_info(Archive,[raw]) of
+ {ok,_} when Compress ->
+ compress_file(Archive);
+ {ok,_} ->
+ decompress_file(Archive);
+ _ ->
+ ok
+ end,
+ maybe_update_compress(N+1,State).
+
+maybe_rotate_file(Bin,#{rotation:=_}=State) when is_binary(Bin) ->
+ maybe_rotate_file(byte_size(Bin),State);
+maybe_rotate_file(AddSize,#{rotation:=#{size:=RotSize,
+ curr_size:=CurrSize}=Rotation}=State) ->
+ NewSize = CurrSize + AddSize,
+ if NewSize>RotSize ->
+ rotate_file(State#{rotation=>Rotation#{curr_size=>NewSize}});
+ true ->
+ State#{rotation=>Rotation#{curr_size=>NewSize}}
+ end;
+maybe_rotate_file(_Bin,State) ->
+ State.
+
+rotate_file(#{fd:=Fd0,file_name:=FileName,modes:=Modes,rotation:=Rotation}=State) ->
+ State1 = sync_dev(State),
+ _ = file:close(Fd0),
+ rotate_files(FileName,maps:get(count,Rotation),maps:get(compress,Rotation)),
+ case file:open(FileName,Modes) of
+ {ok,Fd} ->
+ {ok,#file_info{inode=INode}} = file:read_file_info(FileName,[raw]),
+ State1#{fd=>Fd,inode=>INode,rotation=>Rotation#{curr_size=>0}};
+ Error ->
+ exit({could_not_reopen_file,Error})
+ end.
+
+rotate_files(FileName,0,_Compress) ->
+ _ = file:delete(FileName),
+ ok;
+rotate_files(FileName,1,Compress) ->
+ FileName0 = FileName++".0",
+ _ = file:rename(FileName,FileName0),
+ if Compress -> compress_file(FileName0);
+ true -> ok
+ end,
+ ok;
+rotate_files(FileName,Count,Compress) ->
+ _ = file:rename(rot_file_name(FileName,Count-2,Compress),
+ rot_file_name(FileName,Count-1,Compress)),
+ rotate_files(FileName,Count-1,Compress).
+
+rot_file_name(FileName,Count,false) ->
+ FileName ++ "." ++ integer_to_list(Count);
+rot_file_name(FileName,Count,true) ->
+ rot_file_name(FileName,Count,false) ++ ".gz".
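For example (file names assume a handler logging to "my.log"):

    rot_file_name("my.log", 0, false) %% -> "my.log.0"
    rot_file_name("my.log", 0, true)  %% -> "my.log.0.gz"

With max_no_files => 5 the archives kept are my.log.0 .. my.log.4, and maybe_remove_archives/2 deletes index 5 and upwards.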
+
+compress_file(FileName) ->
+ {ok,In} = file:open(FileName,[read,binary]),
+ {ok,Out} = file:open(FileName++".gz",[write]),
+ Z = zlib:open(),
+ zlib:deflateInit(Z, default, deflated, 31, 8, default),
+ compress_data(Z,In,Out),
+ zlib:deflateEnd(Z),
+ zlib:close(Z),
+ _ = file:close(In),
+ _ = file:close(Out),
+ _ = file:delete(FileName),
+ ok.
+
+compress_data(Z,In,Out) ->
+ case file:read(In,100000) of
+ {ok,Data} ->
+ Compressed = zlib:deflate(Z, Data),
+ _ = file:write(Out,Compressed),
+ compress_data(Z,In,Out);
+ eof ->
+ Compressed = zlib:deflate(Z, <<>>, finish),
+ _ = file:write(Out,Compressed),
+ ok
+ end.
+
+decompress_file(FileName) ->
+ {ok,In} = file:open(FileName,[read,binary]),
+ {ok,Out} = file:open(filename:rootname(FileName,".gz"),[write]),
+ Z = zlib:open(),
+ zlib:inflateInit(Z, 31),
+ decompress_data(Z,In,Out),
+ zlib:inflateEnd(Z),
+ zlib:close(Z),
+ _ = file:close(In),
+ _ = file:close(Out),
+ _ = file:delete(FileName),
+ ok.
+
+decompress_data(Z,In,Out) ->
+ case file:read(In,1000) of
+ {ok,Data} ->
+ Decompressed = zlib:inflate(Z, Data),
+ _ = file:write(Out,Decompressed),
+ decompress_data(Z,In,Out);
+ eof ->
+ ok
+ end.
+
+maybe_notify_error(_Op, ok, _State) ->
+ ok;
+maybe_notify_error(Op, Result, #{write_res:=WR,sync_res:=SR})
+ when (Op==write andalso Result==WR) orelse
+ (Op==filesync andalso Result==SR) ->
+ %% don't report same error twice
+ ok;
+maybe_notify_error(Op, Error, #{handler_name:=HandlerName,file_name:=FileName}) ->
+ logger_h_common:error_notify({HandlerName,Op,FileName,Error}),
+ ok.
+
+timestamp() ->
+ erlang:monotonic_time(millisecond).
diff --git a/lib/kernel/src/logger_sup.erl b/lib/kernel/src/logger_sup.erl
new file mode 100644
index 0000000000..9ea8558a16
--- /dev/null
+++ b/lib/kernel/src/logger_sup.erl
@@ -0,0 +1,59 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/0]).
+
+%% Supervisor callbacks
+-export([init/1]).
+
+-define(SERVER, ?MODULE).
+
+%%%===================================================================
+%%% API functions
+%%%===================================================================
+
+start_link() ->
+ supervisor:start_link({local, ?SERVER}, ?MODULE, []).
+
+%%%===================================================================
+%%% Supervisor callbacks
+%%%===================================================================
+
+init([]) ->
+
+ SupFlags = #{strategy => one_for_one,
+ intensity => 1,
+ period => 5},
+
+ Watcher = #{id => logger_handler_watcher,
+ start => {logger_handler_watcher, start_link, []},
+ shutdown => brutal_kill},
+
+ Proxy = logger_proxy:child_spec(),
+
+ {ok, {SupFlags, [Watcher,Proxy]}}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
diff --git a/lib/kernel/src/net_kernel.erl b/lib/kernel/src/net_kernel.erl
index 7da89dd7cb..a9dc77837e 100644
--- a/lib/kernel/src/net_kernel.erl
+++ b/lib/kernel/src/net_kernel.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -53,7 +53,7 @@
%% Documented API functions.
--export([allow/1,
+-export([allow/1, allowed/0,
connect_node/1,
monitor_nodes/1,
monitor_nodes/2,
@@ -70,8 +70,8 @@
protocol_childspecs/0,
epmd_module/0]).
--export([connect/1, disconnect/1, hidden_connect/1, passive_cnct/1]).
--export([hidden_connect_node/1]). %% explicit connect
+-export([disconnect/1, passive_cnct/1]).
+-export([hidden_connect_node/1]).
-export([set_net_ticktime/1, set_net_ticktime/2, get_net_ticktime/0]).
-export([node_info/1, node_info/2, nodes_info/0,
@@ -122,6 +122,7 @@
-record(connection, {
node, %% remote node name
+ conn_id, %% Connection identity
state, %% pending | up | up_pending
owner, %% owner pid
pending_owner, %% possible new owner
@@ -170,6 +171,8 @@ kernel_apply(M,F,A) -> request({apply,M,F,A}).
Nodes :: [node()].
allow(Nodes) -> request({allow, Nodes}).
+allowed() -> request(allowed).
+
longnames() -> request(longnames).
-spec stop() -> ok | {error, Reason} when
@@ -221,8 +224,7 @@ get_net_ticktime() ->
Error :: error | {error, term()}.
monitor_nodes(Flag) ->
case catch process_flag(monitor_nodes, Flag) of
- true -> ok;
- false -> ok;
+ N when is_integer(N) -> ok;
_ -> mk_monitor_nodes_error(Flag, [])
end.
@@ -235,8 +237,7 @@ monitor_nodes(Flag) ->
Error :: error | {error, term()}.
monitor_nodes(Flag, Opts) ->
case catch process_flag({monitor_nodes, Opts}, Flag) of
- true -> ok;
- false -> ok;
+ N when is_integer(N) -> ok;
_ -> mk_monitor_nodes_error(Flag, Opts)
end.
@@ -247,14 +248,15 @@ ticktime_res(A) when is_atom(A) -> A.
%% Called though BIF's
-connect(Node) -> do_connect(Node, normal, false).
%%% Long timeout if blocked (== barred), only affects nodes with
%%% {dist_auto_connect, once} set.
-passive_cnct(Node) -> do_connect(Node, normal, true).
-disconnect(Node) -> request({disconnect, Node}).
+passive_cnct(Node) ->
+ case request({passive_cnct, Node}) of
+ ignored -> false;
+ Other -> Other
+ end.
-%% connect but not seen
-hidden_connect(Node) -> do_connect(Node, hidden, false).
+disconnect(Node) -> request({disconnect, Node}).
%% Should this node publish itself on Node?
publish_on_node(Node) when is_atom(Node) ->
@@ -272,67 +274,24 @@ connect_node(Node) when is_atom(Node) ->
hidden_connect_node(Node) when is_atom(Node) ->
request({connect, hidden, Node}).
-do_connect(Node, Type, WaitForBarred) -> %% Type = normal | hidden
- case catch ets:lookup(sys_dist, Node) of
- {'EXIT', _} ->
- ?connect_failure(Node,{table_missing, sys_dist}),
- false;
- [#barred_connection{}] ->
- case WaitForBarred of
- false ->
- false;
- true ->
- Pid = spawn(?MODULE,passive_connect_monitor,[self(),Node]),
- receive
- {Pid, true} ->
- %%io:format("Net Kernel: barred connection (~p) "
- %% "connected from other end.~n",[Node]),
- true;
- {Pid, false} ->
- ?connect_failure(Node,{barred_connection,
- ets:lookup(sys_dist, Node)}),
- %%io:format("Net Kernel: barred connection (~p) "
- %% "- failure.~n",[Node]),
- false
- end
- end;
- Else ->
- case application:get_env(kernel, dist_auto_connect) of
- {ok, never} ->
- ?connect_failure(Node,{dist_auto_connect,never}),
- false;
- % This might happen due to connection close
- % not beeing propagated to user space yet.
- % Save the day by just not connecting...
- {ok, once} when Else =/= [],
- (hd(Else))#connection.state =:= up ->
- ?connect_failure(Node,{barred_connection,
- ets:lookup(sys_dist, Node)}),
- false;
- _ ->
- request({connect, Type, Node})
- end
- end.
-passive_connect_monitor(Parent, Node) ->
+passive_connect_monitor(From, Node) ->
ok = monitor_nodes(true,[{node_type,all}]),
- case lists:member(Node,nodes([connected])) of
- true ->
- ok = monitor_nodes(false,[{node_type,all}]),
- Parent ! {self(),true};
- _ ->
- Ref = make_ref(),
- Tref = erlang:send_after(connecttime(),self(),Ref),
- receive
- Ref ->
- ok = monitor_nodes(false,[{node_type,all}]),
- Parent ! {self(), false};
- {nodeup,Node,_} ->
- ok = monitor_nodes(false,[{node_type,all}]),
- _ = erlang:cancel_timer(Tref),
- Parent ! {self(),true}
- end
- end.
+ Reply = case lists:member(Node,nodes([connected])) of
+ true ->
+ true;
+ _ ->
+ receive
+ {nodeup,Node,_} ->
+ true
+ after connecttime() ->
+ false
+ end
+ end,
+ ok = monitor_nodes(false,[{node_type,all}]),
+ {Pid, Tag} = From,
+ erlang:send(Pid, {Tag, Reply}).
+
%% If the net_kernel isn't running we ignore all requests to the
%% kernel, thus basically accepting them :-)
@@ -393,41 +352,140 @@ init({Name, LongOrShortNames, TickT, CleanHalt}) ->
{stop, Error}
end.
+do_auto_connect_1(Node, ConnId, From, State) ->
+ case ets:lookup(sys_dist, Node) of
+ [#barred_connection{}] ->
+ case ConnId of
+ passive_cnct ->
+ spawn(?MODULE,passive_connect_monitor,[From,Node]),
+ {noreply, State};
+ _ ->
+ erts_internal:abort_connection(Node, ConnId),
+ {reply, false, State}
+ end;
+
+ ConnLookup ->
+ do_auto_connect_2(Node, ConnId, From, State, ConnLookup)
+ end.
+
+do_auto_connect_2(Node, passive_cnct, From, State, ConnLookup) ->
+ try erts_internal:new_connection(Node) of
+ ConnId ->
+ do_auto_connect_2(Node, ConnId, From, State, ConnLookup)
+ catch
+ _:_ ->
+ error_logger:error_msg("~n** Cannot get connection id for node ~w~n",
+ [Node]),
+ {reply, false, State}
+ end;
+do_auto_connect_2(Node, ConnId, From, State, ConnLookup) ->
+ case ConnLookup of
+ [#connection{conn_id=ConnId, state = up}] ->
+ {reply, true, State};
+ [#connection{conn_id=ConnId, waiting=Waiting}=Conn] ->
+ case From of
+ noreply -> ok;
+ _ -> ets:insert(sys_dist, Conn#connection{waiting = [From|Waiting]})
+ end,
+ {noreply, State};
+
+ _ ->
+ case application:get_env(kernel, dist_auto_connect) of
+ {ok, never} ->
+ ?connect_failure(Node,{dist_auto_connect,never}),
+ erts_internal:abort_connection(Node, ConnId),
+ {reply, false, State};
+
+ %% This might happen due to connection close
+ %% not being propagated to user space yet.
+ %% Save the day by just not connecting...
+ {ok, once} when ConnLookup =/= [],
+ (hd(ConnLookup))#connection.state =:= up ->
+ ?connect_failure(Node,{barred_connection,
+ ets:lookup(sys_dist, Node)}),
+ erts_internal:abort_connection(Node, ConnId),
+ {reply, false, State};
+ _ ->
+ case setup(Node, ConnId, normal, From, State) of
+ {ok, SetupPid} ->
+ Owners = [{SetupPid, Node} | State#state.conn_owners],
+ {noreply,State#state{conn_owners=Owners}};
+ _Error ->
+ ?connect_failure(Node, {setup_call, failed, _Error}),
+ erts_internal:abort_connection(Node, ConnId),
+ {reply, false, State}
+ end
+ end
+ end.
+
+
+do_explicit_connect([#connection{conn_id = ConnId, state = up}], _, _, ConnId, _From, State) ->
+ {reply, true, State};
+do_explicit_connect([#connection{conn_id = ConnId}=Conn], _, _, ConnId, From, State)
+ when Conn#connection.state =:= pending;
+ Conn#connection.state =:= up_pending ->
+ Waiting = Conn#connection.waiting,
+ ets:insert(sys_dist, Conn#connection{waiting = [From|Waiting]}),
+ {noreply, State};
+do_explicit_connect([#barred_connection{}], Type, Node, ConnId, From , State) ->
+ %% Barred connection only affects auto_connect, ignore it.
+ do_explicit_connect([], Type, Node, ConnId, From , State);
+do_explicit_connect(_ConnLookup, Type, Node, ConnId, From , State) ->
+ case setup(Node,ConnId,Type,From,State) of
+ {ok, SetupPid} ->
+ Owners = [{SetupPid, Node} | State#state.conn_owners],
+ {noreply,State#state{conn_owners=Owners}};
+ _Error ->
+ ?connect_failure(Node, {setup_call, failed, _Error}),
+ {reply, false, State}
+ end.
+
%% ------------------------------------------------------------
%% handle_call.
%% ------------------------------------------------------------
%%
-%% Set up a connection to Node.
-%% The response is delayed until the connection is up and
-%% running.
+%% Passive auto-connect to Node.
+%% The response is delayed until the connection is up and running.
+%%
+handle_call({passive_cnct, Node}, From, State) when Node =:= node() ->
+ async_reply({reply, true, State}, From);
+handle_call({passive_cnct, Node}, From, State) ->
+ verbose({passive_cnct, Node}, 1, State),
+ R = do_auto_connect_1(Node, passive_cnct, From, State),
+ return_call(R, From);
+
+%%
+%% Explicit connect
+%% The response is delayed until the connection is up and running.
%%
handle_call({connect, _, Node}, From, State) when Node =:= node() ->
async_reply({reply, true, State}, From);
handle_call({connect, Type, Node}, From, State) ->
verbose({connect, Type, Node}, 1, State),
- case ets:lookup(sys_dist, Node) of
- [Conn] when Conn#connection.state =:= up ->
- async_reply({reply, true, State}, From);
- [Conn] when Conn#connection.state =:= pending ->
- Waiting = Conn#connection.waiting,
- ets:insert(sys_dist, Conn#connection{waiting = [From|Waiting]}),
- {noreply, State};
- [Conn] when Conn#connection.state =:= up_pending ->
- Waiting = Conn#connection.waiting,
- ets:insert(sys_dist, Conn#connection{waiting = [From|Waiting]}),
- {noreply, State};
- _ ->
- case setup(Node,Type,From,State) of
- {ok, SetupPid} ->
- Owners = [{SetupPid, Node} | State#state.conn_owners],
- {noreply,State#state{conn_owners=Owners}};
- _ ->
- ?connect_failure(Node, {setup_call, failed}),
- async_reply({reply, false, State}, From)
- end
- end;
+ ConnLookup = ets:lookup(sys_dist, Node),
+ R = try erts_internal:new_connection(Node) of
+ ConnId ->
+ R1 = do_explicit_connect(ConnLookup, Type, Node, ConnId, From, State),
+ case R1 of
+ {reply, true, _S} -> %% already connected
+ ok;
+ {noreply, _S} -> %% connection pending
+ ok;
+ {reply, false, _S} -> %% connection refused
+ erts_internal:abort_connection(Node, ConnId)
+ end,
+ R1
+
+ catch
+ _:_ ->
+ error_logger:error_msg("~n** Cannot get connection id for node ~w~n",
+ [Node]),
+ {reply, false, State}
+ end,
+ return_call(R, From);
+
%%
%% Close the connection to Node.
@@ -470,6 +528,9 @@ handle_call({allow, Nodes}, From, State) ->
async_reply({reply,error,State}, From)
end;
+handle_call(allowed, From, #state{allowed = Allowed} = State) ->
+ async_reply({reply,{ok,Allowed},State}, From);
+
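Illustration of the new allowed/0 call (hypothetical shell session on a distributed node; the node name is an arbitrary example):

    1> net_kernel:allow(['foo@host']).
    ok
    2> net_kernel:allowed().
    {ok,['foo@host']}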
%%
%% authentication, used by auth. Simply works as this:
%% if the message comes through, the other node IS authorized.
@@ -634,6 +695,25 @@ terminate(_Reason, State) ->
%% ------------------------------------------------------------
%%
+%% Asynchronous auto connect request
+%%
+handle_info({auto_connect,Node, DHandle}, State) ->
+ verbose({auto_connect, Node, DHandle}, 1, State),
+ ConnId = DHandle,
+ NewState =
+ case do_auto_connect_1(Node, ConnId, noreply, State) of
+ {noreply, S} -> %% Pending connection
+ S;
+
+ {reply, true, S} -> %% Already connected
+ S;
+
+ {reply, false, S} -> %% Connection refused
+ S
+ end,
+ {noreply, NewState};
+
+%%
%% accept a new connection.
%%
handle_info({accept,AcceptPid,Socket,Family,Proto}, State) ->
@@ -713,14 +793,24 @@ handle_info({AcceptPid, {accept_pending,MyNode,Node,Address,Type}}, State) ->
AcceptPid ! {self(), {accept_pending, already_pending}},
{noreply, State};
_ ->
- ets:insert(sys_dist, #connection{node = Node,
- state = pending,
- owner = AcceptPid,
- address = Address,
- type = Type}),
- AcceptPid ! {self(),{accept_pending,ok}},
- Owners = [{AcceptPid,Node} | State#state.conn_owners],
- {noreply, State#state{conn_owners = Owners}}
+ try erts_internal:new_connection(Node) of
+ ConnId ->
+ ets:insert(sys_dist, #connection{node = Node,
+ conn_id = ConnId,
+ state = pending,
+ owner = AcceptPid,
+ address = Address,
+ type = Type}),
+ AcceptPid ! {self(),{accept_pending,ok}},
+ Owners = [{AcceptPid,Node} | State#state.conn_owners],
+ {noreply, State#state{conn_owners = Owners}}
+ catch
+ _:_ ->
+ error_logger:error_msg("~n** Cannot get connection id for node ~w~n",
+ [Node]),
+ AcceptPid ! {self(),{accept_pending,nok_pending}},
+ {noreply, State}
+ end
end;
handle_info({SetupPid, {is_pending, Node}}, State) ->
@@ -906,6 +996,7 @@ pending_nodedown(Conn, Node, Type, State) ->
% Don't bar connections that have never been alive
%mark_sys_dist_nodedown(Node),
% - instead just delete the node:
+ erts_internal:abort_connection(Node, Conn#connection.conn_id),
ets:delete(sys_dist, Node),
reply_waiting(Node,Conn#connection.waiting, false),
case Type of
@@ -920,7 +1011,9 @@ up_pending_nodedown(Conn, Node, _Reason, _Type, State) ->
AcceptPid = Conn#connection.pending_owner,
Owners = State#state.conn_owners,
Pend = lists:keydelete(AcceptPid, 1, State#state.pend_owners),
+ erts_internal:abort_connection(Node, Conn#connection.conn_id),
Conn1 = Conn#connection { owner = AcceptPid,
+ conn_id = erts_internal:new_connection(Node),
pending_owner = undefined,
state = pending },
ets:insert(sys_dist, Conn1),
@@ -928,15 +1021,16 @@ up_pending_nodedown(Conn, Node, _Reason, _Type, State) ->
State#state{conn_owners = [{AcceptPid,Node}|Owners], pend_owners = Pend}.
-up_nodedown(_Conn, Node, _Reason, Type, State) ->
- mark_sys_dist_nodedown(Node),
+up_nodedown(Conn, Node, _Reason, Type, State) ->
+ mark_sys_dist_nodedown(Conn, Node),
case Type of
normal -> ?nodedown(Node, State);
_ -> ok
end,
State.
-mark_sys_dist_nodedown(Node) ->
+mark_sys_dist_nodedown(Conn, Node) ->
+ erts_internal:abort_connection(Node, Conn#connection.conn_id),
case application:get_env(kernel, dist_auto_connect) of
{ok, once} ->
ets:insert(sys_dist, #barred_connection{node = Node});
@@ -1179,15 +1273,8 @@ spawn_func(_,{From,Tag},M,F,A,Gleader) ->
%% Set up connection to a new node.
%% -----------------------------------------------------------
-setup(Node,Type,From,State) ->
- Allowed = State#state.allowed,
- case lists:member(Node, Allowed) of
- false when Allowed =/= [] ->
- error_msg("** Connection attempt with "
- "disallowed node ~w ** ~n", [Node]),
- {error, bad_node};
- _ ->
- case select_mod(Node, State#state.listen) of
+setup(Node, ConnId, Type, From, State) ->
+ case setup_check(Node, State) of
{ok, L} ->
Mod = L#listen.module,
LAddr = L#listen.address,
@@ -1200,18 +1287,38 @@ setup(Node,Type,From,State) ->
Addr = LAddr#net_address {
address = undefined,
host = undefined },
+ Waiting = case From of
+ noreply -> [];
+ _ -> [From]
+ end,
ets:insert(sys_dist, #connection{node = Node,
+ conn_id = ConnId,
state = pending,
owner = Pid,
- waiting = [From],
+ waiting = Waiting,
address = Addr,
type = normal}),
{ok, Pid};
Error ->
Error
- end
end.
+setup_check(Node, State) ->
+ Allowed = State#state.allowed,
+ case lists:member(Node, Allowed) of
+ false when Allowed =/= [] ->
+ error_msg("** Connection attempt with "
+ "disallowed node ~w ** ~n", [Node]),
+ {error, bad_node};
+ _ ->
+ case select_mod(Node, State#state.listen) of
+ {ok, _L}=OK -> OK;
+ Error -> Error
+ end
+ end.
+
+
+
%%
%% Find a module that is willing to handle connection setup to Node
%%
@@ -1652,6 +1759,11 @@ verbose(_, _, _) ->
getnode(P) when is_pid(P) -> node(P);
getnode(P) -> P.
+return_call({noreply, _State}=R, _From) ->
+ R;
+return_call(R, From) ->
+ async_reply(R, From).
+
async_reply({reply, Msg, State}, From) ->
async_gen_server_reply(From, Msg),
{noreply, State}.
@@ -1659,16 +1771,16 @@ async_reply({reply, Msg, State}, From) ->
async_gen_server_reply(From, Msg) ->
{Pid, Tag} = From,
M = {Tag, Msg},
- case catch erlang:send(Pid, M, [nosuspend, noconnect]) of
+ try erlang:send(Pid, M, [nosuspend, noconnect]) of
ok ->
ok;
nosuspend ->
_ = spawn(fun() -> catch erlang:send(Pid, M, [noconnect]) end),
ok;
noconnect ->
- ok; % The gen module takes care of this case.
- {'EXIT', _} ->
- ok
+ ok % The gen module takes care of this case.
+ catch
+ _:_ -> ok
end.
call_owner(Owner, Msg) ->
diff --git a/lib/kernel/src/os.erl b/lib/kernel/src/os.erl
index a4b4f798f9..29a26674ba 100644
--- a/lib/kernel/src/os.erl
+++ b/lib/kernel/src/os.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -25,41 +25,36 @@
-include("file.hrl").
+-export_type([env_var_name/0, env_var_value/0, env_var_name_value/0]).
+
+-export([getenv/0, getenv/1, getenv/2, putenv/2, unsetenv/1]).
+
%%% BIFs
--export([getenv/0, getenv/1, getenv/2, getpid/0,
- perf_counter/0, perf_counter/1,
- putenv/2, set_signal/2, system_time/0, system_time/1,
- timestamp/0, unsetenv/1]).
+-export([get_env_var/1, getpid/0, list_env_vars/0, perf_counter/0,
+ perf_counter/1, set_env_var/2, set_signal/2, system_time/0,
+ system_time/1, timestamp/0, unset_env_var/1]).
-type os_command() :: atom() | io_lib:chars().
-type os_command_opts() :: #{ max_size => non_neg_integer() | infinity }.
-export_type([os_command/0, os_command_opts/0]).
--spec getenv() -> [string()].
+-type env_var_name() :: nonempty_string().
-getenv() -> erlang:nif_error(undef).
+-type env_var_value() :: string().
--spec getenv(VarName) -> Value | false when
- VarName :: string(),
- Value :: string().
+-type env_var_name_value() :: nonempty_string().
-getenv(_) ->
+-spec list_env_vars() -> [{env_var_name(), env_var_value()}].
+list_env_vars() ->
erlang:nif_error(undef).
--spec getenv(VarName, DefaultValue) -> Value when
- VarName :: string(),
- DefaultValue :: string(),
- Value :: string().
-
-getenv(VarName, DefaultValue) ->
- case os:getenv(VarName) of
- false ->
- DefaultValue;
- Value ->
- Value
- end.
+-spec get_env_var(VarName) -> Value | false when
+ VarName :: env_var_name(),
+ Value :: env_var_value().
+get_env_var(_VarName) ->
+ erlang:nif_error(undef).
-spec getpid() -> Value when
Value :: string().
@@ -79,11 +74,10 @@ perf_counter() ->
perf_counter(Unit) ->
erlang:convert_time_unit(os:perf_counter(), perf_counter, Unit).
--spec putenv(VarName, Value) -> true when
- VarName :: string(),
- Value :: string().
-
-putenv(_, _) ->
+-spec set_env_var(VarName, Value) -> true when
+ VarName :: env_var_name(),
+ Value :: env_var_value().
+set_env_var(_, _) ->
erlang:nif_error(undef).
-spec system_time() -> integer().
@@ -103,10 +97,9 @@ system_time(_Unit) ->
timestamp() ->
erlang:nif_error(undef).
--spec unsetenv(VarName) -> true when
- VarName :: string().
-
-unsetenv(_) ->
+-spec unset_env_var(VarName) -> true when
+ VarName :: env_var_name().
+unset_env_var(_) ->
erlang:nif_error(undef).
-spec set_signal(Signal, Option) -> 'ok' when
@@ -120,6 +113,39 @@ set_signal(_Signal, _Option) ->
%%% End of BIFs
+-spec getenv() -> [env_var_name_value()].
+getenv() ->
+ [lists:flatten([Key, $=, Value]) || {Key, Value} <- os:list_env_vars() ].
+
+-spec getenv(VarName) -> Value | false when
+ VarName :: env_var_name(),
+ Value :: env_var_value().
+getenv(VarName) ->
+ os:get_env_var(VarName).
+
+-spec getenv(VarName, DefaultValue) -> Value when
+ VarName :: env_var_name(),
+ DefaultValue :: env_var_value(),
+ Value :: env_var_value().
+getenv(VarName, DefaultValue) ->
+ case os:getenv(VarName) of
+ false ->
+ DefaultValue;
+ Value ->
+ Value
+ end.
+
+-spec putenv(VarName, Value) -> true when
+ VarName :: env_var_name(),
+ Value :: env_var_value().
+putenv(VarName, Value) ->
+ os:set_env_var(VarName, Value).
+
+-spec unsetenv(VarName) -> true when
+ VarName :: env_var_name().
+unsetenv(VarName) ->
+ os:unset_env_var(VarName).
+
-spec type() -> {Osfamily, Osname} when
Osfamily :: unix | win32,
Osname :: atom().
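A short illustration of the renamed primitives together with the wrappers above (the variable name is an arbitrary example):

    true  = os:set_env_var("SOME_VAR", "1"),
    "1"   = os:getenv("SOME_VAR"),
    true  = os:unset_env_var("SOME_VAR"),
    false = os:getenv("SOME_VAR").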
@@ -183,7 +209,7 @@ verify_executable(Name0, [Ext|Rest], OrigExtensions) ->
end;
verify_executable(Name, [], OrigExtensions) when OrigExtensions =/= [""] -> %% Windows
%% Will only happen on windows, hence case insensitivity
- case can_be_full_name(string:to_lower(Name),OrigExtensions) of
+ case can_be_full_name(string:lowercase(Name),OrigExtensions) of
true ->
verify_executable(Name,[""],[""]);
_ ->
@@ -245,8 +271,7 @@ cmd(Cmd) ->
Command :: os_command(),
Options :: os_command_opts().
cmd(Cmd, Opts) ->
- validate(Cmd),
- {SpawnCmd, SpawnOpts, SpawnInput, Eot} = mk_cmd(os:type(), Cmd),
+ {SpawnCmd, SpawnOpts, SpawnInput, Eot} = mk_cmd(os:type(), validate(Cmd)),
Port = open_port({spawn, SpawnCmd}, [binary, stderr_to_stdout,
stream, in, hide | SpawnOpts]),
MonRef = erlang:monitor(port, Port),
@@ -266,8 +291,6 @@ mk_cmd({win32,Wtype}, Cmd) ->
{Cspec,_} -> lists:concat([Cspec," /c",Cmd])
end,
{Command, [], [], <<>>};
-mk_cmd(OsType,Cmd) when is_atom(Cmd) ->
- mk_cmd(OsType, atom_to_list(Cmd));
mk_cmd(_,Cmd) ->
%% Have to send command in like this in order to make sh commands like
%% cd and ulimit available
@@ -290,17 +313,33 @@ mk_cmd(_,Cmd) ->
<<$\^D>>}.
validate(Atom) when is_atom(Atom) ->
- ok;
+ validate(atom_to_list(Atom));
validate(List) when is_list(List) ->
- validate1(List).
+ case validate1(List) of
+ false ->
+ List;
+ true ->
+ %% Had zeros at end; remove them...
+ string:trim(List, trailing, [0])
+ end.
-validate1([C|Rest]) when is_integer(C) ->
+validate1([0|Rest]) ->
+ validate2(Rest);
+validate1([C|Rest]) when is_integer(C), C > 0 ->
validate1(Rest);
validate1([List|Rest]) when is_list(List) ->
- validate1(List),
- validate1(Rest);
+ validate1(List) or validate1(Rest);
validate1([]) ->
- ok.
+ false.
+
+%% Ensure that the rest is zero only...
+validate2([]) ->
+ true;
+validate2([0|Rest]) ->
+ validate2(Rest);
+validate2([List|Rest]) when is_list(List) ->
+ validate2(List),
+ validate2(Rest).
get_data(Port, MonRef, Eot, Sofar, Size, Max) ->
receive
diff --git a/lib/kernel/src/raw_file_io.erl b/lib/kernel/src/raw_file_io.erl
new file mode 100644
index 0000000000..e3c07c8f78
--- /dev/null
+++ b/lib/kernel/src/raw_file_io.erl
@@ -0,0 +1,75 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(raw_file_io).
+
+-export([open/2]).
+
+open(Filename, Modes) ->
+ %% Layers are applied in this order, and the listed modules will call this
+ %% function again as necessary; e.g., a raw compressed delayed file in list
+ %% mode will walk through [_list -> _compressed -> _delayed -> _raw].
+ ModuleOrder = [{raw_file_io_list, fun match_list/1},
+ {raw_file_io_compressed, fun match_compressed/1},
+ {raw_file_io_delayed, fun match_delayed/1},
+ {raw_file_io_raw, fun match_raw/1}],
+ open_1(ModuleOrder, Filename, add_implicit_modes(Modes)).
+open_1([], _Filename, _Modes) ->
+ error(badarg);
+open_1([{Module, Match} | Rest], Filename, Modes) ->
+ case lists:any(Match, Modes) of
+ true ->
+ {Options, ChildModes} =
+ lists:partition(fun(Mode) -> Match(Mode) end, Modes),
+ Module:open_layer(Filename, ChildModes, Options);
+ false ->
+ open_1(Rest, Filename, Modes)
+ end.
+
+%% The 'read' and 'list' modes are enabled unless disabled by another option, so
+%% we'll explicitly add them to avoid duplicating this logic in child layers.
+add_implicit_modes(Modes0) ->
+ Modes1 = add_unless_matched(Modes0, fun match_writable/1, read),
+ add_unless_matched(Modes1, fun match_binary/1, list).
+add_unless_matched(Modes, Match, Default) ->
+ case lists:any(Match, Modes) of
+ false -> [Default | Modes];
+ true -> Modes
+ end.
+
+match_list(list) -> true;
+match_list(_Other) -> false.
+
+match_compressed(compressed) -> true;
+match_compressed(_Other) -> false.
+
+match_delayed({delayed_write, _Size, _Timeout}) -> true;
+match_delayed(delayed_write) -> true;
+match_delayed(_Other) -> false.
+
+match_raw(raw) -> true;
+match_raw(_Other) -> false.
+
+match_writable(write) -> true;
+match_writable(append) -> true;
+match_writable(exclusive) -> true;
+match_writable(_Other) -> false.
+
+match_binary(binary) -> true;
+match_binary(_Other) -> false.
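A hedged usage sketch (assuming, as elsewhere in this patch, that file:open/2 routes raw opens through this module; "data.gz" is a hypothetical existing file):

    {ok, Fd}  = file:open("data.gz", [raw, read, binary, compressed]),
    {ok, Bin} = file:read(Fd, 1024),
    ok        = file:close(Fd).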
diff --git a/lib/kernel/src/raw_file_io_compressed.erl b/lib/kernel/src/raw_file_io_compressed.erl
new file mode 100644
index 0000000000..d5ab042d25
--- /dev/null
+++ b/lib/kernel/src/raw_file_io_compressed.erl
@@ -0,0 +1,134 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(raw_file_io_compressed).
+
+-export([close/1, sync/1, datasync/1, truncate/1, advise/4, allocate/3,
+ position/2, write/2, pwrite/2, pwrite/3,
+ read_line/1, read/2, pread/2, pread/3]).
+
+%% OTP internal.
+-export([ipread_s32bu_p32bu/3, sendfile/8]).
+
+-export([open_layer/3]).
+
+-include("file_int.hrl").
+
+open_layer(Filename, Modes, Options) ->
+ IsAppend = lists:member(append, Modes),
+ IsDeflate = lists:member(write, Modes),
+ IsInflate = lists:member(read, Modes),
+ if
+ IsDeflate, IsInflate; IsAppend ->
+ {error, einval};
+ IsDeflate, not IsInflate ->
+ start_server_module(raw_file_io_deflate, Filename, Modes, Options);
+ IsInflate ->
+ start_server_module(raw_file_io_inflate, Filename, Modes, Options)
+ end.
+
+start_server_module(Module, Filename, Modes, Options) ->
+ Secret = make_ref(),
+ case gen_statem:start(Module, {self(), Secret, Options}, []) of
+ {ok, Pid} -> open_next_layer(Pid, Secret, Filename, Modes);
+ Other -> Other
+ end.
+
+open_next_layer(Pid, Secret, Filename, Modes) ->
+ case gen_statem:call(Pid, {'$open', Secret, Filename, Modes}, infinity) of
+ ok ->
+ PublicFd = #file_descriptor{
+ module = raw_file_io_compressed, data = {self(), Pid} },
+ {ok, PublicFd};
+ Other -> Other
+ end.
+
+close(Fd) ->
+ wrap_call(Fd, [close]).
+
+sync(Fd) ->
+ wrap_call(Fd, [sync]).
+datasync(Fd) ->
+ wrap_call(Fd, [datasync]).
+
+truncate(Fd) ->
+ wrap_call(Fd, [truncate]).
+
+advise(Fd, Offset, Length, Advise) ->
+ wrap_call(Fd, [advise, Offset, Length, Advise]).
+allocate(Fd, Offset, Length) ->
+ wrap_call(Fd, [allocate, Offset, Length]).
+
+position(Fd, Mark) ->
+ wrap_call(Fd, [position, Mark]).
+
+write(Fd, IOData) ->
+ try
+ CompactedData = erlang:iolist_to_iovec(IOData),
+ wrap_call(Fd, [write, CompactedData])
+ catch
+ error:badarg -> {error, badarg}
+ end.
+
+pwrite(Fd, Offset, IOData) ->
+ try
+ CompactedData = erlang:iolist_to_iovec(IOData),
+ wrap_call(Fd, [pwrite, Offset, CompactedData])
+ catch
+ error:badarg -> {error, badarg}
+ end.
+pwrite(Fd, LocBytes) ->
+ try
+ CompactedLocBytes =
+ [ {Offset, erlang:iolist_to_iovec(IOData)} ||
+ {Offset, IOData} <- LocBytes ],
+ wrap_call(Fd, [pwrite, CompactedLocBytes])
+ catch
+ error:badarg -> {error, badarg}
+ end.
+
+read_line(Fd) ->
+ wrap_call(Fd, [read_line]).
+read(Fd, Size) ->
+ wrap_call(Fd, [read, Size]).
+pread(Fd, Offset, Size) ->
+ wrap_call(Fd, [pread, Offset, Size]).
+pread(Fd, LocNums) ->
+ wrap_call(Fd, [pread, LocNums]).
+
+ipread_s32bu_p32bu(Fd, Offset, MaxSize) ->
+ wrap_call(Fd, [ipread_s32bu_p32bu, Offset, MaxSize]).
+
+sendfile(_,_,_,_,_,_,_,_) ->
+ {error, enotsup}.
+
+wrap_call(Fd, Command) ->
+ {_Owner, Pid} = get_fd_data(Fd),
+ try gen_statem:call(Pid, Command, infinity) of
+ Result -> Result
+ catch
+ exit:{noproc, _StackTrace} -> {error, einval}
+ end.
+
+get_fd_data(#file_descriptor{ data = Data }) ->
+ {Owner, _ServerPid} = Data,
+ case self() of
+ Owner -> Data;
+ _ -> error(not_on_controlling_process)
+ end.
diff --git a/lib/kernel/src/raw_file_io_deflate.erl b/lib/kernel/src/raw_file_io_deflate.erl
new file mode 100644
index 0000000000..acfc546743
--- /dev/null
+++ b/lib/kernel/src/raw_file_io_deflate.erl
@@ -0,0 +1,159 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(raw_file_io_deflate).
+
+-behavior(gen_statem).
+
+-export([init/1, callback_mode/0, terminate/3]).
+-export([opening/3, opened/3]).
+
+-include("file_int.hrl").
+
+-define(GZIP_WBITS, 16 + 15).
+
+callback_mode() -> state_functions.
+
+init({Owner, Secret, [compressed]}) ->
+ Monitor = monitor(process, Owner),
+ Z = zlib:open(),
+ ok = zlib:deflateInit(Z, default, deflated, ?GZIP_WBITS, 8, default),
+ Data =
+ #{ owner => Owner,
+ monitor => Monitor,
+ secret => Secret,
+ position => 0,
+ zlib => Z },
+ {ok, opening, Data}.
+
+opening({call, From}, {'$open', Secret, Filename, Modes}, #{ secret := Secret } = Data) ->
+ case raw_file_io:open(Filename, Modes) of
+ {ok, PrivateFd} ->
+ NewData = Data#{ handle => PrivateFd },
+ {next_state, opened, NewData, [{reply, From, ok}]};
+ Other ->
+ {stop_and_reply, normal, [{reply, From, Other}]}
+ end;
+opening(_Event, _Contents, _Data) ->
+ {keep_state_and_data, [postpone]}.
+
+%%
+
+opened(info, {'DOWN', Monitor, process, _Owner, Reason}, #{ monitor := Monitor } = Data) ->
+ if
+ Reason =/= kill -> flush_deflate_state(Data);
+ Reason =:= kill -> ignored
+ end,
+ {stop, shutdown};
+
+opened(info, _Message, _Data) ->
+ keep_state_and_data;
+
+opened({call, {Owner, _Tag} = From}, [close], #{ owner := Owner } = Data) ->
+ #{ handle := PrivateFd } = Data,
+ Response =
+ case flush_deflate_state(Data) of
+ ok -> ?CALL_FD(PrivateFd, close, []);
+ Other -> Other
+ end,
+ {stop_and_reply, normal, [{reply, From, Response}]};
+
+opened({call, {Owner, _Tag} = From}, [position, Mark], #{ owner := Owner } = Data) ->
+ case position(Data, Mark) of
+ {ok, NewData, Result} ->
+ Response = {ok, Result},
+ {keep_state, NewData, [{reply, From, Response}]};
+ Other ->
+ {keep_state_and_data, [{reply, From, Other}]}
+ end;
+
+opened({call, {Owner, _Tag} = From}, [write, IOVec], #{ owner := Owner } = Data) ->
+ case write(Data, IOVec) of
+ {ok, NewData} -> {keep_state, NewData, [{reply, From, ok}]};
+ Other -> {keep_state_and_data, [{reply, From, Other}]}
+ end;
+
+opened({call, {Owner, _Tag} = From}, [read, _Size], #{ owner := Owner }) ->
+ Response = {error, ebadf},
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened({call, {Owner, _Tag} = From}, [read_line], #{ owner := Owner }) ->
+ Response = {error, ebadf},
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened({call, {Owner, _Tag} = From}, _Command, #{ owner := Owner }) ->
+ Response = {error, enotsup},
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened({call, _From}, _Command, _Data) ->
+ %% The client functions filter this case out, so we'll crash if the user
+ %% deliberately bypasses them.
+ {shutdown, protocol_violation};
+
+opened(_Event, _Request, _Data) ->
+ keep_state_and_data.
+
+write(Data, IOVec) ->
+ #{ handle := PrivateFd, position := Position, zlib := Z } = Data,
+ UncompressedSize = iolist_size(IOVec),
+ case ?CALL_FD(PrivateFd, write, [zlib:deflate(Z, IOVec)]) of
+ ok -> {ok, Data#{ position := (Position + UncompressedSize) }};
+ Other -> Other
+ end.
+
+%%
+%% We support "seeking" forward as long as it isn't relative to EOF.
+%%
+%% Seeking is a bit of a misnomer as it's really just compressing zeroes until
+%% we reach the desired point, but it has always behaved like this.
+%%
+
+position(Data, Mark) when is_atom(Mark) ->
+ position(Data, {Mark, 0});
+position(Data, Offset) when is_integer(Offset) ->
+ position(Data, {bof, Offset});
+position(Data, {bof, Offset}) when is_integer(Offset) ->
+ position_1(Data, Offset);
+position(Data, {cur, Offset}) when is_integer(Offset) ->
+ #{ position := Position } = Data,
+ position_1(Data, Position + Offset);
+position(_Data, {eof, Offset}) when is_integer(Offset) ->
+ {error, einval};
+position(_Data, _Any) ->
+ {error, badarg}.
+
+position_1(#{ position := Desired } = Data, Desired) ->
+ {ok, Data, Desired};
+position_1(#{ position := Current } = Data, Desired) when Current < Desired ->
+ BytesToWrite = min(Desired - Current, 4 bsl 20),
+ case write(Data, <<0:(BytesToWrite)/unit:8>>) of
+ {ok, NewData} -> position_1(NewData, Desired);
+ Other -> Other
+ end;
+position_1(#{ position := Current }, Desired) when Current > Desired ->
+ {error, einval}.
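A worked illustration of the clauses above (assuming the documented
file:open/2 and file:position/2 behaviour; names and sizes are illustrative):
seeking forward on a write-only compressed file deflates zero-filled chunks of
at most 4 MiB (4 bsl 20) until the target offset is reached, while EOF-relative
and backward seeks are refused.

    {ok, Fd} = file:open("out.gz", [write, raw, binary, compressed]),
    {ok, 1048576} = file:position(Fd, {bof, 1048576}),  %% 1 MiB of deflated zeroes
    {error, einval} = file:position(Fd, {eof, 0}),      %% relative to EOF: refused
    ok = file:close(Fd).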
+
+flush_deflate_state(#{ handle := PrivateFd, zlib := Z }) ->
+ case ?CALL_FD(PrivateFd, write, [zlib:deflate(Z, [], finish)]) of
+ ok -> ok;
+ Other -> Other
+ end.
+
+terminate(_Reason, _State, _Data) ->
+ ok.
diff --git a/lib/kernel/src/raw_file_io_delayed.erl b/lib/kernel/src/raw_file_io_delayed.erl
new file mode 100644
index 0000000000..d2ad7550a1
--- /dev/null
+++ b/lib/kernel/src/raw_file_io_delayed.erl
@@ -0,0 +1,320 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(raw_file_io_delayed).
+
+-behavior(gen_statem).
+
+-export([close/1, sync/1, datasync/1, truncate/1, advise/4, allocate/3,
+ position/2, write/2, pwrite/2, pwrite/3,
+ read_line/1, read/2, pread/2, pread/3]).
+
+%% OTP internal.
+-export([ipread_s32bu_p32bu/3, sendfile/8]).
+
+-export([open_layer/3]).
+
+-export([init/1, callback_mode/0, terminate/3]).
+-export([opening/3, opened/3]).
+
+-include("file_int.hrl").
+
+open_layer(Filename, Modes, Options) ->
+ Secret = make_ref(),
+ case gen_statem:start(?MODULE, {self(), Secret, Options}, []) of
+ {ok, Pid} ->
+ gen_statem:call(Pid, {'$open', Secret, Filename, Modes}, infinity);
+ Other ->
+ Other
+ end.
+
+callback_mode() -> state_functions.
+
+init({Owner, Secret, Options}) ->
+ Monitor = monitor(process, Owner),
+ Defaults =
+ #{ owner => Owner,
+ monitor => Monitor,
+ secret => Secret,
+ timer => none,
+ pid => self(),
+ buffer => prim_buffer:new(),
+ delay_size => 64 bsl 10,
+ delay_time => 2000 },
+ Data = fill_delay_values(Defaults, Options),
+ {ok, opening, Data}.
+
+fill_delay_values(Data, []) ->
+ Data;
+fill_delay_values(Data, [{delayed_write, Size, Time} | Options]) ->
+ fill_delay_values(Data#{ delay_size => Size, delay_time => Time }, Options);
+fill_delay_values(Data, [_ | Options]) ->
+ fill_delay_values(Data, Options).
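A sketch of how these delay parameters are normally supplied through
file:open/2 (the values mirror the defaults above; the file name is
illustrative):

    %% Buffer up to 64 KB or 2 seconds of writes before touching the disk:
    {ok, Fd} = file:open("trace.log",
                         [write, raw, binary, {delayed_write, 64 bsl 10, 2000}]),
    ok = file:write(Fd, <<"small writes are batched">>),
    ok = file:close(Fd).    %% close flushes whatever is still buffered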
+
+opening({call, From}, {'$open', Secret, Filename, Modes}, #{ secret := Secret } = Data) ->
+ case raw_file_io:open(Filename, Modes) of
+ {ok, PrivateFd} ->
+ PublicData = maps:with([owner, buffer, delay_size, pid], Data),
+ PublicFd = #file_descriptor{ module = ?MODULE, data = PublicData },
+
+ NewData = Data#{ handle => PrivateFd },
+ Response = {ok, PublicFd},
+ {next_state, opened, NewData, [{reply, From, Response}]};
+ Other ->
+ {stop_and_reply, normal, [{reply, From, Other}]}
+ end;
+opening(_Event, _Contents, _Data) ->
+ {keep_state_and_data, [postpone]}.
+
+%%
+
+opened(info, {'$timed_out', Secret}, #{ secret := Secret } = Data) ->
+ %% If the user writes something at this exact moment, the flush will fail
+ %% and the timer won't reset on the next write since the buffer won't be
+ %% empty (Unless we collided on a flush). We therefore reset the timeout to
+ %% ensure that data won't sit idle for extended periods of time.
+ case try_flush_write_buffer(Data) of
+ busy -> gen_statem:cast(self(), '$reset_timeout');
+ ok -> ok
+ end,
+ {keep_state, Data#{ timer => none }, []};
+
+opened(info, {'DOWN', Monitor, process, _Owner, Reason}, #{ monitor := Monitor } = Data) ->
+ if
+ Reason =/= kill -> try_flush_write_buffer(Data);
+ Reason =:= kill -> ignored
+ end,
+ {stop, shutdown};
+
+opened(info, _Message, _Data) ->
+ keep_state_and_data;
+
+opened({call, {Owner, _Tag} = From}, [close], #{ owner := Owner } = Data) ->
+ case flush_write_buffer(Data) of
+ ok ->
+ #{ handle := PrivateFd } = Data,
+ Response = ?CALL_FD(PrivateFd, close, []),
+ {stop_and_reply, normal, [{reply, From, Response}]};
+ Other ->
+ {stop_and_reply, normal, [{reply, From, Other}]}
+ end;
+
+opened({call, {Owner, _Tag} = From}, '$wait', #{ owner := Owner }) ->
+ %% Used in write/2 to synchronize writes on lock conflicts.
+ {keep_state_and_data, [{reply, From, ok}]};
+
+opened({call, {Owner, _Tag} = From}, '$synchronous_flush', #{ owner := Owner } = Data) ->
+ cancel_flush_timeout(Data),
+ Response = flush_write_buffer(Data),
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened({call, {Owner, _Tag} = From}, Command, #{ owner := Owner } = Data) ->
+ Response =
+ case flush_write_buffer(Data) of
+ ok -> dispatch_command(Data, Command);
+ Other -> Other
+ end,
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened({call, _From}, _Command, _Data) ->
+ %% The client functions filter this case out, so we'll crash if the user
+ %% deliberately bypasses them.
+ {shutdown, protocol_violation};
+
+opened(cast, '$reset_timeout', #{ delay_time := Timeout, secret := Secret } = Data) ->
+ cancel_flush_timeout(Data),
+ Timer = erlang:send_after(Timeout, self(), {'$timed_out', Secret}),
+ {keep_state, Data#{ timer => Timer }, []};
+
+opened(cast, _Message, _Data) ->
+ {keep_state_and_data, []}.
+
+dispatch_command(Data, [Function | Args]) ->
+ #{ handle := Handle } = Data,
+ Module = Handle#file_descriptor.module,
+ apply(Module, Function, [Handle | Args]).
+
+cancel_flush_timeout(#{ timer := none }) ->
+ ok;
+cancel_flush_timeout(#{ timer := Timer }) ->
+ _ = erlang:cancel_timer(Timer, [{async, true}]),
+ ok.
+
+try_flush_write_buffer(#{ buffer := Buffer, handle := PrivateFd }) ->
+ case prim_buffer:try_lock(Buffer) of
+ acquired ->
+ flush_write_buffer_1(Buffer, PrivateFd),
+ prim_buffer:unlock(Buffer),
+ ok;
+ busy ->
+ busy
+ end.
+
+%% This is only safe to use when there is no chance of conflict with the owner
+%% process; in other words, during synchronous calls outside of the locked
+%% section of write/2.
+flush_write_buffer(#{ buffer := Buffer, handle := PrivateFd }) ->
+ acquired = prim_buffer:try_lock(Buffer),
+ Result = flush_write_buffer_1(Buffer, PrivateFd),
+ prim_buffer:unlock(Buffer),
+ Result.
+
+flush_write_buffer_1(Buffer, PrivateFd) ->
+ case prim_buffer:size(Buffer) of
+ Size when Size > 0 ->
+ ?CALL_FD(PrivateFd, write, [prim_buffer:read_iovec(Buffer, Size)]);
+ 0 ->
+ ok
+ end.
+
+terminate(_Reason, _State, _Data) ->
+ ok.
+
+%% Client functions
+
+write(Fd, IOData) ->
+ try
+ enqueue_write(Fd, erlang:iolist_to_iovec(IOData))
+ catch
+ error:badarg -> {error, badarg}
+ end.
+enqueue_write(_Fd, []) ->
+ ok;
+enqueue_write(Fd, IOVec) ->
+ %% get_fd_data will reject everyone except the process that opened the Fd,
+ %% so we can't race with anyone except the wrapper process.
+ #{ delay_size := DelaySize,
+ buffer := Buffer,
+ pid := Pid } = get_fd_data(Fd),
+ case prim_buffer:try_lock(Buffer) of
+ acquired ->
+ %% (The wrapper process will exit without flushing if we're killed
+ %% while holding the lock).
+ enqueue_write_locked(Pid, Buffer, DelaySize, IOVec);
+ busy ->
+ %% This can only happen while we're processing a timeout in the
+ %% wrapper process, so we perform a bogus call to get a completion
+ %% notification before trying again.
+ gen_statem:call(Pid, '$wait'),
+ enqueue_write(Fd, IOVec)
+ end.
+enqueue_write_locked(Pid, Buffer, DelaySize, IOVec) ->
+ %% The synchronous operations (write, forced flush) are safe since we're
+ %% running on the only process that can fill the buffer; a timeout being
+ %% processed just before $synchronous_flush will cause the flush to nop,
+ %% and a timeout sneaking in just before a synchronous write won't do
+ %% anything since the buffer is guaranteed to be empty at that point.
+ BufSize = prim_buffer:size(Buffer),
+ case is_iovec_smaller_than(IOVec, DelaySize - BufSize) of
+ true when BufSize > 0 ->
+ prim_buffer:write(Buffer, IOVec),
+ prim_buffer:unlock(Buffer);
+ true ->
+ prim_buffer:write(Buffer, IOVec),
+ prim_buffer:unlock(Buffer),
+ gen_statem:cast(Pid, '$reset_timeout');
+ false when BufSize > 0 ->
+ prim_buffer:write(Buffer, IOVec),
+ prim_buffer:unlock(Buffer),
+ gen_statem:call(Pid, '$synchronous_flush');
+ false ->
+ prim_buffer:unlock(Buffer),
+ gen_statem:call(Pid, [write, IOVec])
+ end.
+
+%% iolist_size/1 will always look through the entire list to get a precise
+%% amount, which is pretty inefficient since we only need to know whether we've
+%% hit the buffer threshold or not.
+%%
+%% We only handle the binary case since write/2 forcibly translates input to
+%% erlang:iovec().
+is_iovec_smaller_than(IOVec, Max) ->
+ is_iovec_smaller_than_1(IOVec, Max, 0).
+is_iovec_smaller_than_1(_IOVec, Max, Acc) when Acc >= Max ->
+ false;
+is_iovec_smaller_than_1([], _Max, _Acc) ->
+ true;
+is_iovec_smaller_than_1([Binary | Rest], Max, Acc) when is_binary(Binary) ->
+ is_iovec_smaller_than_1(Rest, Max, Acc + byte_size(Binary)).
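A small worked example of the early exit, were the helper called directly
(values purely illustrative): with a 4-byte threshold the accumulated size
passes the limit on the second binary and the rest of the iovec is never
inspected.

    false = is_iovec_smaller_than([<<1,2,3>>, <<4,5>>, <<6,7,8,9>>], 4),
    true  = is_iovec_smaller_than([<<1,2,3>>], 4).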
+
+close(Fd) ->
+ wrap_call(Fd, [close]).
+
+sync(Fd) ->
+ wrap_call(Fd, [sync]).
+datasync(Fd) ->
+ wrap_call(Fd, [datasync]).
+
+truncate(Fd) ->
+ wrap_call(Fd, [truncate]).
+
+advise(Fd, Offset, Length, Advise) ->
+ wrap_call(Fd, [advise, Offset, Length, Advise]).
+allocate(Fd, Offset, Length) ->
+ wrap_call(Fd, [allocate, Offset, Length]).
+
+position(Fd, Mark) ->
+ wrap_call(Fd, [position, Mark]).
+
+pwrite(Fd, Offset, IOData) ->
+ try
+ CompactedData = erlang:iolist_to_iovec(IOData),
+ wrap_call(Fd, [pwrite, Offset, CompactedData])
+ catch
+ error:badarg -> {error, badarg}
+ end.
+pwrite(Fd, LocBytes) ->
+ try
+ CompactedLocBytes =
+ [ {Offset, erlang:iolist_to_iovec(IOData)} ||
+ {Offset, IOData} <- LocBytes ],
+ wrap_call(Fd, [pwrite, CompactedLocBytes])
+ catch
+ error:badarg -> {error, badarg}
+ end.
+
+read_line(Fd) ->
+ wrap_call(Fd, [read_line]).
+read(Fd, Size) ->
+ wrap_call(Fd, [read, Size]).
+pread(Fd, Offset, Size) ->
+ wrap_call(Fd, [pread, Offset, Size]).
+pread(Fd, LocNums) ->
+ wrap_call(Fd, [pread, LocNums]).
+
+ipread_s32bu_p32bu(Fd, Offset, MaxSize) ->
+ wrap_call(Fd, [ipread_s32bu_p32bu, Offset, MaxSize]).
+
+sendfile(_,_,_,_,_,_,_,_) ->
+ {error, enotsup}.
+
+wrap_call(Fd, Command) ->
+ #{ pid := Pid } = get_fd_data(Fd),
+ try gen_statem:call(Pid, Command, infinity) of
+ Result -> Result
+ catch
+ exit:{noproc, _StackTrace} -> {error, einval}
+ end.
+
+get_fd_data(#file_descriptor{ data = Data }) ->
+ #{ owner := Owner } = Data,
+ case self() of
+ Owner -> Data;
+ _ -> error(not_on_controlling_process)
+ end.
diff --git a/lib/kernel/src/raw_file_io_inflate.erl b/lib/kernel/src/raw_file_io_inflate.erl
new file mode 100644
index 0000000000..d3ed02dd03
--- /dev/null
+++ b/lib/kernel/src/raw_file_io_inflate.erl
@@ -0,0 +1,261 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(raw_file_io_inflate).
+
+-behavior(gen_statem).
+
+-export([init/1, callback_mode/0, terminate/3]).
+-export([opening/3, opened_gzip/3, opened_passthrough/3]).
+
+-include("file_int.hrl").
+
+-define(INFLATE_CHUNK_SIZE, (8 bsl 10)).
+-define(GZIP_WBITS, (16 + 15)).
+
+callback_mode() -> state_functions.
+
+init({Owner, Secret, [compressed]}) ->
+ Monitor = monitor(process, Owner),
+ %% We're using the undocumented inflateInit/3 to open the stream in
+ %% 'reset mode', which resets the inflate state at the end of every stream,
+ %% allowing us to read concatenated gzip files.
+ Z = zlib:open(),
+ ok = zlib:inflateInit(Z, ?GZIP_WBITS, reset),
+ Data =
+ #{ owner => Owner,
+ monitor => Monitor,
+ secret => Secret,
+ position => 0,
+ buffer => prim_buffer:new(),
+ zlib => Z },
+ {ok, opening, Data}.
+
+%% The old driver fell back to plain reads if the file didn't start with the
+%% magic gzip bytes.
+choose_decompression_state(PrivateFd) ->
+ State =
+ case ?CALL_FD(PrivateFd, read, [2]) of
+ {ok, <<16#1F, 16#8B>>} -> opened_gzip;
+ _Other -> opened_passthrough
+ end,
+ {ok, 0} = ?CALL_FD(PrivateFd, position, [0]),
+ State.
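A brief sketch of what this fallback means to callers (assuming the documented
read-side behaviour; file names are illustrative): both opens succeed, but only
the file that starts with the gzip magic bytes 16#1F, 16#8B is inflated.

    {ok, GzFd}    = file:open("data.gz",   [read, raw, binary, compressed]),
    {ok, PlainFd} = file:open("plain.txt", [read, raw, binary, compressed]),
    %% PlainFd simply returns the file contents as-is.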
+
+opening({call, From}, {'$open', Secret, Filename, Modes}, #{ secret := Secret } = Data) ->
+ case raw_file_io:open(Filename, Modes) of
+ {ok, PrivateFd} ->
+ NextState = choose_decompression_state(PrivateFd),
+ NewData = Data#{ handle => PrivateFd },
+ {next_state, NextState, NewData, [{reply, From, ok}]};
+ Other ->
+ {stop_and_reply, normal, [{reply, From, Other}]}
+ end;
+opening(_Event, _Contents, _Data) ->
+ {keep_state_and_data, [postpone]}.
+
+internal_close(From, Data) ->
+ #{ handle := PrivateFd } = Data,
+ Response = ?CALL_FD(PrivateFd, close, []),
+ {stop_and_reply, normal, [{reply, From, Response}]}.
+
+opened_passthrough(info, {'DOWN', Monitor, process, _Owner, _Reason}, #{ monitor := Monitor }) ->
+ {stop, shutdown};
+
+opened_passthrough(info, _Message, _Data) ->
+ keep_state_and_data;
+
+opened_passthrough({call, {Owner, _Tag} = From}, [close], #{ owner := Owner } = Data) ->
+ internal_close(From, Data);
+
+opened_passthrough({call, {Owner, _Tag} = From}, [Method | Args], #{ owner := Owner } = Data) ->
+ #{ handle := PrivateFd } = Data,
+ Response = ?CALL_FD(PrivateFd, Method, Args),
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened_passthrough({call, _From}, _Command, _Data) ->
+ %% The client functions filter this case out, so we'll crash if the user
+ %% deliberately bypasses them.
+ {shutdown, protocol_violation};
+
+opened_passthrough(_Event, _Request, _Data) ->
+ keep_state_and_data.
+
+%%
+
+opened_gzip(info, {'DOWN', Monitor, process, _Owner, _Reason}, #{ monitor := Monitor }) ->
+ {stop, shutdown};
+
+opened_gzip(info, _Message, _Data) ->
+ keep_state_and_data;
+
+opened_gzip({call, {Owner, _Tag} = From}, [close], #{ owner := Owner } = Data) ->
+ internal_close(From, Data);
+
+opened_gzip({call, {Owner, _Tag} = From}, [position, Mark], #{ owner := Owner } = Data) ->
+ case position(Data, Mark) of
+ {ok, NewData, Result} ->
+ Response = {ok, Result},
+ {keep_state, NewData, [{reply, From, Response}]};
+ Other ->
+ {keep_state_and_data, [{reply, From, Other}]}
+ end;
+
+opened_gzip({call, {Owner, _Tag} = From}, [read, Size], #{ owner := Owner } = Data) ->
+ case read(Data, Size) of
+ {ok, NewData, Result} ->
+ Response = {ok, Result},
+ {keep_state, NewData, [{reply, From, Response}]};
+ Other ->
+ {keep_state_and_data, [{reply, From, Other}]}
+ end;
+
+opened_gzip({call, {Owner, _Tag} = From}, [read_line], #{ owner := Owner } = Data) ->
+ case read_line(Data) of
+ {ok, NewData, Result} ->
+ Response = {ok, Result},
+ {keep_state, NewData, [{reply, From, Response}]};
+ Other ->
+ {keep_state_and_data, [{reply, From, Other}]}
+ end;
+
+opened_gzip({call, {Owner, _Tag} = From}, [write, _IOData], #{ owner := Owner }) ->
+ Response = {error, ebadf},
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened_gzip({call, {Owner, _Tag} = From}, _Request, #{ owner := Owner }) ->
+ Response = {error, enotsup},
+ {keep_state_and_data, [{reply, From, Response}]};
+
+opened_gzip({call, _From}, _Request, _Data) ->
+ %% The client functions filter this case out, so we'll crash if the user
+ %% deliberately bypasses them.
+ {shutdown, protocol_violation};
+
+opened_gzip(_Event, _Request, _Data) ->
+ keep_state_and_data.
+
+%%
+
+read(#{ buffer := Buffer } = Data, Size) ->
+ try read_1(Data, Buffer, prim_buffer:size(Buffer), Size) of
+ Result -> Result
+ catch
+ error:badarg -> {error, badarg};
+ error:_ -> {error, eio}
+ end.
+read_1(Data, Buffer, BufferSize, ReadSize) when BufferSize >= ReadSize ->
+ #{ position := Position } = Data,
+ Decompressed = prim_buffer:read(Buffer, ReadSize),
+ {ok, Data#{ position => (Position + ReadSize) }, Decompressed};
+read_1(Data, Buffer, BufferSize, ReadSize) when BufferSize < ReadSize ->
+ #{ handle := PrivateFd } = Data,
+ case ?CALL_FD(PrivateFd, read, [?INFLATE_CHUNK_SIZE]) of
+ {ok, Compressed} ->
+ #{ zlib := Z } = Data,
+ Uncompressed = erlang:iolist_to_iovec(zlib:inflate(Z, Compressed)),
+ prim_buffer:write(Buffer, Uncompressed),
+ read_1(Data, Buffer, prim_buffer:size(Buffer), ReadSize);
+ eof when BufferSize > 0 ->
+ read_1(Data, Buffer, BufferSize, BufferSize);
+ Other ->
+ Other
+ end.
+
+read_line(#{ buffer := Buffer } = Data) ->
+ try read_line_1(Data, Buffer, prim_buffer:find_byte_index(Buffer, $\n)) of
+ {ok, NewData, Decompressed} -> {ok, NewData, Decompressed};
+ Other -> Other
+ catch
+ error:badarg -> {error, badarg};
+ error:_ -> {error, eio}
+ end.
+
+read_line_1(Data, Buffer, not_found) ->
+ #{ handle := PrivateFd, zlib := Z } = Data,
+ case ?CALL_FD(PrivateFd, read, [?INFLATE_CHUNK_SIZE]) of
+ {ok, Compressed} ->
+ Uncompressed = erlang:iolist_to_iovec(zlib:inflate(Z, Compressed)),
+ prim_buffer:write(Buffer, Uncompressed),
+ read_line_1(Data, Buffer, prim_buffer:find_byte_index(Buffer, $\n));
+ eof ->
+ case prim_buffer:size(Buffer) of
+ Size when Size > 0 -> {ok, prim_buffer:read(Buffer, Size)};
+ Size when Size =:= 0 -> eof
+ end;
+ Error ->
+ Error
+ end;
+read_line_1(Data, Buffer, {ok, LFIndex}) ->
+ %% Translate CRLF into just LF, completely ignoring which encoding is used,
+ %% but treat the file position as including the CR.
+ #{ position := Position } = Data,
+ NewData = Data#{ position => (Position + LFIndex + 1) },
+ CRIndex = (LFIndex - 1),
+ TranslatedLine =
+ case prim_buffer:read(Buffer, LFIndex + 1) of
+ <<Line:CRIndex/binary, "\r\n">> -> <<Line/binary, "\n">>;
+ Line -> Line
+ end,
+ {ok, NewData, TranslatedLine}.
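A worked example of the translation above (line contents illustrative,
assuming the first buffered line of a gzip:ed file is "abc\r\n"):

    {ok, <<"abc\n">>} = file:read_line(Fd),  %% the CR is stripped from the result
    {ok, 5} = file:position(Fd, cur).        %% ...but still counted in the position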
+
+%%
+%% We support seeking in both directions as long as it isn't relative to EOF.
+%%
+%% Seeking backwards is extremely inefficient since we have to seek to the very
+%% beginning and then decompress up to the desired point.
+%%
+
+position(Data, Mark) when is_atom(Mark) ->
+ position(Data, {Mark, 0});
+position(Data, Offset) when is_integer(Offset) ->
+ position(Data, {bof, Offset});
+position(Data, {bof, Offset}) when is_integer(Offset) ->
+ position_1(Data, Offset);
+position(Data, {cur, Offset}) when is_integer(Offset) ->
+ #{ position := Position } = Data,
+ position_1(Data, Position + Offset);
+position(_Data, {eof, Offset}) when is_integer(Offset) ->
+ {error, einval};
+position(_Data, _Other) ->
+ {error, badarg}.
+
+position_1(_Data, Desired) when Desired < 0 ->
+ {error, einval};
+position_1(#{ position := Desired } = Data, Desired) ->
+ {ok, Data, Desired};
+position_1(#{ position := Current } = Data, Desired) when Current < Desired ->
+ case read(Data, min(Desired - Current, ?INFLATE_CHUNK_SIZE)) of
+ {ok, NewData, _Data} -> position_1(NewData, Desired);
+ eof -> {ok, Data, Current};
+ Other -> Other
+ end;
+position_1(#{ position := Current } = Data, Desired) when Current > Desired ->
+ #{ handle := PrivateFd, buffer := Buffer, zlib := Z } = Data,
+ case ?CALL_FD(PrivateFd, position, [bof]) of
+ {ok, 0} ->
+ ok = zlib:inflateReset(Z),
+ prim_buffer:wipe(Buffer),
+ position_1(Data#{ position => 0 }, Desired);
+ Other ->
+ Other
+ end.
+
+terminate(_Reason, _State, _Data) ->
+ ok.
diff --git a/lib/kernel/src/raw_file_io_list.erl b/lib/kernel/src/raw_file_io_list.erl
new file mode 100644
index 0000000000..2e16e63f0e
--- /dev/null
+++ b/lib/kernel/src/raw_file_io_list.erl
@@ -0,0 +1,128 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(raw_file_io_list).
+
+-export([close/1, sync/1, datasync/1, truncate/1, advise/4, allocate/3,
+ position/2, write/2, pwrite/2, pwrite/3,
+ read_line/1, read/2, pread/2, pread/3]).
+
+%% OTP internal.
+-export([ipread_s32bu_p32bu/3, sendfile/8]).
+
+-export([open_layer/3]).
+
+-include("file_int.hrl").
+
+open_layer(Filename, Modes, [list]) ->
+ case raw_file_io:open(Filename, [binary | Modes]) of
+ {ok, PrivateFd} -> {ok, make_public_fd(PrivateFd, Modes)};
+ Other -> Other
+ end.
+
+%% We can skip wrapping the file if it's write-only since only read operations
+%% are affected by list mode. Since raw_file_io fills in all implicit options
+%% for us, all we need to do is check whether 'read' is among them.
+make_public_fd(PrivateFd, Modes) ->
+ case lists:member(read, Modes) of
+ true -> #file_descriptor{ module = ?MODULE, data = PrivateFd };
+ false -> PrivateFd
+ end.
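A sketch of the effect (file name and contents illustrative): the underlying
raw layer is opened in binary mode and this wrapper converts read results back
to lists, so callers that omit the binary option still get strings.

    {ok, Fd} = file:open("notes.txt", [read, raw]),   %% no 'binary' => list mode
    {ok, "first line\n"} = file:read_line(Fd),        %% results come back as lists
    ok = file:close(Fd).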
+
+close(Fd) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, close, []).
+
+sync(Fd) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, sync, []).
+datasync(Fd) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, datasync, []).
+
+truncate(Fd) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, truncate, []).
+
+advise(Fd, Offset, Length, Advise) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, advise, [Offset, Length, Advise]).
+allocate(Fd, Offset, Length) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, allocate, [Offset, Length]).
+
+position(Fd, Mark) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, position, [Mark]).
+
+write(Fd, IOData) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, write, [IOData]).
+
+pwrite(Fd, Offset, IOData) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, pwrite, [Offset, IOData]).
+pwrite(Fd, LocBytes) ->
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, pwrite, [LocBytes]).
+
+read_line(Fd) ->
+ PrivateFd = Fd#file_descriptor.data,
+ case ?CALL_FD(PrivateFd, read_line, []) of
+ {ok, Binary} -> {ok, binary_to_list(Binary)};
+ Other -> Other
+ end.
+read(Fd, Size) ->
+ PrivateFd = Fd#file_descriptor.data,
+ case ?CALL_FD(PrivateFd, read, [Size]) of
+ {ok, Binary} -> {ok, binary_to_list(Binary)};
+ Other -> Other
+ end.
+pread(Fd, Offset, Size) ->
+ PrivateFd = Fd#file_descriptor.data,
+ case ?CALL_FD(PrivateFd, pread, [Offset, Size]) of
+ {ok, Binary} -> {ok, binary_to_list(Binary)};
+ Other -> Other
+ end.
+pread(Fd, LocNums) ->
+ PrivateFd = Fd#file_descriptor.data,
+ case ?CALL_FD(PrivateFd, pread, [LocNums]) of
+ {ok, LocResults} ->
+ TranslatedResults =
+ [ case Result of
+ Result when is_binary(Result) -> binary_to_list(Result);
+ eof -> eof
+ end || Result <- LocResults ],
+ {ok, TranslatedResults};
+ Other -> Other
+ end.
+
+ipread_s32bu_p32bu(Fd, Offset, MaxSize) ->
+ PrivateFd = Fd#file_descriptor.data,
+ case ?CALL_FD(PrivateFd, ipread_s32bu_p32bu, [Offset, MaxSize]) of
+ {ok, {Size, Pointer, Binary}} when is_binary(Binary) ->
+ {ok, {Size, Pointer, binary_to_list(Binary)}};
+ Other ->
+ Other
+ end.
+
+sendfile(Fd, Dest, Offset, Bytes, ChunkSize, Headers, Trailers, Flags) ->
+ Args = [Dest, Offset, Bytes, ChunkSize, Headers, Trailers, Flags],
+ PrivateFd = Fd#file_descriptor.data,
+ ?CALL_FD(PrivateFd, sendfile, Args).
diff --git a/lib/kernel/src/raw_file_io_raw.erl b/lib/kernel/src/raw_file_io_raw.erl
new file mode 100644
index 0000000000..9a9fe78eb1
--- /dev/null
+++ b/lib/kernel/src/raw_file_io_raw.erl
@@ -0,0 +1,25 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(raw_file_io_raw).
+
+-export([open_layer/3]).
+
+open_layer(Filename, Modes, [raw]) ->
+ prim_file:open(Filename, [raw | Modes]).
diff --git a/lib/kernel/src/rpc.erl b/lib/kernel/src/rpc.erl
index 0e0b7dffa3..d197de942f 100644
--- a/lib/kernel/src/rpc.erl
+++ b/lib/kernel/src/rpc.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -418,10 +418,7 @@ abcast(Name, Mess) ->
abcast([Node|Tail], Name, Mess) ->
Dest = {Name,Node},
- case catch erlang:send(Dest, Mess, [noconnect]) of
- noconnect -> spawn(erlang, send, [Dest,Mess]), ok;
- _ -> ok
- end,
+ try erlang:send(Dest, Mess) catch error:_ -> ok end,
abcast(Tail, Name, Mess);
abcast([], _,_) -> abcast.
@@ -498,7 +495,7 @@ start_monitor(Node, Name) ->
Module :: module(),
Function :: atom(),
Args :: [term()],
- ResL :: [term()],
+ ResL :: [Res :: term() | {'badrpc', Reason :: term()}],
BadNodes :: [node()].
multicall(M, F, A) ->
@@ -509,14 +506,14 @@ multicall(M, F, A) ->
Module :: module(),
Function :: atom(),
Args :: [term()],
- ResL :: [term()],
+ ResL :: [Res :: term() | {'badrpc', Reason :: term()}],
BadNodes :: [node()];
(Module, Function, Args, Timeout) -> {ResL, BadNodes} when
Module :: module(),
Function :: atom(),
Args :: [term()],
Timeout :: timeout(),
- ResL :: [term()],
+ ResL :: [Res :: term() | {'badrpc', Reason :: term()}],
BadNodes :: [node()].
multicall(Nodes, M, F, A) when is_list(Nodes) ->
@@ -531,7 +528,7 @@ multicall(M, F, A, Timeout) ->
Function :: atom(),
Args :: [term()],
Timeout :: timeout(),
- ResL :: [term()],
+ ResL :: [Res :: term() | {'badrpc', Reason :: term()}],
BadNodes :: [node()].
multicall(Nodes, M, F, A, infinity)
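A minimal sketch of what the widened ResL spec admits: a node that answers but
whose call fails contributes a badrpc tuple to ResL, while unreachable nodes
still end up in BadNodes.

    %% erlang:node/1 raises badarg for a non-pid argument, so the local node
    %% answers with a badrpc tuple rather than a value:
    {ResL, BadNodes} = rpc:multicall([node()], erlang, node, [not_a_pid]),
    [{badrpc, _Reason}] = ResL,
    [] = BadNodes.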
diff --git a/lib/kernel/src/seq_trace.erl b/lib/kernel/src/seq_trace.erl
index cc0c10909b..4f9d7b3e5c 100644
--- a/lib/kernel/src/seq_trace.erl
+++ b/lib/kernel/src/seq_trace.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -41,7 +41,7 @@
-type flag() :: 'send' | 'receive' | 'print' | 'timestamp' | 'monotonic_timestamp' | 'strict_monotonic_timestamp'.
-type component() :: 'label' | 'serial' | flag().
--type value() :: (Integer :: non_neg_integer())
+-type value() :: (Label :: term())
| {Previous :: non_neg_integer(),
Current :: non_neg_integer()}
| (Bool :: boolean()).
@@ -59,10 +59,6 @@ set_token({Flags,Label,Serial,_From,Lastcnt}) ->
F = decode_flags(Flags),
set_token2([{label,Label},{serial,{Lastcnt, Serial}} | F]).
-%% We limit the label type to always be a small integer because erl_interface
-%% expects that, the BIF can however "unofficially" handle atoms as well, and
-%% atoms can be used if only Erlang nodes are involved
-
-spec set_token(Component, Val) -> {Component, OldVal} when
Component :: component(),
Val :: value(),
@@ -102,7 +98,7 @@ print(Label, Term) ->
-spec reset_trace() -> 'true'.
reset_trace() ->
- erlang:system_flag(1, 0).
+ erlang:system_flag(reset_seq_trace, true).
%% reset_trace(Pid) -> % this might be a useful function too
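With that restriction lifted, the label component is typed as an arbitrary
term; a brief sketch (values illustrative):

    seq_trace:set_token(label, {transaction, 42}),   %% a tuple, not just an integer
    {label, {transaction, 42}} = seq_trace:get_token(label).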
diff --git a/lib/kernel/src/standard_error.erl b/lib/kernel/src/standard_error.erl
index 5d649e5f94..ef5b532960 100644
--- a/lib/kernel/src/standard_error.erl
+++ b/lib/kernel/src/standard_error.erl
@@ -27,7 +27,8 @@
-define(PROCNAME_SUP, standard_error_sup).
%% Defines for control ops
--define(CTRL_OP_GET_WINSIZE,100).
+-define(ERTS_TTYSL_DRV_CONTROL_MAGIC_NUMBER, 16#018b0900).
+-define(CTRL_OP_GET_WINSIZE, (100 + ?ERTS_TTYSL_DRV_CONTROL_MAGIC_NUMBER)).
%%
%% The basic server and start-up.
diff --git a/lib/kernel/src/user.erl b/lib/kernel/src/user.erl
index 872e63ab53..0c9e1ea303 100644
--- a/lib/kernel/src/user.erl
+++ b/lib/kernel/src/user.erl
@@ -28,7 +28,8 @@
-define(NAME, user).
%% Defines for control ops
--define(CTRL_OP_GET_WINSIZE,100).
+-define(ERTS_TTYSL_DRV_CONTROL_MAGIC_NUMBER, 16#018b0900).
+-define(CTRL_OP_GET_WINSIZE, (100 + ?ERTS_TTYSL_DRV_CONTROL_MAGIC_NUMBER)).
%%
%% The basic server and start-up.
diff --git a/lib/kernel/src/user_drv.erl b/lib/kernel/src/user_drv.erl
index 99ea4210bd..08286dd476 100644
--- a/lib/kernel/src/user_drv.erl
+++ b/lib/kernel/src/user_drv.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -32,9 +32,10 @@
-define(OP_BEEP,4).
-define(OP_PUTC_SYNC,5).
% Control op
--define(CTRL_OP_GET_WINSIZE,100).
--define(CTRL_OP_GET_UNICODE_STATE,101).
--define(CTRL_OP_SET_UNICODE_STATE,102).
+-define(ERTS_TTYSL_DRV_CONTROL_MAGIC_NUMBER, 16#018b0900).
+-define(CTRL_OP_GET_WINSIZE, (100 + ?ERTS_TTYSL_DRV_CONTROL_MAGIC_NUMBER)).
+-define(CTRL_OP_GET_UNICODE_STATE, (101 + ?ERTS_TTYSL_DRV_CONTROL_MAGIC_NUMBER)).
+-define(CTRL_OP_SET_UNICODE_STATE, (102 + ?ERTS_TTYSL_DRV_CONTROL_MAGIC_NUMBER)).
%% start()
%% start(ArgumentList)
diff --git a/lib/kernel/test/Makefile b/lib/kernel/test/Makefile
index efe3a68531..8a6ffe7e72 100644
--- a/lib/kernel/test/Makefile
+++ b/lib/kernel/test/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2016. All Rights Reserved.
+# Copyright Ericsson AB 1997-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -70,6 +70,18 @@ MODULES= \
interactive_shell_SUITE \
init_SUITE \
kernel_config_SUITE \
+ logger_SUITE \
+ logger_disk_log_h_SUITE \
+ logger_env_var_SUITE \
+ logger_filters_SUITE \
+ logger_formatter_SUITE \
+ logger_legacy_SUITE \
+ logger_olp_SUITE \
+ logger_proxy_SUITE \
+ logger_simple_h_SUITE \
+ logger_std_h_SUITE \
+ logger_stress_SUITE \
+ logger_test_lib \
os_SUITE \
pg2_SUITE \
seq_trace_SUITE \
@@ -80,7 +92,8 @@ MODULES= \
loose_node \
sendfile_SUITE \
standard_error_SUITE \
- multi_load_SUITE
+ multi_load_SUITE \
+ zzz_SUITE
APP_FILES = \
appinc.app \
@@ -101,7 +114,7 @@ TARGET_FILES= $(MODULES:%=$(EBIN)/%.$(EMULATOR))
INSTALL_PROGS= $(TARGET_FILES)
EMAKEFILE=Emakefile
-COVERFILE=kernel.cover
+COVERFILE=kernel.cover logger.cover
# ----------------------------------------------------
# Release directory specification
@@ -148,7 +161,8 @@ release_tests_spec: make_emakefile
$(INSTALL_DIR) "$(RELSYSDIR)"
$(INSTALL_DATA) $(ERL_FILES) "$(RELSYSDIR)"
$(INSTALL_DATA) $(APP_FILES) "$(RELSYSDIR)"
- $(INSTALL_DATA) kernel.spec kernel_smoke.spec kernel_bench.spec \
+ $(INSTALL_DATA) \
+ kernel.spec kernel_smoke.spec kernel_bench.spec logger.spec \
$(EMAKEFILE) $(COVERFILE) "$(RELSYSDIR)"
chmod -R u+w "$(RELSYSDIR)"
@tar cf - *_SUITE_data | (cd "$(RELSYSDIR)"; tar xf -)
diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl
index 866043cfb4..94d7c17712 100644
--- a/lib/kernel/test/application_SUITE.erl
+++ b/lib/kernel/test/application_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -31,13 +31,15 @@
otp_3002/1, otp_3184/1, otp_4066/1, otp_4227/1, otp_5363/1,
otp_5606/1,
start_phases/1, get_key/1, get_env/1,
+ set_env/1, set_env_persistent/1, set_env_errors/1,
permit_false_start_local/1, permit_false_start_dist/1, script_start/1,
nodedown_start/1, init2973/0, loop2973/0, loop5606/1]).
-export([config_change/1, persistent_env/1,
distr_changed_tc1/1, distr_changed_tc2/1,
ensure_started/1, ensure_all_started/1,
- shutdown_func/1, do_shutdown/1, shutdown_timeout/1, shutdown_deadlock/1]).
+ shutdown_func/1, do_shutdown/1, shutdown_timeout/1, shutdown_deadlock/1,
+ config_relative_paths/1]).
-define(TESTCASE, testcase_name).
-define(testcase, proplists:get_value(?TESTCASE, Config)).
@@ -54,8 +56,9 @@ all() ->
load_use_cache, ensure_started, {group, reported_bugs}, start_phases,
script_start, nodedown_start, permit_false_start_local,
permit_false_start_dist, get_key, get_env, ensure_all_started,
+ set_env, set_env_persistent, set_env_errors,
{group, distr_changed}, config_change, shutdown_func, shutdown_timeout,
- shutdown_deadlock,
+ shutdown_deadlock, config_relative_paths,
persistent_env].
groups() ->
@@ -1568,7 +1571,8 @@ loop5606(Pid) ->
%% Tests get_env/* functions.
get_env(Conf) when is_list(Conf) ->
- {ok, _} = application:get_env(kernel, error_logger),
+ ok = application:set_env(kernel, new_var, new_val),
+ {ok, new_val} = application:get_env(kernel, new_var),
undefined = application:get_env(undefined_app, a),
undefined = application:get_env(kernel, error_logger_xyz),
default = application:get_env(kernel, error_logger_xyz, default),
@@ -1602,8 +1606,7 @@ get_key(Conf) when is_list(Conf) ->
{ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} =
rpc:call(Cp1, application, get_key, [appinc, start_phases]),
{ok, Env} = rpc:call(Cp1, application, get_key, [appinc ,env]),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
{ok, []} = rpc:call(Cp1, application, get_key, [appinc, modules]),
{ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} =
rpc:call(Cp1, application, get_key, [appinc, mod]),
@@ -1624,8 +1627,7 @@ get_key(Conf) when is_list(Conf) ->
{mod, {application_starter, [ch_sup, {appinc, 41, 43}] }},
{start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} =
rpc:call(Cp1, application, get_all_key, [appinc]),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
{ok, "Test of new app file, including appnew"} =
gen_server:call({global, {ch,41}}, {get_pid_key, description}),
@@ -1642,8 +1644,7 @@ get_key(Conf) when is_list(Conf) ->
{ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} =
gen_server:call({global, {ch,41}}, {get_pid_key, start_phases}),
{ok, Env} = gen_server:call({global, {ch,41}}, {get_pid_key, env}),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
{ok, []} =
gen_server:call({global, {ch,41}}, {get_pid_key, modules}),
{ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} =
@@ -1670,8 +1671,7 @@ get_key(Conf) when is_list(Conf) ->
{mod, {application_starter, [ch_sup, {appinc, 41, 43}] }},
{start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} =
gen_server:call({global, {ch,41}}, get_pid_all_key),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
stop_node_nice(Cp1),
ok.
@@ -1946,6 +1946,101 @@ get_appls([_ | T], Res) ->
get_appls([], Res) ->
Res.
+%% Test set_env/1.
+set_env(Conf) when is_list(Conf) ->
+ ok = application:set_env([{appinc, [{own2, persist}, {not_in_app, persist}]},
+ {unknown_app, [{key, persist}]}]),
+
+ %% own_env1 and own2 are set in appinc
+ undefined = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, val2} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ %% On reload, values are lost
+ ok = application:unload(appinc),
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, val2} = application:get_env(appinc, own2),
+ undefined = application:get_env(appinc, not_in_app),
+
+ %% Clean up
+ ok = application:unload(appinc).
+
+%% Test set_env/2 with persistent true.
+set_env_persistent(Conf) when is_list(Conf) ->
+ Opts = [{persistent, true}],
+ ok = application:set_env([{appinc, [{own2, persist}, {not_in_app, persist}]},
+ {unknown_app, [{key, persist}]}], Opts),
+
+ %% own_env1 and own2 are set in appinc
+ undefined = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ %% On reload, values are not lost
+ ok = application:unload(appinc),
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+
+ %% Clean up
+ ok = application:unload(appinc).
+
+set_env_errors(Conf) when is_list(Conf) ->
+ "application: 1; application name must be an atom" =
+ badarg_msg(fun() -> application:set_env([{1, []}]) end),
+
+ "application: foo; parameters must be a list" =
+ badarg_msg(fun() -> application:set_env([{foo, bar}]) end),
+
+ "invalid application config: foo_bar" =
+ badarg_msg(fun() -> application:set_env([foo_bar]) end),
+
+ "application: foo; invalid parameter name: 1" =
+ badarg_msg(fun() -> application:set_env([{foo, [{1, 2}]}]) end),
+
+ "application: foo; invalid parameter: config" =
+ badarg_msg(fun() -> application:set_env([{foo, [config]}]) end),
+
+ "application: kernel; erroneous parameter: distributed" =
+ badarg_msg(fun() -> application:set_env([{kernel, [{distributed, config}]}]) end),
+
+ %% This will raise in the future
+ ct:capture_start(),
+ _ = application:set_env([{foo, []}, {foo, []}]),
+ timer:sleep(100),
+ ct:capture_stop(),
+ [_ | _] = string:find(ct:capture_get(), "duplicate application config: foo"),
+
+ ct:capture_start(),
+ _ = application:set_env([{foo, [{bar, baz}, {bar, bat}]}]),
+ timer:sleep(100),
+ ct:capture_stop(),
+ [_ | _] = string:find(ct:capture_get(), "application: foo; duplicate parameter: bar"),
+
+ ok.
+
+badarg_msg(Fun) ->
+ try Fun() of
+ _ -> ct:fail(try_succeeded)
+ catch
+ error:{badarg, Msg} -> Msg
+ end.
%% Test set_env/4 and unset_env/3 with persistent true.
persistent_env(Conf) when is_list(Conf) ->
@@ -2078,6 +2173,42 @@ shutdown_deadlock(Config) when is_list(Config) ->
%%-----------------------------------------------------------------
+%% Relative paths in sys.config
+%%-----------------------------------------------------------------
+config_relative_paths(Config) ->
+ Dir = ?config(priv_dir,Config),
+ SubDir = filename:join(Dir,"subdir"),
+ Sys = filename:join(SubDir,"sys.config"),
+ ok = filelib:ensure_dir(Sys),
+ ok = file:write_file(Sys,"[\"../up.config\",\"current\"].\n"),
+
+ Up = filename:join(Dir,"up.config"),
+ ok = file:write_file(Up,"[{app1,[{key1,value}]}].\n"),
+
+ {ok,Cwd} = file:get_cwd(),
+ Current1 = filename:join(Cwd,"current.config"),
+ ok = file:write_file(Current1,"[{app1,[{key2,value1}]}].\n"),
+
+ N1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ {ok,Node1} = start_node(N1,filename:rootname(Sys)),
+ ok = rpc:call(Node1, application, load, [app1()]),
+ {ok, value} = rpc:call(Node1, application, get_env,[app1,key1]),
+ {ok, value1} = rpc:call(Node1, application, get_env,[app1,key2]),
+
+ Current2 = filename:join(SubDir,"current.config"),
+ ok = file:write_file(Current2,"[{app1,[{key2,value2}]}].\n"),
+
+ N2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_2"])),
+ {ok, Node2} = start_node(N2,filename:rootname(Sys)),
+ ok = rpc:call(Node2, application, load, [app1()]),
+ {ok, value} = rpc:call(Node2, application, get_env,[app1,key1]),
+ {ok, value2} = rpc:call(Node2, application, get_env,[app1,key2]),
+
+ stop_node_nice([Node1,Node2]),
+
+ ok.
+
+%%-----------------------------------------------------------------
%% Utility functions
%%-----------------------------------------------------------------
app0() ->
diff --git a/lib/kernel/test/code_SUITE.erl b/lib/kernel/test/code_SUITE.erl
index 569753155f..1314316c13 100644
--- a/lib/kernel/test/code_SUITE.erl
+++ b/lib/kernel/test/code_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -931,37 +931,34 @@ purge_stacktrace(Config) when is_list(Config) ->
code:purge(code_b_test),
try code_b_test:call(fun(b) -> ok end, a)
catch
- error:function_clause ->
+ error:function_clause:Stacktrace ->
code:load_file(code_b_test),
- case erlang:get_stacktrace() of
+ case Stacktrace of
[{?MODULE,_,[a],_},
{code_b_test,call,2,_},
{?MODULE,purge_stacktrace,1,_}|_] ->
- false = code:purge(code_b_test),
- [] = erlang:get_stacktrace()
+ false = code:purge(code_b_test)
end
end,
try code_b_test:call(nofun, 2)
catch
- error:function_clause ->
+ error:function_clause:Stacktrace2 ->
code:load_file(code_b_test),
- case erlang:get_stacktrace() of
+ case Stacktrace2 of
[{code_b_test,call,[nofun,2],_},
{?MODULE,purge_stacktrace,1,_}|_] ->
- false = code:purge(code_b_test),
- [] = erlang:get_stacktrace()
+ false = code:purge(code_b_test)
end
end,
Args = [erlang,error,[badarg]],
try code_b_test:call(erlang, error, [badarg,Args])
catch
- error:badarg ->
+ error:badarg:Stacktrace3 ->
code:load_file(code_b_test),
- case erlang:get_stacktrace() of
+ case Stacktrace3 of
[{code_b_test,call,Args,_},
{?MODULE,purge_stacktrace,1,_}|_] ->
- false = code:purge(code_b_test),
- [] = erlang:get_stacktrace()
+ false = code:purge(code_b_test)
end
end,
ok.
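The hunk above moves from the deprecated erlang:get_stacktrace/0 to binding the
stacktrace directly in the catch pattern; the general shape, as a minimal
sketch (some_module:some_call/1 and handle_error/3 are hypothetical):

    try
        some_module:some_call(Arg)
    catch
        Class:Reason:Stacktrace ->
            %% the stacktrace must be bound here; it is no longer available
            %% afterwards via erlang:get_stacktrace/0
            handle_error(Class, Reason, Stacktrace)
    end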
diff --git a/lib/kernel/test/disk_log_SUITE.erl b/lib/kernel/test/disk_log_SUITE.erl
index 12e2521939..9704c3b28c 100644
--- a/lib/kernel/test/disk_log_SUITE.erl
+++ b/lib/kernel/test/disk_log_SUITE.erl
@@ -89,8 +89,6 @@
dist_terminate/1, dist_accessible/1, dist_deadlock/1,
dist_open2/1, other_groups/1,
- evil/1,
-
otp_6278/1, otp_10131/1]).
-export([head_fun/1, hf/0, lserv/1,
@@ -123,7 +121,7 @@
[halt_int, wrap_int, halt_ext, wrap_ext, read_mode, head,
notif, new_idx_vsn, reopen, block, unblock, open, close,
error, chunk, truncate, many_users, info, change_size,
- change_attribute, distribution, evil, otp_6278, otp_10131]).
+ change_attribute, distribution, otp_6278, otp_10131]).
%% These test cases should be skipped if the VxWorks card is
%% configured without NFS cache.
@@ -149,7 +147,7 @@ all() ->
{group, open}, {group, close}, {group, error}, chunk,
truncate, many_users, {group, info},
{group, change_size}, change_attribute,
- {group, distribution}, evil, otp_6278, otp_10131].
+ {group, distribution}, otp_6278, otp_10131].
groups() ->
[{halt_int, [], [halt_int_inf, {group, halt_int_sz}]},
@@ -1752,7 +1750,7 @@ block_queue(Conf) when is_list(Conf) ->
true = [{1,a},{2,b},{3,c},{4,d},{5,e},{6,f},{7,g},{8,h}] == Terms,
del(File, 2),
Q = qlen(),
- true = (P0 == pps()),
+ check_pps(P0),
ok.
%% OTP-4880. Blocked processes did not get disk_log_stopped message.
@@ -1784,7 +1782,7 @@ block_queue2(Conf) when is_list(Conf) ->
{ok,<<>>} = file:read_file(File ++ ".1"),
del(File, No),
Q = qlen(),
- true = (P0 == pps()),
+ check_pps(P0),
ok.
@@ -2121,7 +2119,7 @@ close_block(Conf) when is_list(Conf) ->
0 = sync_do(Pid2, users),
sync_do(Pid2, terminate),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Users terminate (no link...).
Pid3 = spawn_link(?MODULE, lserv, [n]),
@@ -2139,7 +2137,7 @@ close_block(Conf) when is_list(Conf) ->
disk_log:close(n),
disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking owner terminates.
Pid5 = spawn_link(?MODULE, lserv, [n]),
@@ -2156,7 +2154,7 @@ close_block(Conf) when is_list(Conf) ->
1 = users(n),
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking user terminates.
Pid6 = spawn_link(?MODULE, lserv, [n]),
@@ -2176,7 +2174,7 @@ close_block(Conf) when is_list(Conf) ->
1 = users(n),
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking owner terminates.
Pid7 = spawn_link(?MODULE, lserv, [n]),
@@ -2194,7 +2192,7 @@ close_block(Conf) when is_list(Conf) ->
1 = users(n),
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Two owners, the blocking one terminates.
Pid8 = spawn_link(?MODULE, lserv, [n]),
@@ -2209,7 +2207,7 @@ close_block(Conf) when is_list(Conf) ->
0 = sync_do(Pid9, users),
sync_do(Pid9, terminate),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking user closes.
Pid10 = spawn_link(?MODULE, lserv, [n]),
@@ -2227,7 +2225,7 @@ close_block(Conf) when is_list(Conf) ->
ok = disk_log:close(n),
sync_do(Pid10, terminate),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking user unblocks and closes.
Pid11 = spawn_link(?MODULE, lserv, [n]),
@@ -2246,7 +2244,7 @@ close_block(Conf) when is_list(Conf) ->
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
sync_do(Pid11, terminate),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking owner closes.
Pid12 = spawn_link(?MODULE, lserv, [n]),
@@ -2265,7 +2263,7 @@ close_block(Conf) when is_list(Conf) ->
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
sync_do(Pid12, terminate),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking owner unblocks and closes.
Pid13 = spawn_link(?MODULE, lserv, [n]),
@@ -2285,7 +2283,7 @@ close_block(Conf) when is_list(Conf) ->
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
sync_do(Pid13, terminate),
- true = (P0 == pps()),
+ check_pps(P0),
del(File, No), % cleanup
ok.
@@ -2489,7 +2487,7 @@ error_repair(Conf) when is_list(Conf) ->
P0 = pps(),
{error, {file_error, _, _}} =
disk_log:open([{name, n}, {file, File}, {type, wrap}, {size,{40,4}}]),
- true = (P0 == pps()),
+ check_pps(P0),
del(File, No),
ok = file:del_dir(Dir),
@@ -2508,7 +2506,7 @@ error_repair(Conf) when is_list(Conf) ->
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, internal}, {size, {40,No}}]),
ok = disk_log:close(n),
- true = (P1 == pps()),
+ check_pps(P1),
del(File, No),
receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok
after 1000 -> ct:fail(failed) end,
@@ -2526,7 +2524,7 @@ error_repair(Conf) when is_list(Conf) ->
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, internal}, {size, {4000,No}}]),
ok = disk_log:close(n),
- true = (P2 == pps()),
+ check_pps(P2),
del(File, No),
receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok
after 1000 -> ct:fail(failed) end,
@@ -2635,7 +2633,7 @@ error_log(Conf) when is_list(Conf) ->
{ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, external},{size, {100, No}}]),
{error, {file_error, _, _}} = disk_log:truncate(n),
- true = (P0 == pps()),
+ check_pps(P0),
del(File, No),
%% OTP-4880.
@@ -2643,7 +2641,7 @@ error_log(Conf) when is_list(Conf) ->
{ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
{format, external},{size, 100000}]),
{error, {file_error, _, eisdir}} = disk_log:reopen(n, LDir),
- true = (P0 == pps()),
+ check_pps(P0),
file:delete(File),
B = mk_bytes(60),
@@ -3005,7 +3003,7 @@ error_index(Conf) when is_list(Conf) ->
{error, {invalid_index_file, _}} = disk_log:open(Args),
del(File, No),
- true = (P0 == pps()),
+ check_pps(P0),
true = (Q == qlen()),
ok.
@@ -4438,7 +4436,7 @@ dist_open2(Conf) when is_list(Conf) ->
timer:sleep(500),
file:delete(File),
- true = (P0 == pps()),
+ check_pps(P0),
%% This time the first process has a naughty head_func. This test
%% does not add very much. Perhaps it should be removed. However,
@@ -4484,7 +4482,7 @@ dist_open2(Conf) when is_list(Conf) ->
timer:sleep(100),
{error, no_such_log} = disk_log:close(Log),
file:delete(File),
- true = (P0 == pps()),
+ check_pps(P0),
No = 2,
Log2 = n2,
@@ -4513,7 +4511,7 @@ dist_open2(Conf) when is_list(Conf) ->
file:delete(File2),
del(File, No),
- true = (P0 == pps()),
+ check_pps(P0),
R.
@@ -4558,7 +4556,7 @@ dist_open2_1(Conf, Delay) ->
{error, no_such_log} = disk_log:info(Log),
file:delete(File),
- true = (P0 == pps()),
+ check_pps(P0),
ok.
@@ -4615,7 +4613,7 @@ dist_open2_2(Conf, Delay) ->
{[{Node1,{repaired,_,_,_}}],[]}} -> ok
end,
- true = (P0 == pps()),
+ check_pps(P0),
stop_node(Node1),
file:delete(File),
ok.
@@ -4676,119 +4674,6 @@ other_groups(Conf) when is_list(Conf) ->
ok.
--define(MAX, ?MAX_FWRITE_CACHE). % as in disk_log_1.erl
-%% Evil cases such as closed file descriptor port.
-evil(Conf) when is_list(Conf) ->
- Dir = ?privdir(Conf),
- File = filename:join(Dir, "n.LOG"),
- Log = n,
-
- %% Not a very thorough test.
-
- ok = setup_evil_filled_cache_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:log(Log, apa),
- ok = disk_log:close(Log),
-
- ok = setup_evil_filled_cache_halt(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:truncate(Log, apa),
- ok = stop_evil(Log),
-
- %% White box test.
- file:delete(File),
- Ports0 = erlang:ports(),
- {ok, Log} = disk_log:open([{name,Log},{file,File},{type,halt},
- {size,?MAX+50},{format,external}]),
- [Fd] = erlang:ports() -- Ports0,
- {B,_} = x_mk_bytes(30),
- ok = disk_log:blog(Log, <<0:(?MAX-1)/unit:8>>),
- exit(Fd, kill),
- {error, {file_error,_,einval}} = disk_log:blog_terms(Log, [B,B]),
- ok= disk_log:close(Log),
- file:delete(File),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:close(Log),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:log(Log, apa),
- ok = stop_evil(Log),
-
- ok = setup_evil_halt(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:log(Log, apa),
- ok = stop_evil(Log),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:reopen(Log, apa),
- {error, {file_error,_,einval}} = disk_log:reopen(Log, apa),
- ok = stop_evil(Log),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:reopen(Log, apa),
- ok = stop_evil(Log),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:inc_wrap_file(Log),
- ok = stop_evil(Log),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:chunk(Log, start),
- ok = stop_evil(Log),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:truncate(Log),
- ok = stop_evil(Log),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:chunk_step(Log, start, 1),
- ok = stop_evil(Log),
-
- io:format("messages: ~p~n", [erlang:process_info(self(), messages)]),
- del(File, 2),
- file:delete(File),
- ok.
-
-setup_evil_wrap(Log, Dir) ->
- setup_evil(Log, [{type,wrap},{size,{100,2}}], Dir).
-
-setup_evil_halt(Log, Dir) ->
- setup_evil(Log, [{type,halt},{size,10000}], Dir).
-
-setup_evil(Log, Args, Dir) ->
- File = filename:join(Dir, lists:concat([Log, ".LOG"])),
- file:delete(File),
- del(File, 2),
- ok = disk_log:start(),
- Ports0 = erlang:ports(),
- {ok, Log} = disk_log:open([{name,Log},{file,File} | Args]),
- [Fd] = erlang:ports() -- Ports0,
- exit(Fd, kill),
- ok = disk_log:log_terms(n, [<<0:10/unit:8>>]),
- timer:sleep(2500), % TIMEOUT in disk_log_1.erl is 2000
- ok.
-
-stop_evil(Log) ->
- {error, _} = disk_log:close(Log),
- ok.
-
-setup_evil_filled_cache_wrap(Log, Dir) ->
- setup_evil_filled_cache(Log, [{type,wrap},{size,{?MAX,2}}], Dir).
-
-setup_evil_filled_cache_halt(Log, Dir) ->
- setup_evil_filled_cache(Log, [{type,halt},{size,infinity}], Dir).
-
-%% The cache is filled, and the file descriptor port gone.
-setup_evil_filled_cache(Log, Args, Dir) ->
- File = filename:join(Dir, lists:concat([Log, ".LOG"])),
- file:delete(File),
- del(File, 2),
- ok = disk_log:start(),
- Ports0 = erlang:ports(),
- {ok, Log} = disk_log:open([{name,Log},{file,File} | Args]),
- [Fd] = erlang:ports() -- Ports0,
- ok = disk_log:log_terms(n, [<<0:?MAX/unit:8>>]),
- exit(Fd, kill),
- ok.
-
%% OTP-6278. open/1 creates no status or crash report.
otp_6278(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
@@ -4906,10 +4791,59 @@ log(Name, N) ->
format_error(E) ->
lists:flatten(disk_log:format_error(E)).
+check_pps({Ports0,Procs0} = P0) ->
+ case pps() of
+ P0 ->
+ ok;
+ _ ->
+ timer:sleep(500),
+ case pps() of
+ P0 ->
+ ok;
+ {Ports1,Procs1} = P1 ->
+ case {Ports1 -- Ports0, Procs1 -- Procs0} of
+ {[], []} -> ok;
+ {PortsDiff,ProcsDiff} ->
+ io:format("failure, got ~p~n, expected ~p\n", [P1, P0]),
+ show("Old port", Ports0 -- Ports1),
+ show("New port", PortsDiff),
+ show("Old proc", Procs0 -- Procs1),
+ show("New proc", ProcsDiff),
+ ct:fail(failed)
+ end
+ end
+ end.
+
+show(_S, []) ->
+ ok;
+show(S, [{Pid, Name, InitCall}|Pids]) when is_pid(Pid) ->
+ io:format("~s: ~w (~w), ~w: ~p~n",
+ [S, Pid, proc_reg_name(Name), InitCall,
+ erlang:process_info(Pid)]),
+ show(S, Pids);
+show(S, [{Port, _}|Ports]) when is_port(Port)->
+ io:format("~s: ~w: ~p~n", [S, Port, erlang:port_info(Port)]),
+ show(S, Ports).
+
pps() ->
timer:sleep(100),
- {erlang:ports(), lists:filter(fun(P) -> erlang:is_process_alive(P) end,
- processes())}.
+ {port_list(), process_list()}.
+
+port_list() ->
+ [{P,safe_second_element(erlang:port_info(P, name))} ||
+ P <- erlang:ports()].
+
+process_list() ->
+ [{P,process_info(P, registered_name),
+ safe_second_element(process_info(P, initial_call))} ||
+ P <- processes(), erlang:is_process_alive(P)].
+
+proc_reg_name({registered_name, Name}) -> Name;
+proc_reg_name([]) -> no_reg_name.
+
+safe_second_element({_,Info}) -> Info;
+safe_second_element(Other) -> Other.
+
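%% A minimal usage sketch (illustrative only, not from the patch): the
%% intended pattern, as the hunks above show, is to snapshot ports and
%% processes when a test case starts and to verify at the end that
%% nothing leaked:
%%
%%     some_case(Conf) when is_list(Conf) ->   % placeholder test case name
%%         P0 = pps(),
%%         %% ... exercise disk_log ...
%%         check_pps(P0),
%%         ok.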
qlen() ->
{_, {_, N}} = lists:keysearch(message_queue_len, 1, process_info(self())),
diff --git a/lib/kernel/test/erl_distribution_SUITE.erl b/lib/kernel/test/erl_distribution_SUITE.erl
index e34b4d77d2..5a8bbd56c4 100644
--- a/lib/kernel/test/erl_distribution_SUITE.erl
+++ b/lib/kernel/test/erl_distribution_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -25,6 +25,7 @@
init_per_group/2,end_per_group/2]).
-export([tick/1, tick_change/1,
+ connect_node/1,
nodenames/1, hostnames/1,
illegal_nodenames/1, hidden_node/1,
setopts/1,
@@ -70,6 +71,7 @@ suite() ->
all() ->
[tick, tick_change, nodenames, hostnames, illegal_nodenames,
+ connect_node,
hidden_node, setopts,
table_waste, net_setuptime, inet_dist_options_options,
{group, monitor_nodes}].
@@ -87,6 +89,7 @@ init_per_suite(Config) ->
Config.
end_per_suite(_Config) ->
+ [slave:stop(N) || N <- nodes()],
ok.
init_per_group(_GroupName, Config) ->
@@ -106,6 +109,12 @@ init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
end_per_testcase(_Func, _Config) ->
ok.
+connect_node(Config) when is_list(Config) ->
+ Connected = nodes(connected),
+ true = net_kernel:connect_node(node()),
+ Connected = nodes(connected),
+ ok.
+
tick(Config) when is_list(Config) ->
PaDir = filename:dirname(code:which(erl_distribution_SUITE)),
@@ -244,7 +253,7 @@ illegal(Name) ->
test_node(Name) ->
test_node(Name, false).
test_node(Name, Illigal) ->
- ProgName = atom_to_list(lib:progname()),
+ ProgName = ct:get_progname(),
Command = ProgName ++ " -noinput " ++ long_or_short() ++ Name ++
" -eval \"net_adm:ping('" ++ atom_to_list(node()) ++ "')\"" ++
case Illigal of
@@ -463,9 +472,9 @@ run_remote_test([FuncStr, TestNodeStr | Args]) ->
1
end
catch
- C:E ->
+ C:E:S ->
io:format("Node ~p got EXCEPTION ~p:~p\nat ~p\n",
- [node(), C, E, erlang:get_stacktrace()]),
+ [node(), C, E, S]),
2
end,
io:format("Node ~p doing halt(~p).\n",[node(), Status]),
@@ -1144,17 +1153,16 @@ monitor_nodes_otp_6481_test(Config, TestType) when is_list(Config) ->
TestMonNodeState = monitor_node_state(),
%% io:format("~p~n", [TestMonNodeState]),
TestMonNodeState =
- MonNodeState
+ case TestType of
+ nodedown -> [];
+ nodeup -> [{self(), []}]
+ end
+ ++ lists:map(fun (_) -> {MN, []} end, Seq)
++ case TestType of
nodedown -> [{self(), []}];
nodeup -> []
end
- ++ lists:map(fun (_) -> {MN, []} end, Seq)
- ++ case TestType of
- nodedown -> [];
- nodeup -> [{self(), []}]
- end,
-
+ ++ MonNodeState,
{ok, Node} = start_node(Name, "", this),
receive {nodeup, Node} -> ok end,
diff --git a/lib/kernel/test/erl_distribution_wb_SUITE.erl b/lib/kernel/test/erl_distribution_wb_SUITE.erl
index 03aaee56b7..8256444bdc 100644
--- a/lib/kernel/test/erl_distribution_wb_SUITE.erl
+++ b/lib/kernel/test/erl_distribution_wb_SUITE.erl
@@ -61,10 +61,13 @@
%% From R9 and forward extended references is compulsory
%% From R10 and forward extended pids and ports are compulsory
%% From R20 and forward UTF8 atoms are compulsory
+%% From R21 and forward NEW_FUN_TAGS is compulsory (no more tuple fallback {fun, ...})
-define(COMPULSORY_DFLAGS, (?DFLAG_EXTENDED_REFERENCES bor
?DFLAG_EXTENDED_PIDS_PORTS bor
- ?DFLAG_UTF8_ATOMS)).
+ ?DFLAG_UTF8_ATOMS bor
+ ?DFLAG_NEW_FUN_TAGS)).
+-define(PASS_THROUGH, $p).
-define(shutdown(X), exit(X)).
-define(int16(X), [((X) bsr 8) band 16#ff, (X) band 16#ff]).
@@ -674,15 +677,16 @@ build_rex_message(Cookie,OurName) ->
%% Receive a distribution message
recv_message(Socket) ->
case gen_tcp:recv(Socket, 0) of
+ {ok,[]} ->
+ recv_message(Socket); %% a tick, ignore
{ok,Data} ->
B0 = list_to_binary(Data),
- {_,B1} = erlang:split_binary(B0,1),
- Header = binary_to_term(B1),
- Siz = byte_size(term_to_binary(Header)),
- {_,B2} = erlang:split_binary(B1,Siz),
+ <<?PASS_THROUGH, B1/binary>> = B0,
+ {Header,Siz} = binary_to_term(B1,[used]),
+ <<_:Siz/binary,B2/binary>> = B1,
Message = case (catch binary_to_term(B2)) of
{'EXIT', _} ->
- could_not_digest_message;
+ {could_not_digest_message,B2};
Other ->
Other
end,
diff --git a/lib/kernel/test/erl_prim_loader_SUITE.erl b/lib/kernel/test/erl_prim_loader_SUITE.erl
index b6417210b9..16a127aa3e 100644
--- a/lib/kernel/test/erl_prim_loader_SUITE.erl
+++ b/lib/kernel/test/erl_prim_loader_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -33,6 +33,7 @@
primary_archive/1, virtual_dir_in_archive/1,
get_modules/1]).
+-define(PRIM_FILE, prim_file).
%%-----------------------------------------------------------------
%% Test suite for erl_prim_loader. (Most code is run during system start/stop.)
@@ -461,7 +462,7 @@ primary_archive(Config) when is_list(Config) ->
%% Set primary archive
ExpectedEbins = [Archive, DictDir ++ "/ebin", DummyDir ++ "/ebin"],
io:format("ExpectedEbins: ~p\n", [ExpectedEbins]),
- {ok, FileInfo} = prim_file:read_file_info(Archive),
+ {ok, FileInfo} = ?PRIM_FILE:read_file_info(Archive),
{ok, Ebins} = rpc:call(Node, erl_prim_loader, set_primary_archive,
[Archive, ArchiveBin, FileInfo,
fun escript:parse_file/1]),
diff --git a/lib/kernel/test/error_logger_SUITE.erl b/lib/kernel/test/error_logger_SUITE.erl
index 2d26a7246c..eab72e58a7 100644
--- a/lib/kernel/test/error_logger_SUITE.erl
+++ b/lib/kernel/test/error_logger_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -32,7 +32,8 @@
init_per_group/2,end_per_group/2,
off_heap/1,
error_report/1, info_report/1, error/1, info/1,
- emulator/1, tty/1, logfile/1, add/1, delete/1]).
+ emulator/1, via_logger_process/1, other_node/1,
+ tty/1, logfile/1, add/1, delete/1, format_depth/1]).
-export([generate_error/2]).
@@ -46,16 +47,20 @@ suite() ->
{timetrap,{minutes,1}}].
all() ->
- [off_heap, error_report, info_report, error, info, emulator, tty,
- logfile, add, delete].
+ [off_heap, error_report, info_report, error, info, emulator,
+ via_logger_process, other_node, tty, logfile, add, delete,
+ format_depth].
groups() ->
[].
init_per_suite(Config) ->
+ logger:add_handler(error_logger,error_logger,
+ #{level=>info,filter_default=>log}),
Config.
end_per_suite(_Config) ->
+ logger:remove_handler(error_logger),
ok.
init_per_group(_GroupName, Config) ->
@@ -226,6 +231,40 @@ generate_error(Error, Stack) ->
erlang:raise(error, Error, Stack).
%%-----------------------------------------------------------------
+
+via_logger_process(Config) ->
+ case os:type() of
+ {win32,_} ->
+ {skip,"Skip on windows - cant change file mode"};
+ _ ->
+ error_logger:add_report_handler(?MODULE, self()),
+ Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+ Msg = "File operation error: eacces. Target: " ++
+ Dir ++ ". Function: list_dir. ",
+ ok = file:make_dir(Dir),
+ ok = file:change_mode(Dir,8#0222),
+ error = erl_prim_loader:list_dir(Dir),
+ ok = file:change_mode(Dir,8#0664),
+ _ = file:del_dir(Dir),
+ reported(error_report, std_error, Msg),
+ my_yes = error_logger:delete_report_handler(?MODULE),
+ ok
+ end.
+
+%%-----------------------------------------------------------------
+
+other_node(_Config) ->
+ error_logger:add_report_handler(?MODULE, self()),
+ {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+ ok = rpc:call(Node,logger,add_handler,[error_logger,error_logger,
+ #{level=>info,filter_default=>log}]),
+ rpc:call(Node,error_logger,error_report,[hi_from_remote]),
+ reported(error_report,std_error,hi_from_remote),
+ test_server:stop_node(Node),
+ ok.
+
+
+%%-----------------------------------------------------------------
%% We don't enable or disable tty error logging here. We do not
%% want to interfere with the test run.
%%-----------------------------------------------------------------
@@ -271,6 +310,21 @@ delete(Config) when is_list(Config) ->
ok.
%%-----------------------------------------------------------------
+
+format_depth(_Config) ->
+ ok = application:set_env(kernel,error_logger_format_depth,30),
+ 30 = error_logger:get_format_depth(),
+ ok = application:set_env(kernel,error_logger_format_depth,3),
+ 10 = error_logger:get_format_depth(),
+ ok = application:set_env(kernel,error_logger_format_depth,11),
+ 11 = error_logger:get_format_depth(),
+ ok = application:set_env(kernel,error_logger_format_depth,unlimited),
+ unlimited = error_logger:get_format_depth(),
+ ok = application:unset_env(kernel,error_logger_format_depth),
+ unlimited = error_logger:get_format_depth(),
+ ok.
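+
+%% A small sketch of the same setting outside the test (assumed usage, not
+%% from the patch): error_logger_format_depth is an ordinary kernel
+%% parameter, typically given in sys.config, and as the assertions above
+%% show, values below 10 are bumped up to 10:
+%%
+%%     %% sys.config (hypothetical example)
+%%     [{kernel, [{error_logger_format_depth, 30}]}].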
+
+%%-----------------------------------------------------------------
%% Check that the report has been received.
%%-----------------------------------------------------------------
reported(Tag, Type, Report) ->
@@ -279,7 +333,7 @@ reported(Tag, Type, Report) ->
test_server:messages_get(),
ok
after 1000 ->
- ct:fail(no_report_received)
+ ct:fail({no_report_received,test_server:messages_get()})
end.
%%-----------------------------------------------------------------
diff --git a/lib/kernel/test/error_logger_warn_SUITE.erl b/lib/kernel/test/error_logger_warn_SUITE.erl
index a8087e11f9..8f1eb2ba0a 100644
--- a/lib/kernel/test/error_logger_warn_SUITE.erl
+++ b/lib/kernel/test/error_logger_warn_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2003-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2003-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -480,9 +480,12 @@ rb_utc() ->
UtcLog=case application:get_env(sasl,utc_log) of
{ok,true} ->
true;
- _AllOthers ->
+ {ok,false} ->
application:set_env(sasl,utc_log,true),
- false
+ false;
+ undefined ->
+ application:set_env(sasl,utc_log,true),
+ undefined
end,
application:start(sasl),
rb:start([{report_dir, rd()}]),
@@ -494,7 +497,12 @@ rb_utc() ->
Sum=one_rb_findstr([],"UTC"),
rb:stop(),
application:stop(sasl),
- application:set_env(sasl,utc_log,UtcLog),
+ case UtcLog of
+ undefined ->
+ application:unset_env(sasl,utc_log);
+ _ ->
+ application:set_env(sasl,utc_log,UtcLog)
+ end,
stop_node(Node),
ok.
diff --git a/lib/kernel/test/file_SUITE.erl b/lib/kernel/test/file_SUITE.erl
index 119e1f24bb..a51025cba6 100644
--- a/lib/kernel/test/file_SUITE.erl
+++ b/lib/kernel/test/file_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -39,6 +39,8 @@
-define(FILE_FIN_PER_TESTCASE(Config), Config).
-endif.
+-define(PRIM_FILE, prim_file).
+
-module(?FILE_SUITE).
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
@@ -54,7 +56,8 @@
open1/1,
old_modes/1, new_modes/1, path_open/1, open_errors/1]).
-export([ file_info_basic_file/1, file_info_basic_directory/1,
- file_info_bad/1, file_info_times/1, file_write_file_info/1]).
+ file_info_bad/1, file_info_times/1, file_write_file_info/1,
+ file_wfi_helpers/1]).
-export([rename/1, access/1, truncate/1, datasync/1, sync/1,
read_write/1, pread_write/1, append/1, exclusive/1]).
-export([ e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]).
@@ -97,6 +100,12 @@
-export([unicode_mode/1]).
+-export([volume_relative_paths/1,unc_paths/1]).
+
+-export([tiny_writes/1, tiny_writes_delayed/1,
+ large_writes/1, large_writes_delayed/1,
+ tiny_reads/1, tiny_reads_ahead/1]).
+
%% Debug exports
-export([create_file_slow/2, create_file/2, create_bin/2]).
-export([verify_file/2, verify_bin/3]).
@@ -107,6 +116,8 @@
-export([disc_free/1, memsize/0]).
-include_lib("common_test/include/ct.hrl").
+-include_lib("common_test/include/ct_event.hrl").
+
-include_lib("kernel/include/file.hrl").
-define(THROW_ERROR(RES), throw({fail, ?LINE, RES})).
@@ -118,13 +129,13 @@ suite() ->
all() ->
[unicode, altname, read_write_file, {group, dirs},
- {group, files}, delete, rename, names, {group, errors},
- {group, compression}, {group, links}, copy,
+ {group, files}, delete, rename, names, volume_relative_paths, unc_paths,
+ {group, errors}, {group, compression}, {group, links}, copy,
delayed_write, read_ahead, segment_read, segment_write,
ipread, pid2name, interleaved_read_write, otp_5814, otp_10852,
large_file, large_write, read_line_1, read_line_2, read_line_3,
read_line_4, standard_io, old_io_protocol,
- unicode_mode
+ unicode_mode, {group, bench}
].
groups() ->
@@ -142,7 +153,8 @@ groups() ->
{pos, [], [pos1, pos2, pos3]},
{file_info, [],
[file_info_basic_file, file_info_basic_directory,
- file_info_bad, file_info_times, file_write_file_info]},
+ file_info_bad, file_info_times, file_write_file_info,
+ file_wfi_helpers]},
{consult, [], [consult1, path_consult]},
{eval, [], [eval1, path_eval]},
{script, [], [script1, path_script]},
@@ -154,11 +166,19 @@ groups() ->
write_compressed, compress_errors, catenated_gzips,
compress_async_crash]},
{links, [],
- [make_link, read_link_info_for_non_link, symlinks]}].
+ [make_link, read_link_info_for_non_link, symlinks]},
+ {bench, [],
+ [tiny_writes, tiny_writes_delayed,
+ large_writes, large_writes_delayed,
+ tiny_reads, tiny_reads_ahead]}].
init_per_group(_GroupName, Config) ->
Config.
+end_per_group(bench, Config) ->
+ ScratchDir = proplists:get_value(priv_dir, Config),
+ file:delete(filename:join(ScratchDir, "benchmark_scratch_file")),
+ Config;
end_per_group(_GroupName, Config) ->
Config.
@@ -381,11 +401,11 @@ read_write_0(Str, {Func, ReadFun}, Options) ->
io:format("~p:~p: ~p ERROR: ~ts vs~n ~w~n - ~p~n",
[?MODULE, Line, Func, Str, ReadBytes, Options]),
exit({error, ?LINE});
- error:What ->
+ error:What:Stacktrace ->
io:format("~p:??: ~p ERROR: ~p from~n ~w~n ~p~n",
[?MODULE, Func, What, Str, Options]),
- io:format("\t~p~n", [erlang:get_stacktrace()]),
+ io:format("\t~p~n", [Stacktrace]),
exit({error, ?LINE})
end.
@@ -473,7 +493,7 @@ um_check_unicode(_Utf8Bin, {ok, _ListOrBin}, _, _UTF8_) ->
um_filename(Bin, Dir, Options) when is_binary(Bin) ->
um_filename(binary_to_list(Bin), Dir, Options);
um_filename(Str = [_|_], Dir, Options) ->
- Name = hd(string:tokens(Str, ":")),
+ Name = hd(string:lexemes(Str, ":")),
Enc = atom_to_list(proplists:get_value(encoding, Options, latin1)),
File = case lists:member(binary, Options) of
true ->
@@ -638,6 +658,10 @@ cur_dir_0(Config) when is_list(Config) ->
{ok,NewDirFiles} = ?FILE_MODULE:list_dir("."),
true = lists:member(UncommonName,NewDirFiles),
+ %% Ensure that we get the same result with a trailing slash; the
+ %% APIs used on Windows will choke on them if passed directly.
+ {ok,NewDirFiles} = ?FILE_MODULE:list_dir("./"),
+
%% Delete the directory and return to the old current directory
%% and check that the created file isn't there (too!)
expect({error, einval}, {error, eacces},
@@ -690,10 +714,15 @@ win_cur_dir_1(_Config) ->
%% Get the drive letter from the current directory,
%% and try to get current directory for that drive.
- [Drive,$:|_] = BaseDir,
- {ok,BaseDir} = ?FILE_MODULE:get_cwd([Drive,$:]),
+ [CurDrive,$:|_] = BaseDir,
+ {ok,BaseDir} = ?FILE_MODULE:get_cwd([CurDrive,$:]),
io:format("BaseDir = ~s\n", [BaseDir]),
+ %% We should error out on non-existent drives. Any reasonable system will
+ %% have at least one.
+ CurDirs = [?FILE_MODULE:get_cwd([Drive,$:]) || Drive <- lists:seq($A, $Z)],
+ lists:member({error,eaccess}, CurDirs),
+
%% Unfortunately, there is no way to move away from the
%% current drive as we can't use the "subst" command from
%% a SSH connection. We can't test any more.
@@ -831,7 +860,7 @@ no_untranslatable_names() ->
end.
start_node(Name, Args) ->
- [_,Host] = string:tokens(atom_to_list(node()), "@"),
+ [_,Host] = string:lexemes(atom_to_list(node()), "@"),
ct:log("Trying to start ~w@~s~n", [Name,Host]),
case test_server:start_node(Name, peer, [{args,Args}]) of
{error,Reason} ->
@@ -1019,6 +1048,23 @@ close(Config) when is_list(Config) ->
Val = ?FILE_MODULE:close(Fd1),
io:format("Second close gave: ~p",[Val]),
+ %% All operations on a closed raw file should EINVAL, even if they're not
+ %% supported on the current platform.
+ {ok,Fd2} = ?FILE_MODULE:open(Name, [read, write, raw]),
+ ok = ?FILE_MODULE:close(Fd2),
+
+ {error, einval} = ?FILE_MODULE:advise(Fd2, 5, 5, normal),
+ {error, einval} = ?FILE_MODULE:allocate(Fd2, 5, 5),
+ {error, einval} = ?FILE_MODULE:close(Fd2),
+ {error, einval} = ?FILE_MODULE:datasync(Fd2),
+ {error, einval} = ?FILE_MODULE:position(Fd2, 5),
+ {error, einval} = ?FILE_MODULE:pread(Fd2, 5, 1),
+ {error, einval} = ?FILE_MODULE:pwrite(Fd2, 5, "einval please"),
+ {error, einval} = ?FILE_MODULE:read(Fd2, 1),
+ {error, einval} = ?FILE_MODULE:sync(Fd2),
+ {error, einval} = ?FILE_MODULE:truncate(Fd2),
+ {error, einval} = ?FILE_MODULE:write(Fd2, "einval please"),
+
[] = flush(),
ok.
@@ -1132,8 +1178,8 @@ pread_write_test(File, Data) ->
end,
I = Size + 17,
ok = ?FILE_MODULE:pwrite(File, 0, Data),
- Res = ?FILE_MODULE:pread(File, 0, I),
- {ok, Data} = Res,
+ {ok, Data} = ?FILE_MODULE:pread(File, 0, I),
+ {ok, [Data]} = ?FILE_MODULE:pread(File, [{0, I}]),
eof = ?FILE_MODULE:pread(File, I, 1),
ok = ?FILE_MODULE:pwrite(File, [{0, Data}, {I, Data}]),
{ok, [Data, eof, Data]} =
@@ -1321,6 +1367,10 @@ file_info_basic_file(Config) when is_list(Config) ->
io:put_chars(Fd1, "foo bar"),
ok = ?FILE_MODULE:close(Fd1),
+ %% Don't crash the file server when passing incorrect arguments.
+ {error,badarg} = ?FILE_MODULE:read_file_info(Name, [{time, gurka}]),
+ {error,badarg} = ?FILE_MODULE:read_file_info([#{} | gaffel]),
+
%% Test that the file has the expected attributes.
%% The times are tricky, so we will save them to a separate test case.
{ok,FileInfo} = ?FILE_MODULE:read_file_info(Name),
@@ -1564,6 +1614,39 @@ file_write_file_info(Config) when is_list(Config) ->
[] = flush(),
ok.
+file_wfi_helpers(Config) when is_list(Config) ->
+ RootDir = get_good_directory(Config),
+ io:format("RootDir = ~p", [RootDir]),
+
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE) ++ "_wfi_helpers"),
+
+ ok = ?FILE_MODULE:write_file(Name, "hello again"),
+ NewTime = {{1997, 02, 15}, {13, 18, 20}},
+ ok = ?FILE_MODULE:change_time(Name, NewTime, NewTime),
+
+ {ok, #file_info{atime=NewActAtime, mtime=NewTime}} =
+ ?FILE_MODULE:read_file_info(Name),
+
+ NewFilteredAtime = filter_atime(NewTime, Config),
+ NewFilteredAtime = filter_atime(NewActAtime, Config),
+
+ %% Make the file unwritable
+ ok = ?FILE_MODULE:change_mode(Name, 8#400),
+ {error, eacces} = ?FILE_MODULE:write_file(Name, "hello again"),
+
+ %% ... and writable again
+ ok = ?FILE_MODULE:change_mode(Name, 8#600),
+ ok = ?FILE_MODULE:write_file(Name, "hello again"),
+
+ %% We have no idea which users will work, so all we can do is to check
+ %% that it returns enoent instead of crashing.
+ {error, enoent} = ?FILE_MODULE:change_group("bogus file name", 0),
+ {error, enoent} = ?FILE_MODULE:change_owner("bogus file name", 0),
+
+ [] = flush(),
+ ok.
+
%% Returns a directory on a file system that has correct file times.
get_good_directory(Config) ->
@@ -2044,13 +2127,22 @@ names(Config) when is_list(Config) ->
ok = ?FILE_MODULE:close(Fd2),
{ok,Fd3} = ?FILE_MODULE:open(Name3,read),
ok = ?FILE_MODULE:close(Fd3),
+
+ %% Now try the same on raw files.
+ {ok,Fd4} = ?FILE_MODULE:open(Name2, [read, raw]),
+ ok = ?FILE_MODULE:close(Fd4),
+ {ok,Fd4f} = ?FILE_MODULE:open(lists:flatten(Name2), [read, raw]),
+ ok = ?FILE_MODULE:close(Fd4f),
+ {ok,Fd5} = ?FILE_MODULE:open(Name3, [read, raw]),
+ ok = ?FILE_MODULE:close(Fd5),
+
case length(Name1) > 255 of
true ->
io:format("Path too long for an atom:\n\n~p\n", [Name1]);
false ->
Name4 = list_to_atom(Name1),
- {ok,Fd4} = ?FILE_MODULE:open(Name4,read),
- ok = ?FILE_MODULE:close(Fd4)
+ {ok,Fd6} = ?FILE_MODULE:open(Name4,read),
+ ok = ?FILE_MODULE:close(Fd6)
end,
%% Try some path names
@@ -2074,6 +2166,46 @@ names(Config) when is_list(Config) ->
[] = flush(),
ok.
+volume_relative_paths(Config) when is_list(Config) ->
+ case os:type() of
+ {win32, _} ->
+ {ok, [Drive, $: | _]} = file:get_cwd(),
+ %% Relative to current device root.
+ {ok, RootInfo} = file:read_file_info([Drive, $:, $/]),
+ {ok, RootInfo} = file:read_file_info("/"),
+ %% Relative to current device directory.
+ {ok, DirContents} = file:list_dir([Drive, $:]),
+ {ok, DirContents} = file:list_dir("."),
+ [] = flush(),
+ ok;
+ _ ->
+ {skip, "This test is Windows-specific."}
+ end.
+
+unc_paths(Config) when is_list(Config) ->
+ case os:type() of
+ {win32, _} ->
+ %% We assume administrative shares are set up and reachable, and we
+ %% settle for testing presence as some of the returned data is
+ %% different.
+ {ok, _} = file:read_file_info("C:\\Windows\\explorer.exe"),
+ {ok, _} = file:read_file_info("\\\\localhost\\c$\\Windows\\explorer.exe"),
+
+ {ok, Cwd} = file:get_cwd(),
+
+ try
+ ok = file:set_cwd("\\\\localhost\\c$\\Windows\\"),
+ {ok, _} = file:read_file_info("explorer.exe")
+ after
+ file:set_cwd(Cwd)
+ end,
+
+ [] = flush(),
+ ok;
+ _ ->
+ {skip, "This test is Windows-specific."}
+ end.
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -2102,13 +2234,14 @@ e_delete(Config) when is_list(Config) ->
case os:type() of
{win32, _} ->
%% Remove a character device.
- {error, eacces} = ?FILE_MODULE:delete("nul");
+ expect({error, eacces}, {error, einval},
+ ?FILE_MODULE:delete("nul"));
_ ->
?FILE_MODULE:write_file_info(
Base, #file_info {mode=0}),
{error, eacces} = ?FILE_MODULE:delete(Afile),
?FILE_MODULE:write_file_info(
- Base, #file_info {mode=8#600})
+ Base, #file_info {mode=8#700})
end,
[] = flush(),
@@ -2239,7 +2372,7 @@ e_make_dir(Config) when is_list(Config) ->
?FILE_MODULE:write_file_info(Base, #file_info {mode=0}),
{error, eacces} = ?FILE_MODULE:make_dir(filename:join(Base, "xxxx")),
?FILE_MODULE:write_file_info(
- Base, #file_info {mode=8#600})
+ Base, #file_info {mode=8#700})
end,
ok.
@@ -2285,7 +2418,7 @@ e_del_dir(Config) when is_list(Config) ->
ok = ?FILE_MODULE:make_dir(ADirectory),
?FILE_MODULE:write_file_info( Base, #file_info {mode=0}),
{error, eacces} = ?FILE_MODULE:del_dir(ADirectory),
- ?FILE_MODULE:write_file_info( Base, #file_info {mode=8#600})
+ ?FILE_MODULE:write_file_info( Base, #file_info {mode=8#700})
end,
[] = flush(),
ok.
@@ -2641,8 +2774,8 @@ altname(Config) when is_list(Config) ->
{skipped, "Altname not supported on this platform"};
{ok, "LONGAL~1"} ->
{ok, "A_FILE~1"} = ?FILE_MODULE:altname(Name),
- {ok, "C:/"} = ?FILE_MODULE:altname("C:/"),
- {ok, "C:\\"} = ?FILE_MODULE:altname("C:\\"),
+ {ok, "c:/"} = ?FILE_MODULE:altname("C:/"),
+ {ok, "c:/"} = ?FILE_MODULE:altname("C:\\"),
{error,enoent} = ?FILE_MODULE:altname(NonexName),
{ok, "short"} = ?FILE_MODULE:altname(ShortName),
ok
@@ -2923,20 +3056,22 @@ delayed_write(Config) when is_list(Config) ->
%%
%% Test caching and normal close of non-raw file
{ok, Fd1} =
- ?FILE_MODULE:open(File, [write, {delayed_write, Size+1, 2000}]),
+ ?FILE_MODULE:open(File, [write, {delayed_write, Size+1, 400}]),
ok = ?FILE_MODULE:write(Fd1, Data1),
- timer:sleep(1000), % Just in case the file system is slow
+ %% Wait for a reasonable amount of time to check whether the write was
+ %% practically instantaneous or actually delayed.
+ timer:sleep(100),
{ok, Fd2} = ?FILE_MODULE:open(File, [read]),
eof = ?FILE_MODULE:read(Fd2, 1),
ok = ?FILE_MODULE:write(Fd1, Data1), % Data flush on size
- timer:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100),
{ok, Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 2*Size+1),
ok = ?FILE_MODULE:write(Fd1, Data1),
- timer:sleep(3000), % Wait until data flush on timeout
+ timer:sleep(500), % Wait until data flush on timeout
{ok, Data1Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 3*Size+1),
ok = ?FILE_MODULE:write(Fd1, Data1),
ok = ?FILE_MODULE:close(Fd1), % Data flush on close
- timer:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100),
{ok, Data1Data1Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 4*Size+1),
ok = ?FILE_MODULE:close(Fd2),
%%
@@ -2970,7 +3105,7 @@ delayed_write(Config) when is_list(Config) ->
{'DOWN', Mref1, _, _, _} = Down1a ->
ct:fail(Down1a)
end,
- timer:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100), % Just in case the file system is slow
{ok, Fd3} = ?FILE_MODULE:open(File, [read]),
eof = ?FILE_MODULE:read(Fd3, 1),
Child1 ! {Parent, continue, normal},
@@ -2980,7 +3115,7 @@ delayed_write(Config) when is_list(Config) ->
{'DOWN', Mref1, _, _, _} = Down1b ->
ct:fail(Down1b)
end,
- timer:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100), % Just in case the file system is slow
{ok, Data1} = ?FILE_MODULE:pread(Fd3, bof, Size+1),
ok = ?FILE_MODULE:close(Fd3),
%%
@@ -2993,7 +3128,7 @@ delayed_write(Config) when is_list(Config) ->
{'DOWN', Mref2, _, _, _} = Down2a ->
ct:fail(Down2a)
end,
- timer:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100), % Just in case the file system is slow
{ok, Fd4} = ?FILE_MODULE:open(File, [read]),
eof = ?FILE_MODULE:read(Fd4, 1),
Child2 ! {Parent, continue, kill},
@@ -3003,7 +3138,7 @@ delayed_write(Config) when is_list(Config) ->
{'DOWN', Mref2, _, _, _} = Down2b ->
ct:fail(Down2b)
end,
- timer:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100), % Just in case the file system is slow
eof = ?FILE_MODULE:pread(Fd4, bof, 1),
ok = ?FILE_MODULE:close(Fd4),
%%
@@ -3095,6 +3230,16 @@ read_ahead(Config) when is_list(Config) ->
Data1Data2Data3 = Data1++Data2++Data3,
{ok, Data1Data2Data3} = ?FILE_MODULE:read(Fd5, 3*Size+1),
ok = ?FILE_MODULE:close(Fd5),
+
+ %% Ensure that a read that draws from both the buffer and the file won't
+ %% return anything wonky.
+ SplitData = << <<(I rem 256)>> || I <- lists:seq(1, 1024) >>,
+ file:write_file(File, SplitData),
+ {ok, Fd6} = ?FILE_MODULE:open(File, [raw, read, binary, {read_ahead, 256}]),
+ {ok, <<1>>} = file:read(Fd6, 1),
+ <<1, Shifted:512/binary, _Rest/binary>> = SplitData,
+ {ok, Shifted} = file:read(Fd6, 512),
+
%%
[] = flush(),
ok.
@@ -3699,6 +3844,83 @@ do_large_write(Name) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Benchmarks
+%%
+%% Note that we only measure the time it takes to run the isolated file
+%% operations and that the actual test runtime can differ significantly,
+%% especially on the write side as the files need to be truncated before
+%% writing.
+
+large_writes(Config) when is_list(Config) ->
+ Modes = [raw, binary],
+ OpCount = 4096,
+ Data = <<0:(64 bsl 10)/unit:8>>,
+ run_write_benchmark(Config, Modes, OpCount, Data).
+
+large_writes_delayed(Config) when is_list(Config) ->
+ %% Each write is exactly as large as the delay buffer, causing the writes
+ %% to pass through each time, giving us a decent idea of how much overhead
+ %% delayed_write adds.
+ Modes = [raw, binary, {delayed_write, 64 bsl 10, 2000}],
+ OpCount = 4096,
+ Data = <<0:(64 bsl 10)/unit:8>>,
+ run_write_benchmark(Config, Modes, OpCount, Data).
+
+tiny_writes(Config) when is_list(Config) ->
+ Modes = [raw, binary],
+ OpCount = 512 bsl 10,
+ Data = <<0>>,
+ run_write_benchmark(Config, Modes, OpCount, Data).
+
+tiny_writes_delayed(Config) when is_list(Config) ->
+ Modes = [raw, binary, {delayed_write, 512 bsl 10, 2000}],
+ OpCount = 512 bsl 10,
+ Data = <<0>>,
+ run_write_benchmark(Config, Modes, OpCount, Data).
+
+%% The read benchmarks assume that "benchmark_scratch_file" has been filled by
+%% the write benchmarks.
+
+tiny_reads(Config) when is_list(Config) ->
+ Modes = [raw, binary],
+ OpCount = 512 bsl 10,
+ run_read_benchmark(Config, Modes, OpCount, 1).
+
+tiny_reads_ahead(Config) when is_list(Config) ->
+ Modes = [raw, binary, {read_ahead, 512 bsl 10}],
+ OpCount = 512 bsl 10,
+ run_read_benchmark(Config, Modes, OpCount, 1).
+
+run_write_benchmark(Config, Modes, OpCount, Data) ->
+ run_benchmark(Config, [write | Modes], OpCount, fun file:write/2, Data).
+
+run_read_benchmark(Config, Modes, OpCount, OpSize) ->
+ run_benchmark(Config, [read | Modes], OpCount, fun file:read/2, OpSize).
+
+run_benchmark(Config, Modes, OpCount, Fun, Arg) ->
+ ScratchDir = proplists:get_value(priv_dir, Config),
+ Path = filename:join(ScratchDir, "benchmark_scratch_file"),
+ {ok, Fd} = file:open(Path, Modes),
+ submit_throughput_results(Fun, [Fd, Arg], OpCount).
+
+submit_throughput_results(Fun, Args, Times) ->
+ MSecs = measure_repeated_file_op(Fun, Args, Times, millisecond),
+ IOPS = trunc(Times * (1000 / MSecs)),
+ ct_event:notify(#event{ name = benchmark_data, data = [{value,IOPS}] }),
+ {comment, io_lib:format("~p IOPS, ~p ms", [IOPS, trunc(MSecs)])}.
+
+measure_repeated_file_op(Fun, Args, Times, Unit) ->
+ Start = os:perf_counter(Unit),
+ repeated_apply(Fun, Args, Times),
+ os:perf_counter(Unit) - Start.
+
+repeated_apply(_F, _Args, Times) when Times =< 0 ->
+ ok;
+repeated_apply(F, Args, Times) ->
+ erlang:apply(F, Args),
+ repeated_apply(F, Args, Times - 1).
+
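+%% A worked example of the IOPS formula above (illustrative numbers only):
+%% if Times = 4096 operations finish in MSecs = 2000 ms, then
+%% IOPS = trunc(4096 * (1000 / 2000)) = 2048, and the returned comment
+%% reads "2048 IOPS, 2000 ms".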
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
response_analysis(Module, Function, Arguments) ->
@@ -3934,7 +4156,7 @@ read_line_create_files(TestData) ->
read_line_remove_files(TestData) ->
[ file:delete(File) || {_Function,File,_,_} <- TestData ].
-%% read_line with prim_file.
+%% read_line with ?PRIM_FILE.
read_line_1(Config) when is_list(Config) ->
PrivDir = proplists:get_value(priv_dir, Config),
All = read_line_testdata(PrivDir),
@@ -4103,9 +4325,9 @@ read_line_create7(Filename) ->
file:close(F).
read_line_all(Filename) ->
- {ok,F} = prim_file:open(Filename,[read,binary]),
+ {ok,F} = ?PRIM_FILE:open(Filename,[read,binary]),
X=read_rl_lines(F),
- prim_file:close(F),
+ ?PRIM_FILE:close(F),
Bin = list_to_binary([B || {ok,B} <- X]),
Bin = re:replace(list_to_binary([element(2,file:read_file(Filename))]),
"\r\n","\n",[global,{return,binary}]),
@@ -4138,7 +4360,7 @@ read_line_all4(Filename) ->
{length(X),Bin}.
read_rl_lines(F) ->
- case prim_file:read_line(F) of
+ case ?PRIM_FILE:read_line(F) of
eof ->
[];
{error,X} ->
@@ -4158,9 +4380,9 @@ read_rl_lines2(F) ->
end.
read_line_all_alternating(Filename) ->
- {ok,F} = prim_file:open(Filename,[read,binary]),
+ {ok,F} = ?PRIM_FILE:open(Filename,[read,binary]),
X=read_rl_lines(F,true),
- prim_file:close(F),
+ ?PRIM_FILE:close(F),
Bin = list_to_binary([B || {ok,B} <- X]),
Bin = re:replace(list_to_binary([element(2,file:read_file(Filename))]),
"\r\n","\n",[global,{return,binary}]),
@@ -4194,8 +4416,8 @@ read_line_all_alternating4(Filename) ->
read_rl_lines(F,Alternate) ->
case begin
case Alternate of
- true -> prim_file:read(F,1);
- false -> prim_file:read_line(F)
+ true -> ?PRIM_FILE:read(F,1);
+ false -> ?PRIM_FILE:read_line(F)
end
end of
eof ->
diff --git a/lib/kernel/test/file_name_SUITE.erl b/lib/kernel/test/file_name_SUITE.erl
index 899102c908..3afc647081 100644
--- a/lib/kernel/test/file_name_SUITE.erl
+++ b/lib/kernel/test/file_name_SUITE.erl
@@ -77,6 +77,7 @@
init_per_testcase/2, end_per_testcase/2]).
-export([normal/1,icky/1,very_icky/1,normalize/1,home_dir/1]).
+-define(PRIM_FILE, prim_file).
init_per_testcase(_Func, Config) ->
Config.
@@ -131,7 +132,7 @@ home_dir(Config) when is_list(Config) ->
os:putenv("HOME",NewHome),
{"HOME",Save};
_ ->
- rm_rf(prim_file,NewHome),
+ rm_rf(?PRIM_FILE,NewHome),
throw(unsupported_os)
end,
try
@@ -145,7 +146,7 @@ home_dir(Config) when is_list(Config) ->
_ ->
os:putenv(SaveOldName,SaveOldValue)
end,
- rm_rf(prim_file,NewHome)
+ rm_rf(?PRIM_FILE,NewHome)
end
catch
throw:need_unicode_mode ->
@@ -190,7 +191,7 @@ normal(Config) when is_list(Config) ->
try
Priv = proplists:get_value(priv_dir, Config),
file:set_cwd(Priv),
- ok = check_normal(prim_file),
+ ok = check_normal(?PRIM_FILE),
ok = check_normal(file),
%% If all is good, delete dir again (avoid hanging dir on windows)
rm_rf(file,"normal_dir"),
@@ -210,7 +211,7 @@ icky(Config) when is_list(Config) ->
try
Priv = proplists:get_value(priv_dir, Config),
file:set_cwd(Priv),
- ok = check_icky(prim_file),
+ ok = check_icky(?PRIM_FILE),
ok = check_icky(file),
%% If all is good, delete dir again (avoid hanging dir on windows)
rm_rf(file,"icky_dir"),
@@ -229,7 +230,7 @@ very_icky(Config) when is_list(Config) ->
try
Priv = proplists:get_value(priv_dir, Config),
file:set_cwd(Priv),
- case check_very_icky(prim_file) of
+ case check_very_icky(?PRIM_FILE) of
need_unicode_mode ->
{skipped,"VM needs to be started in Unicode filename mode"};
ok ->
@@ -292,17 +293,14 @@ check_normal(Mod) ->
ok
end,
[ begin
- {ok, FD} = Mod:open(Name,[read]),
- {ok, Content} = Mod:read(FD,1024),
- ok = file:close(FD)
- end || {regular,Name,Content} <- NormalDir ],
- [ begin
{ok, FD} = Mod:open(Name,[read,binary]),
BC = list_to_binary(Content),
{ok, BC} = Mod:read(FD,1024),
ok = file:close(FD)
end || {regular,Name,Content} <- NormalDir ],
+ {error, badarg} = Mod:rename("fil1\0tmp_fil2","tmp_fil1"),
Mod:rename("fil1","tmp_fil1"),
+ {error, badarg} = Mod:read_file("tmp_fil1\0.txt"),
{ok, <<"fil1">>} = Mod:read_file("tmp_fil1"),
{error,enoent} = Mod:read_file("fil1"),
Mod:rename("tmp_fil1","fil1"),
@@ -410,11 +408,6 @@ check_icky(Mod) ->
ok
end,
[ begin
- {ok, FD} = Mod:open(Name,[read]),
- {ok, Content} = Mod:read(FD,1024),
- ok = file:close(FD)
- end || {regular,Name,Content} <- IckyDir ],
- [ begin
{ok, FD} = Mod:open(Name,[read,binary]),
BC = list_to_binary([Content]),
{ok, BC} = Mod:read(FD,1024),
@@ -519,11 +512,6 @@ check_very_icky(Mod) ->
ok
end,
[ begin
- {ok, FD} = Mod:open(Name,[read]),
- {ok, Content} = Mod:read(FD,1024),
- ok = file:close(FD)
- end || {regular,Name,Content} <- VeryIckyDir ],
- [ begin
{ok, FD} = Mod:open(Name,[read,binary]),
BC = list_to_binary([Content]),
{ok, BC} = Mod:read(FD,1024),
diff --git a/lib/kernel/test/gen_sctp_SUITE.erl b/lib/kernel/test/gen_sctp_SUITE.erl
index 620ab235a0..a0ae792ba9 100644
--- a/lib/kernel/test/gen_sctp_SUITE.erl
+++ b/lib/kernel/test/gen_sctp_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -1038,8 +1038,7 @@ do_from_other_process(Fun) ->
Result ->
Parent ! {Ref,Result}
catch
- Class:Reason ->
- Stacktrace = erlang:get_stacktrace(),
+ Class:Reason:Stacktrace ->
Parent ! {Ref,Class,Reason,Stacktrace}
end
end),
@@ -1617,8 +1616,7 @@ s_start(Socket, Timeout, Parent) ->
try
s_loop(Socket, Timeout, Parent, Handler, gb_trees:empty())
catch
- Class:Reason ->
- Stacktrace = erlang:get_stacktrace(),
+ Class:Reason:Stacktrace ->
io:format(?MODULE_STRING":socket exception ~w:~w at~n"
"~p.~n", [Class,Reason,Stacktrace]),
erlang:raise(Class, Reason, Stacktrace)
diff --git a/lib/kernel/test/gen_tcp_api_SUITE.erl b/lib/kernel/test/gen_tcp_api_SUITE.erl
index 12d22519ce..1be016444f 100644
--- a/lib/kernel/test/gen_tcp_api_SUITE.erl
+++ b/lib/kernel/test/gen_tcp_api_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -605,9 +605,9 @@ ok({ok,V}) -> V;
ok(NotOk) ->
try throw(not_ok)
catch
- Thrown ->
+ throw:Thrown:Stacktrace ->
erlang:raise(
- error, {Thrown, NotOk}, tl(erlang:get_stacktrace()))
+ error, {Thrown, NotOk}, tl(Stacktrace))
end.
get_localaddr() ->
diff --git a/lib/kernel/test/gen_tcp_misc_SUITE.erl b/lib/kernel/test/gen_tcp_misc_SUITE.erl
index 331864b5de..52edfaee29 100644
--- a/lib/kernel/test/gen_tcp_misc_SUITE.erl
+++ b/lib/kernel/test/gen_tcp_misc_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -41,6 +41,7 @@
busy_send/1, busy_disconnect_passive/1, busy_disconnect_active/1,
fill_sendq/1, partial_recv_and_close/1,
partial_recv_and_close_2/1,partial_recv_and_close_3/1,so_priority/1,
+ recvtos/1, recvttl/1, recvtosttl/1, recvtclass/1,
%% Accept tests
primitive_accept/1,multi_accept_close_listen/1,accept_timeout/1,
accept_timeouts_in_order/1,accept_timeouts_in_order2/1,
@@ -51,7 +52,8 @@
several_accepts_in_one_go/1, accept_system_limit/1,
active_once_closed/1, send_timeout/1, send_timeout_active/1,
otp_7731/1, zombie_sockets/1, otp_7816/1, otp_8102/1,
- wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1]).
+ wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1,
+ otp_12242/1, delay_send_error/1]).
%% Internal exports.
-export([sender/3, not_owner/1, passive_sockets_server/2, priority_server/1,
@@ -83,7 +85,8 @@ all() ->
busy_disconnect_passive, busy_disconnect_active,
fill_sendq, partial_recv_and_close,
partial_recv_and_close_2, partial_recv_and_close_3,
- so_priority, primitive_accept,
+ so_priority, recvtos, recvttl, recvtosttl,
+ recvtclass, primitive_accept,
multi_accept_close_listen, accept_timeout,
accept_timeouts_in_order, accept_timeouts_in_order2,
accept_timeouts_in_order3, accept_timeouts_in_order4,
@@ -93,7 +96,8 @@ all() ->
killing_multi_acceptors2, several_accepts_in_one_go, accept_system_limit,
active_once_closed, send_timeout, send_timeout_active, otp_7731,
wrapping_oct,
- zombie_sockets, otp_7816, otp_8102, otp_9389].
+ zombie_sockets, otp_7816, otp_8102, otp_9389,
+ otp_12242, delay_send_error].
groups() ->
[].
@@ -1572,52 +1576,56 @@ fill_sendq(Config) when is_list(Config) ->
Master = self(),
Server =
spawn_link(fun () ->
- {ok,L} = gen_tcp:listen
- (0, [{active,false},binary,
- {reuseaddr,true},{packet,0}]),
+ {ok,L} = gen_tcp:listen(0, [{active,false},binary,
+ {reuseaddr,true},{packet,0}]),
{ok,Port} = inet:port(L),
Master ! {self(),client,
fill_sendq_client(Port, Master)},
fill_sendq_srv(L, Master)
end),
io:format("~p Server~n", [Server]),
- receive {Server,client,Client} ->
- io:format("~p Client~n", [Client]),
- receive {Server,reader,Reader} ->
- io:format("~p Reader~n", [Reader]),
- fill_sendq_loop(Server, Client, Reader)
+ receive
+ {Server,client,Client} ->
+ io:format("~p Client~n", [Client]),
+ receive
+ {Server,reader,Reader} ->
+ io:format("~p Reader~n", [Reader]),
+ fill_sendq_loop(Server, Client, Reader)
end
end.
fill_sendq_loop(Server, Client, Reader) ->
%% Master
%%
- receive {Server,send} ->
+ receive
+ {Server,send} ->
fill_sendq_loop(Server, Client, Reader)
after 2000 ->
%% Send queue full, sender blocked -> close client.
io:format("Send timeout, closing Client...~n", []),
Client ! {self(),close},
- receive {Server,[{error,closed}]} ->
- io:format("Got server closed.~n"),
- receive {Reader,[{error,closed}]} ->
- io:format
- ("Got reader closed.~n"),
- ok
- after 3000 ->
- ct:fail({timeout,{closed,reader}})
- end;
- {Reader,[{error,closed}]} ->
- io:format("Got reader closed.~n"),
- receive {Server,[{error,closed}]} ->
- io:format("Got server closed~n"),
- ok
- after 3000 ->
- ct:fail({timeout,{closed,server}})
- end
- after 3000 ->
- ct:fail({timeout,{closed,[server,reader]}})
- end
+ receive
+ {Server,[{error,closed}]} ->
+ io:format("Got server closed.~n"),
+ receive
+ {Reader,[{error,closed}]} ->
+ io:format("Got reader closed.~n"),
+ ok
+ after 3000 ->
+ ct:fail({timeout,{closed,reader}})
+ end;
+ {Reader,[{error,closed}]} ->
+ io:format("Got reader closed.~n"),
+ receive
+ {Server,[{error,closed}]} ->
+ io:format("Got server closed~n"),
+ ok
+ after 3000 ->
+ ct:fail({timeout,{closed,server}})
+ end
+ after 3000 ->
+ ct:fail({timeout,{closed,[server,reader]}})
+ end
end.
fill_sendq_srv(L, Master) ->
@@ -1910,6 +1918,233 @@ so_priority(Config) when is_list(Config) ->
end
end.
+
+
+%% IP_RECVTOS and IP_RECVTCLASS for IP_PKTOPTIONS
+%% does not seem to be implemented in Linux until kernel 3.1
+%%
+%% It seems pktoptions does not return valid values
+%% for IPv4 connect sockets. On the accept socket
+%% we get valid values, but on the connect socket we get
+%% the default values for TOS and TTL.
+%%
+%% Hence the argument CheckConnect, which enables
+%% checking the returned values for the connect socket.
+%% It is only used for recvtclass that is an IPv6 option
+%% and there we get valid values from both socket ends.
+
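+%% A minimal sketch of the check done below (the socket name and values are
+%% examples only; actual values are OS dependent): with {recvtos,true} set
+%% on the listen socket and {tos,96} set on the connecting socket, the
+%% accept socket is expected to report the peer's value, e.g.
+%%
+%%     {ok,[{pktoptions,[{tos,96}]}]} = inet:getopts(AcceptSock, [pktoptions])
+%%
+%% while the connect socket may only report defaults (hence CheckConnect).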
+recvtos(_Config) ->
+ test_pktoptions(
+ inet, [{recvtos,tos,96}],
+ fun recvtos_ok/2,
+ false).
+
+recvtosttl(_Config) ->
+ test_pktoptions(
+ inet, [{recvtos,tos,96},{recvttl,ttl,33}],
+ fun (OSType, OSVer) ->
+ recvtos_ok(OSType, OSVer) andalso recvttl_ok(OSType, OSVer)
+ end,
+ false).
+
+recvttl(_Config) ->
+ test_pktoptions(
+ inet, [{recvttl,ttl,33}],
+ fun recvttl_ok/2,
+ false).
+
+recvtclass(_Config) ->
+ {ok,IFs} = inet:getifaddrs(),
+ case
+ [Name ||
+ {Name,Opts} <- IFs,
+ lists:member({addr,{0,0,0,0,0,0,0,1}}, Opts)]
+ of
+ [_] ->
+ test_pktoptions(
+ inet6, [{recvtclass,tclass,224}],
+ fun recvtclass_ok/2,
+ true);
+ [] ->
+ {skip,{ipv6_not_supported,IFs}}
+ end.
+
+%% These version numbers are above the highest noted
+%% in daily tests where the test fails for a plausible reason,
+%% so skip on platforms of lower version, i.e. they are future
+%% versions where it is possible that it might not fail.
+%%
+%% When machines with newer versions get installed,
+%% if the test still fails for a plausible reason, these
+%% version numbers should simply be increased.
+%% Or maybe we should change to only test on known good
+%% platforms - change {unix,_} to false?
+
+%% pktoptions is not supported for IPv4
+recvtos_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0});
+recvtos_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {19,0,0});
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0});
+recvtos_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%% Does not return any value - not implemented for pktoptions
+recvtos_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {3,1,0});
+%%
+recvtos_ok({unix,_}, _) -> true;
+recvtos_ok(_, _) -> false.
+
+%% pktoptions is not supported for IPv4
+recvttl_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0});
+recvttl_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {19,0,0});
+%% Using the option returns einval, so it is not implemented.
+recvttl_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0});
+recvttl_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%% Does not return any value - not implemented for pktoptions
+recvttl_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {2,7,0});
+%%
+recvttl_ok({unix,_}, _) -> true;
+recvttl_ok(_, _) -> false.
+
+%% pktoptions is not supported for IPv6
+recvtclass_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0});
+recvtclass_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {19,0,0});
+recvtclass_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%% Using the option returns einval, so it is not implemented.
+recvtclass_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0});
+%% Does not return any value - not implemented for pktoptions
+recvtclass_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {3,1,0});
+%%
+recvtclass_ok({unix,_}, _) -> true;
+recvtclass_ok(_, _) -> false.
+
+semver_lt({X1,Y1,Z1}, {X2,Y2,Z2}) ->
+ if
+ X1 > X2 -> false;
+ X1 < X2 -> true;
+ Y1 > Y2 -> false;
+ Y1 < Y2 -> true;
+ Z1 > Z2 -> false;
+ Z1 < Z2 -> true;
+ true -> false
+ end;
+semver_lt(_, {_,_,_}) -> false.
+
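+%% Illustrative examples of the comparison above (not from the patch):
+%% semver_lt({6,3,2}, {6,4,0}) is true, semver_lt({6,4,0}, {6,4,0}) is
+%% false, and a non-tuple first argument, e.g. semver_lt(undefined, {1,0,0}),
+%% falls through to the last clause and is false.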
+test_pktoptions(Family, Spec, OSFilter, CheckConnect) ->
+ OSType = os:type(),
+ OSVer = os:version(),
+ case OSFilter(OSType, OSVer) of
+ true ->
+ io:format("Os: ~p, ~p~n", [OSType,OSVer]),
+ test_pktoptions(Family, Spec, CheckConnect, OSType, OSVer);
+ false ->
+ {skip,{not_supported_for_os_version,{OSType,OSVer}}}
+ end.
+%%
+test_pktoptions(Family, Spec, CheckConnect, OSType, OSVer) ->
+ Timeout = 5000,
+ RecvOpts = [RecvOpt || {RecvOpt,_,_} <- Spec],
+ TrueRecvOpts = [{RecvOpt,true} || {RecvOpt,_,_} <- Spec],
+ FalseRecvOpts = [{RecvOpt,false} || {RecvOpt,_,_} <- Spec],
+ Opts = [Opt || {_,Opt,_} <- Spec],
+ OptsVals = [{Opt,Val} || {_,Opt,Val} <- Spec],
+ Address =
+ case Family of
+ inet ->
+ {127,0,0,1};
+ inet6 ->
+ {0,0,0,0,0,0,0,1}
+ end,
+ %%
+ %% Set RecvOpts on listen socket
+ {ok,L} =
+ gen_tcp:listen(
+ 0,
+ [Family,binary,{active,false},{send_timeout,Timeout}
+ |TrueRecvOpts]),
+ {ok,P} = inet:port(L),
+ {ok,TrueRecvOpts} = inet:getopts(L, RecvOpts),
+ {ok,OptsValsDefault} = inet:getopts(L, Opts),
+ %%
+ %% Set RecvOpts and Option values on connect socket
+ {ok,S2} =
+ gen_tcp:connect(
+ Address, P,
+ [Family,binary,{active,false},{send_timeout,Timeout}
+ |TrueRecvOpts ++ OptsVals],
+ Timeout),
+ {ok,TrueRecvOpts} = inet:getopts(S2, RecvOpts),
+ {ok,OptsVals} = inet:getopts(S2, Opts),
+ %%
+ %% Accept socket inherits the options from listen socket
+ {ok,S1} = gen_tcp:accept(L, Timeout),
+ {ok,TrueRecvOpts} = inet:getopts(S1, RecvOpts),
+ {ok,OptsValsDefault} = inet:getopts(S1, Opts),
+%%% %%
+%%% %% Handshake
+%%% ok = gen_tcp:send(S1, <<"hello">>),
+%%% {ok,<<"hello">>} = gen_tcp:recv(S2, 5, Timeout),
+%%% ok = gen_tcp:send(S2, <<"hi">>),
+%%% {ok,<<"hi">>} = gen_tcp:recv(S1, 2, Timeout),
+ %%
+ %% Verify returned remote options
+ {ok,[{pktoptions,OptsVals1}]} = inet:getopts(S1, [pktoptions]),
+ {ok,[{pktoptions,OptsVals2}]} = inet:getopts(S2, [pktoptions]),
+ (Result1 = sets_eq(OptsVals1, OptsVals))
+ orelse io:format(
+ "Accept differs: ~p neq ~p~n", [OptsVals1,OptsVals]),
+ (Result2 = sets_eq(OptsVals2, OptsValsDefault))
+ orelse io:format(
+ "Connect differs: ~p neq ~p~n",
+ [OptsVals2,OptsValsDefault]),
+ %%
+ ok = gen_tcp:close(S2),
+ ok = gen_tcp:close(S1),
+ %%
+ %%
+ %% Clear RecvOpts on listen socket and set Option values
+ ok = inet:setopts(L, FalseRecvOpts ++ OptsVals),
+ {ok,FalseRecvOpts} = inet:getopts(L, RecvOpts),
+ {ok,OptsVals} = inet:getopts(L, Opts),
+ %%
+ %% Set RecvOpts on connecting socket
+ %%
+ {ok,S4} =
+ gen_tcp:connect(
+ Address, P,
+ [Family,binary,{active,false},{send_timeout,Timeout}
+ |TrueRecvOpts],
+ Timeout),
+ {ok,TrueRecvOpts} = inet:getopts(S4, RecvOpts),
+ {ok,OptsValsDefault} = inet:getopts(S4, Opts),
+ %%
+ %% Accept socket inherits the options from listen socket
+ {ok,S3} = gen_tcp:accept(L, Timeout),
+ {ok,FalseRecvOpts} = inet:getopts(S3, RecvOpts),
+ {ok,OptsVals} = inet:getopts(S3, Opts),
+ %%
+ %% Verify returned remote options
+ {ok,[{pktoptions,[]}]} = inet:getopts(S3, [pktoptions]),
+ {ok,[{pktoptions,OptsVals4}]} = inet:getopts(S4, [pktoptions]),
+ (Result3 = sets_eq(OptsVals4, OptsVals))
+ orelse io:format(
+ "Accept2 differs: ~p neq ~p~n", [OptsVals4,OptsVals]),
+ %%
+ ok = gen_tcp:close(S4),
+ ok = gen_tcp:close(S3),
+ ok = gen_tcp:close(L),
+ (Result1 and ((not CheckConnect) or (Result2 and Result3)))
+ orelse
+ exit({failed,
+ [{OptsVals1,OptsVals4,OptsVals},
+ {OptsVals2,OptsValsDefault}],
+ {OSType,OSVer}}),
+%% exit({{OSType,OSVer},success}), % In search for the truth
+ ok.
+
+sets_eq(L1, L2) ->
+ lists:sort(L1) == lists:sort(L2).
+
+
+
%% Accept test utilities (suites are below)
millis() ->
@@ -2201,7 +2436,7 @@ wait_until_accepting(Proc,0) ->
exit({timeout_waiting_for_accepting,Proc});
wait_until_accepting(Proc,N) ->
case process_info(Proc,current_function) of
- {current_function,{prim_inet,accept0,2}} ->
+ {current_function,{prim_inet,accept0,3}} ->
case process_info(Proc,status) of
{status,waiting} ->
ok;
@@ -3052,3 +3287,172 @@ otp_13939(Config) when is_list(Config) ->
exit(Pid, normal),
ct:fail("Server process blocked on send.")
end.
+
+otp_12242(Config) when is_list(Config) ->
+ case os:type() of
+ {win32,_} ->
+ %% Even if we set sndbuf and recbuf to small sizes
+            %% Windows either happily accepts sending gigabytes of data
+            %% in no time, so the second send below that is supposed
+            %% to time out just succeeds, or the first send that
+            %% is supposed to fill the inet_drv I/O queue and
+            %% start waiting until more data can be sent
+            %% instead sends all data but suffers a send
+            %% failure that closes the socket.
+ {skipped,backpressure_broken_on_win32};
+ _ ->
+ %% Find the IPv4 address of an up and running interface
+ %% that is not loopback nor pointtopoint
+ {ok,IFList} = inet:getifaddrs(),
+ ct:pal("IFList ~p~n", [IFList]),
+ case
+ lists:flatten(
+ [lists:filtermap(
+ fun ({addr,Addr}) when tuple_size(Addr) =:= 4 ->
+ {true,Addr};
+ (_) ->
+ false
+ end, Opts)
+ || {_,Opts} <- IFList,
+ case lists:keyfind(flags, 1, Opts) of
+ {_,Flags} ->
+ lists:member(up, Flags)
+ andalso
+ lists:member(running, Flags)
+ andalso
+ not lists:member(loopback, Flags)
+ andalso
+ not lists:member(pointtopoint, Flags);
+ false ->
+ false
+ end])
+ of
+ [Addr|_] ->
+ otp_12242(Addr);
+ Other ->
+ {skipped,{no_external_address,Other}}
+ end
+ end;
+%%
+otp_12242(Addr) when tuple_size(Addr) =:= 4 ->
+ ct:timetrap(30000),
+ ct:pal("Using address ~p~n", [Addr]),
+ Bufsize = 16 * 1024,
+ Datasize = 128 * 1024 * 1024, % At least 1 s on GBit interface
+ Blob = binary:copy(<<$x>>, Datasize),
+ LOpts =
+ [{backlog,4},{reuseaddr,true},{ip,Addr},
+ binary,{active,false},
+ {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}],
+ COpts =
+ [binary,{active,false},{ip,Addr},
+ {linger,{true,1}}, % 1 s
+ {send_timeout,500},
+ {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}],
+ Dir = filename:dirname(code:which(?MODULE)),
+ {ok,ListenerNode} =
+ test_server:start_node(
+ ?UNIQ_NODE_NAME, slave, [{args,"-pa " ++ Dir}]),
+ Tester = self(),
+ Listener =
+ spawn(
+ ListenerNode,
+ fun () ->
+ {ok,L} = gen_tcp:listen(0, LOpts),
+ {ok,LPort} = inet:port(L),
+ Tester ! {self(),port,LPort},
+ {ok,A} = gen_tcp:accept(L),
+ ok = gen_tcp:close(L),
+ receive
+ {Tester,stop} ->
+ ok = gen_tcp:close(A)
+ end
+ end),
+ ListenerMref = monitor(process, Listener),
+ LPort = receive {Listener,port,P} -> P end,
+ {ok,C} = gen_tcp:connect(Addr, LPort, COpts, infinity),
+ {ok,ReadCOpts} = inet:getopts(C, [recbuf,sndbuf,buffer]),
+ ct:pal("ReadCOpts ~p~n", [ReadCOpts]),
+ %%
+ %% Fill the buffers
+ ct:pal("Sending ~p bytes~n", [Datasize]),
+ ok = gen_tcp:send(C, Blob),
+ ct:pal("Sent ~p bytes~n", [Datasize]),
+ %% Spawn the Closer,
+ %% try to ensure that the close call is in progress
+ %% before the owner proceeds with sending
+ Owner = self(),
+ {_Closer,CloserMref} =
+ spawn_opt(
+ fun () ->
+ Owner ! {tref, erlang:start_timer(50, Owner, closing)},
+ ct:pal("Calling gen_tcp:close(C)~n"),
+ try gen_tcp:close(C) of
+ Result ->
+ ct:pal("gen_tcp:close(C) -> ~p~n", [Result]),
+ ok = Result
+ catch
+ Class:Reason:Stacktrace ->
+ ct:pal(
+ "gen_tcp:close(C) >< ~p:~p~n ~p~n",
+ [Class,Reason,Stacktrace]),
+ erlang:raise(Class, Reason, Stacktrace)
+ end
+ end, [link,monitor]),
+ receive
+ {tref,Tref} ->
+ receive {timeout,Tref,_} -> ok end,
+ ct:pal("Sending ~p bytes again~n", [Datasize]),
+            %% By now the close should be in progress...
+            %% All buffers are full, the remote end is not reading,
+            %% and the send timeout is 0.5 s, so this will time out:
+ {error,timeout} = gen_tcp:send(C, Blob),
+ ct:pal("Sending ~p bytes again timed out~n", [Datasize]),
+ ok = inet:setopts(C, [{send_timeout,10000}]),
+ %% There is a hidden timeout here. Port close is sampled
+ %% every 5 s by prim_inet:send_recv_reply.
+            %% Linger is 1 s, so the Closer will finish this send:
+ ct:pal("Sending ~p bytes with 10 s timeout~n", [Datasize]),
+ {error,closed} = gen_tcp:send(C, Blob),
+ ct:pal("Sending ~p bytes with 10 s timeout was closed~n",
+ [Datasize]),
+ normal = wait(CloserMref),
+ ct:pal("The Closer has exited~n"),
+ Listener ! {Tester,stop},
+ receive {'DOWN',ListenerMref,_,_,_} -> ok end,
+ ct:pal("The Listener has exited~n"),
+ test_server:stop_node(ListenerNode),
+ ok
+ end.
+
+wait(Mref) ->
+ receive {'DOWN',Mref,_,_,Reason} -> Reason end.
+
+%% OTP-15536
+%% Test that send error works correctly for delay_send
+delay_send_error(Config) ->
+ {ok, LS} = gen_tcp:listen(0, [{reuseaddr, true}, {packet, 1}, {active, false}]),
+ {ok,{{0,0,0,0},PortNum}}=inet:sockname(LS),
+ P = spawn_link(
+ fun() ->
+ {ok, S} = gen_tcp:accept(LS),
+ receive die -> gen_tcp:close(S) end
+ end),
+ erlang:monitor(process, P),
+ {ok, S} = gen_tcp:connect("localhost", PortNum,
+ [{packet, 1}, {active, false}, {delay_send, true}]),
+
+ %% Do a couple of sends first to see that it works
+ ok = gen_tcp:send(S, "hello"),
+ ok = gen_tcp:send(S, "hello"),
+ ok = gen_tcp:send(S, "hello"),
+
+ %% Make the receiver close
+ P ! die,
+ receive _Down -> ok end,
+
+ ok = gen_tcp:send(S, "hello"),
+ timer:sleep(500), %% Sleep in order for delay_send to have time to trigger
+
+ %% This used to result in a double free
+ {error, closed} = gen_tcp:send(S, "hello").
diff --git a/lib/kernel/test/gen_udp_SUITE.erl b/lib/kernel/test/gen_udp_SUITE.erl
index aa616d43d6..af9985de45 100644
--- a/lib/kernel/test/gen_udp_SUITE.erl
+++ b/lib/kernel/test/gen_udp_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -34,8 +34,9 @@
-export([init_per_testcase/2, end_per_testcase/2]).
-export([send_to_closed/1, active_n/1,
- buffer_size/1, binary_passive_recv/1, bad_address/1,
+ buffer_size/1, binary_passive_recv/1, max_buffer_size/1, bad_address/1,
read_packets/1, open_fd/1, connect/1, implicit_inet6/1,
+ recvtos/1, recvtosttl/1, recvttl/1, recvtclass/1,
local_basic/1, local_unbound/1,
local_fdopen/1, local_fdopen_unbound/1, local_abstract/1]).
@@ -44,9 +45,10 @@ suite() ->
{timetrap,{minutes,1}}].
all() ->
- [send_to_closed, buffer_size, binary_passive_recv,
+ [send_to_closed, buffer_size, binary_passive_recv, max_buffer_size,
bad_address, read_packets, open_fd, connect,
implicit_inet6, active_n,
+ recvtos, recvtosttl, recvttl, recvtclass,
{group, local}].
groups() ->
@@ -237,6 +239,14 @@ buffer_size_server_recv(Socket, IP, Port, Cnt) ->
end.
+%%-------------------------------------------------------------
+%% OTP-15206: Keep buffer small for udp
+%%-------------------------------------------------------------
+max_buffer_size(Config) when is_list(Config) ->
+ {ok, Socket} = gen_udp:open(0, [binary]),
+ ok = inet:setopts(Socket,[{recbuf, 1 bsl 20}]),
+ {ok, [{buffer, 65536}]} = inet:getopts(Socket,[buffer]),
+ gen_udp:close(Socket).
%%-------------------------------------------------------------
%% OTP-3823 gen_udp:recv does not return address in binary mode
@@ -288,58 +298,56 @@ bad_address(Config) when is_list(Config) ->
%%
%% Starts a slave node that on command sends a bunch of messages
%% to our UDP port. The receiving process just receives and
-%% ignores the incoming messages, but counts them.
-%% A tracing process traces the receiving process for
-%% 'receive' and scheduling events. From the trace,
-%% message contents is verified; and, how many messages
-%% are received per in/out scheduling, which should be
-%% the same as the read_packets parameter.
-%%
-%% What happens on the SMP emulator remains to be seen...
-%%
+%% ignores the incoming messages.
+%% A tracing process traces the receiving port for
+%% 'send' and scheduling events. From the trace it is
+%% verified how many messages are received per in/out
+%% scheduling, which should never exceed the read_packets parameter.
%% OTP-6249 UDP option for number of packet reads.
read_packets(Config) when is_list(Config) ->
- case erlang:system_info(smp_support) of
- false ->
- read_packets_1();
- true ->
- %% We would need some new sort of tracing to test this
- %% option reliably in an SMP emulator.
- {skip,"SMP emulator"}
- end.
-
-read_packets_1() ->
N1 = 5,
- N2 = 7,
+ N2 = 1,
+ Msgs = 30000,
{ok,R} = gen_udp:open(0, [{read_packets,N1}]),
{ok,RP} = inet:port(R),
{ok,Node} = start_node(gen_udp_SUITE_read_packets),
Die = make_ref(),
- Loop = erlang:spawn_link(fun () -> infinite_loop(Die) end),
%%
- Msgs1 = [erlang:integer_to_list(M) || M <- lists:seq(1, N1*3)],
- [V1|_] = read_packets_test(R, RP, Msgs1, Node),
+ {V1, Trace1} = read_packets_test(R, RP, Msgs, Node),
{ok,[{read_packets,N1}]} = inet:getopts(R, [read_packets]),
%%
ok = inet:setopts(R, [{read_packets,N2}]),
- Msgs2 = [erlang:integer_to_list(M) || M <- lists:seq(1, N2*3)],
- [V2|_] = read_packets_test(R, RP, Msgs2, Node),
+ {V2, Trace2} = read_packets_test(R, RP, Msgs, Node),
{ok,[{read_packets,N2}]} = inet:getopts(R, [read_packets]),
%%
stop_node(Node),
- Mref = erlang:monitor(process, Loop),
- Loop ! Die,
- receive
- {'DOWN',Mref,_,_, normal} ->
- case {V1,V2} of
- {N1,N2} ->
- ok;
- _ when V1 =/= N1, V2 =/= N2 ->
- ok
- end
+ ct:log("N1=~p, V1=~p vs N2=~p, V2=~p",[N1,V1,N2,V2]),
+
+ dump_terms(Config, "trace1.terms", Trace2),
+ dump_terms(Config, "trace2.terms", Trace2),
+
+ %% Because of the inherent raciness of this feature it is
+ %% hard to test that it behaves correctly.
+ %% Right now (OTP 21) a port task takes 5% of the
+ %% allotted port task reductions to execute, so
+ %% the max number of executions a port is allowed to
+ %% do before being re-scheduled is N * 20
+
+ if
+ V1 > (N1 * 20) ->
+ ct:fail("Got ~p msgs, max was ~p", [V1, N1]);
+ V2 > (N2 * 20) ->
+ ct:fail("Got ~p msgs, max was ~p", [V2, N2]);
+ true ->
+ ok
end.
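%% [Editor's illustrative aside, not part of this patch] The N * 20 bound used
%% above follows from the OTP 21 assumption stated in the comment: one port
%% task costs about 5% of the port reduction budget, so a port can run at most
%% 100 / 5 = 20 tasks, each reading up to read_packets datagrams, before it is
%% rescheduled.
max_msgs_per_scheduling(ReadPackets) ->
    PortTasksPerScheduling = 100 div 5,          % 20, per the comment above
    ReadPackets * PortTasksPerScheduling.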
+dump_terms(Config, Name, Terms) ->
+ FName = filename:join(proplists:get_value(priv_dir, Config),Name),
+ file:write_file(FName, term_to_binary(Terms)),
+ ct:log("Logged terms to ~s",[FName]).
+
infinite_loop(Die) ->
receive
Die ->
@@ -350,7 +358,6 @@ infinite_loop(Die) ->
end.
read_packets_test(R, RP, Msgs, Node) ->
- Len = length(Msgs),
Receiver = self(),
Tracer =
spawn_link(
@@ -375,24 +382,24 @@ read_packets_test(R, RP, Msgs, Node) ->
[link,{priority,high}]),
receive
{Sender,{port,SP}} ->
- erlang:trace(self(), true,
- [running,'receive',{tracer,Tracer}]),
+ erlang:trace(R, true,
+ [running_ports,'send',{tracer,Tracer}]),
erlang:yield(),
Sender ! {Receiver,go},
- read_packets_recv(Len),
- erlang:trace(self(), false, [all]),
+ read_packets_recv(Msgs),
+ erlang:trace(R, false, [all]),
Tracer ! {Receiver,get_trace},
receive
{Tracer,{trace,Trace}} ->
- read_packets_verify(R, SP, Msgs, Trace)
+ {read_packets_verify(R, SP, Trace), Trace}
end
end.
-read_packets_send(S, RP, [Msg|Msgs]) ->
- ok = gen_udp:send(S, localhost, RP, Msg),
- read_packets_send(S, RP, Msgs);
-read_packets_send(_S, _RP, []) ->
- ok.
+read_packets_send(_S, _RP, 0) ->
+ ok;
+read_packets_send(S, RP, Msgs) ->
+ ok = gen_udp:send(S, localhost, RP, "UDP FLOOOOOOD"),
+ read_packets_send(S, RP, Msgs - 1).
read_packets_recv(0) ->
ok;
@@ -404,23 +411,24 @@ read_packets_recv(N) ->
timeout
end.
-read_packets_verify(R, SP, Msg, Trace) ->
- lists:reverse(
- lists:sort(read_packets_verify(R, SP, Msg, Trace, 0))).
-
-read_packets_verify(R, SP, Msgs, [{trace,Self,OutIn,_}|Trace], M)
- when Self =:= self(), OutIn =:= out;
- Self =:= self(), OutIn =:= in ->
- push(M, read_packets_verify(R, SP, Msgs, Trace, 0));
-read_packets_verify(R, SP, [Msg|Msgs],
- [{trace,Self,'receive',{udp,R,{127,0,0,1},SP,Msg}}
- |Trace], M)
+read_packets_verify(R, SP, Trace) ->
+ [Max | _] = Pkts = lists:reverse(lists:sort(read_packets_verify(R, SP, Trace, 0))),
+ ct:pal("~p",[lists:sublist(Pkts,10)]),
+ Max.
+
+read_packets_verify(R, SP, [{trace,R,OutIn,_}|Trace], M)
+ when OutIn =:= out; OutIn =:= in ->
+ push(M, read_packets_verify(R, SP, Trace, 0));
+read_packets_verify(R, SP, [{trace, R,'receive',timeout}|Trace], M) ->
+ push(M, read_packets_verify(R, SP, Trace, 0));
+read_packets_verify(R, SP,
+ [{trace,R,'send',{udp,R,{127,0,0,1},SP,_Msg}, Self} | Trace], M)
when Self =:= self() ->
- read_packets_verify(R, SP, Msgs, Trace, M+1);
-read_packets_verify(_R, _SP, [], [], M) ->
+ read_packets_verify(R, SP, Trace, M+1);
+read_packets_verify(_R, _SP, [], M) ->
push(M, []);
-read_packets_verify(_R, _SP, Msgs, Trace, M) ->
- ct:fail({read_packets_verify,mismatch,Msgs,Trace,M}).
+read_packets_verify(_R, _SP, Trace, M) ->
+ ct:fail({read_packets_verify,mismatch,Trace,M}).
push(0, Vs) ->
Vs;
@@ -566,6 +574,168 @@ active_n(Config) when is_list(Config) ->
ok.
+
+recvtos(_Config) ->
+ test_recv_opts(
+ inet, [{recvtos,tos,96}],
+ fun recvtos_ok/2).
+
+recvtosttl(_Config) ->
+ test_recv_opts(
+ inet, [{recvtos,tos,96},{recvttl,ttl,33}],
+ fun (OSType, OSVer) ->
+ recvtos_ok(OSType, OSVer) andalso recvttl_ok(OSType, OSVer)
+ end).
+
+recvttl(_Config) ->
+ test_recv_opts(
+ inet, [{recvttl,ttl,33}],
+ fun recvttl_ok/2).
+
+recvtclass(_Config) ->
+ {ok,IFs} = inet:getifaddrs(),
+ case
+ [Name ||
+ {Name,Opts} <- IFs,
+ lists:member({addr,{0,0,0,0,0,0,0,1}}, Opts)]
+ of
+ [_] ->
+ test_recv_opts(
+ inet6, [{recvtclass,tclass,224}],
+ fun recvtclass_ok/2);
+ [] ->
+ {skip,ipv6_not_supported,IFs}
+ end.
+
+%% These version numbers are just above the highest noted in daily tests
+%% where the test fails for a plausible reason, that is, the lowest
+%% versions where we can expect that the test might succeed, so we
+%% skip on platforms below these.
+%%
+%% On newer versions it might be fixed, but we will see about that
+%% when machines with newer versions get installed...
+%% If the test still fails for a plausible reason, these
+%% version numbers should simply be increased.
+%% Or maybe we should change to only test on known good platforms?
+
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0});
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0});
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%%
+recvtos_ok({unix,_}, _) -> true;
+recvtos_ok(_, _) -> false.
+
+recvttl_ok({unix,_}, _) -> true;
+recvttl_ok(_, _) -> false.
+
+%% Using the option returns einval, so it is not implemented.
+recvtclass_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {9,9,0});
+recvtclass_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {2,6,11});
+%%
+recvtclass_ok({unix,_}, _) -> true;
+recvtclass_ok(_, _) -> false.
+
+semver_lt({X1,Y1,Z1}, {X2,Y2,Z2}) ->
+ if
+ X1 > X2 -> false;
+ X1 < X2 -> true;
+ Y1 > Y2 -> false;
+ Y1 < Y2 -> true;
+ Z1 > Z2 -> false;
+ Z1 < Z2 -> true;
+ true -> false
+ end;
+semver_lt(_, {_,_,_}) -> false.
+
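%% [Editor's illustrative aside, not part of this patch] semver_lt/2 above is a
%% plain lexicographic comparison of {Major,Minor,Patch} tuples, used to skip
%% the recv* tests on OS versions below the listed thresholds. A few sample
%% evaluations (assumed, not from the suite):
semver_lt_examples() ->
    true  = semver_lt({17,5,0}, {17,6,0}),  % older minor version
    false = semver_lt({17,6,0}, {17,6,0}),  % equal is not "less than"
    false = semver_lt({18,0,0}, {17,6,0}),  % newer major version
    ok.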
+test_recv_opts(Family, Spec, OSFilter) ->
+ OSType = os:type(),
+ OSVer = os:version(),
+ case OSFilter(OSType, OSVer) of
+ true ->
+ io:format("Os: ~p, ~p~n", [OSType,OSVer]),
+ test_recv_opts(Family, Spec, OSType, OSVer);
+ false ->
+ {skip,{not_supported_for_os_version,{OSType,OSVer}}}
+ end.
+%%
+test_recv_opts(Family, Spec, _OSType, _OSVer) ->
+ Timeout = 5000,
+ RecvOpts = [RecvOpt || {RecvOpt,_,_} <- Spec],
+ TrueRecvOpts = [{RecvOpt,true} || {RecvOpt,_,_} <- Spec],
+ FalseRecvOpts = [{RecvOpt,false} || {RecvOpt,_,_} <- Spec],
+ Opts = [Opt || {_,Opt,_} <- Spec],
+ OptsVals = [{Opt,Val} || {_,Opt,Val} <- Spec],
+ TrueRecvOpts_OptsVals = TrueRecvOpts ++ OptsVals,
+ Addr =
+ case Family of
+ inet ->
+ {127,0,0,1};
+ inet6 ->
+ {0,0,0,0,0,0,0,1}
+ end,
+ %%
+ {ok,S1} =
+ gen_udp:open(0, [Family,binary,{active,false}|TrueRecvOpts]),
+ {ok,P1} = inet:port(S1),
+ {ok,TrueRecvOpts} = inet:getopts(S1, RecvOpts),
+ ok = inet:setopts(S1, FalseRecvOpts),
+ {ok,FalseRecvOpts} = inet:getopts(S1, RecvOpts),
+ ok = inet:setopts(S1, TrueRecvOpts_OptsVals),
+ {ok,TrueRecvOpts_OptsVals} = inet:getopts(S1, RecvOpts ++ Opts),
+ %%
+ {ok,S2} =
+ gen_udp:open(0, [Family,binary,{active,true}|FalseRecvOpts]),
+ {ok,P2} = inet:port(S2),
+ {ok,FalseRecvOpts_OptsVals2} = inet:getopts(S2, RecvOpts ++ Opts),
+ OptsVals2 = FalseRecvOpts_OptsVals2 -- FalseRecvOpts,
+ %%
+ ok = gen_udp:send(S2, Addr, P1, <<"abcde">>),
+ ok = gen_udp:send(S1, Addr, P2, <<"fghij">>),
+ {ok,{_,P2,OptsVals3,<<"abcde">>}} = gen_udp:recv(S1, 0, Timeout),
+ verify_sets_eq(OptsVals3, OptsVals2),
+ receive
+ {udp,S2,_,P1,<<"fghij">>} ->
+ ok;
+ Other1 ->
+ exit({unexpected,Other1})
+ after Timeout ->
+ exit(timeout)
+ end,
+ %%
+ ok = inet:setopts(S1, FalseRecvOpts),
+ {ok,FalseRecvOpts} = inet:getopts(S1, RecvOpts),
+ ok = inet:setopts(S2, TrueRecvOpts),
+ {ok,TrueRecvOpts} = inet:getopts(S2, RecvOpts),
+ %%
+ ok = gen_udp:send(S2, Addr, P1, <<"klmno">>),
+ ok = gen_udp:send(S1, Addr, P2, <<"pqrst">>),
+ {ok,{_,P2,<<"klmno">>}} = gen_udp:recv(S1, 0, Timeout),
+ receive
+ {udp,S2,_,P1,OptsVals4,<<"pqrst">>} ->
+ verify_sets_eq(OptsVals4, OptsVals);
+ Other2 ->
+ exit({unexpected,Other2})
+ after Timeout ->
+ exit(timeout)
+ end,
+ ok = gen_udp:close(S1),
+ ok = gen_udp:close(S2),
+%% exit({{OSType,OSVer},success}), % In search for the truth
+ ok.
+
+verify_sets_eq(L1, L2) ->
+ L = lists:sort(L1),
+ case lists:sort(L2) of
+ L ->
+ ok;
+ _ ->
+ exit({sets_neq,L1,L2})
+ end.
+
+
local_basic(_Config) ->
SFile = local_filename(server),
SAddr = {local,bin_filename(SFile)},
@@ -757,9 +927,9 @@ ok({ok,V}) -> V;
ok(NotOk) ->
try throw(not_ok)
catch
- Thrown ->
+ throw:Thrown:Stacktrace ->
erlang:raise(
- error, {Thrown, NotOk}, tl(erlang:get_stacktrace()))
+ error, {Thrown, NotOk}, tl(Stacktrace))
end.
diff --git a/lib/kernel/test/global_SUITE.erl b/lib/kernel/test/global_SUITE.erl
index 0e7b7adc47..8eab36e308 100644
--- a/lib/kernel/test/global_SUITE.erl
+++ b/lib/kernel/test/global_SUITE.erl
@@ -1383,7 +1383,7 @@ ring(Config) when is_list(Config) ->
rpc_cast(Cp8, ?MODULE, single_node, [Time, Cp7, Config]),
%% sleep to make the partitioned net ready
- ct:sleep(Time - msec()),
+ sleep(Time - msec()),
pong = net_adm:ping(Cp0),
pong = net_adm:ping(Cp1),
@@ -1466,7 +1466,7 @@ simple_ring(Config) when is_list(Config) ->
rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]),
%% sleep to make the partitioned net ready
- ct:sleep(Time - msec()),
+ sleep(Time - msec()),
pong = net_adm:ping(Cp0),
pong = net_adm:ping(Cp1),
@@ -1542,7 +1542,7 @@ line(Config) when is_list(Config) ->
rpc_cast(Cp8, ?MODULE, single_node, [Time, Cp7, Config]),
%% Sleep to make the partitioned net ready
- ct:sleep(Time - msec()),
+ sleep(Time - msec()),
pong = net_adm:ping(Cp0),
pong = net_adm:ping(Cp1),
@@ -1626,7 +1626,7 @@ simple_line(Config) when is_list(Config) ->
rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]),
%% sleep to make the partitioned net ready
- ct:sleep(Time - msec()),
+ sleep(Time - msec()),
pong = net_adm:ping(Cp0),
pong = net_adm:ping(Cp1),
@@ -3555,7 +3555,7 @@ single_node(Time, Node, Config) ->
lists:foreach(fun(N) -> _ = erlang:disconnect_node(N) end, nodes()),
?UNTIL(get_known(node()) =:= [node()]),
spawn(?MODULE, init_2, []),
- ct:sleep(Time - msec()),
+ sleep(Time - msec()),
net_adm:ping(Node).
init_2() ->
@@ -4009,13 +4009,6 @@ collect_nodes(N, Max) ->
[Node | collect_nodes(N+1, Max)]
end.
-only_element(_E, []) ->
- true;
-only_element(E, [E|R]) ->
- only_element(E, R);
-only_element(_E, _) ->
- false.
-
exit_p(Pid) ->
Ref = erlang:monitor(process, Pid),
Pid ! die,
@@ -4038,6 +4031,11 @@ wait_for_exit_fast(Pid) ->
ok
end.
+sleep(Time) when Time > 0 ->
+ ct:sleep(Time);
+sleep(_Time) ->
+ ok.
+
check_everywhere(Nodes, Name, Config) ->
?UNTIL(begin
case rpc:multicall(Nodes, global, whereis_name, [Name]) of
@@ -4162,10 +4160,10 @@ rpc_cast(Node, Module, Function, Args, File) ->
%% The emulator now ensures that the node has been removed from
%% nodes().
-rpc_disconnect_node(Node, DisconnectedNode, _Config) ->
- True = rpc:call(Node, erlang, disconnect_node, [DisconnectedNode]),
- False = lists:member(DisconnectedNode, rpc:call(Node, erlang, nodes, [])),
- {true, false} = {True, False}.
+rpc_disconnect_node(Node, DisconnectedNode, Config) ->
+ true = rpc:call(Node, erlang, disconnect_node, [DisconnectedNode]),
+ ?UNTIL
+ (not lists:member(DisconnectedNode, rpc:call(Node, erlang, nodes, []))).
%%%
%%% Utility
diff --git a/lib/kernel/test/heart_SUITE.erl b/lib/kernel/test/heart_SUITE.erl
index 45032faf6d..f5ca6d0e1d 100644
--- a/lib/kernel/test/heart_SUITE.erl
+++ b/lib/kernel/test/heart_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -168,7 +168,7 @@ reboot(Config) when is_list(Config) ->
{ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
rpc:call(Node, init, reboot, []),
receive
@@ -203,7 +203,7 @@ node_start_immediately_after_crash_test(Config) when is_list(Config) ->
[{"ERL_CRASH_DUMP_SECONDS", "0"}]),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
Mod = exhaust_atoms,
@@ -254,7 +254,7 @@ node_start_soon_after_crash_test(Config) when is_list(Config) ->
[{"ERL_CRASH_DUMP_SECONDS", "10"}]),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
Mod = exhaust_atoms,
@@ -309,7 +309,7 @@ set_cmd(Config) when is_list(Config) ->
clear_cmd(Config) when is_list(Config) ->
{ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
rpc:call(Node, init, reboot, []),
receive
@@ -346,9 +346,16 @@ clear_cmd(Config) when is_list(Config) ->
get_cmd(Config) when is_list(Config) ->
{ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
- Cmd = "test",
- ok = rpc:call(Node, heart, set_cmd, [Cmd]),
- {ok, Cmd} = rpc:call(Node, heart, get_cmd, []),
+
+ ShortCmd = "test",
+ ok = rpc:call(Node, heart, set_cmd, [ShortCmd]),
+ {ok, ShortCmd} = rpc:call(Node, heart, get_cmd, []),
+
+ %% This would hang prior to OTP-15024 being fixed.
+ LongCmd = [$a || _ <- lists:seq(1, 160)],
+ ok = rpc:call(Node, heart, set_cmd, [LongCmd]),
+ {ok, LongCmd} = rpc:call(Node, heart, get_cmd, []),
+
stop_node(Node),
ok.
diff --git a/lib/kernel/test/inet_SUITE.erl b/lib/kernel/test/inet_SUITE.erl
index ba0d075ef2..5a2d809aa4 100644
--- a/lib/kernel/test/inet_SUITE.erl
+++ b/lib/kernel/test/inet_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@
-include_lib("common_test/include/ct.hrl").
-include_lib("kernel/include/inet.hrl").
+-include_lib("kernel/src/inet_res.hrl").
-include_lib("kernel/src/inet_dns.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
@@ -34,13 +35,14 @@
ipv4_to_ipv6/0, ipv4_to_ipv6/1,
host_and_addr/0, host_and_addr/1,
t_gethostnative/1,
- gethostnative_parallell/1, cname_loop/1,
+ gethostnative_parallell/1, cname_loop/1, missing_hosts_reload/1,
gethostnative_soft_restart/0, gethostnative_soft_restart/1,
gethostnative_debug_level/0, gethostnative_debug_level/1,
lookup_bad_search_option/1,
getif/1,
getif_ifr_name_overflow/1,getservbyname_overflow/1, getifaddrs/1,
- parse_strict_address/1, simple_netns/1, simple_netns_open/1,
+ parse_strict_address/1, ipv4_mapped_ipv6_address/1,
+ simple_netns/1, simple_netns_open/1,
simple_bind_to_device/1, simple_bind_to_device_open/1]).
-export([get_hosts/1, get_ipv6_hosts/1, parse_hosts/1, parse_address/1,
@@ -55,7 +57,7 @@ all() ->
[t_gethostbyaddr, t_gethostbyname, t_getaddr,
t_gethostbyaddr_v6, t_gethostbyname_v6, t_getaddr_v6,
ipv4_to_ipv6, host_and_addr, {group, parse},
- t_gethostnative, gethostnative_parallell, cname_loop,
+ t_gethostnative, gethostnative_parallell, cname_loop, missing_hosts_reload,
gethostnative_debug_level, gethostnative_soft_restart,
lookup_bad_search_option,
getif, getif_ifr_name_overflow, getservbyname_overflow,
@@ -667,6 +669,26 @@ parse_strict_address(Config) when is_list(Config) ->
{ok, {3089,3106,23603,50240,0,0,119,136}} =
inet:parse_strict_address("c11:0c22:5c33:c440::077:0088").
+ipv4_mapped_ipv6_address(Config) when is_list(Config) ->
+ {D1,D2,D3,D4} = IPv4Address =
+ {rand:uniform(256) - 1,
+ rand:uniform(256) - 1,
+ rand:uniform(256) - 1,
+ rand:uniform(256) - 1},
+ E7 = (D1 bsl 8) bor D2,
+ E8 = (D3 bsl 8) bor D4,
+ io:format("IPv4Address: ~p.~n", [IPv4Address]),
+ {0,0,0,0,0,65535,E7,E8} = inet:ipv4_mapped_ipv6_address(IPv4Address),
+ IPv6Address =
+ {rand:uniform(65536) - 1,
+ rand:uniform(65536) - 1,
+ rand:uniform(65536) - 1,
+ rand:uniform(65536) - 1,
+ rand:uniform(65536) - 1,
+ rand:uniform(65536) - 1, E7, E8},
+ IPv4Address = inet:ipv4_mapped_ipv6_address(IPv6Address),
+ ok.
+
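%% [Editor's illustrative aside, not part of this patch] The test above checks
%% the standard IPv4-mapped IPv6 form ::ffff:A.B.C.D, where each of the last
%% two 16-bit groups packs two IPv4 bytes. A direct sketch of that mapping:
ipv4_to_mapped_sketch({A, B, C, D}) ->
    {0, 0, 0, 0, 0, 16#ffff, (A bsl 8) bor B, (C bsl 8) bor D}.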
t_gethostnative(Config) when is_list(Config) ->
%% this will result in 26 bytes sent which causes problem in Windows
%% if the port-program has not assured stdin to be read in BINARY mode
@@ -819,6 +841,32 @@ cname_loop(Config) when is_list(Config) ->
ok.
+%% Test that the hosts file gets reloaded correctly in the case
+%% where it was missing during the initial startup.
+missing_hosts_reload(Config) when is_list(Config) ->
+ RootDir = proplists:get_value(priv_dir,Config),
+ HostsFile = filename:join(RootDir, atom_to_list(?MODULE) ++ ".hosts"),
+ InetRc = filename:join(RootDir, "inetrc"),
+ ok = file:write_file(InetRc, "{hosts_file, \"" ++ HostsFile ++ "\"}.\n"),
+ {error, enoent} = file:read_file_info(HostsFile),
+ %% start a node
+ Pa = filename:dirname(code:which(?MODULE)),
+ {ok, TestNode} = test_server:start_node(?MODULE, slave,
+ [{args, "-pa " ++ Pa ++ " -kernel inetrc '\"" ++ InetRc ++ "\"'"}]),
+ %% ensure it has our RC
+ Rc = rpc:call(TestNode, inet_db, get_rc, []),
+ {hosts_file, HostsFile} = lists:keyfind(hosts_file, 1, Rc),
+ %% ensure it does not resolve
+ {error, nxdomain} = rpc:call(TestNode, inet_hosts, gethostbyname, ["somehost"]),
+ %% write hosts file
+ ok = file:write_file(HostsFile, "1.2.3.4 somehost"),
+ %% wait for cached timestamp to expire
+ timer:sleep(?RES_FILE_UPDATE_TM * 1000 + 100),
+ %% ensure it DOES resolve
+ {ok,{hostent,"somehost",[],inet,4,[{1,2,3,4}]}} =
+ rpc:call(TestNode, inet_hosts, gethostbyname, ["somehost"]),
+ %% cleanup
+ true = test_server:stop_node(TestNode).
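%% [Editor's illustrative aside, not part of this patch] The test above drives
%% the reload through a user inetrc file that points hosts lookups at an
%% alternate hosts file. A sketch of writing such a file (the paths are
%% assumptions), one Erlang term per line as consulted by inet_db:
write_inetrc_sketch(InetRc, HostsFile) ->
    ok = file:write_file(InetRc,
                         io_lib:format("{hosts_file, ~p}.~n", [HostsFile])).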
%% These must be run in the whole suite since they need
%% the host list and require inet_gethost_native to be started.
@@ -1039,28 +1087,26 @@ getservbyname_overflow(Config) when is_list(Config) ->
getifaddrs(Config) when is_list (Config) ->
{ok,IfAddrs} = inet:getifaddrs(),
io:format("IfAddrs = ~p.~n", [IfAddrs]),
- case
- {os:type(),
- [If ||
- {If,Opts} <- IfAddrs,
- lists:keymember(hwaddr, 1, Opts)]} of
- {{unix,sunos},[]} -> ok;
- {OT,[]} ->
- ct:fail({should_have_hwaddr,OT});
- _ -> ok
+ case [If || {If,Opts} <- IfAddrs, lists:keymember(hwaddr, 1, Opts)] of
+ [] ->
+ case os:type() of
+ {unix,sunos} -> ok;
+ OT ->
+ ct:fail({should_have_hwaddr,OT})
+ end;
+ [_|_] -> ok
end,
- Addrs =
- [element(1, A) || A <- ifaddrs(IfAddrs)],
+ Addrs = ifaddrs(IfAddrs),
io:format("Addrs = ~p.~n", [Addrs]),
[check_addr(Addr) || Addr <- Addrs],
ok.
-check_addr({addr,Addr})
+check_addr(Addr)
when tuple_size(Addr) =:= 8,
element(1, Addr) band 16#FFC0 =:= 16#FE80 ->
io:format("Addr: ~p link local; SKIPPED!~n", [Addr]),
ok;
-check_addr({addr,Addr}) ->
+check_addr(Addr) ->
io:format("Addr: ~p.~n", [Addr]),
Ping = "ping",
Pong = "pong",
@@ -1076,78 +1122,86 @@ check_addr({addr,Addr}) ->
ok = gen_tcp:close(S2),
ok = gen_tcp:close(L).
--record(ifopts, {name,flags,addrs=[],hwaddr}).
-
-ifaddrs([]) -> [];
-ifaddrs([{If,Opts}|IOs]) ->
- #ifopts{flags=F} = Ifopts = check_ifopts(Opts, #ifopts{name=If}),
- case F of
- {flags,Flags} ->
- case lists:member(running, Flags) of
- true -> Ifopts#ifopts.addrs;
- false -> []
- end ++ ifaddrs(IOs);
- undefined ->
- ifaddrs(IOs)
+ifaddrs(IfOpts) ->
+ IfMap = collect_ifopts(IfOpts),
+ ChkFun =
+ fun Self({{_,Flags} = Key, Opts}, ok) ->
+ Broadcast = lists:member(broadcast, Flags),
+ P2P = lists:member(pointtopoint, Flags),
+ case Opts of
+ [{addr,_},{netmask,_},{broadaddr,_}|Os]
+ when Broadcast ->
+ Self({Key, Os}, ok);
+ [{addr,_},{netmask,_},{dstaddr,_}|Os]
+ when P2P ->
+ Self({Key, Os}, ok);
+ [{addr,_},{netmask,_}|Os] ->
+ Self({Key, Os}, ok);
+ [{hwaddr,_}|Os] ->
+ Self({Key, Os}, ok);
+ [] ->
+ ok
+ end
+ end,
+ fold_ifopts(ChkFun, ok, IfMap),
+ AddrsFun =
+ fun ({{_,Flags}, Opts}, Acc) ->
+ case
+ lists:member(running, Flags)
+ andalso (not lists:member(pointtopoint, Flags))
+ of
+ true ->
+ lists:reverse(
+ [Addr || {addr,Addr} <- Opts],
+ Acc);
+ false ->
+ Acc
+ end
+ end,
+ fold_ifopts(AddrsFun, [], IfMap).
+
+collect_ifopts(IfOpts) ->
+ collect_ifopts(IfOpts, #{}).
+%%
+collect_ifopts(IfOpts, IfMap) ->
+ case IfOpts of
+ [{If,[{flags,Flags}|Opts]}|IfOs] ->
+ Key = {If,Flags},
+ case maps:is_key(Key, IfMap) of
+ true ->
+ ct:fail({unexpected_ifopts,IfOpts,IfMap});
+ false ->
+ collect_ifopts(IfOs, IfMap, Opts, Key, [])
+ end;
+ [] ->
+ IfMap;
+ _ ->
+ ct:fail({unexpected_ifopts,IfOpts,IfMap})
+ end.
+%%
+collect_ifopts(IfOpts, IfMap, Opts, Key, R) ->
+ case Opts of
+ [{flags,_}|_] ->
+ {If,_} = Key,
+ collect_ifopts(
+ [{If,Opts}|IfOpts], maps:put(Key, lists:reverse(R), IfMap));
+ [OptVal|Os] ->
+ collect_ifopts(IfOpts, IfMap, Os, Key, [OptVal|R]);
+ [] ->
+ collect_ifopts(IfOpts, maps:put(Key, lists:reverse(R), IfMap))
end.
-check_ifopts([], #ifopts{flags=F,addrs=Raddrs}=Ifopts) ->
- Addrs = lists:reverse(Raddrs),
- R = Ifopts#ifopts{addrs=Addrs},
- io:format("~p.~n", [R]),
- %% See how we did...
- {flags,Flags} = F,
- case lists:member(broadcast, Flags) of
- true ->
- [case A of
- {{addr,_},{netmask,_},{broadaddr,_}} ->
- A;
- {{addr,T},{netmask,_}} when tuple_size(T) =:= 8 ->
- A
- end || A <- Addrs];
- false ->
- case lists:member(pointtopoint, Flags) of
- true ->
- [case A of
- {{addr,_},{netmask,_},{dstaddr,_}} ->
- A
- end || A <- Addrs];
- false ->
- [case A of
- {{addr,_},{netmask,_}} ->
- A
- end || A <- Addrs]
- end
- end,
- R;
-check_ifopts([{flags,_}=F|Opts], #ifopts{flags=undefined}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{flags=F});
-check_ifopts([{flags,_}=F|Opts], #ifopts{flags=Flags}=Ifopts) ->
- case F of
- Flags ->
- check_ifopts(Opts, Ifopts);
- _ ->
- ct:fail({multiple_flags,F,Ifopts})
- end;
-check_ifopts(
- [{addr,_}=A,{netmask,_}=N,{dstaddr,_}=D|Opts],
- #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N,D}|Addrs]});
-check_ifopts(
- [{addr,_}=A,{netmask,_}=N,{broadaddr,_}=B|Opts],
- #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N,B}|Addrs]});
-check_ifopts(
- [{addr,_}=A,{netmask,_}=N|Opts],
- #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N}|Addrs]});
-check_ifopts([{addr,_}=A|Opts], #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A}|Addrs]});
-check_ifopts([{hwaddr,Hwaddr}=H|Opts], #ifopts{hwaddr=undefined}=Ifopts)
- when is_list(Hwaddr) ->
- check_ifopts(Opts, Ifopts#ifopts{hwaddr=H});
-check_ifopts([{hwaddr,_}=H|_], #ifopts{}=Ifopts) ->
- ct:fail({multiple_hwaddrs,H,Ifopts}).
+fold_ifopts(Fun, Acc, IfMap) ->
+ fold_ifopts(Fun, Acc, IfMap, maps:keys(IfMap)).
+%%
+fold_ifopts(Fun, Acc, IfMap, Keys) ->
+ case Keys of
+ [Key|Ks] ->
+ Opts = maps:get(Key, IfMap),
+ fold_ifopts(Fun, Fun({Key,Opts}, Acc), IfMap, Ks);
+ [] ->
+ Acc
+ end.
%% Works just like lists:member/2, except that any {127,_,_,_} tuple
%% matches any other {127,_,_,_}. We do this to handle Linux systems
diff --git a/lib/kernel/test/inet_res_SUITE.erl b/lib/kernel/test/inet_res_SUITE.erl
index 6691ad9c06..df6e48abae 100644
--- a/lib/kernel/test/inet_res_SUITE.erl
+++ b/lib/kernel/test/inet_res_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2009-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2009-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -217,10 +217,10 @@ proxy_start(TC, {NS,P}) ->
spawn_link(
fun () ->
try proxy_start(TC, NS, P, Parent, Tag)
- catch C:X ->
+ catch C:X:Stacktrace ->
io:format(
"~w: ~w:~p ~p~n",
- [self(),C,X,erlang:get_stacktrace()])
+ [self(),C,X,Stacktrace])
end
end),
receive {started,Tag,Port} ->
diff --git a/lib/kernel/test/inet_sockopt_SUITE.erl b/lib/kernel/test/inet_sockopt_SUITE.erl
index ada9c2689c..27ff74e309 100644
--- a/lib/kernel/test/inet_sockopt_SUITE.erl
+++ b/lib/kernel/test/inet_sockopt_SUITE.erl
@@ -110,9 +110,14 @@ simple(Config) when is_list(Config) ->
{S1,S2} = create_socketpair(Opt, Opt),
{ok,Opt} = inet:getopts(S1,OptTags),
{ok,Opt} = inet:getopts(S2,OptTags),
- COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- Opt],
+ NoPushOpt = case os:type() of
+ {unix, Osname} when Osname =:= linux; Osname =:= freebsd -> {nopush, true};
+ {_,_} -> {nopush, false}
+ end,
+ COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- [NoPushOpt|Opt]],
+ COptTags = [X || {X,_} <- COpt],
inet:setopts(S1,COpt),
- {ok,COpt} = inet:getopts(S1,OptTags),
+ {ok,COpt} = inet:getopts(S1,COptTags),
{ok,Opt} = inet:getopts(S2,OptTags),
gen_tcp:close(S1),
gen_tcp:close(S2),
diff --git a/lib/kernel/test/init_SUITE.erl b/lib/kernel/test/init_SUITE.erl
index 2b59eb2bfe..4f90260f98 100644
--- a/lib/kernel/test/init_SUITE.erl
+++ b/lib/kernel/test/init_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -295,11 +295,11 @@ is_real_system(KernelVsn, StdlibVsn) ->
%% before restart.
%% ------------------------------------------------
many_restarts() ->
- [{timetrap,{minutes,8}}].
+ [{timetrap,{minutes,16}}].
many_restarts(Config) when is_list(Config) ->
{ok, Node} = loose_node:start(init_test, "", ?DEFAULT_TIMEOUT_SEC),
- loop_restart(50,Node,rpc:call(Node,erlang,whereis,[error_logger])),
+ loop_restart(50,Node,rpc:call(Node,erlang,whereis,[logger])),
loose_node:stop(Node),
ok.
@@ -315,14 +315,14 @@ loop_restart(N,Node,EHPid) ->
loose_node:stop(Node),
ct:fail(not_stopping)
end,
- ok = wait_for(30, Node, EHPid),
- loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[error_logger])).
+ ok = wait_for(60, Node, EHPid),
+ loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[logger])).
wait_for(0,Node,_) ->
loose_node:stop(Node),
error;
wait_for(N,Node,EHPid) ->
- case rpc:call(Node, erlang, whereis, [error_logger]) of
+ case rpc:call(Node, erlang, whereis, [logger]) of
Pid when is_pid(Pid), Pid =/= EHPid ->
%% erlang:display(ok),
ok;
@@ -365,7 +365,10 @@ restart(Config) when is_list(Config) ->
%% Ok, the node is up, now the real test test begins.
erlang:monitor_node(Node, true),
SysProcs0 = rpc:call(Node, ?MODULE, find_system_processes, []),
- [InitPid, PurgerPid, LitCollectorPid, DirtyCodePid] = SysProcs0,
+ io:format("SysProcs0=~p~n", [SysProcs0]),
+ [InitPid, PurgerPid, LitCollectorPid,
+ DirtySigNPid, DirtySigHPid, DirtySigMPid,
+ PrimFilePid] = SysProcs0,
InitPid = rpc:call(Node, erlang, whereis, [init]),
PurgerPid = rpc:call(Node, erlang, whereis, [erts_code_purger]),
Procs = rpc:call(Node, erlang, processes, []),
@@ -381,7 +384,10 @@ restart(Config) when is_list(Config) ->
ok = wait_restart(30, Node),
SysProcs1 = rpc:call(Node, ?MODULE, find_system_processes, []),
- [InitPid1, PurgerPid1, LitCollectorPid1, DirtyCodePid1] = SysProcs1,
+ io:format("SysProcs1=~p~n", [SysProcs1]),
+ [InitPid1, PurgerPid1, LitCollectorPid1,
+ DirtySigNPid1, DirtySigHPid1, DirtySigMPid1,
+ PrimFilePid1] = SysProcs1,
%% Still the same init process!
InitPid1 = rpc:call(Node, erlang, whereis, [init]),
@@ -394,20 +400,22 @@ restart(Config) when is_list(Config) ->
PurgerP = pid_to_list(PurgerPid1),
%% and same literal area collector process!
- case LitCollectorPid of
- undefined -> undefined = LitCollectorPid1;
- _ ->
- LitCollectorP = pid_to_list(LitCollectorPid),
- LitCollectorP = pid_to_list(LitCollectorPid1)
- end,
-
- %% and same dirty process code checker process!
- case DirtyCodePid of
- undefined -> undefined = DirtyCodePid1;
- _ ->
- DirtyCodeP = pid_to_list(DirtyCodePid),
- DirtyCodeP = pid_to_list(DirtyCodePid1)
- end,
+ LitCollectorP = pid_to_list(LitCollectorPid),
+ LitCollectorP = pid_to_list(LitCollectorPid1),
+
+ %% and same normal dirty signal handler process!
+ DirtySigNP = pid_to_list(DirtySigNPid),
+ DirtySigNP = pid_to_list(DirtySigNPid1),
+ %% and same high dirty signal handler process!
+ DirtySigHP = pid_to_list(DirtySigHPid),
+ DirtySigHP = pid_to_list(DirtySigHPid1),
+ %% and same max dirty signal handler process!
+ DirtySigMP = pid_to_list(DirtySigMPid),
+ DirtySigMP = pid_to_list(DirtySigMPid1),
+
+ %% and same prim_file helper process!
+ PrimFileP = pid_to_list(PrimFilePid),
+ PrimFileP = pid_to_list(PrimFilePid1),
NewProcs0 = rpc:call(Node, erlang, processes, []),
NewProcs = NewProcs0 -- SysProcs1,
@@ -433,7 +441,10 @@ restart(Config) when is_list(Config) ->
-record(sys_procs, {init,
code_purger,
literal_collector,
- dirty_proc_checker}).
+ dirty_sig_handler_normal,
+ dirty_sig_handler_high,
+ dirty_sig_handler_max,
+ prim_file}).
find_system_processes() ->
find_system_procs(processes(), #sys_procs{}).
@@ -442,21 +453,36 @@ find_system_procs([], SysProcs) ->
[SysProcs#sys_procs.init,
SysProcs#sys_procs.code_purger,
SysProcs#sys_procs.literal_collector,
- SysProcs#sys_procs.dirty_proc_checker];
+ SysProcs#sys_procs.dirty_sig_handler_normal,
+ SysProcs#sys_procs.dirty_sig_handler_high,
+ SysProcs#sys_procs.dirty_sig_handler_max,
+ SysProcs#sys_procs.prim_file];
find_system_procs([P|Ps], SysProcs) ->
- case process_info(P, initial_call) of
- {initial_call,{otp_ring0,start,2}} ->
+ case process_info(P, [initial_call, priority]) of
+ [{initial_call,{otp_ring0,start,2}},_] ->
undefined = SysProcs#sys_procs.init,
find_system_procs(Ps, SysProcs#sys_procs{init = P});
- {initial_call,{erts_code_purger,start,0}} ->
+ [{initial_call,{erts_code_purger,start,0}},_] ->
undefined = SysProcs#sys_procs.code_purger,
find_system_procs(Ps, SysProcs#sys_procs{code_purger = P});
- {initial_call,{erts_literal_area_collector,start,0}} ->
+ [{initial_call,{erts_literal_area_collector,start,0}},_] ->
undefined = SysProcs#sys_procs.literal_collector,
find_system_procs(Ps, SysProcs#sys_procs{literal_collector = P});
- {initial_call,{erts_dirty_process_code_checker,start,0}} ->
- undefined = SysProcs#sys_procs.dirty_proc_checker,
- find_system_procs(Ps, SysProcs#sys_procs{dirty_proc_checker = P});
+ [{initial_call,{erts_dirty_process_signal_handler,start,0}},
+ {priority,normal}] ->
+ undefined = SysProcs#sys_procs.dirty_sig_handler_normal,
+ find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_normal = P});
+ [{initial_call,{erts_dirty_process_signal_handler,start,0}},
+ {priority,high}] ->
+ undefined = SysProcs#sys_procs.dirty_sig_handler_high,
+ find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_high = P});
+ [{initial_call,{erts_dirty_process_signal_handler,start,0}},
+ {priority,max}] ->
+ undefined = SysProcs#sys_procs.dirty_sig_handler_max,
+ find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_max = P});
+ [{initial_call,{prim_file,start,0}},_] ->
+ undefined = SysProcs#sys_procs.prim_file,
+ find_system_procs(Ps, SysProcs#sys_procs{prim_file = P});
_ ->
find_system_procs(Ps, SysProcs)
end.
diff --git a/lib/kernel/test/kernel.spec b/lib/kernel/test/kernel.spec
index 62afc9f97b..eaa17f3a59 100644
--- a/lib/kernel/test/kernel.spec
+++ b/lib/kernel/test/kernel.spec
@@ -2,3 +2,4 @@
{config, "../test_server/ts.unix.config"}.
{suites,"../kernel_test", all}.
+{skip_suites,"../kernel_test",[logger_stress_SUITE],"Benchmarks only"}.
diff --git a/lib/kernel/test/kernel_SUITE.erl b/lib/kernel/test/kernel_SUITE.erl
index da56359294..3e5ed855b5 100644
--- a/lib/kernel/test/kernel_SUITE.erl
+++ b/lib/kernel/test/kernel_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -30,14 +30,14 @@
-export([init_per_testcase/2, end_per_testcase/2]).
%% Test cases must be exported.
--export([app_test/1, appup_test/1]).
+-export([app_test/1, appup_test/1, refc/1]).
suite() ->
[{ct_hooks,[ts_install_cth]},
{timetrap,{minutes,2}}].
all() ->
- [app_test, appup_test].
+ [app_test, appup_test, refc].
groups() ->
[].
@@ -163,3 +163,68 @@ check_appup([Vsn|Vsns],Instrs,Expected) ->
end;
check_appup([],_,_) ->
ok.
+
+%%% Check that the refc module handles the counters as expected
+refc(_Config) ->
+ Enable = fun(Enable) -> erlang:system_flag(scheduler_wall_time, Enable) end,
+ IsOn = fun() -> undefined /= erlang:statistics(scheduler_wall_time) end,
+ Tester = self(),
+ Loop = fun Loop() ->
+ receive
+ die -> normal;
+ {apply, Bool} ->
+ Res = Enable(Bool),
+ Tester ! {self(), Res},
+ Loop()
+ end
+ end,
+
+ %% Counter should be 0
+ false = Enable(false),
+
+ false = Enable(true),
+ true = Enable(true),
+ true = Enable(false),
+ true = Enable(false),
+
+ %% Counter should be 0
+ false = IsOn(),
+
+ P1 = spawn_link(Loop),
+ P1 ! {apply, true},
+ receive {P1, R1} -> false = R1 end,
+
+ %% P1 has turned it on, so the counter should be one
+ true = IsOn(),
+ true = Enable(true),
+ true = Enable(false),
+ true = IsOn(),
+
+ P1 ! {apply, false},
+ receive {P1, R2} -> true = R2 end,
+ false = IsOn(),
+
+ P1 ! {apply, true},
+ receive {P1, R3} -> false = R3 end,
+ true = IsOn(),
+ true = Enable(false),
+
+
+ P1 ! die,
+ timer:sleep(100),
+ false = IsOn(),
+ false = Enable(false),
+
+ P2 = spawn_link(Loop),
+ P2 ! {apply, true},
+ receive {P2, R4} -> false = R4 end,
+ true = IsOn(),
+ P2 ! {apply, true},
+ receive {P2, R5} -> true = R5 end,
+ true = IsOn(),
+
+ P2 ! die,
+ timer:sleep(100),
+ false = IsOn(),
+
+ ok.
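%% [Editor's illustrative aside, not part of this patch] The behaviour checked
%% above is reference counting of the scheduler_wall_time flag per enabling
%% process: each enable from a process bumps the count, and the statistics only
%% become unavailable again once every enabling process has disabled the flag
%% or exited. A minimal probe for the global state:
swt_is_on() ->
    erlang:statistics(scheduler_wall_time) =/= undefined.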
diff --git a/lib/kernel/test/kernel_bench.spec b/lib/kernel/test/kernel_bench.spec
index 8de60dae31..898ceb59e0 100644
--- a/lib/kernel/test/kernel_bench.spec
+++ b/lib/kernel/test/kernel_bench.spec
@@ -1 +1,3 @@
{groups,"../kernel_test",zlib_SUITE,[bench]}.
+{groups,"../kernel_test",file_SUITE,[bench]}.
+{suites,"../kernel_test",[logger_stress_SUITE]}.
diff --git a/lib/kernel/test/kernel_config_SUITE.erl b/lib/kernel/test/kernel_config_SUITE.erl
index 9a4578917d..9207025a2c 100644
--- a/lib/kernel/test/kernel_config_SUITE.erl
+++ b/lib/kernel/test/kernel_config_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -76,7 +76,7 @@ sync(Conf) when is_list(Conf) ->
%% Reset wall_clock
{T1,_} = erlang:statistics(wall_clock),
io:format("~p~n", [{t1, T1}]),
- Command = lists:concat([lib:progname(),
+ Command = lists:append([ct:get_progname(),
" -detached -sname cp1 ",
"-config ", Config,
" -env ERL_CRASH_DUMP erl_crash_dump.cp1"]),
diff --git a/lib/kernel/test/logger.cover b/lib/kernel/test/logger.cover
new file mode 100644
index 0000000000..9691aa295e
--- /dev/null
+++ b/lib/kernel/test/logger.cover
@@ -0,0 +1,17 @@
+%% -*- erlang -*-
+{incl_mods,[error_logger,
+ logger,
+ logger_backend,
+ logger_config,
+ logger_disk_log_h,
+ logger_filters,
+ logger_formatter,
+ logger_handler_watcher,
+ logger_h_common,
+ logger_olp,
+ logger_proxy,
+ logger_server,
+ logger_simple_h,
+ logger_std_h,
+ logger_sup]}.
+
diff --git a/lib/kernel/test/logger.spec b/lib/kernel/test/logger.spec
new file mode 100644
index 0000000000..3aec37951d
--- /dev/null
+++ b/lib/kernel/test/logger.spec
@@ -0,0 +1,13 @@
+%% -*-erlang-*-
+{suites,"../kernel_test", [error_logger_SUITE,
+ error_logger_warn_SUITE,
+ logger_SUITE,
+ logger_disk_log_h_SUITE,
+ logger_env_var_SUITE,
+ logger_filters_SUITE,
+ logger_formatter_SUITE,
+ logger_legacy_SUITE,
+ logger_olp_SUITE,
+ logger_proxy_SUITE,
+ logger_simple_h_SUITE,
+ logger_std_h_SUITE]}.
diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl
new file mode 100644
index 0000000000..035e5d8974
--- /dev/null
+++ b/lib/kernel/test/logger_SUITE.erl
@@ -0,0 +1,1390 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}).
+-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]).
+
+-define(MY_LOC(N),#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+ file=>?FILE, line=>?LINE-N}).
+
+-define(TRY(X), my_try(fun() -> X end)).
+
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ case logger:get_handler_config(?STANDARD_HANDLER) of
+ {ok,StdH} ->
+ ok = logger:remove_handler(?STANDARD_HANDLER),
+ [{default_handler,StdH}|Config];
+ _ ->
+ Config
+ end.
+
+end_per_suite(Config) ->
+ case ?config(default_handler,Config) of
+ #{module:=HMod} = HConfig ->
+ ok = logger:add_handler(?STANDARD_HANDLER,HMod,HConfig);
+ _ ->
+ ok
+ end.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ PC = logger:get_primary_config(),
+ [{logger_config,PC}|Config].
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop,
+ add_remove_handler,
+ multiple_handlers,
+ add_remove_filter,
+ change_config,
+ set_formatter,
+ log_no_levels,
+ log_all_levels_api,
+ macros,
+ set_level,
+ set_module_level,
+ set_application_level,
+ cache_module_level,
+ format_report,
+ filter_failed,
+ handler_failed,
+ config_sanity_check,
+ log_failed,
+ emulator,
+ via_logger_process,
+ other_node,
+ compare_levels,
+ process_metadata,
+ app_config,
+ kernel_config,
+ pretty_print].
+
+start_stop(_Config) ->
+ S = whereis(logger),
+ true = is_pid(S),
+ ok.
+
+add_remove_handler(_Config) ->
+ register(callback_receiver,self()),
+ Hs0 = logger:get_handler_config(),
+ {error,{not_found,h1}} = logger:get_handler_config(h1),
+ ok = logger:add_handler(h1,?MODULE,#{}),
+ [add] = test_server:messages_get(),
+ Hs = logger:get_handler_config(),
+ Hs0 = lists:filter(fun(#{id:=h1}) -> false; (_) -> true end, Hs),
+ {ok,#{module:=?MODULE,level:=all,filters:=[],filter_default:=log}} = %defaults
+ logger:get_handler_config(h1),
+ ok = logger:set_handler_config(h1,filter_default,stop),
+ [changing_config] = test_server:messages_get(),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_no_log(),
+ ok = logger:set_handler_config(h1,filter_default,log),
+ [changing_config] = test_server:messages_get(),
+ {ok,#{filter_default:=log}} = logger:get_handler_config(h1),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(notice,"hello",[],?MY_LOC(1)),
+ ok = logger:remove_handler(h1),
+ [remove] = test_server:messages_get(),
+ Hs0 = logger:get_handler_config(),
+ {error,{not_found,h1}} = logger:get_handler_config(h1),
+ {error,{not_found,h1}} = logger:remove_handler(h1),
+ logger:notice("hello",[]),
+ ok = check_no_log(),
+ ok.
+
+add_remove_handler(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+multiple_handlers(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ ok = logger:add_handler(h2,?MODULE,#{level=>error,filter_default=>log}),
+ ?LOG_ERROR("hello",[]),
+ ok = check_logged(error,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(notice,"hello",[],?MY_LOC(1)),
+ ok = check_no_log(),
+ ok.
+
+multiple_handlers(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:remove_handler(h2),
+ ok.
+
+add_remove_filter(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ LF = {fun(Log,_) -> Log#{level=>error} end, []},
+ ok = logger:add_primary_filter(lf,LF),
+ {error,{already_exist,lf}} = logger:add_primary_filter(lf,LF),
+ {error,{already_exist,lf}} = logger:add_primary_filter(lf,{fun(Log,_) ->
+ Log
+ end, []}),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(error,"hello",[],?MY_LOC(1)),
+ ok = check_no_log(),
+
+ ok = logger:add_handler(h2,?MODULE,#{level=>notice,filter_default=>log}),
+ HF = {fun(#{level:=error}=Log,_) ->
+ Log#{level=>mylevel};
+ (_,_) ->
+ ignore
+ end,
+ []},
+ ok = logger:add_handler_filter(h1,hf,HF),
+ {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,HF),
+ {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,{fun(Log,_) ->
+ Log
+ end, []}),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(mylevel,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+
+ ok = logger:remove_primary_filter(lf),
+ {error,{not_found,lf}} = logger:remove_primary_filter(lf),
+
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(notice,"hello",[],?MY_LOC(1)),
+ ok = check_logged(notice,"hello",[],?MY_LOC(2)),
+
+ ?LOG_ERROR("hello",[]),
+ ok = check_logged(mylevel,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+
+ ok = logger:remove_handler_filter(h1,hf),
+ {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(notice,"hello",[],?MY_LOC(1)),
+ ok = check_logged(notice,"hello",[],?MY_LOC(2)),
+
+ ?LOG_ERROR("hello",[]),
+ ok = check_logged(error,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+ ok.
+
+add_remove_filter(cleanup,_Config) ->
+ logger:remove_primary_filter(lf),
+ logger:remove_handler(h1),
+ logger:remove_handler(h2),
+ ok.
+
+change_config(_Config) ->
+ %% Overwrite handler config - check that defaults are added
+ {error,{not_found,h1}} = logger:set_handler_config(h1,#{}),
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,custom=>custom}),
+ {ok,#{module:=?MODULE,level:=notice,filter_default:=log,custom:=custom}} =
+ logger:get_handler_config(h1),
+ register(callback_receiver,self()),
+ ok = logger:set_handler_config(h1,#{filter_default=>stop}),
+ [changing_config] = test_server:messages_get(),
+ {ok,#{module:=?MODULE,level:=all,filter_default:=stop}=C2} =
+ logger:get_handler_config(h1),
+ false = maps:is_key(custom,C2),
+ {error,fail} = logger:set_handler_config(h1,#{conf_call=>fun() -> {error,fail} end}),
+ {error,{attempting_syncronous_call_to_self,_}} =
+ logger:set_handler_config(
+ h1,#{conf_call=>fun() -> logger:set_handler_config(?MODULE,#{}) end}),
+ ok =
+ logger:set_handler_config(
+ h1,#{conf_call=>fun() -> logger:set_module_level(?MODULE,debug) end}),
+ {ok,C2} = logger:get_handler_config(h1),
+
+ %% Change handler config: Single key
+ {error,fail} = logger:set_handler_config(h1,conf_call,fun() -> {error,fail} end),
+ ok = logger:set_handler_config(h1,custom,custom),
+ [changing_config] = test_server:messages_get(),
+ {ok,#{custom:=custom}=C3} = logger:get_handler_config(h1),
+ C2 = maps:remove(custom,C3),
+
+ %% Change handler config: Map
+ ok = logger:update_handler_config(h1,#{custom=>new_custom}),
+ [changing_config] = test_server:messages_get(),
+ {ok,C4} = logger:get_handler_config(h1),
+ C4 = C3#{custom:=new_custom},
+
+ %% Change handler config: Id and module can not be changed
+ {error,{illegal_config_change,Old,New}} =
+ logger:set_handler_config(h1,id,newid),
+ %% Check that only the faulty field is included in return
+ [{id,h1}] = maps:to_list(Old),
+ [{id,newid}] = maps:to_list(New),
+ %% Check that both fields are included when both are changed
+ {error,{illegal_config_change,
+ #{id:=h1,module:=?MODULE},
+ #{id:=newid,module:=newmodule}}} =
+ logger:set_handler_config(h1,#{id=>newid,module=>newmodule}),
+
+ %% Change primary config: Single key
+ PConfig0 = logger:get_primary_config(),
+ ok = logger:set_primary_config(level,warning),
+ PConfig1 = logger:get_primary_config(),
+ PConfig1 = PConfig0#{level:=warning},
+
+ %% Change primary config: Map
+ ok = logger:update_primary_config(#{level=>error}),
+ PConfig2 = logger:get_primary_config(),
+ PConfig2 = PConfig1#{level:=error},
+
+ %% Overwrite primary config - check that defaults are added
+ ok = logger:set_primary_config(#{filter_default=>stop}),
+ #{level:=notice,filters:=[],filter_default:=stop}=PC1 =
+ logger:get_primary_config(),
+ 3 = maps:size(PC1),
+ %% Check that internal 'handlers' field has not been changed
+ MS = [{{{?HANDLER_KEY,'$1'},'_','_'},[],['$1']}],
+ HIds1 = lists:sort(ets:select(?LOGGER_TABLE,MS)), % dirty, internal data
+ HIds2 = lists:sort(logger:get_handler_ids()),
+ HIds1 = HIds2,
+
+ %% Cleanup
+ ok = logger:set_primary_config(PConfig0),
+ [] = test_server:messages_get(),
+
+ ok.
+
+change_config(cleanup,Config) ->
+ logger:remove_handler(h1),
+ PC = ?config(logger_config,Config),
+ logger:set_primary_config(PC),
+ ok.
+
+set_formatter(_Config) ->
+ {error,{not_found,h1}}=logger:set_handler_config(h1,formatter,{?MODULE,[]}),
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ ok = logger:set_handler_config(h1,formatter,{?MODULE,[]}),
+ logger:notice("hello",[]),
+ receive
+ {_Log,#{formatter:={?MODULE,[]}}} ->
+ ok
+ after 500 ->
+ ct:fail({timeout,no_log,process_info(self(),messages)})
+ end,
+ ok.
+
+set_formatter(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+log_no_levels(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}),
+ logger:notice(M1=?map_rep),
+ ok = check_logged(notice,M1,#{}),
+
+ Levels = [emergency,alert,critical,error,warning,notice,info,debug],
+ ok = logger:set_primary_config(level,none),
+ [logger:Level(#{Level=>rep}) || Level <- Levels],
+ ok = check_no_log(),
+
+ ok = logger:set_primary_config(level,all),
+ M2 = ?map_rep,
+ ?LOG_NOTICE(M2),
+ ok = check_logged(notice,M2,#{}),
+
+ ok = logger:set_module_level(?MODULE,none),
+ ?LOG_EMERGENCY(?map_rep),
+ ?LOG_ALERT(?map_rep),
+ ?LOG_CRITICAL(?map_rep),
+ ?LOG_ERROR(?map_rep),
+ ?LOG_WARNING(?map_rep),
+ ?LOG_NOTICE(?map_rep),
+ ?LOG_INFO(?map_rep),
+ ?LOG_DEBUG(?map_rep),
+ ok = check_no_log(),
+
+ ok = logger:unset_module_level(?MODULE),
+ logger:notice(M3=?map_rep),
+ ok = check_logged(notice,M3,#{}),
+
+ ok = logger:set_handler_config(h1,level,none),
+ [logger:Level(#{Level=>rep}) || Level <- Levels],
+ ok = check_no_log(),
+
+ ok.
+log_no_levels(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:set_primary_config(level,notice),
+ logger:unset_module_level(?MODULE),
+ ok.
+
+log_all_levels_api(_Config) ->
+ ok = logger:set_primary_config(level,all),
+ ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}),
+ test_api(emergency),
+ test_api(alert),
+ test_api(critical),
+ test_api(error),
+ test_api(warning),
+ test_api(notice),
+ test_api(info),
+ test_api(debug),
+ test_log_function(emergency),
+ ok.
+
+log_all_levels_api(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:set_primary_config(level,notice),
+ ok.
+
+macros(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}),
+ test_macros(emergency),
+ test_log_macro(alert),
+ ok.
+
+macros(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:unset_module_level(?MODULE),
+ ok.
+
+set_level(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}),
+ logger:debug(?map_rep),
+ ok = check_no_log(),
+ logger:notice(M1=?map_rep),
+ ok = check_logged(notice,M1,#{}),
+ ok = logger:set_primary_config(level,debug),
+ logger:debug(M2=?map_rep),
+ ok = check_logged(debug,M2,#{}),
+ ok.
+
+set_level(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:set_primary_config(level,notice),
+ ok.
+
+set_module_level(_Config) ->
+ [] = logger:get_module_level([?MODULE,other]),
+ [] = logger:get_module_level(?MODULE),
+ [] = logger:get_module_level(),
+
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ {error,{invalid_level,bad}} = logger:set_module_level(?MODULE,bad),
+ {error,{not_a_list_of_modules,{bad}}} =
+ logger:set_module_level({bad},warning),
+ {error,{not_a_list_of_modules,[{bad}]}} =
+ logger:set_module_level([{bad}],warning),
+ ok = logger:set_module_level(?MODULE,warning),
+ [{?MODULE,warning}] = logger:get_module_level([?MODULE,other]),
+ [{?MODULE,warning}] = logger:get_module_level(?MODULE),
+ [{?MODULE,warning}] = logger:get_module_level(),
+ logger:notice(?map_rep,?MY_LOC(0)),
+ ok = check_no_log(),
+ logger:warning(M1=?map_rep,?MY_LOC(0)),
+ ok = check_logged(warning,M1,?MY_LOC(1)),
+ ok = logger:set_module_level(?MODULE,notice),
+ [{?MODULE,notice}] = logger:get_module_level([?MODULE,other]),
+ [{?MODULE,notice}] = logger:get_module_level(?MODULE),
+ [{?MODULE,notice}] = logger:get_module_level(),
+ logger:notice(M2=?map_rep,?MY_LOC(0)),
+ ok = check_logged(notice,M2,?MY_LOC(1)),
+
+ {error,{not_a_list_of_modules,{bad}}} = logger:unset_module_level({bad}),
+ {error,{not_a_list_of_modules,[{bad}]}} = logger:unset_module_level([{bad}]),
+ ok = logger:unset_module_level(?MODULE),
+ [] = logger:get_module_level([?MODULE,other]),
+ [] = logger:get_module_level(?MODULE),
+ [] = logger:get_module_level(),
+
+ ok = logger:set_module_level([m1,m2,m3],notice),
+ [{m1,notice},{m2,notice},{m3,notice}] = logger:get_module_level(),
+ ok = logger:unset_module_level(m2),
+ [{m1,notice},{m3,notice}] = logger:get_module_level(),
+ ok = logger:unset_module_level(),
+ [] = logger:get_module_level(),
+
+ ok.
+
+set_module_level(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:unset_module_level(?MODULE),
+ ok.
+
+set_application_level(_Config) ->
+
+ {error,{not_loaded,mnesia}} = logger:set_application_level(mnesia, warning),
+ {error,{not_loaded,mnesia}} = logger:unset_application_level(mnesia),
+
+ case application:load(mnesia) of
+ ok ->
+ {ok, Modules} = application:get_key(mnesia, modules),
+ [] = logger:get_module_level(Modules),
+
+ {error,{invalid_level,warn}} =
+ logger:set_application_level(mnesia, warn),
+
+ ok = logger:set_application_level(mnesia, debug),
+ DebugModules = lists:sort([{M,debug} || M <- Modules]),
+ DebugModules = lists:sort(logger:get_module_level(Modules)),
+
+ ok = logger:set_application_level(mnesia, warning),
+
+ WarnModules = lists:sort([{M,warning} || M <- Modules]),
+ WarnModules = lists:sort(logger:get_module_level(Modules)),
+
+ ok = logger:unset_application_level(mnesia),
+ [] = logger:get_module_level(Modules);
+ {error,{"no such file or directory","mnesia.app"}} ->
+ {skip, "Cannot load mnesia, does not exist"}
+ end.
+
+set_application_level(cleanup,_Config) ->
+ _ = logger:unset_application_level(mnesia),
+ _ = application:unload(mnesia),
+ ok.
+
+cache_module_level(_Config) ->
+ ok = logger:unset_module_level(?MODULE),
+ [] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config?
+ ?LOG_NOTICE(?map_rep),
+ %% Caching is done asynchronously, so wait a bit for the update
+ timer:sleep(100),
+ [_] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config?
+ ok = logger:unset_module_level(?MODULE),
+ [] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config?
+ ok.
+
+cache_module_level(cleanup,_Config) ->
+ logger:unset_module_level(?MODULE),
+ ok.
+
+format_report(_Config) ->
+ {"~ts",["string"]} = logger:format_report("string"),
+ {"~tp",[term]} = logger:format_report(term),
+ {"~tp",[[]]} = logger:format_report([]),
+ {" ~tp: ~tp",[key,value]} = logger:format_report([{key,value}]),
+ KeyVals = [{key1,value1},{key2,"value2"},{key3,[]}],
+ KeyValRes =
+ {" ~tp: ~tp\n ~tp: ~ts\n ~tp: ~tp",
+ [key1,value1,key2,"value2",key3,[]]} =
+ logger:format_report(KeyVals),
+ KeyValRes = logger:format_report(maps:from_list(KeyVals)),
+ KeyValRes = logger:format_otp_report(#{label=>{?MODULE,test},report=>KeyVals}),
+ {" ~tp: ~tp\n ~tp: ~tp",
+ [label,{?MODULE,test},report,KeyVals]} =
+ logger:format_report(#{label=>{?MODULE,test},report=>KeyVals}),
+
+ {" ~tp: ~tp\n ~tp",[key1,value1,term]} =
+ logger:format_report([{key1,value1},term]),
+
+ {" ~tp: ~tp\n ~tp",[key1,value1,[]]} =
+ logger:format_report([{key1,value1},[]]),
+
+ {"~tp",[[]]} = logger:format_report([[],[],[]]),
+
+ ok.
+
+filter_failed(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+
+ %% Logger filters
+ {error,{invalid_filter,_}} =
+ logger:add_primary_filter(lf,{fun(_) -> ok end,args}),
+ ok = logger:add_primary_filter(lf,
+ {fun(_,_) ->
+ erlang:error({badmatch,b})
+ end,
+ args}),
+ #{filters:=[_]} = logger:get_primary_config(),
+ ok = logger:notice(M1=?map_rep),
+ ok = check_logged(notice,M1,#{}),
+ {error,{not_found,lf}} = logger:remove_primary_filter(lf),
+
+ ok = logger:add_primary_filter(lf,{fun(_,_) -> faulty_return end,args}),
+ #{filters:=[_]} = logger:get_primary_config(),
+ ok = logger:notice(M2=?map_rep),
+ ok = check_logged(notice,M2,#{}),
+ {error,{not_found,lf}} = logger:remove_primary_filter(lf),
+
+ %% Handler filters
+ {error,{not_found,h0}} =
+ logger:add_handler_filter(h0,hf,{fun(_,_) -> ignore end,args}),
+ {error,{not_found,h0}} = logger:remove_handler_filter(h0,hf),
+ {error,{invalid_filter,_}} =
+ logger:add_handler_filter(h1,hf,{fun(_) -> ok end,args}),
+ ok = logger:add_handler_filter(h1,hf,
+ {fun(_,_) ->
+ erlang:error({badmatch,b})
+ end,
+ args}),
+ {ok,#{filters:=[_]}} = logger:get_handler_config(h1),
+ ok = logger:notice(M3=?map_rep),
+ ok = check_logged(notice,M3,#{}),
+ {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf),
+
+ ok = logger:add_handler_filter(h1,hf,{fun(_,_) -> faulty_return end,args}),
+ {ok,#{filters:=[_]}} = logger:get_handler_config(h1),
+ ok = logger:notice(M4=?map_rep),
+ ok = check_logged(notice,M4,#{}),
+ {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf),
+
+ ok.
+
+filter_failed(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
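+%% Verify error handling when handler callbacks are invalid, crash,
+%% call back into logger synchronously, or kill the logger process.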
+handler_failed(_Config) ->
+ register(callback_receiver,self()),
+ {error,{invalid_id,1}} = logger:add_handler(1,?MODULE,#{}),
+ {error,{invalid_module,"nomodule"}} = logger:add_handler(h1,"nomodule",#{}),
+ {error,{invalid_config,bad}} = logger:add_handler(h1,?MODULE,bad),
+ {error,{invalid_filters,false}} =
+ logger:add_handler(h1,?MODULE,#{filters=>false}),
+ {error,{invalid_filter_default,true}} =
+ logger:add_handler(h1,?MODULE,#{filter_default=>true}),
+ {error,{invalid_formatter,[]}} =
+ logger:add_handler(h1,?MODULE,#{formatter=>[]}),
+ {error,{invalid_handler,_}} = logger:add_handler(h1,nomodule,#{filter_default=>log}),
+ logger:notice(?map_rep),
+ check_no_log(),
+ H1 = logger:get_handler_config(),
+ false = lists:search(fun(#{id:=h1}) -> true; (_) -> false end,H1),
+ {error,{not_found,h1}} = logger:remove_handler(h1),
+
+ ok = logger:add_handler(h2,?MODULE,
+ #{filter_default => log,
+ log_call => fun() ->
+ erlang:error({badmatch,b})
+ end}),
+ {error,{already_exist,h2}} = logger:add_handler(h2,othermodule,#{}),
+ [add] = test_server:messages_get(),
+
+ logger:notice(?map_rep),
+ [remove] = test_server:messages_get(),
+ H2 = logger:get_handler_config(),
+ false = lists:search(fun(#{id:=h2}) -> true; (_) -> false end,H2),
+ {error,{not_found,h2}} = logger:remove_handler(h2),
+
+ CallAddHandler = fun() -> logger:add_handler(h2,?MODULE,#{}) end,
+ CrashHandler = fun() -> erlang:error({badmatch,b}) end,
+ KillHandler = fun() -> exit(self(), die) end,
+
+ {error,{handler_not_added,{attempting_syncronous_call_to_self,_}}} =
+ logger:add_handler(h1,?MODULE,#{add_call=>CallAddHandler}),
+ {error,{handler_not_added,{callback_crashed,_}}} =
+ logger:add_handler(h1,?MODULE,#{add_call=>CrashHandler}),
+ {error,{handler_not_added,{logger_process_exited,_,die}}} =
+ logger:add_handler(h1,?MODULE,#{add_call=>KillHandler}),
+
+ check_no_log(),
+ ok = logger:add_handler(h1,?MODULE,#{}),
+ {error,{attempting_syncronous_call_to_self,_}} =
+ logger:set_handler_config(h1,#{conf_call=>CallAddHandler}),
+ {error,{callback_crashed,_}} =
+ logger:set_handler_config(h1,#{conf_call=>CrashHandler}),
+ {error,{logger_process_exited,_,die}} =
+ logger:set_handler_config(h1,#{conf_call=>KillHandler}),
+
+ {error,{attempting_syncronous_call_to_self,_}} =
+ logger:set_handler_config(h1,conf_call,CallAddHandler),
+ {error,{callback_crashed,_}} =
+ logger:set_handler_config(h1,conf_call,CrashHandler),
+ {error,{logger_process_exited,_,die}} =
+ logger:set_handler_config(h1,conf_call,KillHandler),
+
+ ok = logger:remove_handler(h1),
+ [add,remove] = test_server:messages_get(),
+
+ check_no_log(),
+ ok = logger:add_handler(h1,?MODULE,#{rem_call=>CallAddHandler}),
+ ok = logger:remove_handler(h1),
+ ok = logger:add_handler(h1,?MODULE,#{rem_call=>CrashHandler}),
+ ok = logger:remove_handler(h1),
+ ok = logger:add_handler(h1,?MODULE,#{rem_call=>KillHandler}),
+ ok = logger:remove_handler(h1),
+ [add,add,add] = test_server:messages_get(),
+
+ ok.
+
+handler_failed(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:remove_handler(h2),
+ ok.
+
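+%% Verify input validation of primary config, handler config and
+%% logger_formatter config.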
+config_sanity_check(_Config) ->
+ %% Primary config
+ {error,{invalid_config,bad}} = logger:set_primary_config(bad),
+ {error,{invalid_filter_default,bad}} =
+ logger:set_primary_config(filter_default,bad),
+ {error,{invalid_level,bad}} = logger:set_primary_config(level,bad),
+ {error,{invalid_filters,bad}} = logger:set_primary_config(filters,bad),
+ {error,{invalid_filter,bad}} = logger:set_primary_config(filters,[bad]),
+ {error,{invalid_filter,{_,_}}} =
+ logger:set_primary_config(filters,[{id,bad}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_primary_config(filters,[{id,{bad,args}}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_primary_config(filters,[{id,{fun() -> ok end,args}}]),
+ {error,{invalid_primary_config,{bad,bad}}} =
+ logger:set_primary_config(bad,bad),
+
+ %% Handler config
+ {error,{not_found,h1}} = logger:set_handler_config(h1,a,b),
+ ok = logger:add_handler(h1,?MODULE,#{}),
+ {error,{invalid_filter_default,bad}} =
+ logger:set_handler_config(h1,filter_default,bad),
+ {error,{invalid_level,bad}} = logger:set_handler_config(h1,level,bad),
+ {error,{invalid_filters,bad}} = logger:set_handler_config(h1,filters,bad),
+ {error,{invalid_filter,bad}} = logger:set_handler_config(h1,filters,[bad]),
+ {error,{invalid_filter,{_,_}}} =
+ logger:set_handler_config(h1,filters,[{id,bad}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_handler_config(h1,filters,[{id,{bad,args}}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_handler_config(h1,filters,[{id,{fun() -> ok end,args}}]),
+ {error,{invalid_formatter,bad}} =
+ logger:set_handler_config(h1,formatter,bad),
+ {error,{invalid_module,{bad}}} =
+ logger:set_handler_config(h1,formatter,{{bad},cfg}),
+ {error,{invalid_formatter_config,logger_formatter,bad}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,bad}),
+ {error,{invalid_formatter_config,logger_formatter,{bad,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,#{bad=>bad}}),
+ {error,{invalid_formatter_template,logger_formatter,bad}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{template=>bad}}),
+ {error,{invalid_formatter_template,logger_formatter,[1]}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{template=>[1]}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{template=>[]}}),
+ {error,{invalid_formatter_config,logger_formatter,{single_line,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{single_line=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{single_line=>true}}),
+ {error,{invalid_formatter_config,logger_formatter,{legacy_header,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{legacy_header=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{legacy_header=>true}}),
+ {error,{invalid_formatter_config,logger_formatter,{report_cb,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{report_cb=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{report_cb=>fun(R) ->
+ {"~p",[R]}
+ end}}),
+ {error,{invalid_formatter_config,logger_formatter,{chars_limit,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{chars_limit=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{chars_limit=>unlimited}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{chars_limit=>4}}),
+ {error,{invalid_formatter_config,logger_formatter,{depth,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{depth=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{depth=>unlimited}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{depth=>4}}),
+ {error,{invalid_formatter_config,logger_formatter,{max_size,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{max_size=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{max_size=>unlimited}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{max_size=>4}}),
+ ok = logger:set_handler_config(h1,formatter,{module,config}),
+ {error,{callback_crashed,{error,{badmatch,3},[{?MODULE,check_config,1,_}]}}} =
+ logger:set_handler_config(h1,formatter,{?MODULE,crash}),
+ ok = logger:set_handler_config(h1,custom,custom),
+
+ %% Old utc parameter is no longer allowed (replaced by time_offset)
+ {error,{invalid_formatter_config,logger_formatter,{utc,true}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{utc=>true}}),
+ {error,{invalid_formatter_config,logger_formatter,{time_offset,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>0}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>""}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"Z"}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"z"}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"-0:0"}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"+10:13"}}),
+
+ {error,{invalid_formatter_config,logger_formatter,{time_offset,"+0"}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"+0"}}),
+
+ {error,{invalid_formatter_config,logger_formatter,{time_designator,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>bad}}),
+ {error,{invalid_formatter_config,logger_formatter,{time_designator,"s"}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>"s"}}),
+ {error,{invalid_formatter_config,logger_formatter,{time_designator,0}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>0}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>$\s}}),
+ ok.
+
+config_sanity_check(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
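+%% Verify that logger:log/2,3,4 rejects bad arguments with
+%% function_clause, and accepts strings, maps and key-value reports.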
+log_failed(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ {error,function_clause} = ?TRY(logger:log(bad,?map_rep)),
+ {error,function_clause} = ?TRY(logger:log(notice,?map_rep,bad)),
+ {error,function_clause} = ?TRY(logger:log(notice,fun() -> ?map_rep end,bad)),
+ {error,function_clause} = ?TRY(logger:log(notice,fun() -> ?map_rep end,bad,#{})),
+ {error,function_clause} = ?TRY(logger:log(notice,bad,bad,bad)),
+ {error,function_clause} = ?TRY(logger:log(notice,bad,bad,#{})),
+ check_no_log(),
+ ok = logger:log(notice,M1=?str,#{}),
+ check_logged(notice,M1,#{}),
+ ok = logger:log(notice,M2=?map_rep,#{}),
+ check_logged(notice,M2,#{}),
+ ok = logger:log(notice,M3=?keyval_rep,#{}),
+ check_logged(notice,M3,#{}),
+
+ %% Should we check report input more thoroughly?
+ ok = logger:log(notice,M4=?keyval_rep++[other,stuff,in,list],#{}),
+ check_logged(notice,M4,#{}),
+
+    %% This might break a handler, since the message is assumed to be a
+    %% format string and args. It depends on how the handler protects
+    %% itself against something like io_lib:format("ok","ok").
+ ok = logger:log(notice,"ok","ok",#{}),
+ check_logged(notice,"ok","ok",#{}),
+
+ ok.
+
+log_failed(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
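+%% Verify that an uncaught error in a spawned process results in an
+%% emulator-generated error event reaching the handler.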
+emulator(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log,
+ tc_proc=>self()}),
+ Msg = "Error in process ~p on node ~p with exit value:~n~p~n",
+ Error = {badmatch,4},
+ Stack = [{module, function, 2, []}],
+ Pid = spawn(?MODULE, generate_error, [Error, Stack]),
+ check_logged(error, Msg, [Pid, node(), {Error, Stack}],
+ #{gl=>group_leader(),
+ error_logger=>#{tag=>error,emulator=>true}}),
+ ok.
+
+emulator(cleanup, _Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+generate_error(Error, Stack) ->
+ erlang:raise(error, Error, Stack).
+
+via_logger_process(Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log,
+ tc_proc=>self()}),
+
+ %% Explicitly send a message to the logger process
+ %% This is used by code_server, erl_prim_loader, init, prim_file, ...
+ Msg = ?str,
+ logger ! {log,error,Msg,[],#{}},
+ check_logged(error, Msg, [], #{}),
+
+ case os:type() of
+ {win32,_} ->
+            %% Skip this part on Windows - can't change file mode
+ ok;
+ _ ->
+ %% This should trigger the same thing from erl_prim_loader
+ Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+ ok = file:make_dir(Dir),
+ ok = file:change_mode(Dir,8#0222),
+ error = erl_prim_loader:list_dir(Dir),
+ check_logged(error,
+ #{report=>"File operation error: eacces. Target: " ++
+ Dir ++". Function: list_dir. "},
+ #{pid=>self(),
+ gl=>group_leader(),
+ error_logger=>#{tag=>error_report,
+ type=>std_error}}),
+ ok
+ end.
+
+via_logger_process(cleanup, Config) ->
+ Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+ _ = file:change_mode(Dir,8#0664),
+ _ = file:del_dir(Dir),
+ logger:remove_handler(h1),
+ ok.
+
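+%% Verify that log events from a remote node are passed on to the
+%% handlers on this node.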
+other_node(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log,
+ tc_proc=>self()}),
+ {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+ rpc:call(Node,logger,error,[Msg=?str,#{}]),
+ check_logged(error,Msg,#{}),
+ ok.
+
+other_node(cleanup,_Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes],
+ logger:remove_handler(h1),
+ ok.
+
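+%% Verify logger:compare_levels/2 for all level combinations and for
+%% bad arguments.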
+compare_levels(_Config) ->
+ Levels = [emergency,alert,critical,error,warning,notice,info,debug],
+ ok = compare(Levels),
+ {error,badarg} = ?TRY(logger:compare_levels(bad,bad)),
+ {error,badarg} = ?TRY(logger:compare_levels({bad},notice)),
+ {error,badarg} = ?TRY(logger:compare_levels(notice,"bad")),
+ ok.
+
+compare([L|Rest]) ->
+ eq = logger:compare_levels(L,L),
+ [gt = logger:compare_levels(L,L1) || L1 <- Rest],
+ [lt = logger:compare_levels(L1,L) || L1 <- Rest],
+ compare(Rest);
+compare([]) ->
+ ok.
+
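+%% Verify set/update/unset of process metadata, and that metadata given
+%% in the log call overrides it.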
+process_metadata(_Config) ->
+ undefined = logger:get_process_metadata(),
+ {error,badarg} = ?TRY(logger:set_process_metadata(bad)),
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ Time = logger:timestamp(),
+ ProcMeta = #{time=>Time,line=>0,custom=>proc},
+ ok = logger:set_process_metadata(ProcMeta),
+ S1 = ?str,
+ ?LOG_NOTICE(S1,#{custom=>macro}),
+ check_logged(notice,S1,#{time=>Time,line=>0,custom=>macro}),
+
+ Time2 = logger:timestamp(),
+ S2 = ?str,
+ ?LOG_NOTICE(S2,#{time=>Time2,line=>1,custom=>macro}),
+ check_logged(notice,S2,#{time=>Time2,line=>1,custom=>macro}),
+
+ logger:notice(S3=?str,#{custom=>func}),
+ check_logged(notice,S3,#{time=>Time,line=>0,custom=>func}),
+
+ ProcMeta = logger:get_process_metadata(),
+ ok = logger:update_process_metadata(#{custom=>changed,custom2=>added}),
+ Expected = ProcMeta#{custom:=changed,custom2=>added},
+ Expected = logger:get_process_metadata(),
+ ok = logger:unset_process_metadata(),
+ undefined = logger:get_process_metadata(),
+
+ ok = logger:update_process_metadata(#{custom=>added_again}),
+ {error,badarg} = ?TRY(logger:update_process_metadata(bad)),
+ #{custom:=added_again} = logger:get_process_metadata(),
+
+ ok.
+
+process_metadata(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
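+%% Verify that handlers declared in an application's 'logger' environment
+%% can be added with logger:add_handlers/1, and that conflicting or bad
+%% configuration is rejected.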
+app_config(Config) ->
+ %% Start a node with default configuration
+ {ok,_,Node} = logger_test_lib:setup(Config,[]),
+
+ App1Name = app1,
+ App1 = {application, App1Name,
+ [{description, "Test of app with logger config"},
+ {applications, [kernel]}]},
+ ok = rpc:call(Node,application,load,[App1]),
+ ok = rpc:call(Node,application,set_env,
+ [App1Name,logger,[{handler,default,logger_std_h,#{}}]]),
+
+ %% Try to add an own default handler
+ {error,{bad_config,{handler,{app1,{already_exist,default}}}}} =
+ rpc:call(Node,logger,add_handlers,[App1Name]),
+
+ %% Add a different handler
+ ok = rpc:call(Node,application,set_env,[App1Name,logger,
+ [{handler,myh,logger_std_h,#{}}]]),
+ ok = rpc:call(Node,logger,add_handlers,[App1Name]),
+
+ {ok,#{filters:=DF}} = rpc:call(Node,logger,get_handler_config,[default]),
+ {ok,#{filters:=[]}} = rpc:call(Node,logger,get_handler_config,[myh]),
+
+ true = test_server:stop_node(Node),
+
+ %% Start a node with no default handler, then add an own default handler
+ {ok,#{handlers:=[#{id:=simple}]},Node} =
+ logger_test_lib:setup(Config,[{logger,[{handler,default,undefined}]}]),
+
+ ok = rpc:call(Node,application,load,[App1]),
+ ok = rpc:call(Node,application,set_env,
+ [App1Name,logger,[{handler,default,logger_std_h,#{}}]]),
+ ok = rpc:call(Node,logger,add_handlers,[App1Name]),
+
+ #{handlers:=[#{id:=default,filters:=DF}]} =
+ rpc:call(Node,logger,get_config,[]),
+
+ true = test_server:stop_node(Node),
+
+ %% Start a silent node, then add an own default handler
+ {ok,#{handlers:=[]},Node} =
+ logger_test_lib:setup(Config,[{error_logger,silent}]),
+
+ {error,{bad_config,{handler,[{some,bad,config}]}}} =
+ rpc:call(Node,logger,add_handlers,[[{some,bad,config}]]),
+ ok = rpc:call(Node,logger,add_handlers,
+ [[{handler,default,logger_std_h,#{}}]]),
+
+ #{handlers:=[#{id:=default,filters:=DF}]} =
+ rpc:call(Node,logger,get_config,[]),
+
+ ok.
+
+%% This test case is mainly for code coverage. Note that
+%% logger_env_var_SUITE tests much of the same and checks the
+%% functionality more thoroughly, but since it all happens at node
+%% start, it is not possible to collect code coverage in that test.
+kernel_config(Config) ->
+ %% Start a node with simple handler only, then simulate kernel
+ %% start by calling internally exported
+ %% internal_init_logger(). This is to test all variants of kernel
+ %% config, including bad config, and see the code coverage.
+ {ok,#{handlers:=[#{id:=simple,filters:=DF}]}=LC,Node} =
+ logger_test_lib:setup(Config,[{error_logger,false}]),
+
+ %% Same once more, to get coverage
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ LC = rpc:call(Node,logger,get_config,[]),
+
+    %% This should mean the same as above, but using the 'logger'
+    %% parameter instead of 'error_logger'
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{handler,default,undefined}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ LC = rpc:call(Node,logger,get_config,[]),
+
+ %% Silent
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,silent]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Default
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,config:=#{type:=standard_io}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% error_logger=tty (same as default)
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,tty]),
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,config:=#{type:=standard_io}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% error_logger={file,File}
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ F = filename:join(?config(priv_dir,Config),
+ atom_to_list(?FUNCTION_NAME)++".log"),
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,{file,F}]),
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=Modes}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+ [append,delayed_write,raw] = lists:sort(Modes),
+
+
+ %% Same, but using 'logger' parameter instead of 'error_logger'
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,[kernel,logger,
+ [{handler,default,logger_std_h,
+ #{config=>#{type=>{file,F}}}}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=Modes}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Same, but with type={file,File,Modes}
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ M = [raw,write],
+ ok = rpc:call(Node,application,set_env,[kernel,logger,
+ [{handler,default,logger_std_h,
+ #{config=>#{type=>{file,F,M}}}}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=[delayed_write|M]}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Same, but with disk_log handler
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,[kernel,logger,
+ [{handler,default,logger_disk_log_h,
+ #{config=>#{file=>F}}}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,config:=#{file:=F}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Set primary filters and module level. No default handler.
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{handler,default,undefined},
+ {filters,stop,[{f1,{fun(_,_) -> log end,ok}}]},
+ {module_level,debug,[?MODULE]}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=stop,filters:=[_]},
+ handlers:=[],
+ module_levels:=[{?MODULE,debug}]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Bad config
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,bad]),
+ {error,{bad_config,{kernel,{error_logger,bad}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,[kernel,logger_level,bad]),
+ {error,{bad_config,{kernel,{logger_level,bad}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,unset_env,[kernel,logger_level]),
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{filters,stop,[bad]}]]),
+ {error,{bad_config,{kernel,{invalid_filters,[bad]}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{filters,stop,[bad]}]]),
+ {error,{bad_config,{kernel,{invalid_filters,[bad]}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{filters,stop,[{f1,bad}]}]]),
+ {error,{bad_config,{kernel,{invalid_filter,{f1,bad}}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,MF=[{filters,stop,[]},{filters,log,[]}]]),
+ {error,{bad_config,{kernel,{multiple_filters,MF}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{module_level,bad,[?MODULE]}]]),
+ {error,{bad_config,{kernel,{invalid_level,bad}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok.
+
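+%% Verify that the output of logger:i/0 equals the combined output of
+%% logger:i/1 for each part of the configuration.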
+pretty_print(Config) ->
+ ok = logger:add_handler(?FUNCTION_NAME,logger_std_h,#{}),
+ ok = logger:set_module_level([module1,module2],debug),
+
+ ct:capture_start(),
+ logger:i(),
+ ct:capture_stop(),
+ I0 = ct:capture_get(),
+
+ ct:capture_start(),
+ logger:i(primary),
+ ct:capture_stop(),
+ IPrim = ct:capture_get(),
+
+ ct:capture_start(),
+ logger:i(handlers),
+ ct:capture_stop(),
+ IHs = ct:capture_get(),
+
+ ct:capture_start(),
+ logger:i(proxy),
+ ct:capture_stop(),
+ IProxy = ct:capture_get(),
+
+ ct:capture_start(),
+ logger:i(modules),
+ ct:capture_stop(),
+ IMs = ct:capture_get(),
+
+ I02 = lists:append([IPrim,IHs,IProxy,IMs]),
+ %% ct:log("~p~n",[I0]),
+ %% ct:log("~p~n",[I02]),
+ I0 = I02,
+
+ ct:capture_start(),
+ logger:i(handlers),
+ ct:capture_stop(),
+ IHs = ct:capture_get(),
+
+ Ids = logger:get_handler_ids(),
+ IHs2 =
+ lists:append(
+ [begin
+ ct:capture_start(),
+ logger:i(Id),
+ ct:capture_stop(),
+ [_|IH] = ct:capture_get(),
+ IH
+ end || Id <- Ids]),
+
+ %% ct:log("~p~n",[IHs]),
+ %% ct:log("~p~n",[["Handler configuration: \n"|IHs2]]),
+ IHs = ["Handler configuration: \n"|IHs2],
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+check_logged(Level,Format,Args,Meta) ->
+ do_check_logged(Level,{Format,Args},Meta).
+
+check_logged(Level,Msg,Meta) when ?IS_REPORT(Msg) ->
+ do_check_logged(Level,{report,Msg},Meta);
+check_logged(Level,Msg,Meta) when ?IS_STRING(Msg) ->
+ do_check_logged(Level,{string,Msg},Meta).
+
+do_check_logged(Level,Msg0,Meta0) ->
+ receive
+ {#{level:=Level,msg:=Msg,meta:=Meta},_} ->
+ check_msg(Msg0,Msg),
+ check_maps(Meta0,Meta,meta)
+ after 500 ->
+ ct:fail({timeout,no_log,process_info(self(),messages)})
+ end.
+
+check_no_log() ->
+ receive
+ X -> ct:fail({got_unexpected_log,X})
+ after 500 ->
+ ok
+ end.
+
+check_msg(Msg,Msg) ->
+ ok;
+check_msg({report,Expected},{report,Got}) when is_map(Expected), is_map(Got) ->
+ check_maps(Expected,Got,msg);
+check_msg(Expected,Got) ->
+ ct:fail({unexpected,msg,Expected,Got}).
+
+check_maps(Expected,Got,What) ->
+ case maps:merge(Got,Expected) of
+ Got ->
+ ok;
+ _ ->
+ ct:fail({unexpected,What,Expected,Got})
+ end.
+
+%% Handler
+adding_handler(#{add_call:=Fun}) ->
+ Fun();
+adding_handler(Config) ->
+ maybe_send(add),
+ {ok,Config}.
+
+removing_handler(#{rem_call:=Fun}) ->
+ Fun();
+removing_handler(_Config) ->
+ maybe_send(remove),
+ ok.
+changing_config(_Old,#{conf_call:=Fun}) ->
+ Fun();
+changing_config(_Old,Config) ->
+ maybe_send(changing_config),
+ {ok,Config}.
+
+maybe_send(Msg) ->
+ case whereis(callback_receiver) of
+ undefined -> ok;
+ Pid -> Pid ! Msg
+ end.
+
+log(_Log,#{log_call:=Fun}) ->
+ Fun();
+log(Log,Config) ->
+ TcProc = maps:get(tc_proc,Config,self()),
+ TcProc ! {Log,Config},
+ ok.
+
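+%% Helpers exercising the logger API for one level: logger:Level/1,2,3,
+%% logger:log/2,3,4 and the ?LOG_<LEVEL>/?LOG macros.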
+test_api(Level) ->
+ logger:Level(#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},#{}),
+ logger:Level(#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},#{my=>meta}),
+ logger:Level("~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],#{}),
+ logger:Level("~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ logger:Level(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,x,
+ #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}),
+ logger:Level(fun(x) -> #{Level=>fun_to_r,meta=>true} end,x,
+ #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}),
+ logger:Level(fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,#{}),
+ logger:Level(F1=fun(x) -> {fun_to_bad} end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ logger:Level(F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+test_log_function(Level) ->
+ logger:log(Level,#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},#{}),
+ logger:log(Level,#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},#{my=>meta}),
+ logger:log(Level,"~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],#{}),
+ logger:log(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ logger:log(Level,fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}),
+ logger:log(Level,fun(x) -> #{Level=>fun_to_r,meta=>true} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}),
+ logger:log(Level,fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,#{}),
+ logger:log(Level,F1=fun(x) -> {fun_to_bad} end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ logger:log(Level,F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+test_macros(emergency=Level) ->
+ ?LOG_EMERGENCY(#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},?MY_LOC(1)),
+ ?LOG_EMERGENCY(#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},(?MY_LOC(1))#{my=>meta}),
+ ?LOG_EMERGENCY("~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],?MY_LOC(1)),
+ ?LOG_EMERGENCY("~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],(?MY_LOC(1))#{my=>meta}),
+ ?LOG_EMERGENCY(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],
+ (?MY_LOC(3))#{my=>meta}),
+ ?LOG_EMERGENCY(fun(x) -> #{Level=>fun_to_r,meta=>true} end, x, #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},
+ (?MY_LOC(2))#{my=>meta}),
+ ?LOG_EMERGENCY(fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,?MY_LOC(1)),
+ F1=fun(x) -> {fun_to_bad} end,
+ ?LOG_EMERGENCY(F1,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ F2=fun(x) -> erlang:error(fun_that_crashes) end,
+ ?LOG_EMERGENCY(F2,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+test_log_macro(Level) ->
+ ?LOG(Level,#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},?MY_LOC(1)),
+ ?LOG(Level,#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},(?MY_LOC(1))#{my=>meta}),
+ ?LOG(Level,"~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],?MY_LOC(1)),
+ ?LOG(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],(?MY_LOC(1))#{my=>meta}),
+ ?LOG(Level,fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],
+ (?MY_LOC(3))#{my=>meta}),
+ ?LOG(Level,fun(x) -> #{Level=>fun_to_r,meta=>true} end, x, #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},
+ (?MY_LOC(2))#{my=>meta}),
+ ?LOG(Level,fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,?MY_LOC(1)),
+ F1=fun(x) -> {fun_to_bad} end,
+ ?LOG(Level,F1,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ F2=fun(x) -> erlang:error(fun_that_crashes) end,
+ ?LOG(Level,F2,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Called by macro ?TRY(X)
+my_try(Fun) ->
+ try Fun() catch C:R -> {C,R} end.
+
+check_config(crash) ->
+ erlang:error({badmatch,3});
+check_config(_) ->
+ ok.
diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl
new file mode 100644
index 0000000000..9bbec42de8
--- /dev/null
+++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl
@@ -0,0 +1,1668 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_disk_log_h_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+-include_lib("kernel/src/logger_olp.hrl").
+-include_lib("kernel/src/logger_h_common.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
+-include_lib("kernel/include/file.hrl").
+
+-define(check_no_log, [] = test_server:messages_get()).
+
+-define(check(Expected),
+ receive {log,Expected} ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(bin(Msg), list_to_binary(Msg++"\n")).
+-define(log_no(File,N), lists:concat([File,".",N])).
+-define(domain,#{domain=>[?MODULE]}).
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ timer:start(), % to avoid progress report
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(TestHooksCase, Config) when
+ TestHooksCase == write_failure;
+ TestHooksCase == sync_failure ->
+ case (fun() -> ?TEST_HOOKS_TAB == undefined end)() of
+ true ->
+ {skip,"Define the TEST_HOOKS macro to run this test"};
+ false ->
+ ct:print("********** ~w **********", [TestHooksCase]),
+ Config
+ end;
+init_per_testcase(TestCase, Config) ->
+ ct:print("********** ~w **********", [TestCase]),
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop_handler,
+ create_log,
+ open_existing_log,
+ disk_log_opts,
+ default_formatter,
+ logging,
+ filter_config,
+ errors,
+ formatter_fail,
+ config_fail,
+ bad_input,
+ reconfig,
+ sync,
+ disk_log_full,
+ disk_log_wrap,
+ disk_log_events,
+ write_failure,
+ sync_failure,
+ op_switch_to_sync,
+ op_switch_to_drop,
+ op_switch_to_flush,
+ limit_burst_disabled,
+ limit_burst_enabled_one,
+ limit_burst_enabled_period,
+ kill_disabled,
+ qlen_kill_new,
+ %% qlen_kill_std,
+ mem_kill_new,
+ %% mem_kill_std,
+ restart_after,
+ handler_requests_under_load
+ ].
+
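+%% Verify that adding and removing the handler starts and stops the
+%% handler process.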
+start_stop_handler(_Config) ->
+ ok = logger:add_handler(?MODULE, logger_disk_log_h, #{}),
+ {error,{already_exist,?MODULE}} =
+ logger:add_handler(?MODULE, logger_disk_log_h, #{}),
+ true = is_pid(whereis(h_proc_name())),
+ ok = logger:remove_handler(?MODULE),
+ timer:sleep(500),
+ undefined = whereis(h_proc_name()).
+start_stop_handler(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+create_log(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ %% test new handler
+ Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_A"])),
+ LogFile1 = filename:join(PrivDir, Name1),
+ ok = start_and_add(Name1, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile1}),
+ logger:notice("hello", ?domain),
+ logger_disk_log_h:filesync(Name1),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"hello\n">>}, 5000),
+
+ %% test second handler
+ Name2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_B"])),
+ DLName = lists:concat([?FUNCTION_NAME,"_B_log"]),
+ LogFile2 = filename:join(PrivDir, DLName),
+ ok = start_and_add(Name2, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile2}),
+ logger:notice("dummy", ?domain),
+ logger_disk_log_h:filesync(Name2),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile2,1)]),
+ try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000),
+
+ remove_and_stop(Name1),
+ remove_and_stop(Name2),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"hello\ndummy\n">>}, 1),
+ try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000),
+ ok.
+
+open_existing_log(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ %% test new handler
+ HName = ?FUNCTION_NAME,
+ DLName = lists:concat([?FUNCTION_NAME,"_log"]),
+ LogFile1 = filename:join(PrivDir, DLName),
+ ok = start_and_add(HName, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile1}),
+ logger:notice("one", ?domain),
+ logger_disk_log_h:filesync(HName),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\n">>}, 5000),
+ logger:notice("two", ?domain),
+ ok = remove_and_stop(HName),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\n">>}, 5000),
+
+ logger:notice("two and a half", ?domain),
+
+ ok = start_and_add(HName, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile1}),
+ logger:notice("three", ?domain),
+ logger_disk_log_h:filesync(HName),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000),
+ remove_and_stop(HName),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000).
+
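+%% Verify that wrap and halt log options are passed on to disk_log.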
+disk_log_opts(Config) ->
+ Get = fun(Key, PL) -> proplists:get_value(Key, PL) end,
+ PrivDir = ?config(priv_dir,Config),
+ WName = list_to_atom(lists:concat([?FUNCTION_NAME,"_W"])),
+ WFile = lists:concat([?FUNCTION_NAME,"_W_log"]),
+ Size = length("12345"),
+ ConfigW = #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter => {?MODULE,no_nl}},
+ WFileFull = filename:join(PrivDir, WFile),
+ DLOptsW = #{file => WFileFull,
+ type => wrap,
+ max_no_bytes => Size,
+ max_no_files => 2},
+ ok = start_and_add(WName, ConfigW, DLOptsW),
+ WInfo1 = disk_log:info(WName),
+ ct:log("Fullname = ~s", [WFileFull]),
+ {WFileFull,wrap,{Size,2},1} = {Get(file,WInfo1),Get(type,WInfo1),
+ Get(size,WInfo1),Get(current_file,WInfo1)},
+ logger:notice("123", ?domain),
+ logger_disk_log_h:filesync(WName),
+ timer:sleep(500),
+ 1 = Get(current_file, disk_log:info(WName)),
+
+ logger:notice("45", ?domain),
+ logger_disk_log_h:filesync(WName),
+ timer:sleep(500),
+ 1 = Get(current_file, disk_log:info(WName)),
+
+ logger:notice("6", ?domain),
+ logger_disk_log_h:filesync(WName),
+ timer:sleep(500),
+ 2 = Get(current_file, disk_log:info(WName)),
+
+ logger:notice("7890", ?domain),
+ logger_disk_log_h:filesync(WName),
+ timer:sleep(500),
+ 2 = Get(current_file, disk_log:info(WName)),
+
+ HName1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H1"])),
+ HFile1 = lists:concat([?FUNCTION_NAME,"_H1_log"]),
+ ConfigH = #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter => {?MODULE,no_nl}},
+ HFile1Full = filename:join(PrivDir, HFile1),
+ DLOptsH1 = #{file => HFile1Full,
+ type => halt},
+ ok = start_and_add(HName1, ConfigH, DLOptsH1),
+ HInfo1 = disk_log:info(HName1),
+ ct:log("Fullname = ~s", [HFile1Full]),
+ {HFile1Full,halt,infinity} = {Get(file,HInfo1),Get(type,HInfo1),
+ Get(size,HInfo1)},
+ logger:notice("12345", ?domain),
+ logger_disk_log_h:filesync(HName1),
+ timer:sleep(500),
+ 1 = Get(no_written_items, disk_log:info(HName1)),
+
+ HName2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H2"])),
+ HFile2 = lists:concat([?FUNCTION_NAME,"_H2_log"]),
+ HFile2Full = filename:join(PrivDir, HFile2),
+ DLOptsH2 = DLOptsH1#{file => HFile2Full,
+ max_no_bytes => 1000},
+ ok = start_and_add(HName2, ConfigH, DLOptsH2),
+ HInfo3 = disk_log:info(HName2),
+ ct:log("Fullname = ~s", [HFile2Full]),
+ {HFile2Full,halt,1000} = {Get(file,HInfo3),Get(type,HInfo3),
+ Get(size,HInfo3)},
+
+ remove_and_stop(WName),
+ remove_and_stop(HName1),
+ remove_and_stop(HName2),
+ ok.
+
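+%% Verify the log output when using the default formatter configuration.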
+default_formatter(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ LogFile = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)),
+ HandlerConfig = #{config => #{file=>LogFile},
+ filter_default=>log},
+ ct:pal("Log: ~p", [LogFile]),
+ ok = logger:add_handler(?MODULE, logger_disk_log_h, HandlerConfig),
+ ok = logger:set_handler_config(?MODULE,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}),
+ LogName = lists:concat([LogFile, ".1"]),
+ logger:notice("dummy"),
+ wait_until_written(LogName),
+ {ok,Bin} = file:read_file(LogName),
+ match = re:run(Bin, "=NOTICE REPORT====.*\ndummy", [{capture,none}]),
+ ok.
+default_formatter(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+logging(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ %% test new handler
+ Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ LogFile = filename:join(PrivDir, Name),
+ ok = start_and_add(Name, #{filter_default=>log,
+ formatter=>{?MODULE,self()}},
+ #{file => LogFile}),
+ MsgFormatter = fun(Term) -> {io_lib:format("Term:~p",[Term]),[]} end,
+ logger:notice([{x,y}], #{report_cb => MsgFormatter}),
+ logger:notice([{x,y}], #{}),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile,1)]),
+ try_read_file(?log_no(LogFile,1), {ok,<<"Term:[{x,y}]\n x: y\n">>}, 5000).
+
+logging(cleanup, _Config) ->
+ Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ remove_and_stop(Name).
+
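+%% Verify that the internal 'olp' field is filtered out of the handler
+%% config returned to the user.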
+filter_config(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_disk_log_h,#{}),
+ {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE),
+ HConfig = maps:without([olp],HConfig),
+
+ FakeFullHConfig = HConfig#{olp=>{regname,self(),erlang:make_ref()}},
+ #{config:=HConfig} =
+ logger_disk_log_h:filter_config(Config#{config=>FakeFullHConfig}),
+ ok.
+
+filter_config(cleanup,_Config) ->
+ logger:remove_handler(?MODULE),
+ ok.
+
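+%% Verify error returns from add_handler and from illegal config changes
+%% (changing the type or file of a log that is already in use).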
+errors(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ LogFile1 = filename:join(PrivDir,Name1),
+ HandlerConfig = #{config=>#{file=>LogFile1},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}},
+ ok = logger:add_handler(Name1, logger_disk_log_h, HandlerConfig),
+ {error,{already_exist,Name1}} =
+ logger:add_handler(Name1, logger_disk_log_h, #{}),
+
+ %%! TODO:
+ %%! Check how bad log_opts are handled!
+
+ {error,{illegal_config_change,
+ logger_disk_log_h,
+ #{type:=wrap},
+ #{type:=halt}}} =
+ logger:update_handler_config(Name1,
+ config,
+ #{type=>halt,
+ file=>LogFile1}),
+
+ {error,{illegal_config_change,
+ logger_disk_log_h,
+ #{file:=LogFile1},
+ #{file:="newfilename"}}} =
+ logger:update_handler_config(Name1,
+ config,
+ #{file=>"newfilename"}),
+
+    %% Read-only fields may (accidentally) be included in the change,
+    %% but they won't take effect
+ {ok,C} = logger:get_handler_config(Name1),
+ ok = logger:set_handler_config(Name1,config,#{olp=>dummyvalue}),
+ {ok,C} = logger:get_handler_config(Name1),
+
+
+ ok = logger:remove_handler(Name1),
+ {error,{not_found,Name1}} = logger:remove_handler(Name1),
+ ok.
+
+errors(cleanup, _Config) ->
+ Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ _ = logger:remove_handler(Name1).
+
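+%% Verify that a missing, crashing or badly returning formatter is
+%% reported in the log without killing the handler process.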
+formatter_fail(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ Name = ?FUNCTION_NAME,
+ LogFile = filename:join(PrivDir,Name),
+ ct:pal("Log = ~p", [LogFile]),
+ HandlerConfig = #{config => #{file=>LogFile},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])},
+ %% no formatter!
+ logger:add_handler(Name, logger_disk_log_h, HandlerConfig),
+ Pid = whereis(h_proc_name(Name)),
+ true = is_pid(Pid),
+ H = logger:get_handler_ids(),
+ true = lists:member(Name,H),
+
+ %% Formatter is added automatically
+ {ok,#{formatter:={logger_formatter,_}}} = logger:get_handler_config(Name),
+ logger:notice(M1=?msg,?domain),
+ Got1 = try_match_file(?log_no(LogFile,1),"[0-9\\+\\-T:\\.]* notice: "++M1,5000),
+
+ ok = logger:set_handler_config(Name,formatter,{nonexistingmodule,#{}}),
+ logger:notice(M2=?msg,?domain),
+ Got2 = try_match_file(?log_no(LogFile,1),
+ escape(Got1)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M2,
+ 5000),
+
+ ok = logger:set_handler_config(Name,formatter,{?MODULE,crash}),
+ logger:notice(M3=?msg,?domain),
+ Got3 = try_match_file(?log_no(LogFile,1),
+ escape(Got2)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M3,
+ 5000),
+
+ ok = logger:set_handler_config(Name,formatter,{?MODULE,bad_return}),
+ logger:notice(?msg,?domain),
+ try_match_file(?log_no(LogFile,1),
+ escape(Got3)++"FORMATTER ERROR: bad return value",
+ 5000),
+
+ %% Check that handler is still alive and was never dead
+ Pid = whereis(h_proc_name(Name)),
+ H = logger:get_handler_ids(),
+ ok.
+
+formatter_fail(cleanup,_Config) ->
+ _ = logger:remove_handler(?FUNCTION_NAME),
+ ok.
+
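+%% Verify that invalid handler config and invalid overload protection
+%% levels are rejected, both at add_handler and at config update.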
+config_fail(_Config) ->
+ {error,{handler_not_added,{invalid_config,logger_disk_log_h,#{bad:=bad}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{config => #{bad => bad},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+
+ {error,{handler_not_added,{invalid_olp_levels,#{drop_mode_qlen:=1}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{config => #{drop_mode_qlen=>1}}),
+ {error,{handler_not_added,{invalid_olp_levels,#{sync_mode_qlen:=43,
+ drop_mode_qlen:=42}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{config => #{sync_mode_qlen=>43,
+ drop_mode_qlen=>42}}),
+ {error,{handler_not_added,{invalid_olp_levels,#{drop_mode_qlen:=43,
+ flush_qlen:=42}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{config => #{drop_mode_qlen=>43,
+ flush_qlen=>42}}),
+
+ ok = logger:add_handler(?MODULE,logger_disk_log_h,
+ #{filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ %% can't change the disk log options for a log already in use
+ {error,{illegal_config_change,logger_disk_log_h,_,_}} =
+ logger:update_handler_config(?MODULE,config,
+ #{max_no_files=>2}),
+ %% incorrect values of OP params
+ {ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
+ {error,{invalid_olp_levels,_}} =
+ logger:update_handler_config(?MODULE,config,
+ HConfig#{sync_mode_qlen=>100,
+ flush_qlen=>99}),
+ %% invalid name of config parameter
+ {error,{invalid_config,logger_disk_log_h,#{filesync_rep_int:=2000}}} =
+ logger:update_handler_config(?MODULE, config,
+ HConfig#{filesync_rep_int => 2000}),
+ ok.
+config_fail(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+bad_input(_Config) ->
+ {error,{badarg,{filesync,["BadType"]}}} =
+ logger_disk_log_h:filesync("BadType").
+
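+%% Verify the default configuration values, and that the config can be
+%% changed with set_handler_config/3 and update_handler_config/3,
+%% except for the write-once disk_log fields.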
+reconfig(Config) ->
+ Dir = ?config(priv_dir,Config),
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ #{%id := ?MODULE,
+ sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ cb_state :=
+ #{handler_state :=
+ #{log_opts := #{type := ?DISK_LOG_TYPE,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ file := DiskLogFile}},
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config :=
+ #{sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL,
+ file := DiskLogFile,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ type := wrap} = HConfig0}} =
+ logger:get_handler_config(?MODULE),
+
+ HConfig1 = HConfig0#{sync_mode_qlen => 1,
+ drop_mode_qlen => 2,
+ flush_qlen => 3,
+ burst_limit_enable => false,
+ burst_limit_max_count => 10,
+ burst_limit_window_time => 10,
+ overload_kill_enable => true,
+ overload_kill_qlen => 100000,
+ overload_kill_mem_size => 10000000,
+ overload_kill_restart_after => infinity,
+ filesync_repeat_interval => no_repeat},
+ ok = logger:set_handler_config(?MODULE, config, HConfig1),
+ #{%id := ?MODULE,
+ sync_mode_qlen := 1,
+ drop_mode_qlen := 2,
+ flush_qlen := 3,
+ burst_limit_enable := false,
+ burst_limit_max_count := 10,
+ burst_limit_window_time := 10,
+ overload_kill_enable := true,
+ overload_kill_qlen := 100000,
+ overload_kill_mem_size := 10000000,
+ overload_kill_restart_after := infinity,
+ cb_state := #{filesync_repeat_interval := no_repeat}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config:=HConfig1}} = logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{flush_qlen => ?FLUSH_QLEN}),
+ {ok,#{config:=C1}} = logger:get_handler_config(?MODULE),
+ ct:log("C1: ~p",[C1]),
+ C1 = HConfig1#{flush_qlen => ?FLUSH_QLEN},
+
+ ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C2}} = logger:get_handler_config(?MODULE),
+ ct:log("C2: ~p",[C2]),
+ C2 = HConfig0#{sync_mode_qlen => 1},
+
+ ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}),
+ {ok,#{config:=C3}} = logger:get_handler_config(?MODULE),
+ ct:log("C3: ~p",[C3]),
+ C3 = HConfig0#{drop_mode_qlen => 100},
+
+ ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C4}} = logger:get_handler_config(?MODULE),
+ ct:log("C4: ~p",[C4]),
+ C4 = HConfig0#{sync_mode_qlen => 1,
+ drop_mode_qlen => 100},
+
+ ok = logger:remove_handler(?MODULE),
+
+ File = filename:join(Dir, "logfile"),
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()},
+ config=>
+ #{type => halt,
+ max_no_files => 1,
+ max_no_bytes => 1024,
+ file => File}}),
+ #{cb_state :=
+ #{handler_state :=
+ #{log_opts := #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}}}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config :=
+ #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}=HaltHConfig} = Config2} =
+ logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, level, notice),
+ {ok,C5} = logger:get_handler_config(?MODULE),
+ ct:log("C5: ~p",[C5]),
+ C5 = Config2#{level => notice},
+
+ ok = logger:set_handler_config(?MODULE, level, info),
+ {ok,C6} = logger:get_handler_config(?MODULE),
+ ct:log("C6: ~p",[C6]),
+ C6 = Config2#{level => info},
+
+    %% You are not allowed to actively set the write-once fields
+    %% (type, max_no_files, max_no_bytes, file) at runtime.
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{type=>wrap}),
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{max_no_files=>2}),
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{max_no_bytes=>2048}),
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{file=>"otherfile.log"}),
+ {ok,C7} = logger:get_handler_config(?MODULE),
+ ct:log("C7: ~p",[C7]),
+ C7 = C6,
+
+    %% ... but if you don't specify the write-once fields, then
+    %% set_handler_config shall NOT reset them to their default values
+ ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+ {ok,#{config:=C8}} = logger:get_handler_config(?MODULE),
+ ct:log("C8: ~p",[C8]),
+ C8 = HaltHConfig#{sync_mode_qlen=>1},
+ ok.
+
+reconfig(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
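+%% Verify filesync behaviour: automatic sync, sync when the handler goes
+%% idle, and repeated filesync at a configured interval.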
+sync(Config) ->
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ Log = lists:concat([File,".1"]),
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{config => #{file => File},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,nl}}),
+
+ start_tracer([{logger_disk_log_h,disk_log_write,3},
+ {disk_log,sync,1}],
+ [{logger_disk_log_h,disk_log_write,<<"first\n">>},
+ {disk_log,sync}]),
+
+ logger:notice("first", ?domain),
+ %% wait for automatic disk_log_sync
+ check_tracer(?FILESYNC_REPEAT_INTERVAL*2),
+
+    %% check that if no repeated filesync is active,
+    %% a disk_log_sync is still performed when the handler goes idle
+ {ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
+ HConfig1 = HConfig#{filesync_repeat_interval => no_repeat},
+ ok = logger:update_handler_config(?MODULE, config, HConfig1),
+ no_repeat = maps:get(filesync_repeat_interval,
+ maps:get(cb_state,logger_olp:info(h_proc_name()))),
+
+ start_tracer([{logger_disk_log_h,disk_log_write,3},
+ {disk_log,sync,1}],
+ [{logger_disk_log_h,disk_log_write,<<"second\n">>},
+ {disk_log,sync},
+ {logger_disk_log_h,disk_log_write,<<"third\n">>},
+ {disk_log,sync}]),
+
+ logger:notice("second", ?domain),
+ timer:sleep(?IDLE_DETECT_TIME*2),
+ logger:notice("third", ?domain),
+ %% wait for automatic disk_log_sync
+ check_tracer(?IDLE_DETECT_TIME*2),
+
+ try_read_file(Log, {ok,<<"first\nsecond\nthird\n">>}, 1000),
+
+ %% switch repeated filesync on and verify that the looping works
+ SyncInt = 1000,
+ WaitT = 4500,
+ OneSync = {logger_h_common,handle_cast,repeated_filesync},
+ %% receive 1 repeated_filesync per sec
+ start_tracer([{{logger_h_common,handle_cast,2},
+ [{[repeated_filesync,'_'],[],[{message,{caller}}]}]}],
+ [OneSync || _ <- lists:seq(1, trunc(WaitT/SyncInt))]),
+
+ HConfig2 = HConfig#{filesync_repeat_interval => SyncInt},
+ ok = logger:update_handler_config(?MODULE, config, HConfig2),
+
+ SyncInt = maps:get(filesync_repeat_interval,
+ maps:get(cb_state,logger_olp:info(h_proc_name()))),
+ timer:sleep(WaitT),
+ HConfig3 = HConfig#{filesync_repeat_interval => no_repeat},
+ ok = logger:update_handler_config(?MODULE, config, HConfig3),
+ check_tracer(100),
+ ok.
+sync(cleanup,_Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
+
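+%% Verify that the handler receives wrap notifications from disk_log
+%% when a wrap log switches file.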
+disk_log_wrap(Config) ->
+ Get = fun(Key, PL) -> proplists:get_value(Key, PL) end,
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ ct:pal("Log = ~p", [File]),
+ MaxFiles = 3,
+ MaxBytes = 5,
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()},
+ config=>
+ #{type => wrap,
+ max_no_files => MaxFiles,
+ max_no_bytes => MaxBytes,
+ file => File}}),
+ Info = disk_log:info(?MODULE),
+ {File,wrap,{MaxBytes,MaxFiles},1} =
+ {Get(file,Info),Get(type,Info),Get(size,Info),Get(current_file,Info)},
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(h_proc_name()), [c]),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []),
+
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,MaxBytes)],
+ ct:pal("String = ~p (~w)", [Text, erts_debug:size(Text)]),
+ %% fill first file
+ lists:foreach(fun(N) ->
+ Log = lists:concat([File,".",N]),
+ logger:notice(Text, ?domain),
+ wait_until_written(Log),
+ ct:pal("N = ~w",
+ [N = Get(current_file,
+ disk_log:info(?MODULE))])
+ end, lists:seq(1,MaxFiles)),
+
+ %% wait for trace messages
+ timer:sleep(1000),
+ dbg:stop_clear(),
+ Received = lists:flatmap(fun({trace,_M,handle_info,
+ [_,{disk_log,_Node,_Name,What},_]}) ->
+ [{trace,What}];
+ ({log,_}) ->
+ []
+ end, test_server:messages_get()),
+ ct:pal("Trace =~n~p", [Received]),
+ Received = [{trace,{wrap,0}} || _ <- lists:seq(1,MaxFiles-1)],
+ ok.
+
+disk_log_wrap(cleanup,_Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
+
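+%% Verify that the handler receives a 'full' notification when a halt
+%% log reaches its maximum size.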
+disk_log_full(Config) ->
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ ct:pal("Log = ~p", [File]),
+ MaxBytes = 50,
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()},
+ config=>
+ #{type => halt,
+ max_no_files => 1,
+ max_no_bytes => MaxBytes,
+ file => File}}),
+
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(h_proc_name()), [c]),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []),
+
+ NoOfChars = 5,
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,NoOfChars)],
+ [logger:notice(Text, ?domain) || _ <- lists:seq(1,trunc(MaxBytes/NoOfChars)+1)],
+
+ %% wait for trace messages
+ timer:sleep(2000),
+ dbg:stop_clear(),
+ Received = lists:flatmap(fun({trace,_M,handle_info,
+ [_,{disk_log,_Node,_Name,What},_]}) ->
+ [{trace,What}];
+ ({log,_}) ->
+ []
+ end, test_server:messages_get()),
+ ct:pal("Trace =~n~p", [Received]),
+
+    %% The tail here could be an error_status notification if the
+    %% last write was synchronous, but in most cases it will not be.
+ [{trace,full}|_] = Received,
+ %% [{trace,full},
+ %% {trace,{error_status,{error,{full,_}}}}] = Received,
+ ok.
+disk_log_full(cleanup, _Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
+
+disk_log_events(_Config) ->
+ Node = node(),
+ Log = ?MODULE,
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ %% Events copied from disk_log API
+ Events =
+ [{disk_log, Node, Log, {wrap, 0}},
+ {disk_log, Node, Log, {truncated, 0}},
+ {disk_log, Node, Log, {read_only, 42}},
+ {disk_log, Node, Log, {blocked_log, 42}},
+ {disk_log, Node, Log, {format_external, 42}},
+ {disk_log, Node, Log, full},
+ {disk_log, Node, Log, {error_status, ok}}],
+
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(h_proc_name()), [c]),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []),
+
+ [whereis(h_proc_name()) ! E || E <- Events],
+ %% wait for trace messages
+ timer:sleep(2000),
+ dbg:stop_clear(),
+ Received = lists:map(fun({trace,_M,handle_info,
+ [_,Got,_]}) -> Got
+ end, test_server:messages_get()),
+ ct:pal("Trace =~n~p", [Received]),
+ NoOfEvents = length(Events),
+ NoOfEvents = length(Received),
+    lists:foreach(fun(Event) ->
+                          true = lists:member(Event, Received)
+                  end, Events),
+ ok.
+disk_log_events(cleanup, _Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
+
+write_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ Log = lists:concat([File,".1"]),
+ ct:pal("Log = ~p", [Log]),
+
+ Node = start_h_on_new_node(Config, File),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,ok]),
+ HState = rpc:call(Node, logger_olp, info, [h_proc_name(?STANDARD_HANDLER)]),
+ LogOpts = maps:get(log_opts,
+ maps:get(handler_state,
+ maps:get(cb_state,HState))),
+ ct:pal("LogOpts = ~p", [LogOpts]),
+
+ %% ?check and ?check_no_log in this test only check for internal log events
+ ok = log_on_remote_node(Node, "Logged1"),
+ rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log, % no internal log when write ok
+
+ SyncRepInt = case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of
+ true -> 5500;
+ false -> ?FILESYNC_REPEAT_INTERVAL + 500
+ end,
+
+ try_read_file(Log, {ok,<<"Logged1\n">>}, SyncRepInt),
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,{error,no_such_log}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ %% this should have caused an internal log
+ ?check({error,{?STANDARD_HANDLER,log,LogOpts,{error,no_such_log}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log, % but don't log same error twice
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,
+ {error,{full,?STANDARD_HANDLER}}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ %% this was a different error, so it should be logged
+ ?check({error,{?STANDARD_HANDLER,log,LogOpts,
+ {error,{full,?STANDARD_HANDLER}}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log, % no internal log when write ok
+ try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, SyncRepInt),
+ ok.
+write_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+
+sync_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ FileName = lists:concat([?MODULE,"_",?FUNCTION_NAME]),
+ File = filename:join(Dir, FileName),
+
+ Node = start_h_on_new_node(Config, File),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]),
+ HState = rpc:call(Node, logger_olp, info, [h_proc_name(?STANDARD_HANDLER)]),
+ LogOpts = maps:get(log_opts, maps:get(handler_state,
+ maps:get(cb_state,HState))),
+
+ SyncInt = 500,
+ ok = rpc:call(Node, logger, update_handler_config,
+ [?STANDARD_HANDLER, config,
+ #{filesync_repeat_interval => SyncInt}]),
+ Info = rpc:call(Node, logger_olp, info, [h_proc_name(?STANDARD_HANDLER)]),
+ SyncInt = maps:get(filesync_repeat_interval, maps:get(cb_state, Info)),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_sync,{error,no_such_log}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,filesync,LogOpts,{error,no_such_log}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result,
+ [disk_log_sync,{error,{blocked_log,?STANDARD_HANDLER}}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,filesync,LogOpts,
+ {error,{blocked_log,?STANDARD_HANDLER}}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ ?check_no_log,
+ ok.
+sync_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
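+%% Start a new node with a default logger_disk_log_h handler logging
+%% to File, and install this suite's formatter on the standard handler.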
+start_h_on_new_node(Config, File) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(
+ Config,
+ [{logger,[{handler,default,logger_disk_log_h,
+ #{ config => #{ file => File }}}]}]),
+ ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
+ {?MODULE,nl}]),
+ Node.
+
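+%% Log a notice event on Node from a spawned process that uses the
+%% remote 'user' process as its group leader.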
+log_on_remote_node(Node,Msg) ->
+ _ = spawn_link(Node,
+ fun() -> erlang:group_leader(whereis(user),self()),
+ logger:notice(Msg)
+ end),
+ ok.
+
+%% functions for test hook macros to be called by rpc
+set_internal_log(_Mod, _Func) ->
+ ?set_internal_log({_Mod,_Func}).
+set_result(_Op, _Result) ->
+ ?set_result(_Op, _Result).
+set_defaults() ->
+ ?set_defaults().
+
+%% internal log function that sends the term to the test case process
+internal_log(Type, Term) ->
+ [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester),
+ Tester ! {log,{Type,Term}},
+ logger:internal_log(Type, Term),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Overload protection tests
+
+op_switch_to_sync(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 500,
+ NewHConfig =
+ HConfig#{config => DLHConfig#{sync_mode_qlen => 2,
+ drop_mode_qlen => NumOfReqs+1,
+ flush_qlen => 2*NumOfReqs,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Lines = count_lines(Log),
+ NumOfReqs = Lines,
+ ok = file_delete(Log),
+ ok.
+op_switch_to_sync(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_drop() ->
+ [{timetrap,{seconds,180}}].
+op_switch_to_drop(Config) ->
+ Test =
+ fun() ->
+ {Log,HConfig,DLHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 300,
+ Procs = 2,
+ Bursts = 10,
+ NewHConfig =
+ HConfig#{config =>
+ DLHConfig#{sync_mode_qlen => 1,
+ drop_mode_qlen => 2,
+ flush_qlen => Procs*NumOfReqs*Bursts,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+              %% It sometimes happens that the handler receives the
+              %% requests at a slow enough pace that dropping never
+              %% occurs. Therefore, let's generate a number of bursts
+              %% to increase the chance of message buildup.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) ||
+ _ <- lists:seq(1, Bursts)],
+ Logged = count_lines(Log),
+ ok = stop_handler(?MODULE),
+ ct:pal("Number of messages dropped = ~w (~w)",
+ [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]),
+ true = (Logged < (Procs*NumOfReqs*Bursts)),
+ true = (Logged > 0),
+ _ = file_delete(Log),
+ ok
+ end,
+ %% As it's tricky to get the timing right in only one go, we perform the
+ %% test repeatedly, hoping that will generate a successful result.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
+op_switch_to_drop(cleanup, _Config) ->
+ _ = stop_handler(?MODULE).
+
+op_switch_to_flush() ->
+ [{timetrap,{minutes,3}}].
+op_switch_to_flush(Config) ->
+ Test =
+ fun() ->
+ {Log,HConfig,DLHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ %% NOTE: it's important that both async and sync
+ %% requests have been queued when the flush happens
+ %% (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{config =>
+ DLHConfig#{sync_mode_qlen => 2,
+ %% disable drop mode
+ drop_mode_qlen => 300,
+ flush_qlen => 300,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 1500,
+ Procs = 10,
+ Bursts = 10,
+ %% It sometimes happens that the handler either gets
+ %% the requests in a slow enough pace so that flushing
+ %% never occurs, or it gets all messages at once,
+ %% causing all messages to get flushed (no dropping of
+              %% sync messages gets tested). Therefore, let's
+ %% generate a number of bursts to increase the chance
+ %% of message buildup in some random fashion.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) ||
+ _ <- lists:seq(1,Bursts)],
+ Logged = count_lines(Log),
+              ok = stop_handler(?MODULE),
+ ct:pal("Number of messages flushed/dropped = ~w (~w)",
+ [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]),
+ true = (Logged < (NumOfReqs*Procs*Bursts)),
+ true = (Logged > 0),
+ _ = file_delete(Log),
+ ok
+ end,
+ %% As it's tricky to get the timing right in only one go, we perform the
+ %% test repeatedly, hoping that will generate a successful result.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
+op_switch_to_flush(cleanup, _Config) ->
+ _ = stop_handler(?MODULE).
+
+
+limit_burst_disabled(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config => DLHConfig#{burst_limit_enable => false,
+ burst_limit_max_count => 10,
+ burst_limit_window_time => 2000,
+ drop_mode_qlen => 200,
+ flush_qlen => 300}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ NumOfReqs = Logged,
+ ok = file_delete(Log),
+ ok.
+limit_burst_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_one(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ NewHConfig =
+ HConfig#{config => DLHConfig#{burst_limit_enable => true,
+ burst_limit_max_count => ReqLimit,
+ burst_limit_window_time => 2000,
+ drop_mode_qlen => 200,
+ flush_qlen => 300}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ReqLimit = Logged,
+ ok = file_delete(Log),
+ ok.
+limit_burst_enabled_one(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_period(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ BurstTWin = 1000,
+ NewHConfig =
+ HConfig#{config => DLHConfig#{burst_limit_enable => true,
+ burst_limit_max_count => ReqLimit,
+ burst_limit_window_time => BurstTWin,
+ drop_mode_qlen => 20000,
+ flush_qlen => 20001}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+
+ Windows = 3,
+ Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ true = (Logged > (ReqLimit*Windows)) andalso
+ (Logged < (ReqLimit*(Windows+2))),
+ ok = file_delete(Log),
+ ok.
+limit_burst_enabled_period(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+kill_disabled(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config=>DLHConfig#{overload_kill_enable=>false,
+ overload_kill_qlen=>10,
+ overload_kill_mem_size=>100}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file_delete(Log),
+ true = is_pid(whereis(h_proc_name())),
+ ok.
+kill_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+qlen_kill_new(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(h_proc_name()),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+ NewHConfig =
+ HConfig#{config =>
+ DLHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_mem_size=>Mem0+50000,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 4,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ file_delete(Log),
+ {ok,_} = wait_for_process_up(RestartAfter * 3),
+ ok
+ after
+ 5000 ->
+ Info = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+qlen_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+mem_kill_new(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(h_proc_name()),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+ NewHConfig =
+ HConfig#{config =>
+ DLHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>50000,
+ overload_kill_mem_size=>Mem0+500,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 4,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ file_delete(Log),
+ {ok,_} = wait_for_process_up(RestartAfter * 3),
+ ok
+ after
+ 5000 ->
+ Info = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+mem_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+restart_after() ->
+ [{timetrap,{minutes,2}}].
+restart_after(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig1 =
+ HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_restart_after=>infinity}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig1),
+ MRef1 = erlang:monitor(process, whereis(h_proc_name())),
+ %% kill handler
+ send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
+ receive
+ {'DOWN', MRef1, _, _, _Reason1} ->
+ file_delete(Log),
+ error = wait_for_process_up(?OVERLOAD_KILL_RESTART_AFTER * 3),
+ ok
+ after
+ 5000 ->
+ Info1 = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info1]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+
+ {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+ NewHConfig2 =
+ HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig2),
+ Pid0 = whereis(h_proc_name()),
+ MRef2 = erlang:monitor(process, Pid0),
+ %% kill handler
+ send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
+ receive
+ {'DOWN', MRef2, _, _, _Reason2} ->
+ file_delete(Log),
+ {ok,Pid1} = wait_for_process_up(RestartAfter * 3),
+ false = (Pid1 == Pid0),
+ ok
+ after
+ 5000 ->
+ Info2 = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info2]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+ ok.
+restart_after(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% send handler requests (sync, info, reset, change_config)
+%% during high load to verify that sync, dropping and flushing are
+%% handled correctly.
+handler_requests_under_load() ->
+ [{timetrap,{minutes,5}}].
+handler_requests_under_load(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config => DLHConfig#{sync_mode_qlen => 2,
+ drop_mode_qlen => 1000,
+ flush_qlen => 2000,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ Pid = spawn_link(
+ fun() -> send_requests(1,[{logger_disk_log_h,filesync,[?MODULE],[]},
+ {logger_olp,info,[h_proc_name()],[]},
+ {logger_olp,reset,[h_proc_name()],[]},
+ {logger,update_handler_config,
+ [?MODULE, config,
+ #{overload_kill_enable => false}],
+ []}])
+ end),
+ Procs = 100,
+ Sent = Procs * send_burst({n,5000}, {spawn,Procs,10}, {chars,79}, notice),
+ Pid ! {self(),finish},
+ ReqResult = receive {Pid,Result} -> Result end,
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ FindError = fun(Res) ->
+ [E || E <- Res,
+ is_tuple(E) andalso (element(1,E) == error)]
+ end,
+ Errors = [{Func,FindError(Res)} || {_,Func,_,Res} <- ReqResult],
+ NoOfReqs = lists:foldl(fun({_,_,_,Res}, N) -> N + length(Res) end,
+ 0, ReqResult),
+ ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]),
+ ok = file_delete(Log).
+handler_requests_under_load(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+send_requests(TO, Reqs = [{Mod,Func,Args,Res}|Rs]) ->
+ receive
+ {From,finish} ->
+ From ! {self(),Reqs}
+ after
+ TO ->
+ Result = apply(Mod,Func,Args),
+ send_requests(TO, Rs ++ [{Mod,Func,Args,[Result|Res]}])
+ end.
+
+%%%-----------------------------------------------------------------
+%%%
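+%% Add a logger_disk_log_h handler named Name, logging to a wrap log
+%% with a single large file under priv_dir. Returns the log file name,
+%% the full handler configuration, and its config sub-map.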
+start_handler(Name, FuncName, Config) ->
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, FuncName),
+ ct:pal("Logging to ~tp", [File]),
+ FullFile = lists:concat([File,".1"]),
+ _ = file_delete(FullFile),
+ ok = logger:add_handler(Name,
+ logger_disk_log_h,
+ #{config=>#{file => File,
+ max_no_files => 1,
+ max_no_bytes => 100000000},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ formatter=>{?MODULE,op}}),
+ {ok,HConfig = #{config := DLHConfig}} = logger:get_handler_config(Name),
+ {FullFile,HConfig,DLHConfig}.
+
+stop_handler(Name) ->
+ ct:pal("Stopping handler ~p!", [Name]),
+ logger:remove_handler(Name).
+
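+%% Send a burst of log events of the given class, with a random text
+%% of Sz characters: either N events ({n,N}), sequentially or from
+%% spawned processes, or sequentially for T milliseconds ({t,T}).
+%% Returns the number of events sent per sending process.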
+send_burst(NorT, Type, {chars,Sz}, Class) ->
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)],
+ case NorT of
+ {n,N} ->
+ %% process_flag(priority, high),
+ send_n_burst(N, Type, Text, Class),
+ %% process_flag(priority, normal),
+ N;
+ {t,T} ->
+ ct:pal("Sending messages sequentially for ~w ms", [T]),
+ T0 = erlang:monotonic_time(millisecond),
+ send_t_burst(T0, T, Text, Class, 0)
+ end.
+
+send_n_burst(0, _, _Text, _Class) ->
+ ok;
+send_n_burst(N, seq, Text, Class) ->
+ ok = logger:Class(Text, ?domain),
+ send_n_burst(N-1, seq, Text, Class);
+send_n_burst(N, {spawn,Ps,TO}, Text, Class) ->
+ ct:pal("~w processes each sending ~w messages", [Ps,N]),
+ MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end,
+ monitor(process,spawn_link(per_proc_fun(N,Text,Class,X)))
+ end || X <- lists:seq(1,Ps)],
+ lists:foreach(fun(MRef) ->
+ receive
+ {'DOWN', MRef, _, _, _} ->
+ ok
+ end
+ end, MRefs),
+ ct:pal("Message burst sent", []),
+ ok.
+
+send_t_burst(T0, T, Text, Class, N) ->
+ T1 = erlang:monotonic_time(millisecond),
+ if (T1-T0) > T ->
+ N;
+ true ->
+ ok = logger:Class(Text, ?domain),
+ send_t_burst(T0, T, Text, Class, N+1)
+ end.
+
+per_proc_fun(N,Text,Class,X) when X rem 2 == 0 ->
+ fun() ->
+ process_flag(priority,high),
+ send_n_burst(N, seq, Text, Class)
+ end;
+per_proc_fun(N,Text,Class,_) ->
+ fun() ->
+ send_n_burst(N, seq, Text, Class)
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Formatter callback
+%%% Used to send the formatted string back to the test case
+%%% process, so that it can check for logged events.
+format(_,bad_return) ->
+ bad_return;
+format(_,crash) ->
+ erlang:error(formatter_crashed);
+format(#{msg:={report,R},meta:=#{report_cb:=Fun}}=Log,Config) ->
+ format(Log#{msg=>Fun(R)},Config);
+format(#{msg:={string,String0}},no_nl) ->
+ String = unicode:characters_to_list(String0),
+ String;
+format(#{msg:={string,String0}},nl) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={string,String0}},op) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={report,#{label:={supervisor,progress}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={gen_server,terminate}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={proc_lib,crash}}}},op) ->
+ "";
+format(#{msg:={F,A}},OpOrPid) when is_list(F), is_list(A) ->
+ String = lists:flatten(io_lib:format(F,A)),
+ if is_pid(OpOrPid) -> OpOrPid ! {log,String};
+ true -> ok
+ end,
+ String++"\n";
+format(#{msg:={string,String0}},Pid) ->
+ String = unicode:characters_to_list(String0),
+ Pid ! {log,String},
+ String++"\n";
+format(Msg,Tag) ->
+ Error = {unexpected_format,Msg,Tag},
+ erlang:display(Error),
+ exit(Error).
+
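+%% Add a logger_disk_log_h handler with the given log options merged
+%% into the handler config, and verify that the handler process and
+%% the disk log exist.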
+start_and_add(Name, Config, LogOpts) ->
+ HConfig = maps:get(config, Config, #{}),
+ HConfig1 = maps:merge(HConfig, LogOpts),
+ Config1 = Config#{config=>HConfig1},
+ ct:pal("Adding handler ~w with: ~p", [Name,Config1]),
+ ok = logger:add_handler(Name, logger_disk_log_h, Config1),
+ Pid = whereis(h_proc_name(Name)),
+ true = is_pid(Pid),
+ Name = proplists:get_value(name, disk_log:info(Name)),
+ ok.
+
+remove_and_stop(Handler) ->
+ ok = logger:remove_handler(Handler),
+ timer:sleep(500),
+ undefined = whereis(h_proc_name(Handler)),
+ ok.
+
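+%% Poll FileName every 500 ms, for at most Time ms, until its content
+%% matches Expected; otherwise fail with the last unexpected content.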
+try_read_file(FileName, Expected, Time) ->
+ try_read_file(FileName, Expected, Time, undefined).
+
+try_read_file(FileName, Expected, Time, _) when Time > 0 ->
+ case file:read_file(FileName) of
+ Expected ->
+ ok;
+ Error = {error,_Reason} ->
+ erlang:error(Error);
+ SomethingElse ->
+ ct:pal("try_read_file read unexpected: ~p~n", [SomethingElse]),
+ timer:sleep(500),
+ try_read_file(FileName, Expected, Time-500, SomethingElse)
+ end;
+
+try_read_file(_, _, _, Incorrect) ->
+ ct:pal("try_read_file got incorrect pattern: ~p~n", [Incorrect]),
+ erlang:error({error,not_matching_pattern,Incorrect}).
+
+try_match_file(FileName, Pattern, Time) ->
+ try_match_file(FileName, Pattern, Time, <<>>).
+
+try_match_file(FileName, Pattern, Time, _) when Time > 0 ->
+ case file:read_file(FileName) of
+ {ok, Bin} ->
+ case re:run(Bin,Pattern,[{capture,none}]) of
+ match ->
+ unicode:characters_to_list(Bin);
+ _ ->
+ timer:sleep(100),
+ try_match_file(FileName, Pattern, Time-100, Bin)
+ end;
+ Error ->
+ erlang:error(Error)
+ end;
+try_match_file(_,Pattern,_,Incorrect) ->
+ ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n",
+ [Pattern,Incorrect]),
+ erlang:error({error,not_matching_pattern,Pattern,Incorrect}).
+
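+%% Wait until the size of File has stabilized, then count the logged
+%% lines (handler info lines are not counted).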
+count_lines(File) ->
+ wait_until_written(File),
+ count_lines1(File).
+
+wait_until_written(File) ->
+ wait_until_written(File, -1).
+
+wait_until_written(File, Sz) ->
+ timer:sleep(2000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ timer:sleep(1000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ ok;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end.
+
+count_lines1(File) ->
+ {_,Dev} = file:open(File, [read]),
+ Lines = count_lines2(Dev, 0),
+ file:close(Dev),
+ Lines.
+
+count_lines2(Dev, LC) ->
+ case file:read_line(Dev) of
+ {ok,"Handler logger_disk_log_h_SUITE " ++_} ->
+ %% Not counting handler info
+ count_lines2(Dev,LC);
+ {ok,_} ->
+ count_lines2(Dev,LC+1);
+ eof -> LC
+ end.
+
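+%% Run Fun up to N times, sleeping 5 seconds between attempts, until
+%% it returns without an exception. Returns {ok,{FailedAttempts,Result}}
+%% or {fails,FirstReason}.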
+repeat_until_ok(Fun, N) ->
+ repeat_until_ok(Fun, 0, N, undefined).
+
+repeat_until_ok(_Fun, Stop, Stop, Reason) ->
+ {fails,Reason};
+
+repeat_until_ok(Fun, C, Stop, FirstReason) ->
+ if C > 0 -> timer:sleep(5000);
+ true -> ok
+ end,
+ try Fun() of
+ Result ->
+ {ok,{C,Result}}
+ catch
+ _:Reason:Stack ->
+ ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]),
+ if FirstReason == undefined ->
+ repeat_until_ok(Fun, C+1, Stop, {Reason,Stack});
+ true ->
+ repeat_until_ok(Fun, C+1, Stop, FirstReason)
+ end
+ end.
+
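+%% Trace calls in the handler process and let check_tracer/1 verify
+%% that exactly the Expected calls are made, in order.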
+start_tracer(Trace,Expected) ->
+ Pid = self(),
+ dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
+ dbg:p(h_proc_name(),[c]),
+ tpl(Trace),
+ ok.
+
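+%% Set local call trace patterns; skip the test case if a pattern does
+%% not match exactly one function.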
+tpl([{M,F,A}|Trace]) ->
+ tpl([{{M,F,A},c}|Trace]);
+tpl([{{M,F,A},MS}|Trace]) ->
+ {ok,Match} = dbg:tpl(M,F,A,MS),
+ case lists:keyfind(matched,1,Match) of
+ {_,_,1} ->
+ ok;
+ _ ->
+ dbg:stop_clear(),
+ throw({skip,"Can't trace "++atom_to_list(M)++":"++
+ atom_to_list(F)++"/"++integer_to_list(A)})
+ end,
+ tpl(Trace);
+tpl([]) ->
+ ok.
+
+tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]},Caller},
+ {Pid,[{Mod,Func,Op}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Op},Caller);
+tracer({trace,_,call,{Mod=logger_disk_log_h,Func=disk_log_write,[_,_,Data]},Caller},
+       {Pid,[{Mod,Func,Data}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Data},Caller);
+tracer({trace,_,call,{Mod,Func,_},Caller}, {Pid,[{Mod,Func}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func},Caller);
+tracer({trace,_,call,Call,Caller}, {Pid,Expected}) ->
+ ct:log("Tracer got unexpected: ~p~nCaller: ~p~nExpected: ~p~n",[Call,Caller,Expected]),
+ Pid ! {tracer_got_unexpected,Call,Expected},
+ {Pid,Expected}.
+
+maybe_tracer_done(Pid,[],Got,Caller) ->
+ ct:log("Tracer got: ~p~nCaller: ~p~n",[Got,Caller]),
+ Pid ! tracer_done;
+maybe_tracer_done(Pid,Expected,Got,Caller) ->
+ ct:log("Tracer got: ~p~nCaller: ~p~n",[Got,Caller]),
+ {Pid,Expected}.
+
+check_tracer(T) ->
+ receive
+ tracer_done ->
+ dbg:stop_clear(),
+ ok;
+ {tracer_got_unexpected,Got,Expected} ->
+ dbg:stop_clear(),
+ ct:fail({tracer_got_unexpected,Got,Expected})
+ after T ->
+ dbg:stop_clear(),
+ ct:fail({timeout,tracer})
+ end.
+
+escape([$+|Rest]) ->
+ [$\\,$+|escape(Rest)];
+escape([H|T]) ->
+ [H|escape(T)];
+escape([]) ->
+ [].
+
+h_proc_name() ->
+ h_proc_name(?MODULE).
+h_proc_name(Name) ->
+ list_to_atom(lists:concat([logger_disk_log_h,"_",Name])).
+
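+%% Poll every 500 ms, for up to roughly T ms, until the handler
+%% process is registered and its handler configuration is available.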
+wait_for_process_up(T) ->
+ wait_for_process_up(?MODULE, h_proc_name(), T).
+
+wait_for_process_up(Name, RegName, T) ->
+ N = (T div 500) + 1,
+ wait_for_process_up1(Name, RegName, N).
+
+wait_for_process_up1(_Name, _RegName, 0) ->
+ error;
+wait_for_process_up1(Name, RegName, N) ->
+ timer:sleep(500),
+ case whereis(RegName) of
+ Pid when is_pid(Pid) ->
+ case logger:get_handler_config(Name) of
+ {ok,_} ->
+ %% ct:pal("Process ~p up (~p tries left)",[Name,N]),
+ {ok,Pid};
+ _ ->
+ wait_for_process_up1(Name, RegName, N-1)
+ end;
+ undefined ->
+ %% ct:pal("Waiting for process ~p (~p tries left)",[Name,N]),
+ wait_for_process_up1(Name, RegName, N-1)
+ end.
+
+file_delete(Log) ->
+ file:delete(Log).
diff --git a/lib/kernel/test/logger_env_var_SUITE.erl b/lib/kernel/test/logger_env_var_SUITE.erl
new file mode 100644
index 0000000000..9d2ad11be8
--- /dev/null
+++ b/lib/kernel/test/logger_env_var_SUITE.erl
@@ -0,0 +1,697 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_env_var_SUITE).
+
+-compile(export_all).
+
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-import(logger_test_lib,[setup/2,log/3,sync_and_read/3]).
+
+suite() ->
+ [{timetrap,{seconds,60}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+groups() ->
+ [{error_logger,[],[error_logger_tty,
+ error_logger_tty_sasl_compatible,
+ error_logger_false,
+ error_logger_false_progress,
+ error_logger_false_sasl_compatible,
+ error_logger_silent,
+ error_logger_silent_sasl_compatible,
+ error_logger_file]},
+ {logger,[],[logger_file,
+ logger_file_sasl_compatible,
+ logger_file_log_progress,
+ logger_file_no_filter,
+ logger_file_no_filter_level,
+ logger_file_formatter,
+ logger_filters,
+ logger_filters_stop,
+ logger_module_level,
+ logger_disk_log,
+ logger_disk_log_formatter,
+ logger_undefined,
+ logger_many_handlers_default_first,
+ logger_many_handlers_default_last,
+ logger_many_handlers_default_last_broken_filter,
+ logger_proxy
+ ]},
+ {bad,[],[bad_error_logger,
+ bad_level,
+ bad_sasl_compatibility]}].
+
+all() ->
+ [default,
+ default_sasl_compatible,
+ sasl_compatible_false,
+ sasl_compatible_false_no_progress,
+ sasl_compatible,
+ all_logger_level,
+ {group,bad},
+ {group,error_logger},
+ {group,logger}
+ ].
+
+default(Config) ->
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} = setup(Config,[]),
+ notice = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+default_sasl_compatible(Config) ->
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} =
+ setup(Config,[{logger_sasl_compatible,true}]),
+ info = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ true = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_tty(Config) ->
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} =
+ setup(Config,[{error_logger,tty}]),
+ notice = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_tty_sasl_compatible(Config) ->
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} =
+ setup(Config,
+ [{error_logger,tty},
+ {logger_sasl_compatible,true}]),
+ info = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ true = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_false(Config) ->
+ {ok,#{handlers:=Hs,primary:=P,module_levels:=ML},_Node} =
+ setup(Config,
+ [{error_logger,false},
+ {logger_level,notice}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ #{module:=logger_simple_h} = SimpleC = find(simple,Hs),
+ all = maps:get(level,SimpleC),
+ notice = maps:get(level,P),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_false_progress(Config) ->
+ {ok,#{handlers:=Hs,primary:=P,module_levels:=ML},_Node} =
+ setup(Config,
+ [{error_logger,false},
+ {logger_level,notice}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ #{module:=logger_simple_h} = SimpleC = find(simple,Hs),
+ all = maps:get(level,SimpleC),
+ notice = maps:get(level,P),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_false_sasl_compatible(Config) ->
+ {ok,#{handlers:=Hs,primary:=P,module_levels:=ML},_Node} =
+ setup(Config,
+ [{error_logger,false},
+ {logger_level,notice},
+ {logger_sasl_compatible,true}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ #{module:=logger_simple_h} = SimpleC = find(simple,Hs),
+ all = maps:get(level,SimpleC),
+ info = maps:get(level,P),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,SimpleFilters),
+ true = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_silent(Config) ->
+ {ok,#{handlers:=Hs},_Node} = setup(Config,
+ [{error_logger,silent}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ ok.
+
+error_logger_silent_sasl_compatible(Config) ->
+ {ok,#{handlers:=Hs},_Node} = setup(Config,
+ [{error_logger,silent},
+ {logger_sasl_compatible,true}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ false = exists(simple,Hs),
+ true = exists(sasl,Hs),
+ ok.
+
+
+error_logger_file(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_,Node} = setup(Config,
+ [{error_logger,{file,Log}}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+ ok.
+
+
+logger_file(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{config=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+
+ notice = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+logger_file_sasl_compatible(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},Node}
+ = setup(Config,
+ [{logger_sasl_compatible,true},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{config=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+
+ info = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ true = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+logger_file_log_progress(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},Node}
+ = setup(Config,
+ [{logger_level,info},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{config=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 6,% progress in std logger
+ info),
+
+ info = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+logger_file_no_filter(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filter_default=>log,filters=>[],
+ config=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 6),% progress in std logger
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+
+ ok.
+
+logger_file_no_filter_level(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filters=>[],level=>error,
+ config=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0,% progress in std logger
+ error),% level
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ error = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+
+ ok.
+
+logger_file_formatter(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filters=>[],
+ formatter=>{logger_formatter,#{}},
+ config=>#{type=>{file,Log}}}}]}]),
+ check_single_log(Node,Log,
+ file,% dest
+ 6),% progress in std logger
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+
+ ok.
+
+logger_filters(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs,primary:=P},Node}
+ = setup(Config,
+ [{logger_level,info},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{config=>#{type=>{file,Log}}}},
+ {filters,log,[{stop_progress,{fun logger_filters:progress/2,stop}}]}
+ ]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0,% progress in std logger
+ info),
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ LoggerFilters = maps:get(filters,P),
+ true = lists:keymember(stop_progress,1,LoggerFilters),
+
+ ok.
+
+logger_filters_stop(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs,primary:=P},Node}
+ = setup(Config,
+ [{logger_level,info},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filters=>[],
+ config=>#{type=>{file,Log}}}},
+ {filters,stop,[{log_error,{fun logger_filters:level/2,{log,gt,info}}}]}
+ ]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0,% progress in std logger
+ info),
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ LoggerFilters = maps:get(filters,P),
+ true = lists:keymember(log_error,1,LoggerFilters),
+
+ ok.
+
+logger_module_level(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs,module_levels:=ModuleLevels},Node}
+ = setup(Config,
+ [{logger_level,info},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{config=>#{type=>{file,Log}}}},
+ {module_level,error,[supervisor]}
+ ]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 3,% progress in std logger
+ info),
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ [{supervisor,error}] = ModuleLevels,
+ ok.
+
+logger_disk_log(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_disk_log_h,
+ #{config=>#{file=>Log}}}]}]),
+ check_default_log(Node,Log,
+ disk_log,% dest
+ 0),% progress in std logger
+
+ #{module:=logger_disk_log_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+
+ ok.
+
+logger_disk_log_formatter(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_disk_log_h,
+ #{filters=>[],
+ formatter=>{logger_formatter,#{}},
+ config=>#{file=>Log}}}]}]),
+ check_single_log(Node,Log,
+ disk_log,% dest
+ 6),% progress in std logger
+
+ #{module:=logger_disk_log_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+
+ ok.
+
+logger_undefined(Config) ->
+ {ok,#{handlers:=Hs,primary:=P},_Node} =
+ setup(Config,[{logger,[{handler,?STANDARD_HANDLER,undefined}]}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ #{module:=logger_simple_h} = SimpleC = find(simple,Hs),
+ all = maps:get(level,SimpleC),
+ notice = maps:get(level,P),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters),
+ false = exists(sasl,Hs),
+ ok.
+
+
+%% Test that we can add multiple handlers with the default first
+logger_many_handlers_default_first(Config) ->
+ LogErr = file(Config,logger_many_handlers_default_first_error),
+ LogInfo = file(Config,logger_many_handlers_default_first_info),
+
+ logger_many_handlers(
+ Config,[{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{level=>error,
+ filters=>[],
+ formatter=>{logger_formatter,#{}},
+ config=>#{type=>{file,LogErr}}}
+ },
+ {handler,info,logger_std_h,
+ #{level=>info,
+ filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}],
+ config=>#{type=>{file,LogInfo}}}
+ }
+ ]},
+ {logger_level,info}], LogErr, LogInfo, 6).
+
+%% Test that we can add multiple handlers with the default last
+logger_many_handlers_default_last(Config) ->
+ LogErr = file(Config,logger_many_handlers_default_last_error),
+ LogInfo = file(Config,logger_many_handlers_default_last_info),
+ logger_many_handlers(
+ Config,[{logger,
+ [{handler,info,logger_std_h,
+ #{level=>info,
+ filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}],
+ config=>#{type=>{file,LogInfo}}}
+ },
+ {handler,?STANDARD_HANDLER,logger_std_h,
+ #{level=>error,
+ filters=>[],
+ formatter=>{logger_formatter,#{}},
+ config=>#{type=>{file,LogErr}}}
+ }
+ ]},
+ {logger_level,info}], LogErr, LogInfo, 7).
+
+%% Check that we can handle an added handler that has a broken filter.
+%% This used to cause a deadlock.
+logger_many_handlers_default_last_broken_filter(Config) ->
+ LogErr = file(Config,logger_many_handlers_default_first_broken_filter_error),
+ LogInfo = file(Config,logger_many_handlers_default_first_broken_filter_info),
+
+ logger_many_handlers(
+ Config,[{logger,
+ [{handler,info,logger_std_h,
+ #{level=>info,
+ filters=>[{broken,{fun logger_filters:level/2,broken_state}},
+ {level,{fun logger_filters:level/2,{stop,gteq,error}}}],
+ config=>#{type=>{file,LogInfo}}}
+ },
+ {handler,?STANDARD_HANDLER,logger_std_h,
+ #{level=>error,
+ filters=>[],
+ formatter=>{logger_formatter,#{}},
+ config=>#{type=>{file,LogErr}}}
+ }
+ ]},
+ {logger_level,info}], LogErr, LogInfo, 7).
+
+logger_many_handlers(Config, Env, LogErr, LogInfo, NumProgress) ->
+ {ok,_,Node} = setup(Config,Env),
+ check_single_log(Node,LogErr,
+ file,% dest
+ 0,% progress in std logger
+ error), % level
+ ok = rpc:call(Node,logger_std_h,filesync,[info]),
+ {ok, Bin} = file:read_file(LogInfo),
+ ct:log("Log content:~n~s",[Bin]),
+ match(Bin,<<"info:">>,NumProgress,info,info),
+ match(Bin,<<"notice:">>,1,notice,info),
+ match(Bin,<<"alert:">>,0,alert,info),
+
+ ok.
+
+logger_proxy(Config) ->
+ %% assume current node runs with default settings
+ DefOpts = logger_olp:get_opts(logger_proxy),
+ {ok,_,Node} = setup(Config,
+ [{logger,[{proxy,#{sync_mode_qlen=>0,
+ drop_mode_qlen=>2}}]}]),
+ Expected = DefOpts#{sync_mode_qlen:=0,
+ drop_mode_qlen:=2},
+ Expected = rpc:call(Node,logger_olp,get_opts,[logger_proxy]),
+ Expected = rpc:call(Node,logger,get_proxy_config,[]),
+
+ ok.
+
+sasl_compatible_false(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_,Node} = setup(Config,
+ [{error_logger,{file,Log}},
+ {logger_sasl_compatible,false},
+ {logger_level,info}]), % to get progress
+ check_default_log(Node,Log,
+ file,% dest
+ 6,% progress in std logger
+ info),
+ ok.
+
+sasl_compatible_false_no_progress(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_,Node} = setup(Config,
+ [{error_logger,{file,Log}},
+ {logger_sasl_compatible,false}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+ ok.
+
+sasl_compatible(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_,Node} = setup(Config,
+ [{error_logger,{file,Log}},
+ {sasl_compatible,true}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+ ok.
+
+all_logger_level(Config) ->
+ [all_logger_level(Config,Level) || Level <- [none,
+ emergency,
+ alert,
+ critical,
+ error,
+ warning,
+ notice,
+ info,
+ debug,
+ all]],
+ ok.
+
+all_logger_level(Config,Level) ->
+ {ok,#{primary:=#{level:=Level}},Node} = setup(Config,[{logger_level,Level}]),
+ true = test_server:stop_node(Node),
+ ok.
+
+bad_error_logger(Config) ->
+ error = setup(Config,[{error_logger,baddest}]).
+
+bad_level(Config) ->
+ error = setup(Config,[{logger_level,badlevel}]).
+
+bad_sasl_compatibility(Config) ->
+ error = setup(Config,[{logger_sasl_compatible,badcomp}]).
+
+%%%-----------------------------------------------------------------
+%%% Internal
+file(Config,Func) ->
+ filename:join(proplists:get_value(priv_dir,Config),
+ lists:concat([Func,".log"])).
+
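+%% Verify a log written with the default handler configuration, i.e.
+%% with legacy report headers (PROGRESS REPORT, ALERT REPORT, ...).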
+check_default_log(Node,Log,Dest,NumProgress) ->
+ check_default_log(Node,Log,Dest,NumProgress,notice).
+check_default_log(Node,Log,Dest,NumProgress,Level) ->
+
+ {ok,Bin1,Bin2} = check_log(Node,Log,Dest),
+
+ match(Bin1,<<"PROGRESS REPORT">>,NumProgress,info,Level),
+ match(Bin1,<<"ALERT REPORT">>,1,alert,Level),
+ match(Bin1,<<"INFO REPORT">>,0,notice,Level),
+ match(Bin1,<<"DEBUG REPORT">>,0,debug,Level),
+
+ match(Bin2,<<"INFO REPORT">>,1,notice,Level),
+ match(Bin2,<<"DEBUG REPORT">>,0,debug,Level),
+ ok.
+
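+%% Verify a log written with the plain logger_formatter configuration,
+%% where each event is prefixed with its level (info:, alert:, ...).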
+check_single_log(Node,Log,Dest,NumProgress) ->
+ check_single_log(Node,Log,Dest,NumProgress,notice).
+check_single_log(Node,Log,Dest,NumProgress,Level) ->
+
+ {ok,Bin1,Bin2} = check_log(Node,Log,Dest),
+
+ match(Bin1,<<"info:">>,NumProgress,info,Level),
+ match(Bin1,<<"alert:">>,1,alert,Level),
+ match(Bin1,<<"debug:">>,0,debug,Level),
+
+ match(Bin2,<<"info:">>,NumProgress+1,info,Level),
+ match(Bin2,<<"debug:">>,0,debug,Level),
+
+ ok.
+
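+%% Log one alert and one debug event on Node and read the log, then
+%% stop sasl to produce an info report and read the log again.
+%% Returns both log snapshots.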
+check_log(Node,Log,Dest) ->
+
+ ok = log(Node,alert,["dummy1"]),
+ ok = log(Node,debug,["dummy1"]),
+
+    %% Check that there are progress reports (supervisor and
+    %% application_controller) and an alert report (the call above) in
+    %% the log. There should not be any info reports yet.
+ {ok,Bin1} = sync_and_read(Node,Dest,Log),
+ ct:log("Log content:~n~s",[Bin1]),
+
+ %% Then stop sasl and see that the info report from
+ %% application_controller is there
+ ok = rpc:call(Node,application,stop,[sasl]),
+ {ok,Bin2} = sync_and_read(Node,Dest,Log),
+ ct:log("Log content:~n~s",[Bin2]),
+ {ok,Bin1,Bin2}.
+
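+%% Assert that Pattern occurs exactly N times in Bin, except that no
+%% occurrence at all is expected if the event level is less severe
+%% than the configured level.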
+match(Bin,Pattern,0,_,_) ->
+ nomatch = re:run(Bin,Pattern,[{capture,none}]);
+match(Bin,Pattern,N,LogLevel,ConfLevel) ->
+ case logger:compare_levels(LogLevel,ConfLevel) of
+ lt -> match(Bin,Pattern,0,LogLevel,ConfLevel);
+ _ ->
+ {match,M} = re:run(Bin,Pattern,[{capture,all},global]),
+ N = length(M)
+ end.
+
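+%% Return the handler configuration with the given id, or false if
+%% there is none.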
+find(Id,Handlers) ->
+    case lists:search(fun(#{id:=Id0}) when Id0=:=Id -> true;
+ (_) -> false end,
+ Handlers) of
+ {value,Config} ->
+ Config;
+ false ->
+ false
+ end.
+
+exists(Id,Handlers) ->
+ case find(Id,Handlers) of
+ false ->
+ false;
+ _ ->
+ true
+ end.
diff --git a/lib/kernel/test/logger_filters_SUITE.erl b/lib/kernel/test/logger_filters_SUITE.erl
new file mode 100644
index 0000000000..11cce8fd20
--- /dev/null
+++ b/lib/kernel/test/logger_filters_SUITE.erl
@@ -0,0 +1,227 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_filters_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+
+-define(ndlog,
+ #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{}}).
+-define(dlog(Domain),
+ #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{domain=>Domain}}).
+-define(llog(Level),
+ #{level=>Level,msg=>{"Line: ~p",[?LINE]},meta=>#{}}).
+-define(plog,
+ #{level=>info,
+ msg=>{report,#{label=>{?MODULE,progress}}},
+ meta=>#{line=>?LINE}}).
+-define(rlog(Node),
+ #{level=>info,
+ msg=>{"Line: ~p",[?LINE]},
+ meta=>#{gl=>rpc:call(Node,erlang,whereis,[user])}}).
+
+-define(TRY(X), my_try(fun() -> X end)).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [domain,
+ level,
+ progress,
+ remote_gl].
+
+domain(_Config) ->
+ L1 = logger_filters:domain(L1=?dlog([]),{log,super,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,super,[]}),
+ L2 = logger_filters:domain(L2=?dlog([]),{log,sub,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,sub,[]}),
+ L3 = logger_filters:domain(L3=?dlog([]),{log,equal,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,equal,[]}),
+ ignore = logger_filters:domain(?dlog([]),{log,not_equal,[]}),
+ ignore = logger_filters:domain(?dlog([]),{stop,not_equal,[]}),
+ ignore = logger_filters:domain(?dlog([]),{log,undefined,[]}),
+ ignore = logger_filters:domain(?dlog([]),{stop,undefined,[]}),
+
+ L4 = logger_filters:domain(L4=?dlog([a]),{log,super,[a,b]}),
+ stop = logger_filters:domain(?dlog([a]),{stop,super,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,sub,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,sub,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,equal,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,equal,[a,b]}),
+ L5 = logger_filters:domain(L5=?dlog([a]),{log,not_equal,[a,b]}),
+ stop = logger_filters:domain(?dlog([a]),{stop,not_equal,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,undefined,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,undefined,[a,b]}),
+
+ ignore = logger_filters:domain(?dlog([a,b]),{log,super,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,super,[a]}),
+ L6 = logger_filters:domain(L6=?dlog([a,b]),{log,sub,[a]}),
+ stop = logger_filters:domain(?dlog([a,b]),{stop,sub,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{log,equal,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,equal,[a]}),
+ L7 = logger_filters:domain(L7=?dlog([a,b]),{log,not_equal,[a]}),
+ stop = logger_filters:domain(?dlog([a,b]),{stop,not_equal,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{log,undefined,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,undefined,[a]}),
+
+ ignore = logger_filters:domain(?ndlog,{log,super,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,super,[a]}),
+ ignore = logger_filters:domain(?ndlog,{log,sub,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,sub,[a]}),
+ ignore = logger_filters:domain(?ndlog,{log,equal,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,equal,[a]}),
+ L8 = logger_filters:domain(L8=?ndlog,{log,not_equal,[a]}),
+ stop = logger_filters:domain(?ndlog,{stop,not_equal,[a]}),
+ L9 = logger_filters:domain(L9=?ndlog,{log,undefined,[a]}),
+ stop = logger_filters:domain(?ndlog,{stop,undefined,[a]}),
+
+ L10 = logger_filters:domain(L10=?dlog([a,b,c,d]),{log,super,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,super,[a,b,c,d]}),
+ L11 = logger_filters:domain(L11=?dlog([a,b,c,d]),{log,sub,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,sub,[a,b,c,d]}),
+ L12 = logger_filters:domain(L12=?dlog([a,b,c,d]),{log,equal,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,not_equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,not_equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,undefined,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,undefined,[a,b,c,d]}),
+
+ %% A domain field in meta which is not a list is allowed by the
+ %% filter, but since MatchDomain is always a list of atoms, only
+ %% Action=not_equal can ever match.
+ ignore = logger_filters:domain(?dlog(dummy),{log,super,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,super,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,sub,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,sub,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,equal,[a,b,c,d]}),
+ L13 = logger_filters:domain(L13=?dlog(dummy),{log,not_equal,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog(dummy),{stop,not_equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,undefined,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,undefined,[a,b,c,d]}),
+
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,bad)),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{bad,super,[]})),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,bad,[]})),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,super,bad})),
+
+ ok.
+
+level(_Config) ->
+ ignore = logger_filters:level(?llog(info),{log,lt,info}),
+ ignore = logger_filters:level(?llog(info),{stop,lt,info}),
+ ignore = logger_filters:level(?llog(info),{log,gt,info}),
+ ignore = logger_filters:level(?llog(info),{stop,gt,info}),
+ L1 = logger_filters:level(L1=?llog(info),{log,lteq,info}),
+ stop = logger_filters:level(?llog(info),{stop,lteq,info}),
+ L2 = logger_filters:level(L2=?llog(info),{log,gteq,info}),
+ stop = logger_filters:level(?llog(info),{stop,gteq,info}),
+ L3 = logger_filters:level(L3=?llog(info),{log,eq,info}),
+ stop = logger_filters:level(?llog(info),{stop,eq,info}),
+ ignore = logger_filters:level(?llog(info),{log,neq,info}),
+ ignore = logger_filters:level(?llog(info),{stop,neq,info}),
+
+ ignore = logger_filters:level(?llog(error),{log,lt,info}),
+ ignore = logger_filters:level(?llog(error),{stop,lt,info}),
+ L4 = logger_filters:level(L4=?llog(error),{log,gt,info}),
+ stop = logger_filters:level(?llog(error),{stop,gt,info}),
+ ignore = logger_filters:level(?llog(error),{log,lteq,info}),
+ ignore = logger_filters:level(?llog(error),{stop,lteq,info}),
+ L5 = logger_filters:level(L5=?llog(error),{log,gteq,info}),
+ stop = logger_filters:level(?llog(error),{stop,gteq,info}),
+ ignore = logger_filters:level(?llog(error),{log,eq,info}),
+ ignore = logger_filters:level(?llog(error),{stop,eq,info}),
+ L6 = logger_filters:level(L6=?llog(error),{log,neq,info}),
+ stop = logger_filters:level(?llog(error),{stop,neq,info}),
+
+ L7 = logger_filters:level(L7=?llog(info),{log,lt,error}),
+ stop = logger_filters:level(?llog(info),{stop,lt,error}),
+ ignore = logger_filters:level(?llog(info),{log,gt,error}),
+ ignore = logger_filters:level(?llog(info),{stop,gt,error}),
+ L8 = logger_filters:level(L8=?llog(info),{log,lteq,error}),
+ stop = logger_filters:level(?llog(info),{stop,lteq,error}),
+ ignore = logger_filters:level(?llog(info),{log,gteq,error}),
+ ignore = logger_filters:level(?llog(info),{stop,gteq,error}),
+ ignore = logger_filters:level(?llog(info),{log,eq,error}),
+ ignore = logger_filters:level(?llog(info),{stop,eq,error}),
+ L9 = logger_filters:level(L9=?llog(info),{log,neq,error}),
+ stop = logger_filters:level(?llog(info),{stop,neq,error}),
+
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),bad)),
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),{bad,eq,info})),
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,bad,info})),
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,eq,bad})),
+
+ ok.
+
+progress(_Config) ->
+ L1 = logger_filters:progress(L1=?plog,log),
+ stop = logger_filters:progress(?plog,stop),
+ ignore = logger_filters:progress(?ndlog,log),
+ ignore = logger_filters:progress(?ndlog,stop),
+
+ {error,badarg} = ?TRY(logger_filters:progress(?plog,bad)),
+
+ ok.
+
+remote_gl(_Config) ->
+ {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+ L1 = logger_filters:remote_gl(L1=?rlog(Node),log),
+ stop = logger_filters:remote_gl(?rlog(Node),stop),
+ ignore = logger_filters:remote_gl(?ndlog,log),
+ ignore = logger_filters:remote_gl(?ndlog,stop),
+
+ {error,badarg} = ?TRY(logger_filters:remote_gl(?rlog(Node),bad)),
+ ok.
+
+remote_gl(cleanup,_Config) ->
+ [test_server:stop_node(N) || N<-nodes()].
+
+%%%-----------------------------------------------------------------
+%%% Called by macro ?TRY(X)
+my_try(Fun) ->
+ try Fun() catch C:R -> {C,R} end.
diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl
new file mode 100644
index 0000000000..83e3e6c40a
--- /dev/null
+++ b/lib/kernel/test/logger_formatter_SUITE.erl
@@ -0,0 +1,886 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_formatter_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+
+-define(TRY(X), my_try(fun() -> X end)).
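+%% ?TRY(X) evaluates X and, if it raises, returns {Class,Reason,TopStackFrame}
+%% via my_try/1 at the end of this module. A minimal illustrative use (the
+%% expression is hypothetical and not part of the suite):
+%%   {error,badarg,_} = ?TRY(list_to_integer(foo)).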
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [default,
+ legacy_header,
+ error_logger_notice_header,
+ single_line,
+ template,
+ format_msg,
+ report_cb,
+ max_size,
+ depth,
+ chars_limit,
+ format_mfa,
+ format_time,
+ level_or_msg_in_meta,
+ faulty_log,
+ faulty_config,
+ faulty_msg,
+ check_config,
+ update_config].
+
+default(_Config) ->
+ String1 = format(info,{"~p",[term]},#{},#{}),
+ ct:log(String1),
+ [_DateTime,"info:","term\n"] = string:lexemes(String1," "),
+
+ Time = timestamp(),
+ ExpectedTimestamp = default_time_format(Time),
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{}),
+ ct:log(String2),
+ " info: term\n" = string:prefix(String2,ExpectedTimestamp),
+ ok.
+
+legacy_header(_Config) ->
+ Time = timestamp(),
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>true,
+ single_line=>false}),
+ ct:log(String1),
+ "=INFO REPORT==== "++Rest = String1,
+ [Timestamp,"\nterm\n"] = string:lexemes(Rest," ="),
+ [D,M,Y,H,Min,S,Micro] = string:lexemes(Timestamp,"-:."),
+ integer(D,31),
+ integer(Y,2018,infinity),
+ integer(H,23),
+ integer(Min,59),
+ integer(S,59),
+ integer(Micro,999999),
+ true = lists:member(M,["Jan","Feb","Mar","Apr","May","Jun",
+ "Jul","Aug","Sep","Oct","Nov","Dec"]),
+
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>false,
+ single_line=>false}),
+ ct:log(String2),
+ ExpectedTimestamp = default_time_format(Time),
+ " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
+
+ String3 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>bad,
+ single_line=>false}),
+ ct:log(String3),
+ String3 = String2,
+
+ String4 = format(info,{"~p",[term]},#{time=>Time},
+ #{legacy_header=>true,
+ single_line=>true}), % <---ignored
+ ct:log(String4),
+ String4 = String1,
+
+ String5 = format(info,{"~p",[term]},#{}, % <--- no time
+ #{legacy_header=>true,
+ single_line=>false}),
+ ct:log(String5),
+ "=INFO REPORT==== "++_ = String5,
+ ok.
+
+error_logger_notice_header(_Config) ->
+ Meta1 = #{error_logger=>#{tag => info_report,type => std_info}},
+ String1 = format(notice,{"~p",[term]},Meta1,
+ #{legacy_header=>true,
+ error_logger_notice_header=>notice}),
+ ct:log(String1),
+ "=NOTICE REPORT==== "++_ = String1,
+
+ String2 = format(notice,{"~p",[term]},Meta1,
+ #{legacy_header=>true,
+ error_logger_notice_header=>info}),
+ ct:log(String2),
+ "=INFO REPORT==== "++_ = String2,
+
+ String3 = format(notice,{"~p",[term]},#{},
+ #{legacy_header=>true,
+ error_logger_notice_header=>notice}),
+ ct:log(String3),
+ "=NOTICE REPORT==== "++_ = String3,
+
+ String4 = format(notice,{"~p",[term]},#{},
+ #{legacy_header=>true,
+ error_logger_notice_header=>info}),
+ ct:log(String4),
+ "=NOTICE REPORT==== "++_ = String4,
+
+ ok.
+
+single_line(_Config) ->
+ Time = timestamp(),
+ ExpectedTimestamp = default_time_format(Time),
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>true}),
+ ct:log(String1),
+ " info: term\n" = string:prefix(String1,ExpectedTimestamp),
+
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>false}),
+ ct:log(String2),
+ " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
+
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>bad}),
+
+
+ %% Test that no extra commas/spaces are added when removing
+ %% newlines, especially not after "=>" in a map association (as
+ %% was the case in OTP-21.0, when the only single_line adjustment
+ %% was done by regexp replacement of "\n" by ", ").
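+    %% Illustration (assumed behaviour, not asserted here): the old
+    %% regexp-based approach, which replaced every "\n" with ", ", could
+    %% leave a stray comma right after "=>" in a map association, while
+    %% the current implementation is expected to print such terms on one
+    %% line without inserting any separator there.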
+ Prefix =
+ "Some characters to fill the line ------------------------------------- ",
+    String3 = format(info,{"~s~p~n~s~p~n",[Prefix,
+                                           lists:seq(1,10),
+                                           Prefix,
+                                           #{a=>map,with=>a,few=>associations}]},
+ #{time=>Time},
+ #{single_line=>true}),
+ ct:log(String3),
+ match = re:run(String3,"\\[1,2,3,4,5,6,7,8,9,10\\]",[{capture,none}]),
+    match = re:run(String3,
+                   "#{a => map,few => associations,with => a}",
+                   [{capture,none}]),
+
+ %% This part is added to make sure that the previous test made
+ %% sense, i.e. that there would actually be newlines inside the
+ %% list and map.
+    String4 = format(info,{"~s~p~n~s~p~n",[Prefix,
+                                           lists:seq(1,10),
+                                           Prefix,
+                                           #{a=>map,with=>a,few=>associations}]},
+ #{time=>Time},
+ #{single_line=>false}),
+ ct:log(String4),
+ match = re:run(String4,"\\[1,2,3,\n",[global,{capture,none}]),
+ {match,Match4} = re:run(String4,"=>\n",[global,{capture,all}]),
+ 3 = length(Match4),
+
+ %% Test that big metadata fields do not get line breaks
+ String5 = format(info,"",
+ #{mymeta=>lists:seq(1,100)},
+ #{single_line=>true,template=>[mymeta,"\n"]}),
+ ct:log(String5),
+ [_] = string:lexemes(String5,"\n"),
+
+ %% Ensure that the previous test made sense, i.e. that the
+ %% metadata field does produce multiple lines if
+ %% single_line==false.
+ String6 = format(info,"",
+ #{mymeta=>lists:seq(1,100)},
+ #{single_line=>false,template=>[mymeta,"\n"]}),
+ ct:log(String6),
+ [_,_|_] = string:lexemes(String6,"\n"),
+
+ ok.
+
+template(_Config) ->
+ Time = timestamp(),
+
+ Template1 = [msg],
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template1}),
+ ct:log(String1),
+ "term" = String1,
+
+ Template2 = [msg,unknown],
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template2}),
+ ct:log(String2),
+ "term" = String2,
+
+ Template3 = ["string"],
+ String3 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template3}),
+ ct:log(String3),
+ "string" = String3,
+
+ Template4 = ["string\nnewline"],
+ String4 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template4,
+ single_line=>true}),
+ ct:log(String4),
+ "string\nnewline" = String4,
+
+ Template5 = [],
+ String5 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template5}),
+ ct:log(String5),
+ "" = String5,
+
+ Ref6 = erlang:make_ref(),
+ Meta6 = #{atom=>some_atom,
+ integer=>632,
+ list=>[list,"string",4321,#{},{tuple}],
+ mfa=>{mod,func,0},
+ pid=>self(),
+ ref=>Ref6,
+ string=>"some string",
+ time=>Time,
+ tuple=>{1,atom,"list"},
+ nested=>#{subkey=>subvalue}},
+ Template6 = lists:join(";",lists:sort(maps:keys(maps:remove(nested,Meta6))) ++
+ [[nested,subkey]]),
+ String6 = format(info,{"~p",[term]},Meta6,#{template=>Template6,
+ single_line=>true}),
+ ct:log(String6),
+ SelfStr = pid_to_list(self()),
+ RefStr6 = ref_to_list(Ref6),
+ ListStr = "[list,\"string\",4321,#{},{tuple}]",
+ ExpectedTime6 = default_time_format(Time),
+ ["some_atom",
+ "632",
+ ListStr,
+ "mod:func/0",
+ SelfStr,
+ RefStr6,
+ "some string",
+ ExpectedTime6,
+ "{1,atom,\"list\"}",
+ "subvalue"] = string:lexemes(String6,";"),
+
+ Meta7 = #{time=>Time,
+ nested=>#{key1=>#{subkey1=>value1},
+ key2=>value2}},
+ Template7 = lists:join(";",[nested,
+ [nested,key1],
+ [nested,key1,subkey1],
+ [nested,key2],
+ [nested,key2,subkey2],
+ [nested,key3],
+ [nested,key3,subkey3]]),
+ String7 = format(info,{"~p",[term]},Meta7,#{template=>Template7,
+ single_line=>true}),
+ ct:log(String7),
+ [MultipleKeysStr7,
+ "#{subkey1 => value1}",
+ "value1",
+ "value2",
+ "",
+ "",
+ ""] = string:split(String7,";",all),
+ %% Order of keys is not fixed
+ case MultipleKeysStr7 of
+ "#{key2 => value2,key1 => #{subkey1 => value1}}" -> ok;
+ "#{key1 => #{subkey1 => value1},key2 => value2}" -> ok;
+ _ -> ct:fail({full_nested_map_unexpected,MultipleKeysStr7})
+ end,
+
+ Meta8 = #{time=>Time,
+ nested=>#{key1=>#{subkey1=>value1},
+ key2=>value2}},
+ Template8 =
+ lists:join(
+ ";",
+ [{nested,["exist:",nested],["noexist"]},
+ {[nested,key1],["exist:",[nested,key1]],["noexist"]},
+ {[nested,key1,subkey1],["exist:",[nested,key1,subkey1]],["noexist"]},
+ {[nested,key2],["exist:",[nested,key2]],["noexist"]},
+ {[nested,key2,subkey2],["exist:",[nested,key2,subkey2]],["noexist"]},
+ {[nested,key3],["exist:",[nested,key3]],["noexist"]},
+ {[nested,key3,subkey3],["exist:",[nested,key3,subkey3]],["noexist"]}]),
+ String8 = format(info,{"~p",[term]},Meta8,#{template=>Template8,
+ single_line=>true}),
+ ct:log(String8),
+ [MultipleKeysStr8,
+ "exist:#{subkey1 => value1}",
+ "exist:value1",
+ "exist:value2",
+ "noexist",
+ "noexist",
+ "noexist"] = string:split(String8,";",all),
+ %% Order of keys is not fixed
+ case MultipleKeysStr8 of
+ "exist:#{key2 => value2,key1 => #{subkey1 => value1}}" -> ok;
+ "exist:#{key1 => #{subkey1 => value1},key2 => value2}" -> ok;
+ _ -> ct:fail({full_nested_map_unexpected,MultipleKeysStr8})
+ end,
+
+ ok.
+
+format_msg(_Config) ->
+ Template = [msg],
+
+ String1 = format(info,{"~p",[term]},#{},#{template=>Template}),
+ ct:log(String1),
+ "term" = String1,
+
+ String2 = format(info,{"list",[term]},#{},#{template=>Template}),
+ ct:log(String2),
+ "FORMAT ERROR: \"list\" - [term]" = String2,
+
+ String3 = format(info,{report,term},#{},#{template=>Template}),
+ ct:log(String3),
+ "term" = String3,
+
+ String4 = format(info,{report,term},
+ #{report_cb=>fun(_)-> {"formatted",[]} end},
+ #{template=>Template}),
+ ct:log(String4),
+ "formatted" = String4,
+
+ String5 = format(info,{report,term},
+ #{report_cb=>fun(_)-> faulty_return end},
+ #{template=>Template}),
+ ct:log(String5),
+ "REPORT_CB/1 ERROR: term; Returned: faulty_return" = String5,
+
+ String6 = format(info,{report,term},
+ #{report_cb=>fun(_)-> erlang:error(fun_crashed) end},
+ #{template=>Template}),
+ ct:log(String6),
+ "REPORT_CB/1 CRASH: term; Reason: {error,fun_crashed,"++_ = String6,
+
+ String7 = format(info,{report,term},
+ #{report_cb=>fun(_,_)-> ['not',a,string] end},
+ #{template=>Template}),
+ ct:log(String7),
+ "REPORT_CB/2 ERROR: term; Returned: ['not',a,string]" = String7,
+
+ String8 = format(info,{report,term},
+ #{report_cb=>fun(_,_)-> faulty_return end},
+ #{template=>Template}),
+ ct:log(String8),
+ "REPORT_CB/2 ERROR: term; Returned: faulty_return" = String8,
+
+ String9 = format(info,{report,term},
+ #{report_cb=>fun(_,_)-> erlang:error(fun_crashed) end},
+ #{template=>Template}),
+ ct:log(String9),
+ "REPORT_CB/2 CRASH: term; Reason: {error,fun_crashed,"++_ = String9,
+
+ %% strings are not formatted
+ String10 = format(info,{string,"string"},
+ #{report_cb=>fun(_)-> {"formatted",[]} end},
+ #{template=>Template}),
+ ct:log(String10),
+ "string" = String10,
+
+ String11 = format(info,{string,['not',printable,list]},
+ #{report_cb=>fun(_)-> {"formatted",[]} end},
+ #{template=>Template}),
+ ct:log("~ts",[String11]), % avoiding ct_log crash
+ "FORMAT ERROR: \"~ts\" - [['not',printable,list]]" = String11,
+
+ String12 = format(info,{string,"string"},#{},#{template=>Template}),
+ ct:log(String12),
+ "string" = String12,
+
+ ok.
+
+report_cb(_Config) ->
+ Template = [msg],
+ MetaFun = fun(_) -> {"meta_rcb",[]} end,
+ ConfigFun = fun(_) -> {"config_rcb",[]} end,
+ "term" = format(info,{report,term},#{},#{template=>Template}),
+ "meta_rcb" =
+ format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template}),
+ "config_rcb" =
+ format(info,{report,term},#{},#{template=>Template,
+ report_cb=>ConfigFun}),
+ "config_rcb" =
+ format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template,
+ report_cb=>ConfigFun}),
+ ok.
+
+max_size(_Config) ->
+ Cfg = #{template=>[msg],
+ single_line=>false},
+ "12345678901234567890" =
+ format(info,{"12345678901234567890",[]},#{},Cfg),
+ %% application:set_env(kernel,logger_max_size,11),
+ %% "12345678901234567890" = % min value is 50, so this is not limited
+ %% format(info,{"12345678901234567890",[]},#{},Cfg),
+ %% "12345678901234567890123456789012345678901234567..." = % 50
+ %% format(info,
+ %% {"123456789012345678901234567890123456789012345678901234567890",
+ %% []},
+ %% #{},
+ %% Cfg),
+ %% application:set_env(kernel,logger_max_size,53),
+ %% "12345678901234567890123456789012345678901234567890..." = %53
+ %% format(info,
+ %% {"123456789012345678901234567890123456789012345678901234567890",
+ %% []},
+ %% #{},
+ %% Cfg),
+ "123456789012..." =
+ format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>15}),
+ "12345678901234567890" =
+ format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>unlimited}),
+ %% Check that one newline at the end of the line is kept (if it exists)
+ "12345678901...\n" =
+ format(info,{"12345678901234567890\n",[]},#{},Cfg#{max_size=>15}),
+ "12345678901...\n" =
+ format(info,{"12345678901234567890",[]},#{},Cfg#{template=>[msg,"\n"],
+ max_size=>15}),
+ ok.
+max_size(cleanup,_Config) ->
+ application:unset_env(kernel,logger_max_size),
+ ok.
+
+depth(_Config) ->
+ Template = [msg],
+ "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template}),
+ application:set_env(kernel,error_logger_format_depth,11),
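+    %% With no 'depth' in the formatter config, the default depth is
+    %% assumed to follow the kernel parameter error_logger_format_depth
+    %% set above, which is why the next call prints a truncated list.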
+ "[1,2,3,4,5,6,7,8,9,0|...]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template}),
+ "[1,2,3,4,5,6,7,8,9,0,1,2|...]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template,
+ depth=>13}),
+ "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template,
+ depth=>unlimited}),
+ ok.
+depth(cleanup,_Config) ->
+ application:unset_env(kernel,error_logger_format_depth),
+ ok.
+
+chars_limit(_Config) ->
+ FA = {"LoL: ~p~nL: ~p~nMap: ~p~n",
+ [lists:duplicate(10,lists:seq(1,100)),
+ lists:seq(1,100),
+ maps:from_list(lists:zip(lists:seq(1,100),
+ lists:duplicate(100,value)))]},
+ Meta = #{time=>timestamp()},
+ Template = [time," - ", msg, "\n"],
+ FC = #{template=>Template,
+ depth=>unlimited,
+ max_size=>unlimited,
+ chars_limit=>unlimited,
+ single_line=>true},
+ CL1 = 80,
+ String1 = format(info,FA,Meta,FC#{chars_limit=>CL1}),
+ L1 = string:length(String1),
+ ct:log("String1: ~p~nLength1: ~p~n",[lists:flatten(String1),L1]),
+ true = L1 > CL1,
+ true = L1 < CL1 + 15,
+
+ String2 = format(info,FA,Meta,FC#{chars_limit=>CL1,depth=>10}),
+ L2 = string:length(String2),
+ ct:log("String2: ~p~nLength2: ~p~n",[lists:flatten(String2),L2]),
+ String2 = String1,
+
+ CL3 = 200,
+ String3 = format(info,FA,Meta,FC#{chars_limit=>CL3}),
+ L3 = string:length(String3),
+ ct:log("String3: ~p~nLength3: ~p~n",[lists:flatten(String3),L3]),
+ true = L3 > CL3,
+ true = L3 < CL3 + 15,
+
+ String4 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10}),
+ L4 = string:length(String4),
+ ct:log("String4: ~p~nLength4: ~p~n",[lists:flatten(String4),L4]),
+ true = L4 > CL3,
+ true = L4 < CL3 + 15,
+
+ %% Test that max_size truncates the string which is limited by
+ %% depth and chars_limit
+ MS5 = 150,
+ String5 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10,max_size=>MS5}),
+ L5 = string:length(String5),
+ ct:log("String5: ~p~nLength5: ~p~n",[String5,L5]),
+ L5 = MS5,
+ true = lists:prefix(lists:sublist(String5,L5-4),String4),
+
+    %% Test that chars_limit also limits plain strings
+ Str = "123456789012345678901234567890123456789012345678901234567890123456789",
+ CL6 = 80,
+ String6 = format(info,{string,Str},Meta,FC#{chars_limit=>CL6}),
+ L6 = string:length(String6),
+ ct:log("String6: ~p~nLength6: ~p~n",[String6,L6]),
+ L6 = CL6,
+
+ ok.
+
+format_mfa(_Config) ->
+ Template = [mfa],
+
+ Meta1 = #{mfa=>{mod,func,0}},
+ String1 = format(info,{"~p",[term]},Meta1,#{template=>Template}),
+ ct:log(String1),
+ "mod:func/0" = String1,
+
+ Meta2 = #{mfa=>{mod,func,[]}},
+ String2 = format(info,{"~p",[term]},Meta2,#{template=>Template}),
+ ct:log(String2),
+ "mod:func/0" = String2,
+
+ Meta3 = #{mfa=>"mod:func/0"},
+ String3 = format(info,{"~p",[term]},Meta3,#{template=>Template}),
+ ct:log(String3),
+ "mod:func/0" = String3,
+
+ Meta4 = #{mfa=>othermfa},
+ String4 = format(info,{"~p",[term]},Meta4,#{template=>Template}),
+ ct:log(String4),
+ "othermfa" = String4,
+
+ ok.
+
+format_time(_Config) ->
+ Time = timestamp(),
+ Meta = #{time=>Time},
+ FC = #{template=>[time]},
+ Msg = {string,""},
+ ExpectedLocal = default_time_format(Time,false),
+ ExpectedUtc = default_time_format(Time,true),
+
+ %% default - local time
+ ExpectedLocal = format(info,Msg,Meta,FC),
+
+ %% time_offset config parameter to formatter
+ ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}),
+ ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}),
+
+ %% stdlib utc_log works when time_offset parameter is not set
+ application:set_env(stdlib,utc_log,true),
+ ExpectedUtc = format(info,Msg,Meta,FC),
+
+ %% sasl utc_log overwrites stdlib utc_log
+ application:set_env(sasl,utc_log,false),
+ ExpectedLocal = format(info,Msg,Meta,FC),
+
+ %% sasl utc_log overwrites stdlib utc_log
+ application:set_env(sasl,utc_log,true),
+ application:set_env(stdlib,utc_log,false),
+ ExpectedUtc = format(info,Msg,Meta,FC),
+
+ %% time_offset config parameter to formatter
+ %% overwrites sasl and stdlib utc_log
+ application:set_env(sasl,utc_log,false),
+ ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}),
+
+ %% time_offset config parameter to formatter
+ %% overwrites sasl and stdlib utc_log
+ application:set_env(sasl,utc_log,true),
+ application:set_env(stdlib,utc_log,true),
+ ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}),
+
+ %% time_designator config parameter to formatter
+ ExpectedLocalS = default_time_format(Time,false,$\s),
+ ExpectedUtcS = default_time_format(Time,true,$\s),
+
+ ExpectedLocalS = format(info,Msg,Meta,FC#{time_offset=>"",
+ time_designator=>$\s}),
+ ExpectedUtcS = format(info,Msg,Meta,FC#{time_offset=>"Z",
+ time_designator=>$\s}),
+
+ ok.
+
+format_time(cleanup,_Config) ->
+ application:unset_env(sasl,utc_log),
+ application:unset_env(stdlib,utc_log),
+ ok.
+
+level_or_msg_in_meta(_Config) ->
+    %% The template contains atoms that pick out values from the metadata,
+    %% or the special keys 'level' and 'msg', which are taken from the log
+    %% event itself. What if the metadata also contains a key named 'level'
+    %% or 'msg' whose value you want to display?
+    %% For now we simply ignore such metadata keys and display the actual
+    %% level and msg from the log event.
+
+ Meta = #{level=>mylevel,
+ msg=>"metamsg"},
+ Template = [level,";",msg],
+ String = format(info,{"~p",[term]},Meta,#{template=>Template}),
+ ct:log(String),
+ "info;term" = String, % so mylevel and "metamsg" are ignored
+
+ ok.
+
+faulty_log(_Config) ->
+ %% Unexpected log (should be type logger:log_event()) - print error
+ {error,
+ function_clause,
+ {logger_formatter,format,[_,_],_}} =
+ ?TRY(logger_formatter:format(unexp_log,#{})),
+ ok.
+
+faulty_config(_Config) ->
+ {error,
+ function_clause,
+ {logger_formatter,format,[_,_],_}} =
+ ?TRY(logger_formatter:format(#{level=>info,
+ msg=>{"~p",[term]},
+ meta=>#{time=>timestamp()}},
+ unexp_config)),
+ ok.
+
+faulty_msg(_Config) ->
+ {error,
+ function_clause,
+ {logger_formatter,_,_,_}} =
+ ?TRY(logger_formatter:format(#{level=>info,
+ msg=>term,
+ meta=>#{time=>timestamp()}},
+ #{})),
+ ok.
+
+-define(cfgerr(X), {error,{invalid_formatter_config,logger_formatter,X}}).
+check_config(_Config) ->
+ ok = logger_formatter:check_config(#{}),
+ ?cfgerr(bad) = logger_formatter:check_config(bad),
+
+ C1 = #{chars_limit => 1,
+ depth => 1,
+ legacy_header => true,
+ error_logger_notice_header => info,
+ max_size => 1,
+ report_cb => fun(R) -> {"~p",[R]} end,
+ single_line => false,
+ template => [],
+ time_designator => $T,
+ time_offset => 0},
+ ok = logger_formatter:check_config(C1),
+
+ ok = logger_formatter:check_config(#{chars_limit => unlimited}),
+ ?cfgerr({chars_limit,bad}) =
+ logger_formatter:check_config(#{chars_limit => bad}),
+
+ ok = logger_formatter:check_config(#{depth => unlimited}),
+ ?cfgerr({depth,bad}) =
+ logger_formatter:check_config(#{depth => bad}),
+
+ ok = logger_formatter:check_config(#{legacy_header => false}),
+ ?cfgerr({legacy_header,bad}) =
+ logger_formatter:check_config(#{legacy_header => bad}),
+
+ ok = logger_formatter:check_config(#{error_logger_notice_header => notice}),
+ ?cfgerr({error_logger_notice_header,bad}) =
+ logger_formatter:check_config(#{error_logger_notice_header => bad}),
+
+ ok = logger_formatter:check_config(#{max_size => unlimited}),
+ ?cfgerr({max_size,bad}) =
+ logger_formatter:check_config(#{max_size => bad}),
+
+ ok =
+ logger_formatter:check_config(#{report_cb => fun(_,_) -> "" end}),
+ ?cfgerr({report_cb,F}) =
+ logger_formatter:check_config(#{report_cb => F=fun(_,_,_) -> {"",[]} end}),
+ ?cfgerr({report_cb,bad}) =
+ logger_formatter:check_config(#{report_cb => bad}),
+
+ ok = logger_formatter:check_config(#{single_line => true}),
+ ?cfgerr({single_line,bad}) =
+ logger_formatter:check_config(#{single_line => bad}),
+
+ Ts = [[key],
+ [[key1,key2]],
+ [{key,[key],[]}],
+ [{[key1,key2],[[key1,key2]],["noexist"]}],
+ ["string"]],
+ [begin
+ ct:log("check template: ~p",[T]),
+ ok = logger_formatter:check_config(#{template => T})
+ end
+ || T <- Ts],
+
+ ETs = [bad,
+ [{key,bad}],
+ [{key,[key],bad}],
+ [{key,[key],"bad"}],
+ "bad",
+ [[key,$a,$b,$c]],
+ [[$a,$b,$c,key]]],
+ [begin
+ ct:log("check template: ~p",[T]),
+ {error,{invalid_formatter_template,logger_formatter,T}} =
+ logger_formatter:check_config(#{template => T})
+ end
+ || T <- ETs],
+
+ ?cfgerr({time_designator,bad}) =
+ logger_formatter:check_config(#{time_designator => bad}),
+ ?cfgerr({time_designator,"b"}) =
+ logger_formatter:check_config(#{time_designator => "b"}),
+
+ ok = logger_formatter:check_config(#{time_offset => -1}),
+ ok = logger_formatter:check_config(#{time_offset => "+02:00"}),
+ ok = logger_formatter:check_config(#{time_offset => "-23:59"}),
+ ok = logger_formatter:check_config(#{time_offset => "+24:00"}),
+ ok = logger_formatter:check_config(#{time_offset => "-25:00"}),
+ ?cfgerr({time_offset,bad}) =
+ logger_formatter:check_config(#{time_offset => bad}),
+ ?cfgerr({time_offset,"02:00"}) =
+ logger_formatter:check_config(#{time_offset => "02:00"}),
+ ?cfgerr({time_offset,"+02"}) =
+ logger_formatter:check_config(#{time_offset => "+02"}),
+
+ ok.
+
+%% Test that formatter config can be changed, and that the default
+%% template is updated accordingly
+update_config(_Config) ->
+ {error,{not_found,?MODULE}} = logger:update_formatter_config(?MODULE,#{}),
+
+ logger:add_handler_filter(default,silence,{fun(_,_) -> stop end,ok}),
+ ok = logger:add_handler(?MODULE,?MODULE,#{}),
+ D = lists:seq(1,1000),
+ logger:notice("~p~n",[D]),
+ {Lines1,C1} = check_log(),
+ [ct:log(L) || L <- Lines1],
+ ct:log("~p",[C1]),
+ [Line1] = Lines1,
+ [_Time,"notice: "++D1] = string:split(Line1," "),
+ true = length(D1)>3000,
+ true = #{}==C1,
+
+ ok = logger:update_formatter_config(?MODULE,single_line,false),
+ logger:notice("~p~n",[D]),
+ {Lines2,C2} = check_log(),
+ [ct:log(L) || L <- Lines2],
+ ct:log("~p",[C2]),
+ true = length(Lines2)>50,
+ true = #{single_line=>false}==C2,
+
+ ok = logger:update_formatter_config(?MODULE,#{legacy_header=>true}),
+ logger:notice("~p~n",[D]),
+ {Lines3,C3} = check_log(),
+ [ct:log(L) || L <- Lines3],
+ ct:log("~p",[C3]),
+ ["=NOTICE REPORT==== "++_|D3] = Lines3,
+ true = length(D3)>50,
+ true = #{legacy_header=>true,single_line=>false}==C3,
+
+ ok = logger:update_formatter_config(?MODULE,single_line,true),
+ logger:notice("~p~n",[D]),
+ {Lines4,C4} = check_log(),
+ [ct:log(L) || L <- Lines4],
+ ct:log("~p",[C4]),
+ ["=NOTICE REPORT==== "++_,D4] = Lines4,
+ true = length(D4)>3000,
+ true = #{legacy_header=>true,single_line=>true}==C4,
+
+ %% Finally, check that error_logger_notice_header works, default=info
+ error_logger:info_msg("~p",[D]),
+ {Lines5,C5} = check_log(),
+ [ct:log(L) || L <- Lines5],
+ ct:log("~p",[C5]),
+ ["=INFO REPORT==== "++_,_D5] = Lines5,
+
+ ok=logger:update_formatter_config(?MODULE,error_logger_notice_header,notice),
+ error_logger:info_msg("~p",[D]),
+ {Lines6,C6} = check_log(),
+ [ct:log(L) || L <- Lines6],
+ ct:log("~p",[C6]),
+ ["=NOTICE REPORT==== "++_,_D6] = Lines6,
+
+ {error,{invalid_formatter_config,bad}} =
+ logger:update_formatter_config(?MODULE,bad),
+ {error,{invalid_formatter_config,logger_formatter,{depth,bad}}} =
+ logger:update_formatter_config(?MODULE,depth,bad),
+
+ ok.
+
+update_config(cleanup,_Config) ->
+ _ = logger:remove_handler(?MODULE),
+ _ = logger:remove_handler_filter(default,silence),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+format(Level,Msg,Meta,Config) ->
+ format(#{level=>Level,msg=>Msg,meta=>add_time(Meta)},Config).
+
+format(Log,Config) ->
+ lists:flatten(logger_formatter:format(Log,Config)).
+
+default_time_format(Timestamp) ->
+ default_time_format(Timestamp,false).
+
+default_time_format(Timestamp,Utc) ->
+ default_time_format(Timestamp,Utc,$T).
+
+default_time_format(Timestamp,Utc,Sep) ->
+ Offset = if Utc -> "Z";
+ true -> ""
+ end,
+ calendar:system_time_to_rfc3339(Timestamp,[{unit,microsecond},
+ {time_designator,Sep},
+ {offset,Offset}]).
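+%% The string produced above looks like, e.g. (illustrative values; the
+%% offset for Utc=false depends on the local time zone):
+%%   Utc=false: "2018-05-17T10:12:34.123456+02:00"
+%%   Utc=true : "2018-05-17T08:12:34.123456Z"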
+
+integer(Str) ->
+ is_integer(list_to_integer(Str)).
+integer(Str,Max) ->
+ integer(Str,0,Max).
+integer(Str,Min,Max) ->
+ Int = list_to_integer(Str),
+    Int >= Min andalso Int =< Max.
+
+%%%-----------------------------------------------------------------
+%%% Called by macro ?TRY(X)
+my_try(Fun) ->
+ try Fun() catch C:R:S -> {C,R,hd(S)} end.
+
+timestamp() ->
+ logger:timestamp().
+
+%% Add a timestamp unless one is already present in the metadata.
+add_time(#{time:=_}=Meta) ->
+ Meta;
+add_time(Meta) ->
+ Meta#{time=>timestamp()}.
+
+%%%-----------------------------------------------------------------
+%%% handler callback
+log(Log,#{formatter:={M,C}}) ->
+ put(log,{M:format(Log,C),C}),
+ ok.
+
+check_log() ->
+ {S,C} = erase(log),
+ {string:lexemes(S,"\n"),C}.
diff --git a/lib/kernel/test/logger_legacy_SUITE.erl b/lib/kernel/test/logger_legacy_SUITE.erl
new file mode 100644
index 0000000000..c3cab07d81
--- /dev/null
+++ b/lib/kernel/test/logger_legacy_SUITE.erl
@@ -0,0 +1,288 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_legacy_SUITE).
+
+-compile(export_all).
+-compile({nowarn_deprecated_function,[{gen_fsm,start,3},
+ {gen_fsm,send_all_state_event,2}]}).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+%%%-----------------------------------------------------------------
+%%% This test suite tests that log events from within OTP can be
+%%% delivered to legacy error_logger event handlers in the same format
+%%% as before 'logger' was introduced.
+%%%
+%%% Before changing the expected format of any of the log events in
+%%% this suite, please make sure that the backwards incompatibility it
+%%% introduces is ok.
+%%% -----------------------------------------------------------------
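+%%%
+%%% Mechanism (as implemented below): init_per_testcase/2 installs this
+%%% module as an error_logger report handler, and its handle_event/2
+%%% clause near the end of the file forwards each {Tag,Type,Report} to
+%%% the test case process, where the ?check macros match on the
+%%% expected legacy format.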
+
+-define(check(Expected),
+ receive Expected ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {got,test_server:messages_get()}})
+ end).
+-define(check_no_flush(Expected),
+ receive Expected ->
+ ok
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {got,test_server:messages_get()}})
+ end).
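+%% Illustrative use of the macros above (assumed, not an actual test line):
+%%   ?check({error,"** Generic server ~tp terminating"++_,_Args})
+%% waits up to a second for the forwarded event and fails the test case if
+%% it does not arrive; ?check/1 also asserts that no other messages remain
+%% in the mailbox, while ?check_no_flush/1 leaves them untouched.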
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ logger:add_handler(error_logger,error_logger,
+ #{level=>info,filter_default=>stop}),
+ Config.
+
+end_per_suite(_Config) ->
+ logger:remove_handler(error_logger),
+ ok.
+
+init_per_group(std, Config) ->
+ ok = logger:set_handler_config(
+ error_logger,filters,
+ [{domain,{fun logger_filters:domain/2,{log,super,[otp]}}}]),
+ Config;
+init_per_group(sasl, Config) ->
+    %% Since the default level is notice, and progress reports are info,
+    %% we need to raise the primary logger level to info in order to
+    %% receive them.
+ ok = logger:set_primary_config(level,info),
+ ok = logger:set_handler_config(
+ error_logger,filters,
+ [{domain,{fun logger_filters:domain/2,{log,super,[otp,sasl]}}}]),
+
+    %% cth_log_redirect checks whether sasl is started before displaying
+    %% any sasl reports - start it here just so the real sasl reports
+    %% show up in the test case log:
+ {ok,Apps} = application:ensure_all_started(sasl),
+ [{stop_apps,Apps}|Config];
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(sasl, Config) ->
+ Apps = ?config(stop_apps,Config),
+ [application:stop(App) || App <- Apps],
+ ok = logger:set_primary_config(level,notice),
+ ok;
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ error_logger:add_report_handler(?MODULE,{event_handler,self()}),
+ Config.
+
+end_per_testcase(Case, Config) ->
+ %% Using gen_event directly here, instead of
+ %% error_logger:delete_report_handler. This is to avoid
+ %% automatically stopping the error_logger process due to removing
+ %% the last handler.
+ gen_event:delete_handler(error_logger,?MODULE,[]),
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [{std,[],[gen_server,
+ gen_event,
+ gen_fsm,
+ gen_statem]},
+ {sasl,[],[sasl_reports,
+ supervisor_handle_info]}].
+
+all() ->
+ [{group,std},
+ {group,sasl}].
+
+gen_server(_Config) ->
+ {ok,Pid} = gen_server:start(?MODULE,gen_server,[]),
+ Msg = fun() -> erlang:error({badmatch,b}) end,
+ Pid ! Msg,
+ ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}),
+ ok = gen_server:cast(Pid,Msg),
+ ?check({error,"** Generic server ~tp terminating"++_,
+ [Pid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}).
+
+gen_event(_Config) ->
+ {ok,Pid} = gen_event:start(),
+ ok = gen_event:add_handler(Pid,?MODULE,gen_event),
+ Msg = fun() -> erlang:error({badmatch,b}) end,
+ Pid ! Msg,
+ ?check({warning_msg,"** Undefined handle_info in ~tp"++_,[?MODULE,Msg]}),
+ gen_event:notify(Pid,Msg),
+ ?check({error,"** gen_event handler ~p crashed."++_,
+ [?MODULE,Pid,Msg,gen_event,{{badmatch,b},_}]}).
+
+gen_fsm(_Config) ->
+ {ok,Pid} = gen_fsm:start(?MODULE,gen_fsm,[]),
+ Msg = fun() -> erlang:error({badmatch,b}) end,
+ Pid ! Msg,
+ ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}),
+ gen_fsm:send_all_state_event(Pid,Msg),
+ ?check({error,"** State machine ~tp terminating"++_,
+ [Pid,Msg,mystate,gen_fsm,{{badmatch,b},_}]}).
+
+gen_statem(_Config) ->
+ {ok,Pid} = gen_statem:start(?MODULE,gen_statem,[]),
+ Msg = fun() -> erlang:error({badmatch,b}) end,
+ Pid ! Msg,
+ ?check({error,"** State machine ~tp terminating"++_,
+ [Pid,{info,Msg},{mystate,gen_statem},error,{badmatch,b}|_]}).
+
+sasl_reports(Config) ->
+ App = {application,?MODULE,[{description, ""},
+ {vsn, "1.0"},
+ {modules, [?MODULE]},
+ {registered, []},
+ {applications, []},
+ {mod, {?MODULE, []}}]},
+ AppStr = io_lib:format("~p.",[App]),
+ Dir = ?config(priv_dir,Config),
+ AppFile = filename:join(Dir,?MODULE_STRING++".app"),
+ ok = file:write_file(AppFile,AppStr),
+ true = code:add_patha(Dir),
+ ok = application:start(?MODULE),
+ SupName = sup_name(),
+ Pid = whereis(SupName),
+ [{ch,ChPid,_,_}] = supervisor:which_children(Pid),
+ Node = node(),
+ ?check_no_flush({info_report,progress,[{application,?MODULE},
+ {started_at,Node}]}),
+ ?check({info_report,progress,[{supervisor,{local,SupName}},
+ {started,[{pid,ChPid}|_]}]}),
+ ok = gen_server:cast(ChPid, fun() ->
+ spawn_link(fun() -> receive x->ok end end)
+ end),
+ Msg = fun() -> erlang:error({badmatch,b}) end,
+ ok = gen_server:cast(ChPid,Msg),
+ ?check_no_flush({error,"** Generic server ~tp terminating"++_,
+ [ChPid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}),
+ ?check_no_flush({error_report,crash_report,
+ [[{initial_call,_},
+ {pid,ChPid},
+ {registered_name,[]},
+ {error_info,{error,{badmatch,b},_}},
+ {ancestors,_},
+ {message_queue_len,_},
+ {messages,_},
+ {links,[Pid,Neighbour]},
+ {dictionary,_},
+ {trap_exit,_},
+ {status,_},
+ {heap_size,_},
+ {stack_size,_},
+ {reductions,_}],
+ [{neighbour,[{pid,Neighbour},
+ {registered_name,_},
+ {initial_call,_},
+ {current_function,_},
+ {ancestors,_},
+ {message_queue_len,_},
+ {links,[ChPid]},
+ {trap_exit,_},
+ {status,_},
+ {heap_size,_},
+ {stack_size,_},
+ {reductions,_},
+ {current_stacktrace,_}]}]]}),
+ ?check_no_flush({error_report,supervisor_report,
+ [{supervisor,{local,SupName}},
+ {errorContext,child_terminated},
+ {reason,{{badmatch,b},_}},
+ {offender,[{pid,ChPid}|_]}]}),
+ ?check({info_report,progress,[{supervisor,{local,SupName}},
+ {started,_}]}),
+
+ ok = application:stop(?MODULE),
+ ?check({info_report,std_info,[{application,?MODULE},
+ {exited,stopped},
+ {type,temporary}]}).
+
+sasl_reports(cleanup,_Config) ->
+ application:stop(?MODULE).
+
+supervisor_handle_info(_Config) ->
+ {ok,Pid} = supervisor:start_link({local,sup_name()},?MODULE,supervisor),
+ ?check({info_report,progress,[{supervisor,_},{started,_}]}),
+ Pid ! msg,
+ ?check({error,"Supervisor received unexpected message: ~tp~n",[msg]}).
+
+supervisor_handle_info(cleanup,_Config) ->
+ Pid = whereis(sup_name()),
+ unlink(Pid),
+ exit(Pid,shutdown).
+
+%%%-----------------------------------------------------------------
+%%% Callbacks for error_logger event handler, gen_server, gen_statem,
+%%% gen_fsm, gen_event, supervisor and application.
+start(_,_) ->
+ supervisor:start_link({local,sup_name()},?MODULE,supervisor).
+
+init(supervisor) ->
+ {ok,{#{},[#{id=>ch,start=>{gen_server,start_link,[?MODULE,gen_server,[]]}}]}};
+init(StateMachine) when StateMachine==gen_statem; StateMachine==gen_fsm ->
+ {ok,mystate,StateMachine};
+init(State) ->
+ {ok,State}.
+
+%% error_logger event handler
+handle_event({Tag,_Gl,{_Pid,Type,Report}},{_,Pid}=State) ->
+ Pid ! {Tag,Type,Report},
+ {ok,State};
+%% other gen_event
+handle_event(Fun,State) when is_function(Fun) ->
+ Fun(),
+ {next_state,State}.
+
+%% gen_fsm
+handle_event(Fun,State,Data) when is_function(Fun) ->
+ Fun(),
+ {next_state,State,Data}.
+
+%% gen_statem
+handle_event(info,Fun,State,Data) when is_function(Fun) ->
+ Fun(),
+ {next_state,State,Data}.
+
+%% gen_server
+handle_cast(Fun,State) when is_function(Fun) ->
+ Fun(),
+ {noreply,State}.
+
+%% gen_statem
+callback_mode() ->
+ handle_event_function.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+sup_name() ->
+ list_to_atom(?MODULE_STRING++"_sup").
diff --git a/lib/kernel/test/logger_olp_SUITE.erl b/lib/kernel/test/logger_olp_SUITE.erl
new file mode 100644
index 0000000000..ea3eec89f5
--- /dev/null
+++ b/lib/kernel/test/logger_olp_SUITE.erl
@@ -0,0 +1,90 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_olp_SUITE).
+
+-compile(export_all).
+
+-include_lib("kernel/src/logger_olp.hrl").
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [idle_timer].
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+idle_timer(_Config) ->
+ {ok,_Pid,Olp} = logger_olp:start_link(?MODULE,?MODULE,self(),#{}),
+ [logger_olp:load(Olp,{msg,N}) || N<-lists:seq(1,3)],
+ timer:sleep(?IDLE_DETECT_TIME*2),
+ [{load,{msg,1}},
+ {load,{msg,2}},
+ {load,{msg,3}},
+ {notify,idle}] = test_server:messages_get(),
+ logger_olp:cast(Olp,hello),
+ timer:sleep(?IDLE_DETECT_TIME*2),
+ [{cast,hello}] = test_server:messages_get(),
+ ok.
+idle_timer(cleanup,_Config) ->
+ unlink(whereis(?MODULE)),
+ logger_olp:stop(?MODULE),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Olp callbacks
+init(P) ->
+ {ok,P}.
+
+handle_load(M,P) ->
+ P ! {load,M},
+ P.
+
+handle_cast(M,P) ->
+ P ! {cast,M},
+ {noreply,P}.
+
+notify(N,P) ->
+ P ! {notify,N},
+ P.
diff --git a/lib/kernel/test/logger_proxy_SUITE.erl b/lib/kernel/test/logger_proxy_SUITE.erl
new file mode 100644
index 0000000000..777531e4ed
--- /dev/null
+++ b/lib/kernel/test/logger_proxy_SUITE.erl
@@ -0,0 +1,274 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_proxy_SUITE).
+
+-compile(export_all).
+
+%% -include_lib("common_test/include/ct.hrl").
+%% -include_lib("kernel/include/logger.hrl").
+%% -include_lib("kernel/src/logger_internal.hrl").
+
+%% -define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++
+%% ":"++integer_to_list(?LINE)).
+%% -define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}).
+%% -define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]).
+
+%% -define(MY_LOC(N),#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+%% file=>?FILE, line=>?LINE-N}).
+
+%% -define(TRY(X), my_try(fun() -> X end)).
+
+
+-define(HNAME,list_to_atom(lists:concat([?MODULE,"_",?FUNCTION_NAME]))).
+-define(LOC,#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},line=>?LINE}).
+-define(ENSURE_TIME,5000).
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [basic,
+ emulator,
+ remote,
+ remote_emulator,
+ config,
+ restart_after,
+ terminate].
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+basic(_Config) ->
+ ok = logger:add_handler(?HNAME,?MODULE,#{config=>self()}),
+ logger_proxy ! {log,notice,"Log from: ~p; ~p",[?FUNCTION_NAME,?LINE],L1=?LOC},
+ ok = ensure(L1),
+ logger_proxy ! {log,notice,[{test_case,?FUNCTION_NAME},{line,?LINE}],L2=?LOC},
+ ok = ensure(L2),
+ logger_proxy:log({remote,node(),{log,notice,
+ "Log from: ~p; ~p",
+ [?FUNCTION_NAME,?LINE],
+ L3=?LOC}}),
+ ok = ensure(L3),
+ logger_proxy:log({remote,node(),{log,notice,
+ [{test_case,?FUNCTION_NAME},
+ {line,?LINE}],
+ L4=?LOC}}),
+ ok = ensure(L4),
+ ok.
+basic(cleanup,_Config) ->
+ ok = logger:remove_handler(?HNAME).
+
+emulator(_Config) ->
+ ok = logger:add_handler(?HNAME,?MODULE,#{config=>self()}),
+ Pid = spawn(fun() -> erlang:error(some_reason) end),
+ ok = ensure(#{pid=>Pid}),
+ ok.
+emulator(cleanup,_Config) ->
+ ok = logger:remove_handler(?HNAME).
+
+remote(Config) ->
+ {ok,_,Node} = logger_test_lib:setup(Config,[{logger,[{proxy,#{}}]}]),
+ ok = logger:add_handler(?HNAME,?MODULE,#{config=>self()}),
+ L1 = ?LOC, spawn(Node,fun() -> logger:notice("Log from ~p; ~p",[?FUNCTION_NAME,?LINE],L1) end),
+ ok = ensure(L1),
+ L2 = ?LOC, spawn(Node,fun() -> logger:notice([{test_case,?FUNCTION_NAME},{line,?LINE}],L2) end),
+ ok = ensure(L2),
+ ok.
+remote(cleanup,_Config) ->
+ ok = logger:remove_handler(?HNAME).
+
+remote_emulator(Config) ->
+ {ok,_,Node} = logger_test_lib:setup(Config,[{logger,[{proxy,#{}}]}]),
+ ok = logger:add_handler(?HNAME,?MODULE,#{config=>self()}),
+ Pid = spawn(Node,fun() -> erlang:error(some_reason) end),
+ ok = ensure(#{pid=>Pid}),
+ ok.
+remote_emulator(cleanup,_Config) ->
+ ok = logger:remove_handler(?HNAME).
+
+config(_Config) ->
+ C1 = #{sync_mode_qlen:=SQ,
+ drop_mode_qlen:=DQ} = logger:get_proxy_config(),
+ C1 = logger_olp:get_opts(logger_proxy),
+
+ %% Update the existing config with these two values
+ SQ1 = SQ+1,
+ DQ1 = DQ+1,
+ ok = logger:update_proxy_config(#{sync_mode_qlen=>SQ1,
+ drop_mode_qlen=>DQ1}),
+ C2 = logger:get_proxy_config(), % reads from ets table
+ C2 = logger_olp:get_opts(logger_proxy), % ensure consistency with process opts
+ C2 = C1#{sync_mode_qlen:=SQ1,
+ drop_mode_qlen:=DQ1},
+
+    %% Update the existing config again with only one value
+ SQ2 = SQ+2,
+ ok = logger:update_proxy_config(#{sync_mode_qlen=>SQ2}),
+ C3 = logger:get_proxy_config(),
+ C3 = logger_olp:get_opts(logger_proxy),
+ C3 = C2#{sync_mode_qlen:=SQ2},
+
+    %% Set the config, i.e. merge the given values with the defaults
+ ok = logger:set_proxy_config(#{sync_mode_qlen=>SQ1}),
+ C4 = logger:get_proxy_config(),
+ C4 = logger_olp:get_opts(logger_proxy),
+ C4 = C1#{sync_mode_qlen:=SQ1},
+
+ %% Reset to default
+ ok = logger:set_proxy_config(#{}),
+ C5 = logger:get_proxy_config(),
+ C5 = logger_olp:get_opts(logger_proxy),
+ C5 = logger_proxy:get_default_config(),
+
+ %% Errors
+ {error,{invalid_olp_config,_}} =
+ logger:set_proxy_config(#{faulty_key=>1}),
+ {error,{invalid_olp_config,_}} =
+ logger:set_proxy_config(#{sync_mode_qlen=>infinity}),
+ {error,{invalid_config,[]}} = logger:set_proxy_config([]),
+
+ {error,{invalid_olp_config,_}} =
+ logger:update_proxy_config(#{faulty_key=>1}),
+ {error,{invalid_olp_config,_}} =
+ logger:update_proxy_config(#{sync_mode_qlen=>infinity}),
+ {error,{invalid_config,[]}} = logger:update_proxy_config([]),
+
+ C5 = logger:get_proxy_config(),
+ C5 = logger_olp:get_opts(logger_proxy),
+
+ ok.
+config(cleanup,_Config) ->
+    _ = logger:set_proxy_config(logger_proxy:get_default_config()),
+ ok.
+
+restart_after(_Config) ->
+ Restart = 3000,
+ ok = logger:update_proxy_config(#{overload_kill_enable => true,
+ overload_kill_qlen => 10,
+ overload_kill_restart_after => Restart}),
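+    %% Assumed semantics of the options above, matching what the rest of
+    %% this test case verifies: when the proxy's message queue exceeds
+    %% overload_kill_qlen it is terminated by the overload protection,
+    %% and a new proxy is started automatically roughly
+    %% overload_kill_restart_after milliseconds later.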
+ Proxy = whereis(logger_proxy),
+ Proxy = erlang:system_info(system_logger),
+ ProxyConfig = logger:get_proxy_config(),
+ ProxyConfig = logger_olp:get_opts(logger_proxy),
+
+ Ref = erlang:monitor(process,Proxy),
+ spawn(fun() ->
+ [logger_proxy ! {log,debug,
+ [{test_case,?FUNCTION_NAME},
+ {line,?LINE}],
+ ?LOC} || _ <- lists:seq(1,100)]
+ end),
+ receive
+ {'DOWN',Ref,_,_,_Reason} ->
+ undefined = erlang:system_info(system_logger),
+ timer:sleep(Restart),
+ poll_restarted(10)
+ after 5000 ->
+ ct:fail(proxy_not_terminated)
+ end,
+
+ Proxy1 = whereis(logger_proxy),
+ Proxy1 = erlang:system_info(system_logger),
+ ProxyConfig = logger:get_proxy_config(),
+ ProxyConfig = logger_olp:get_opts(logger_proxy),
+
+ ok.
+restart_after(cleanup,_Config) ->
+    _ = logger:set_proxy_config(logger_proxy:get_default_config()),
+ ok.
+
+%% Test that the system_logger flag is set to the logger process if
+%% logger_proxy terminates for a reason other than overload.
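+%% (system_logger is the process that receives log events generated by
+%% the runtime system itself; it normally points to the logger_proxy
+%% process, and this test expects it to fall back to the logger process
+%% while the proxy is down.)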
+terminate(_Config) ->
+ Logger = whereis(logger),
+ Proxy = whereis(logger_proxy),
+ Proxy = erlang:system_info(system_logger),
+ ProxyConfig = logger:get_proxy_config(),
+ ProxyConfig = logger_olp:get_opts(logger_proxy),
+
+ Ref = erlang:monitor(process,Proxy),
+ ok = logger_olp:stop(Proxy),
+ receive
+ {'DOWN',Ref,_,_,_Reason} ->
+ Logger = erlang:system_info(system_logger),
+ logger_proxy:restart(),
+ poll_restarted(10)
+ after 5000 ->
+ ct:fail(proxy_not_terminated)
+ end,
+
+ Proxy1 = whereis(logger_proxy),
+ Proxy1 = erlang:system_info(system_logger),
+ ProxyConfig = logger:get_proxy_config(),
+ ProxyConfig = logger_olp:get_opts(logger_proxy),
+
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+
+poll_restarted(0) ->
+ ct:fail(proxy_not_restarted);
+poll_restarted(N) ->
+ timer:sleep(1000),
+ case whereis(logger_proxy) of
+ undefined ->
+ poll_restarted(N-1);
+ _Pid ->
+ ok
+ end.
+
+%% Logger handler callback
+log(#{meta:=Meta},#{config:=Pid}) ->
+ Pid ! {logged,Meta}.
+
+%% Check that the log from the logger callback function log/2 is received
+ensure(Match) ->
+ receive {logged,Meta} ->
+ case maps:with(maps:keys(Match),Meta) of
+ Match -> ok;
+ _NoMatch -> {error,Match,Meta,test_server:messages_get()}
+ end
+ after ?ENSURE_TIME -> {error,Match,test_server:messages_get()}
+ end.
diff --git a/lib/kernel/test/logger_simple_h_SUITE.erl b/lib/kernel/test/logger_simple_h_SUITE.erl
new file mode 100644
index 0000000000..e0ad792bdb
--- /dev/null
+++ b/lib/kernel/test/logger_simple_h_SUITE.erl
@@ -0,0 +1,209 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_simple_h_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-import(logger_test_lib, [setup/2, log/3, sync_and_read/3]).
+
+-define(check_no_log,[] = test_server:messages_get()).
+-define(check(Expected),
+ receive {log,Expected} ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {expected,Expected},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}).
+-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]).
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks, [logger_test_lib]}].
+
+init_per_suite(Config) ->
+ Hs0 = logger:get_handler_config(),
+ Hs = lists:keydelete(cth_log_redirect,1,Hs0),
+ [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs],
+ Env = [{App,Key,application:get_env(App,Key)} ||
+ {App,Key} <- [{kernel,logger_level}]],
+ [{env,Env},{logger,Hs}|Config].
+
+end_per_suite(Config) ->
+ [application:set_env(App,Key,Val) || {App,Key,Val} <- ?config(env,Config)],
+ Hs = ?config(logger,Config),
+ [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs],
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop,
+ replace_default,
+ replace_file,
+ replace_disk_log
+ ].
+
+start_stop(_Config) ->
+ undefined = whereis(logger_simple_h),
+ register(logger_simple_h,self()),
+ {error,_} = logger:add_handler(simple,
+ logger_simple_h,
+ #{filter_default=>log}),
+ unregister(logger_simple_h),
+ ok = logger:add_handler(simple,logger_simple_h,#{filter_default=>log}),
+ Pid = whereis(logger_simple_h),
+ true = is_pid(Pid),
+ ok = logger:remove_handler(simple),
+ false = is_pid(whereis(logger_simple_h)),
+ ok.
+start_stop(cleanup,_Config) ->
+ logger:remove_handler(simple).
+
+%% This test case only checks that nothing crashes; the default handler prints
+%% to stdout, which we cannot read from in a detached slave.
+replace_default(Config) ->
+
+ {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]),
+ log(Node, emergency, [?str]),
+ log(Node, alert, [?str,[]]),
+ log(Node, error, [?map_rep]),
+ log(Node, info, [?keyval_rep]),
+ log(Node, info, [?keyval_rep++[not_key_val]]),
+ rpc:call(Node, error_logger, error_report, [some_type,?map_rep]),
+ rpc:call(Node, error_logger, warning_report, ["some_type",?map_rep]),
+ log(Node, critical, [?str,[?keyval_rep]]),
+ log(Node, notice, [["fake",string,"line:",?LINE]]),
+
+ ok = rpc:call(Node, logger, add_handlers, [kernel]),
+
+ ok.
+
+replace_file(Config) ->
+
+ {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]),
+ log(Node, emergency, [M1=?str]),
+ log(Node, alert, [M2=?str,[]]),
+ log(Node, error, [?map_rep]),
+ log(Node, warning, [?keyval_rep]),
+ log(Node, warning, [?keyval_rep++[not_key_val]]),
+ log(Node, critical, [?str,[?keyval_rep]]),
+ log(Node, notice, [["fake",string,"line:",?LINE]]),
+
+ File = filename:join(proplists:get_value(priv_dir,Config),
+ atom_to_list(?FUNCTION_NAME)++".log"),
+
+ ok = rpc:call(Node, logger, add_handlers,
+ [[{handler, default, logger_std_h,
+ #{ config => #{ type => {file, File} },
+ formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]),
+
+ {ok,Bin} = sync_and_read(Node, file, File),
+ Lines = [unicode:characters_to_list(L) ||
+ L <- binary:split(Bin,<<"\n">>,[global,trim])],
+ ["=EMERGENCY REPORT===="++_,
+ M1,
+ "=ALERT REPORT===="++_,
+ M2,
+ "=ERROR REPORT===="++_,
+ _,
+ _,
+ "=WARNING REPORT===="++_,
+ _,
+ _,
+ "=WARNING REPORT===="++_,
+ _,
+ _,
+ _,
+ "=CRITICAL REPORT===="++_,
+ _,
+ _,
+ "=NOTICE REPORT===="++_,
+ _
+ ] = Lines,
+ ok.
+
+replace_disk_log(Config) ->
+
+ {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]),
+ log(Node, emergency, [M1=?str]),
+ log(Node, alert, [M2=?str,[]]),
+ log(Node, error, [?map_rep]),
+ log(Node, warning, [?keyval_rep]),
+ log(Node, warning, [?keyval_rep++[not_key_val]]),
+ log(Node, critical, [?str,[?keyval_rep]]),
+ log(Node, notice, [["fake",string,"line:",?LINE]]),
+
+ File = filename:join(proplists:get_value(priv_dir,Config),
+ atom_to_list(?FUNCTION_NAME)++".log"),
+
+ ok = rpc:call(Node, logger, add_handlers,
+ [[{handler, default, logger_disk_log_h,
+ #{ config => #{ file => File },
+ formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]),
+ {ok,Bin} = sync_and_read(Node, disk_log, File),
+ Lines = [unicode:characters_to_list(L) ||
+ L <- binary:split(Bin,<<"\n">>,[global,trim])],
+ ["=EMERGENCY REPORT===="++_,
+ M1,
+ "=ALERT REPORT===="++_,
+ M2,
+ "=ERROR REPORT===="++_,
+ _,
+ _,
+ "=WARNING REPORT===="++_,
+ _,
+ _,
+ "=WARNING REPORT===="++_,
+ _,
+ _,
+ _,
+ "=CRITICAL REPORT===="++_,
+ _,
+ _,
+ "=NOTICE REPORT===="++_,
+ _
+ ] = Lines,
+ ok.
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
new file mode 100644
index 0000000000..0c5516f82b
--- /dev/null
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -0,0 +1,2127 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_std_h_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+-include_lib("kernel/src/logger_h_common.hrl").
+-include_lib("kernel/src/logger_olp.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
+-include_lib("kernel/include/file.hrl").
+
+-define(check_no_log,
+ begin
+ timer:sleep(?IDLE_DETECT_TIME*2),
+ [] = test_server:messages_get()
+ end).
+-define(check(Expected),
+ receive
+ {log,Expected} ->
+ [] = test_server:messages_get()
+ after 5000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {expected,Expected},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(bin(Msg), list_to_binary(Msg++"\n")).
+-define(domain,#{domain=>[?MODULE]}).
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ timer:start(), % to avoid progress report
+ {ok,#{formatter:=OrigFormatter}} =
+ logger:get_handler_config(?STANDARD_HANDLER),
+ [{formatter,OrigFormatter}|Config].
+
+end_per_suite(Config) ->
+ {OrigMod,OrigConf} = proplists:get_value(formatter,Config),
+ logger:set_handler_config(?STANDARD_HANDLER,formatter,{OrigMod,OrigConf}),
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(TestHooksCase, Config) when
+ TestHooksCase == write_failure;
+ TestHooksCase == sync_failure ->
+ case (fun() -> ?TEST_HOOKS_TAB == undefined end)() of
+ true ->
+ {skip,"Define the TEST_HOOKS macro to run this test"};
+ false ->
+ ct:print("********** ~w **********", [TestHooksCase]),
+ Config
+ end;
+init_per_testcase(OPCase, Config) when
+ OPCase == qlen_kill_new;
+ OPCase == restart_after ->
+ case re:run(erlang:system_info(system_version),
+ "dirty-schedulers-TEST",
+ [{capture,none}]) of
+ match ->
+ {skip,"Overload protection test skipped on dirty-schedulers-TEST"};
+ nomatch ->
+ ct:print("********** ~w **********", [OPCase]),
+ Config
+ end;
+init_per_testcase(TestCase, Config) ->
+ ct:print("********** ~w **********", [TestCase]),
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [add_remove_instance_tty,
+ add_remove_instance_standard_io,
+ add_remove_instance_standard_error,
+ add_remove_instance_file1,
+ add_remove_instance_file2,
+ add_remove_instance_file3,
+ add_remove_instance_file4,
+ default_formatter,
+ filter_config,
+ errors,
+ formatter_fail,
+ config_fail,
+ crash_std_h_to_file,
+ crash_std_h_to_disk_log,
+ bad_input,
+ reconfig,
+ file_opts,
+ sync,
+ write_failure,
+ sync_failure,
+ op_switch_to_sync_file,
+ op_switch_to_sync_tty,
+ op_switch_to_drop_file,
+ op_switch_to_drop_tty,
+ op_switch_to_flush_file,
+ op_switch_to_flush_tty,
+ limit_burst_disabled,
+ limit_burst_enabled_one,
+ limit_burst_enabled_period,
+ kill_disabled,
+ qlen_kill_new,
+ qlen_kill_std,
+ mem_kill_new,
+ mem_kill_std,
+ restart_after,
+ handler_requests_under_load,
+ recreate_deleted_log,
+ reopen_changed_log,
+ rotate_size,
+ rotate_size_compressed,
+ rotate_size_reopen,
+ rotation_opts,
+ rotation_opts_restart_handler
+ ].
+
+add_remove_instance_tty(_Config) ->
+ {error,{handler_not_added,{invalid_config,logger_std_h,#{type:=tty}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{type => tty},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ ok.
+
+add_remove_instance_standard_io(_Config) ->
+ add_remove_instance_nofile(standard_io).
+add_remove_instance_standard_io(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_standard_error(_Config) ->
+ add_remove_instance_nofile(standard_error).
+add_remove_instance_standard_error(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file1(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog1.txt"),
+ Type = {file,Log},
+ add_remove_instance_file(Log, Type).
+add_remove_instance_file1(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file2(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog2.txt"),
+ Type = {file,Log,[raw,append]},
+ add_remove_instance_file(Log, Type).
+add_remove_instance_file2(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file3(_Config) ->
+ Log = atom_to_list(?MODULE),
+ StdHConfig = #{type=>file},
+ add_remove_instance_file(Log, StdHConfig).
+add_remove_instance_file3(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file4(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog4.txt"),
+ StdHConfig = #{file=>Log,modes=>[]},
+ add_remove_instance_file(Log, StdHConfig).
+add_remove_instance_file4(cleanup,_Config) ->
+ logger_std_h_remove().
+
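+%% Accept both the old tuple form of the type option ({file,Log} or
+%% {file,Log,Modes}) and the new map-based handler config.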
+add_remove_instance_file(Log, Type) when not is_map(Type) ->
+ add_remove_instance_file(Log,#{type=>Type});
+add_remove_instance_file(Log, StdHConfig) when is_map(StdHConfig) ->
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => StdHConfig,
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ Pid = whereis(h_proc_name()),
+ true = is_pid(Pid),
+ logger:notice(M1=?msg,?domain),
+ ?check(M1),
+ B1 = ?bin(M1),
+ try_read_file(Log, {ok,B1}, filesync_rep_int()),
+ ok = logger:remove_handler(?MODULE),
+ timer:sleep(500),
+ undefined = whereis(h_proc_name()),
+ logger:notice(?msg,?domain),
+ ?check_no_log,
+ try_read_file(Log, {ok,B1}, filesync_rep_int()),
+ ok.
+
+default_formatter(_Config) ->
+ ok = logger:set_handler_config(?STANDARD_HANDLER,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}),
+ ct:capture_start(),
+ logger:notice(M1=?msg),
+ timer:sleep(100),
+ ct:capture_stop(),
+ [Msg] = ct:capture_get(),
+ match = re:run(Msg,"=NOTICE REPORT====.*\n"++M1,[{capture,none}]),
+ ok.
+
+filter_config(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_std_h,#{}),
+ {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE),
+ HConfig = maps:without([olp],HConfig),
+
+ FakeFullHConfig = HConfig#{olp=>{regname,self(),erlang:make_ref()}},
+ #{config:=HConfig} =
+ logger_std_h:filter_config(Config#{config=>FakeFullHConfig}),
+ ok.
+
+filter_config(cleanup,_Config) ->
+ logger:remove_handler(?MODULE),
+ ok.
+
+errors(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,?FUNCTION_NAME),
+
+ ok = logger:add_handler(?MODULE,logger_std_h,#{}),
+ {error,{already_exist,?MODULE}} =
+ logger:add_handler(?MODULE,logger_std_h,#{}),
+
+ {error,{not_found,no_such_name}} = logger:remove_handler(no_such_name),
+
+ ok = logger:remove_handler(?MODULE),
+ {error,{not_found,?MODULE}} = logger:remove_handler(?MODULE),
+
+ {error,
+ {handler_not_added,
+ {invalid_config,logger_std_h,#{type:=faulty_type}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{type => faulty_type}}),
+
+ case os:type() of
+ {win32,_} ->
+            %% No use in testing file access on Windows
+ ok;
+ _ ->
+ NoDir = lists:concat(["/",?MODULE,"_dir"]),
+ {error,
+ {handler_not_added,{open_failed,NoDir,eacces}}} =
+ logger:add_handler(myh2,logger_std_h,
+ #{config=>#{type=>{file,NoDir}}})
+ end,
+
+ {error,
+ {handler_not_added,
+ {invalid_config,logger_std_h,#{modes:=bad_file_opt}}}} =
+ logger:add_handler(myh3,logger_std_h,
+ #{config=>#{type=>{file,Log,bad_file_opt}}}),
+
+ ok = logger:notice(?msg).
+
+errors(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+formatter_fail(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,?FUNCTION_NAME),
+
+ %% no formatter
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => {file,Log}},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}),
+ Pid = whereis(h_proc_name()),
+ true = is_pid(Pid),
+ H = logger:get_handler_ids(),
+ true = lists:member(?MODULE,H),
+
+ %% Formatter is added automatically
+ {ok,#{formatter:={logger_formatter,_}}} = logger:get_handler_config(?MODULE),
+ logger:notice(M1=?msg,?domain),
+ Got1 = try_match_file(Log,"[0-9\\+\\-T:\\.]* notice: "++M1,5000),
+
+ ok = logger:set_handler_config(?MODULE,formatter,{nonexistingmodule,#{}}),
+ logger:notice(M2=?msg,?domain),
+ Got2 = try_match_file(Log,
+ escape(Got1)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M2,
+ 5000),
+
+ ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,crash}),
+ logger:notice(M3=?msg,?domain),
+ Got3 = try_match_file(Log,
+ escape(Got2)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M3,
+ 5000),
+
+ ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,bad_return}),
+ logger:notice(?msg,?domain),
+ try_match_file(Log,
+ escape(Got3)++"FORMATTER ERROR: bad return value",
+ 5000),
+
+    %% Check that the handler is still alive and never died (same pid)
+ Pid = whereis(h_proc_name()),
+ H = logger:get_handler_ids(),
+
+ ok.
+
+formatter_fail(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+config_fail(_Config) ->
+ {error,{handler_not_added,{invalid_config,logger_std_h,#{bad:=bad}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{bad => bad},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{handler_not_added,{invalid_config,logger_std_h,
+ #{restart_type:=bad}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{restart_type => bad},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{handler_not_added,{invalid_olp_levels,#{drop_mode_qlen:=1}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{drop_mode_qlen=>1}}),
+ {error,{handler_not_added,{invalid_olp_levels,#{sync_mode_qlen:=43,
+ drop_mode_qlen:=42}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{sync_mode_qlen=>43,
+ drop_mode_qlen=>42}}),
+ {error,{handler_not_added,{invalid_olp_levels,#{drop_mode_qlen:=43,
+ flush_qlen:=42}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{drop_mode_qlen=>43,
+ flush_qlen=>42}}),
+
+ ok = logger:add_handler(?MODULE,logger_std_h,
+ #{filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{illegal_config_change,logger_std_h,#{type:=_},#{type:=_}}} =
+ logger:set_handler_config(?MODULE,config,
+ #{type=>{file,"file"}}),
+
+ {error,{invalid_olp_levels,_}} =
+ logger:set_handler_config(?MODULE,config,
+ #{sync_mode_qlen=>100,
+ flush_qlen=>99}),
+ {error,{invalid_config,logger_std_h,#{filesync_rep_int:=2000}}} =
+ logger:set_handler_config(?MODULE, config,
+ #{filesync_rep_int => 2000}),
+
+    %% Read-only fields may (accidentally) be included in the change,
+    %% but they won't take effect
+ {ok,C} = logger:get_handler_config(?MODULE),
+ ok = logger:set_handler_config(?MODULE,config,#{olp=>dummyvalue}),
+ {ok,C} = logger:get_handler_config(?MODULE),
+
+ ok.
+
+config_fail(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+crash_std_h_to_file(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])),
+ crash_std_h(Config,?FUNCTION_NAME,
+ [{handler,default,logger_std_h,
+ #{ config => #{ type => {file, Log} }}}],
+ file, Log).
+crash_std_h_to_file(cleanup,_Config) ->
+ crash_std_h(cleanup).
+
+crash_std_h_to_disk_log(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])),
+ crash_std_h(Config,?FUNCTION_NAME,
+ [{handler,default,logger_disk_log_h,
+ #{ config => #{ file => Log }}}],
+ disk_log,Log).
+crash_std_h_to_disk_log(cleanup,_Config) ->
+ crash_std_h(cleanup).
+
+crash_std_h(Config,Func,Var,Type,Log) ->
+ Dir = ?config(priv_dir,Config),
+ SysConfig = filename:join(Dir,lists:concat([?MODULE,"_",Func,".config"])),
+ ok = file:write_file(SysConfig, io_lib:format("[{kernel,[{logger,~p}]}].",[Var])),
+ Pa = filename:dirname(code:which(?MODULE)),
+ Name = lists:concat([?MODULE,"_",Func]),
+ Args = lists:concat([" -config ",filename:rootname(SysConfig)," -pa ",Pa]),
+ ct:pal("Starting ~p with ~tp", [Name,Args]),
+ %% Start a node which prints kernel logs to the destination specified by Type
+ {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]),
+ HProcName =
+ case Type of
+ file -> ?name_to_reg_name(logger_std_h,?STANDARD_HANDLER);
+ disk_log -> ?name_to_reg_name(logger_disk_log_h,?STANDARD_HANDLER)
+ end,
+ Pid = rpc:call(Node,erlang,whereis,[HProcName]),
+ ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
+ {?MODULE,self()}]),
+ ok = log_on_remote_node(Node,"dummy1"),
+ ?check("dummy1"),
+ {ok,Bin1} = sync_and_read(Node,Type,Log),
+ <<"dummy1\n">> = binary:part(Bin1,{byte_size(Bin1),-7}),
+
+ %% Kill the logger_std_h process
+ exit(Pid, kill),
+
+ %% Wait a bit, then check that it is gone
+ timer:sleep(2000),
+ undefined = rpc:call(Node,erlang,whereis,[HProcName]),
+
+ %% Check that file is not empty
+ {ok,Bin2} = sync_and_read(Node,Type,Log),
+ <<"dummy1\n">> = binary:part(Bin2,{byte_size(Bin2),-7}),
+ ok.
+
+%% Cannot use rpc:call here, since the code would execute on a process
+%% whose group leader is on this (the calling) node, and thus logger
+%% would send the log event to the logger process here instead of
+%% logging it itself.
+log_on_remote_node(Node,Msg) ->
+ Pid = self(),
+ _ = spawn_link(Node,
+ fun() -> erlang:group_leader(whereis(user),self()),
+ logger:notice(Msg),
+ Pid ! done
+ end),
+ receive done -> ok end,
+ ok.
+
+
+crash_std_h(cleanup) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
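+%% Sync the standard handler on the remote node and read back the log
+%% file, retrying once if the file is still empty.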
+sync_and_read(Node,disk_log,Log) ->
+ rpc:call(Node,logger_disk_log_h,filesync,[?STANDARD_HANDLER]),
+ case file:read_file(Log ++ ".1") of
+ {ok,<<>>} ->
+ timer:sleep(5000),
+ file:read_file(Log ++ ".1");
+ Ok ->
+ Ok
+ end;
+sync_and_read(Node,file,Log) ->
+ rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]),
+ case file:read_file(Log) of
+ {ok,<<>>} ->
+ timer:sleep(5000),
+ file:read_file(Log);
+ Ok ->
+ Ok
+ end.
+
+bad_input(_Config) ->
+ {error,{badarg,{filesync,["BadType"]}}} = logger_std_h:filesync("BadType").
+
+reconfig(Config) ->
+ Dir = ?config(priv_dir,Config),
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => standard_io},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ #{%id := ?MODULE,
+ cb_state:=#{handler_state := #{type := standard_io,
+ file_ctrl_pid := FileCtrlPid},
+ filesync_repeat_interval := no_repeat},
+ sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER} =
+ logger_olp:info(h_proc_name()),
+
+ {ok,
+ #{config:=
+ #{type := standard_io,
+ sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := no_repeat} =
+ DefaultHConf}}
+ = logger:get_handler_config(?MODULE),
+
+ ok = logger:set_handler_config(?MODULE, config,
+ #{sync_mode_qlen => 1,
+ drop_mode_qlen => 2,
+ flush_qlen => 3,
+ burst_limit_enable => false,
+ burst_limit_max_count => 10,
+ burst_limit_window_time => 10,
+ overload_kill_enable => true,
+ overload_kill_qlen => 100000,
+ overload_kill_mem_size => 10000000,
+ overload_kill_restart_after => infinity,
+ filesync_repeat_interval => 5000}),
+ #{%id := ?MODULE,
+ cb_state := #{handler_state := #{type := standard_io,
+ file_ctrl_pid := FileCtrlPid},
+ filesync_repeat_interval := no_repeat},
+ sync_mode_qlen := 1,
+ drop_mode_qlen := 2,
+ flush_qlen := 3,
+ burst_limit_enable := false,
+ burst_limit_max_count := 10,
+ burst_limit_window_time := 10,
+ overload_kill_enable := true,
+ overload_kill_qlen := 100000,
+ overload_kill_mem_size := 10000000,
+ overload_kill_restart_after := infinity} = logger_olp:info(h_proc_name()),
+
+ {ok,#{config :=
+ #{type := standard_io,
+ sync_mode_qlen := 1,
+ drop_mode_qlen := 2,
+ flush_qlen := 3,
+ burst_limit_enable := false,
+ burst_limit_max_count := 10,
+ burst_limit_window_time := 10,
+ overload_kill_enable := true,
+ overload_kill_qlen := 100000,
+ overload_kill_mem_size := 10000000,
+ overload_kill_restart_after := infinity,
+ filesync_repeat_interval := no_repeat} = HConf}} =
+ logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{flush_qlen => ?FLUSH_QLEN}),
+ {ok,#{config:=C1}} = logger:get_handler_config(?MODULE),
+ ct:log("C1: ~p",[C1]),
+ C1 = HConf#{flush_qlen => ?FLUSH_QLEN},
+
+ ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C2}} = logger:get_handler_config(?MODULE),
+ ct:log("C2: ~p",[C2]),
+ C2 = DefaultHConf#{sync_mode_qlen => 1},
+
+ ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}),
+ {ok,#{config:=C3}} = logger:get_handler_config(?MODULE),
+ ct:log("C3: ~p",[C3]),
+ C3 = DefaultHConf#{drop_mode_qlen => 100},
+
+ ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C4}} = logger:get_handler_config(?MODULE),
+ ct:log("C4: ~p",[C4]),
+ C4 = DefaultHConf#{sync_mode_qlen => 1,
+ drop_mode_qlen => 100},
+
+ ok = logger:remove_handler(?MODULE),
+
+ File = filename:join(Dir,lists:concat([?FUNCTION_NAME,".log"])),
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => {file,File}},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ {ok,#{config:=#{filesync_repeat_interval:=FSI}=FileHConfig}} =
+ logger:get_handler_config(?MODULE),
+ ok = logger:update_handler_config(?MODULE,config,
+ #{filesync_repeat_interval=>FSI+2000}),
+ {ok,#{config:=C5}} = logger:get_handler_config(?MODULE),
+ ct:log("C5: ~p",[C5]),
+ C5 = FileHConfig#{filesync_repeat_interval=>FSI+2000},
+
+    %% You are not allowed to actively set 'type' at runtime, since
+    %% this is a write-once field.
+ {error, {illegal_config_change,logger_std_h,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{type=>standard_io}),
+ {ok,#{config:=C6}} = logger:get_handler_config(?MODULE),
+ ct:log("C6: ~p",[C6]),
+ C6 = C5,
+
+ %% ... but if you don't specify 'type', then set_handler_config shall
+ %% NOT reset it to its default value
+ ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+ {ok,#{config:=C7}} = logger:get_handler_config(?MODULE),
+ ct:log("C7: ~p",[C7]),
+ C7 = FileHConfig#{sync_mode_qlen=>1},
+ ok.
+
+reconfig(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+
+file_opts(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
+ MissingOpts = [raw],
+ Type1 = {file,Log,MissingOpts},
+ ok = logger:add_handler(?MODULE, logger_std_h,
+ #{config => #{type => Type1}}),
+ {ok,#{config:=#{type:=file,file:=Log,modes:=Modes1}}} =
+ logger:get_handler_config(?MODULE),
+ [append,delayed_write,raw] = lists:sort(Modes1),
+ ok = logger:remove_handler(?MODULE),
+
+ OkFileOpts = [raw,append],
+ OkType = {file,Log,OkFileOpts},
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => OkType}, % old format
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ ModOpts = [delayed_write|OkFileOpts],
+ #{cb_state := #{handler_state := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => file,
+ file => Log,
+ modes => OkFileOpts}, % new format
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ #{cb_state := #{handler_state := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
+ logger:get_handler_config(?MODULE),
+ logger:notice(M1=?msg,?domain),
+ ?check(M1),
+ B1 = ?bin(M1),
+ try_read_file(Log, {ok,B1}, filesync_rep_int()),
+ ok.
+file_opts(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+
+sync(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
+ Type = {file,Log},
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => Type,
+ file_check => 10000},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,nl}}),
+
+ %% check repeated filesync happens
+ start_tracer([{logger_std_h, write_to_dev, 2},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"first\n">>},
+ {file,datasync}]),
+
+ logger:notice("first", ?domain),
+ %% wait for automatic filesync
+ check_tracer(filesync_rep_int()*2),
+
+ %% check that explicit filesync is only done once
+ start_tracer([{logger_std_h, write_to_dev, 2},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"second\n">>},
+ {file,datasync},
+ {no_more,500}
+ ]),
+ logger:notice("second", ?domain),
+ %% do explicit sync
+ logger_std_h:filesync(?MODULE),
+ %% a second sync should be ignored
+ logger_std_h:filesync(?MODULE),
+ check_tracer(100),
+
+    %% check that if there's no repeated filesync active,
+    %% a filesync is still performed when the handler goes idle
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
+ no_repeat = maps:get(filesync_repeat_interval,
+ maps:get(cb_state, logger_olp:info(h_proc_name()))),
+ start_tracer([{logger_std_h, write_to_dev, 2},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"third\n">>},
+ {file,datasync},
+ {logger_std_h, write_to_dev, <<"fourth\n">>},
+ {file,datasync}]),
+ logger:notice("third", ?domain),
+ %% wait for automatic filesync
+ timer:sleep(?IDLE_DETECT_TIME*2),
+ logger:notice("fourth", ?domain),
+ %% wait for automatic filesync
+ check_tracer(?IDLE_DETECT_TIME*2),
+
+ %% switch repeated filesync on and verify that the looping works
+ SyncInt = 1000,
+ WaitT = 4500,
+ OneSync = {logger_h_common,handle_cast,repeated_filesync},
+ %% receive 1 repeated_filesync per sec
+ start_tracer([{{logger_h_common,handle_cast,2},
+ [{[repeated_filesync,'_'],[],[]}]}],
+ [OneSync || _ <- lists:seq(1, trunc(WaitT/SyncInt))]),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => SyncInt}),
+ SyncInt = maps:get(filesync_repeat_interval,
+ maps:get(cb_state,logger_olp:info(h_proc_name()))),
+ timer:sleep(WaitT),
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
+ check_tracer(100),
+ ok.
+sync(cleanup, _Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
+
+write_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
+ Log = filename:join(Dir, File),
+ Node = start_std_h_on_new_node(Config, Log),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [file_write,ok]),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+ try_read_file(Log, {ok,<<"Logged1\n">>}, filesync_rep_int()),
+
+ rpc:call(Node, ?MODULE, set_result, [file_write,{error,terminated}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,write,Log,{error,terminated}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [file_write,{error,eacces}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,write,Log,{error,eacces}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [file_write,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+ try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, filesync_rep_int()),
+ ok.
+write_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+sync_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
+ Log = filename:join(Dir, File),
+ Node = start_std_h_on_new_node(Config, Log),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]),
+
+ SyncInt = 500,
+ ok = rpc:call(Node, logger, update_handler_config,
+ [?STANDARD_HANDLER, config,
+ #{filesync_repeat_interval => SyncInt}]),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,terminated}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,terminated}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,eacces}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,eacces}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ ?check_no_log,
+ ok.
+sync_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+start_std_h_on_new_node(Config, Log) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(
+ Config,
+ [{logger,[{handler,default,logger_std_h,
+ #{ config => #{ type => {file,Log}}}}]}]),
+ ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
+ {?MODULE,nl}]),
+ Node.
+
+%% functions for test hook macros to be called by rpc
+set_internal_log(_Mod, _Func) ->
+ ?set_internal_log({_Mod,_Func}).
+set_result(_Op, _Result) ->
+ ?set_result(_Op, _Result).
+set_defaults() ->
+ ?set_defaults().
+
+%% internal log function that sends the term to the test case process
+internal_log(Type, Term) ->
+ [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester),
+ Tester ! {log,{Type,Term}},
+ logger:internal_log(Type, Term),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Overload protection tests
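+%%% These tests tune sync_mode_qlen, drop_mode_qlen and flush_qlen to force
+%%% the handler into the corresponding overload protection modes, and then
+%%% count how many of the generated events actually reach the log.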
+
+op_switch_to_sync_file(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 500,
+ NewHConfig =
+ HConfig#{config => StdHConfig#{sync_mode_qlen => 2,
+ drop_mode_qlen => NumOfReqs+1,
+ flush_qlen => 2*NumOfReqs,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ %% TRecvPid = start_op_trace(),
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Lines = count_lines(Log),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(async,Events) end),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(sync,Events) end),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_switch(async,sync,Events) end),
+ %% false = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(drop,Events) end),
+ %% false = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(flush,Events) end),
+ %% stop_op_trace(TRecvPid),
+ NumOfReqs = Lines,
+ ok = file_delete(Log),
+ ok.
+op_switch_to_sync_file(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_sync_tty(Config) ->
+ {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+ NumOfReqs = 500,
+ NewHConfig =
+ HConfig#{config => StdHConfig#{sync_mode_qlen => 3,
+ drop_mode_qlen => NumOfReqs+1,
+ flush_qlen => 2*NumOfReqs,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ ok.
+op_switch_to_sync_tty(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_drop_file() ->
+ [{timetrap,{seconds,180}}].
+op_switch_to_drop_file(Config) ->
+ Test =
+ fun() ->
+ {Log,HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 300,
+ Procs = 2,
+ Bursts = 10,
+ NewHConfig =
+ HConfig#{config =>
+ StdHConfig#{sync_mode_qlen => 1,
+ drop_mode_qlen => 2,
+ flush_qlen =>
+ Procs*NumOfReqs*Bursts,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+                %% It sometimes happens that the handler gets the
+                %% requests at a slow enough pace that dropping
+                %% never occurs. Therefore, let's generate a number of
+                %% bursts to increase the chance of message buildup.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) ||
+ _ <- lists:seq(1, Bursts)],
+ Logged = count_lines(Log),
+ ok = stop_handler(?MODULE),
+ ct:pal("Number of messages dropped = ~w (~w)",
+ [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]),
+ true = (Logged < (Procs*NumOfReqs*Bursts)),
+ true = (Logged > 0),
+ _ = file_delete(Log),
+ ok
+ end,
+    %% As it's tricky to get the timing right in only one go, we perform the
+    %% test repeatedly, hoping that this will generate a successful result.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
+op_switch_to_drop_file(cleanup, _Config) ->
+ _ = stop_handler(?MODULE).
+
+op_switch_to_drop_tty(Config) ->
+ {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+ NumOfReqs = 300,
+ Procs = 2,
+ NewHConfig =
+ HConfig#{config => StdHConfig#{sync_mode_qlen => 1,
+ drop_mode_qlen => 2,
+ flush_qlen =>
+ Procs*NumOfReqs+1,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ ok.
+op_switch_to_drop_tty(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_flush_file() ->
+ [{timetrap,{minutes,5}}].
+op_switch_to_flush_file(Config) ->
+ Test =
+ fun() ->
+ {Log,HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ %% NOTE: it's important that both async and sync
+ %% requests have been queued when the flush happens
+ %% (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{config =>
+ StdHConfig#{sync_mode_qlen => 2,
+ %% disable drop mode
+ drop_mode_qlen => 300,
+ flush_qlen => 300,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 1500,
+ Procs = 10,
+ Bursts = 10,
+                %% It sometimes happens that the handler either gets
+                %% the requests at a slow enough pace that flushing
+                %% never occurs, or it gets all messages at once,
+                %% causing all messages to get flushed (so no dropping of
+                %% sync messages gets tested). Therefore, let's
+                %% generate a number of bursts to increase the chance
+                %% of message buildup in some random fashion.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) ||
+ _ <- lists:seq(1,Bursts)],
+ Logged = count_lines(Log),
+ ok = stop_handler(?MODULE),
+ ct:pal("Number of messages flushed/dropped = ~w (~w)",
+ [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]),
+ true = (Logged < (NumOfReqs*Procs*Bursts)),
+ true = (Logged > 0),
+ _ = file_delete(Log),
+ ok
+ end,
+    %% As it's tricky to get the timing right in only one go, we perform the
+    %% test repeatedly, hoping that this will generate a successful result.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
+op_switch_to_flush_file(cleanup, _Config) ->
+ _ = stop_handler(?MODULE).
+
+op_switch_to_flush_tty() ->
+ [{timetrap,{minutes,5}}].
+op_switch_to_flush_tty(Config) ->
+ {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+
+ %% it's important that both async and sync requests have been queued
+ %% when the flush happens (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{config => StdHConfig#{sync_mode_qlen => 2,
+ %% disable drop mode
+ drop_mode_qlen => 100,
+ flush_qlen => 100,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 1000,
+ Procs = 100,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ ok.
+op_switch_to_flush_tty(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_disabled(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config => StdHConfig#{burst_limit_enable => false,
+ burst_limit_max_count => 10,
+ burst_limit_window_time => 2000,
+ drop_mode_qlen => 200,
+ flush_qlen => 300}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ NumOfReqs = Logged,
+ ok = file_delete(Log),
+ ok.
+limit_burst_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_one(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ NewHConfig =
+ HConfig#{config => StdHConfig#{burst_limit_enable => true,
+ burst_limit_max_count => ReqLimit,
+ burst_limit_window_time => 2000,
+ drop_mode_qlen => 200,
+ flush_qlen => 300}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ReqLimit = Logged,
+ ok = file_delete(Log),
+ ok.
+limit_burst_enabled_one(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_period(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ BurstTWin = 1000,
+ NewHConfig =
+ HConfig#{config => StdHConfig#{burst_limit_enable => true,
+ burst_limit_max_count => ReqLimit,
+ burst_limit_window_time => BurstTWin,
+ drop_mode_qlen => 20000,
+ flush_qlen => 20001}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+
+ Windows = 3,
+ Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ true = (Logged > (ReqLimit*Windows)) andalso
+ (Logged < (ReqLimit*(Windows+2))),
+ ok = file_delete(Log),
+ ok.
+limit_burst_enabled_period(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+kill_disabled(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config=>StdHConfig#{overload_kill_enable=>false,
+ overload_kill_qlen=>10,
+ overload_kill_mem_size=>100}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file_delete(Log),
+ true = is_pid(whereis(h_proc_name())),
+ ok.
+kill_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+qlen_kill_new(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(h_proc_name()),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+ NewHConfig =
+ HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_mem_size=>Mem0+50000,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 4,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ file_delete(Log),
+ {ok,_} = wait_for_process_up(RestartAfter * 3),
+ ok
+ after
+ 5000 ->
+ Info = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+qlen_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% Choke the standard handler on a remote node to verify that the
+%% termination works as expected
+qlen_kill_std(_Config) ->
+ %%! HERE
+ %% Dir = ?config(priv_dir, Config),
+ %% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
+ %% Log = filename:join(Dir, File),
+ %% Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log),
+ %% ok = rpc:call(Node, logger, update_handler_config,
+ %% [?STANDARD_HANDLER, config,
+ %% #{overload_kill_enable=>true,
+ %% overload_kill_qlen=>10,
+ %% overload_kill_mem_size=>100000}]),
+ {skip,"Not done yet"}.
+
+mem_kill_new(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(h_proc_name()),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+ NewHConfig =
+ HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>50000,
+ overload_kill_mem_size=>Mem0+500,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 4,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ file_delete(Log),
+ {ok,_} = wait_for_process_up(RestartAfter * 3),
+ ok
+ after
+ 5000 ->
+ Info = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+mem_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% Choke the standard handler on a remote node to verify that the
+%% termination works as expected
+mem_kill_std(_Config) ->
+ {skip,"Not done yet"}.
+
+restart_after() ->
+ [{timetrap,{minutes,2}}].
+restart_after(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig1 =
+ HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_restart_after=>infinity}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig1),
+ MRef1 = erlang:monitor(process, whereis(h_proc_name())),
+ %% kill handler
+ send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
+ receive
+ {'DOWN', MRef1, _, _, _Reason1} ->
+ file_delete(Log),
+ error = wait_for_process_up(?OVERLOAD_KILL_RESTART_AFTER * 3),
+ ok
+ after
+ 5000 ->
+ Info1 = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info1]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+
+ {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+
+ NewHConfig2 =
+ HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig2),
+ Pid0 = whereis(h_proc_name()),
+ MRef2 = erlang:monitor(process, Pid0),
+ %% kill handler
+ send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
+ receive
+ {'DOWN', MRef2, _, _, _Reason2} ->
+ file_delete(Log),
+ {ok,Pid1} = wait_for_process_up(RestartAfter * 3),
+ false = (Pid1 == Pid0),
+ ok
+ after
+ 5000 ->
+ Info2 = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info2]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+ ok.
+restart_after(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% Send handler requests (sync, info, reset, change_config)
+%% during high load to verify that sync, dropping and flushing are
+%% handled correctly.
+handler_requests_under_load() ->
+ [{timetrap,{minutes,3}}].
+handler_requests_under_load(Config) ->
+ {Log,HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config => StdHConfig#{sync_mode_qlen => 2,
+ drop_mode_qlen => 1000,
+ flush_qlen => 2000,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ Pid = spawn_link(
+ fun() -> send_requests(1,[{logger_std_h,filesync,[?MODULE],[]},
+ {logger_olp,info,[h_proc_name()],[]},
+ {logger_olp,reset,[h_proc_name()],[]},
+ {logger,update_handler_config,
+ [?MODULE, config,
+ #{overload_kill_enable => false}],
+ []}])
+ end),
+ Sent = send_burst({t,10000}, seq, {chars,79}, notice),
+ Pid ! {self(),finish},
+ ReqResult = receive {Pid,Result} -> Result end,
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ FindError = fun(Res) ->
+ [E || E <- Res,
+ is_tuple(E) andalso (element(1,E) == error)]
+ end,
+ Errors = [{Func,FindError(Res)} || {_,Func,_,Res} <- ReqResult],
+ NoOfReqs = lists:foldl(fun({_,_,_,Res}, N) -> N + length(Res) end,
+ 0, ReqResult),
+ ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]),
+ ok = file_delete(Log).
+handler_requests_under_load(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+recreate_deleted_log(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ logger:notice("first",?domain),
+ logger_std_h:filesync(?MODULE),
+ ok = file:rename(Log,Log++".old"),
+ logger:notice("second",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,<<"first\n">>} = file:read_file(Log++".old"),
+ {ok,<<"second\n">>} = file:read_file(Log),
+ ok.
+recreate_deleted_log(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+reopen_changed_log(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ logger:notice("first",?domain),
+ logger_std_h:filesync(?MODULE),
+ ok = file:rename(Log,Log++".old"),
+ ok = file:write_file(Log,""),
+ logger:notice("second",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,<<"first\n">>} = file:read_file(Log++".old"),
+ {ok,<<"second\n">>} = file:read_file(Log),
+ ok.
+reopen_changed_log(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+rotate_size(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,#{config=>#{max_no_bytes=>1000,
+ max_no_files=>2}}),
+
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+
+ logger:notice(Str,?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,51)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ logger:notice("bbbb",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1005}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ ok.
+rotate_size(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotate_size_compressed(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,
+ #{config=>#{max_no_bytes=>1000,
+ max_no_files=>2,
+ compress_on_rotate=>true}}),
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {error,enoent} = file:read_file_info(Log++".0.gz"),
+
+ logger:notice(Str,?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".1.gz"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,51)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ logger:notice("bbbb",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=38}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ ok.
+rotate_size_compressed(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotate_size_reopen(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,#{config=>#{max_no_bytes=>1000,
+ max_no_files=>2}}),
+
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,40)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=800}} = file:read_file_info(Log),
+
+ {ok,HConfig} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(?MODULE,maps:get(module,HConfig),HConfig),
+ {ok,#file_info{size=800}} = file:read_file_info(Log),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,40)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=580}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ ok.
+rotate_size_reopen(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotation_opts(Config) ->
+ {Log,_HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ #{max_no_bytes:=infinity,
+ max_no_files:=0,
+ compress_on_rotate:=false} = StdHConfig,
+
+ %% Test bad rotation config
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,#{max_no_bytes=>0}),
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,#{max_no_files=>infinity}),
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,
+ #{compress_on_rotate=>undefined}),
+
+
+ %% Test good rotation config - start with no rotation
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=200}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+    %% Turn on rotation and check that the existing file is rotated,
+    %% since its size exceeds max_no_bytes
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ Log0 = Log++".0",
+ {ok,#file_info{size=200}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,13)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Extend size and count and check that nothing changes with existing files
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>200,
+ max_no_files=>3}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Add more log events and see that extended size and count works
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=220}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".2"),
+ [_,_,_] = filelib:wildcard(Log++".*"),
+
+    %% Reduce count and check that archive files that exceed the new
+    %% count are removed
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_files=>1}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=220}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Extend size and count again, and turn on compression. Check
+ %% that archives are compressed
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2,
+ compress_on_rotate=>true}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ Log0gz = Log0++".gz",
+ {ok,#file_info{size=29}} = file:read_file_info(Log0gz),
+ [Log0gz] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,13)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=29}} = file:read_file_info(Log0gz),
+ {ok,#file_info{size=29}} = file:read_file_info(Log++".1.gz"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+    %% Reduce count and turn off compression. Check that archives that
+    %% exceed the new count are removed, and the rest are
+    %% uncompressed.
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_files=>1,
+ compress_on_rotate=>false}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Check that config and handler state agree on the current rotation settings
+ {ok,#{config:=#{max_no_bytes:=100,
+ max_no_files:=1,
+ compress_on_rotate:=false}}} =
+ logger:get_handler_config(?MODULE),
+ #{cb_state:=#{handler_state:=#{max_no_bytes:=100,
+ max_no_files:=1,
+ compress_on_rotate:=false}}} =
+ logger_olp:info(h_proc_name()),
+ ok.
+rotation_opts(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotation_opts_restart_handler(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2}),
+
+ %% Fill all logs
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,15)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=60}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Stop/start handler and turn off rotation. Check that archives are removed.
+ {ok,#{config:=StdHConfig1}=HConfig1} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig1#{config=>StdHConfig1#{max_no_bytes=>infinity}}),
+ timer:sleep(100),
+ {ok,#file_info{size=60}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+    %% Add some log events and check that the file is no longer rotated.
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=260}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+    %% Stop/start handler and turn on rotation. Check that the file is rotated.
+ {ok,#{config:=StdHConfig2}=HConfig2} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig2#{config=>StdHConfig2#{max_no_bytes=>100,
+ max_no_files=>2}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=260}} = file:read_file_info(Log++".0"),
+ [_] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=80}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=260}} = file:read_file_info(Log++".1"),
+
+ %% Stop/start handler, reduce count and turn on compression. Check
+ %% that excess archives are removed, and the rest compressed.
+ {ok,#{config:=StdHConfig3}=HConfig3} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig3#{config=>StdHConfig3#{max_no_bytes=>75,
+ max_no_files=>1,
+ compress_on_rotate=>true}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=29}} = file:read_file_info(Log++".0.gz"),
+ [_] = filelib:wildcard(Log++".*"),
+
+    %% Stop/start handler and turn off compression. Check that archives
+    %% are decompressed.
+ {ok,#{config:=StdHConfig4}=HConfig4} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig4#{config=>StdHConfig4#{compress_on_rotate=>false}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=80}} = file:read_file_info(Log++".0"),
+ [_] = filelib:wildcard(Log++".*"),
+
+ ok.
+rotation_opts_restart_handler(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+%%%-----------------------------------------------------------------
+%%%
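+%% Cycle through the given requests, applying one every TO milliseconds
+%% and accumulating its result, until the test case sends 'finish'.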
+send_requests(TO, Reqs = [{Mod,Func,Args,Res}|Rs]) ->
+ receive
+ {From,finish} ->
+ From ! {self(),Reqs}
+ after
+ TO ->
+ Result = apply(Mod,Func,Args),
+ send_requests(TO, Rs ++ [{Mod,Func,Args,[Result|Res]}])
+ end.
+
+
+%%%-----------------------------------------------------------------
+%%%
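+%% Add a test handler, either logging to a tty device (standard_io or
+%% standard_error) or to a file named after the test case. The file
+%% variant also returns the name of the log file.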
+start_handler(Name, TTY, _Config) when TTY == standard_io;
+                                       TTY == standard_error ->
+ ok = logger:add_handler(Name,
+ logger_std_h,
+ #{config => #{type => TTY},
+ filter_default=>stop,
+ filters=>filter_only_this_domain(Name),
+ formatter=>{?MODULE,op}}),
+ {ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name),
+ {HConfig,StdHConfig};
+
+start_handler(Name, FuncName, Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([FuncName,".log"])),
+ ct:pal("Logging to ~tp", [Log]),
+ Type = {file,Log},
+ _ = file_delete(Log),
+ ok = logger:add_handler(Name,
+ logger_std_h,
+ #{config => #{type => Type},
+ filter_default=>stop,
+ filters=>filter_only_this_domain(Name),
+ formatter=>{?MODULE,op}}),
+ {ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name),
+ {Log,HConfig,StdHConfig}.
+
+filter_only_this_domain(Name) ->
+ [{remote_gl,{fun logger_filters:remote_gl/2,stop}},
+ {domain,{fun logger_filters:domain/2,{log,super,[Name]}}}].
+
+
+stop_handler(Name) ->
+ R = logger:remove_handler(Name),
+ ct:pal("Handler ~p stopped! Result: ~p", [Name,R]),
+ R.
+
+count_lines(File) ->
+ wait_until_written(File, -1),
+ count_lines1(File).
+
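+%% Wait until the file size is stable over two consecutive reads before
+%% counting lines.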
+wait_until_written(File, Sz) ->
+ timer:sleep(2000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ timer:sleep(1000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ ok;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end.
+
+count_lines1(File) ->
+ {_,Dev} = file:open(File, [read]),
+ Lines = count_lines2(Dev, 0),
+ file:close(Dev),
+ Lines.
+
+count_lines2(Dev, LC) ->
+ case file:read_line(Dev) of
+ {ok,"Handler logger_std_h_SUITE " ++_} ->
+ %% Not counting handler info
+ count_lines2(Dev,LC);
+ {ok,_} ->
+ count_lines2(Dev,LC+1);
+ eof -> LC
+ end.
+
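+%% Send a burst of log events, either a fixed number ({n,N}) or for a time
+%% period of T milliseconds ({t,T}), each event being a random printable
+%% string of Sz characters. Returns N for {n,N}, or the number of events
+%% sent for {t,T}.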
+send_burst(NorT, Type, {chars,Sz}, Class) ->
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)],
+ case NorT of
+ {n,N} ->
+ %% process_flag(priority, high),
+ send_n_burst(N, Type, Text, Class),
+ %% process_flag(priority, normal),
+ N;
+ {t,T} ->
+ ct:pal("Sending messages sequentially for ~w ms", [T]),
+ T0 = erlang:monotonic_time(millisecond),
+ send_t_burst(T0, T, Text, Class, 0)
+ end.
+
+send_n_burst(0, _, _Text, _Class) ->
+ ok;
+send_n_burst(N, seq, Text, Class) ->
+ ok = logger:Class(Text, ?domain),
+ send_n_burst(N-1, seq, Text, Class);
+send_n_burst(N, {spawn,Ps,TO}, Text, Class) ->
+ ct:pal("~w processes each sending ~w messages", [Ps,N]),
+ MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end,
+ monitor(process,spawn_link(per_proc_fun(N,Text,Class,X)))
+ end || X <- lists:seq(1,Ps)],
+ lists:foreach(fun(MRef) ->
+ receive
+ {'DOWN', MRef, _, _, _} ->
+ ok
+ end
+ end, MRefs),
+ ct:pal("Message burst sent", []),
+ ok.
+
+send_t_burst(T0, T, Text, Class, N) ->
+ T1 = erlang:monotonic_time(millisecond),
+ if (T1-T0) > T ->
+ N;
+ true ->
+ ok = logger:Class(Text, ?domain),
+ send_t_burst(T0, T, Text, Class, N+1)
+ end.
+
+per_proc_fun(N,Text,Class,X) when X rem 2 == 0 ->
+ fun() ->
+ process_flag(priority,high),
+ send_n_burst(N, seq, Text, Class)
+ end;
+per_proc_fun(N,Text,Class,_) ->
+ fun() ->
+ send_n_burst(N, seq, Text, Class)
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Formatter callback
+%%% Using this to send the formatted string back to the test case
+%%% process - so it can check for logged events.
+format(_,bad_return) ->
+ bad_return;
+format(_,crash) ->
+ erlang:error(formatter_crashed);
+format(#{msg:={string,String0}},no_nl) ->
+ String = unicode:characters_to_list(String0),
+ String;
+format(#{msg:={string,String0}},nl) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={string,String0}},op) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={report,#{label:={supervisor,progress}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={gen_server,terminate}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={proc_lib,crash}}}},op) ->
+ "";
+format(#{msg:={F,A}},OpOrPid) when is_list(F), is_list(A) ->
+ String = lists:flatten(io_lib:format(F,A)),
+ if is_pid(OpOrPid) -> OpOrPid ! {log,String};
+ true -> ok
+ end,
+ String++"\n";
+format(#{msg:={string,String0}},Pid) ->
+ String = unicode:characters_to_list(String0),
+ Pid ! {log,String},
+ String++"\n".
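+
+%% When a handler is added with formatter=>{?MODULE,self()}, the last
+%% two clauses above side-channel each formatted string to the test
+%% process as a {log,String} message. A receiving sketch (illustrative
+%% only, hypothetical timeout value):
+%%
+%%   receive
+%%       {log,String} -> {ok,String}
+%%   after 1000 -> no_log_event
+%%   end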
+
+add_remove_instance_nofile(Type) ->
+ ok = logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{type => Type},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ Pid = whereis(h_proc_name()),
+ true = is_pid(Pid),
+ group_leader(group_leader(),Pid), % to get printouts in test log
+ logger:notice(M1=?msg,?domain),
+ ?check(M1),
+ %% check that sync doesn't do damage even if not relevant
+ ok = logger_std_h:filesync(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ timer:sleep(500),
+ undefined = whereis(h_proc_name()),
+ logger:notice(?msg,?domain),
+ ?check_no_log,
+ ok.
+
+logger_std_h_remove() ->
+ logger:remove_handler(?MODULE).
+logger_std_h_remove(Id) ->
+ logger:remove_handler(Id).
+
+try_read_file(FileName, Expected, Time) when Time > 0 ->
+ case file:read_file(FileName) of
+ Expected ->
+ ok;
+ Error = {error,_Reason} ->
+ ct:pal("Can't read ~tp: ~tp", [FileName,Error]),
+ erlang:error(Error);
+ Got ->
+ ct:pal("try_read_file got ~tp", [Got]),
+ timer:sleep(500),
+ try_read_file(FileName, Expected, Time-500)
+ end;
+try_read_file(FileName, Expected, _) ->
+ ct:pal("Missing pattern ~tp in ~tp", [Expected,FileName]),
+ erlang:error({error,missing_expected_pattern}).
+
+try_match_file(FileName, Pattern, Time) ->
+ try_match_file(FileName, Pattern, Time, <<>>).
+
+try_match_file(FileName, Pattern, Time, _) when Time > 0 ->
+ case file:read_file(FileName) of
+ {ok, Bin} ->
+ case re:run(Bin,Pattern,[{capture,none}]) of
+ match ->
+ unicode:characters_to_list(Bin);
+ _ ->
+ timer:sleep(100),
+ try_match_file(FileName, Pattern, Time-100, Bin)
+ end;
+ Error ->
+ erlang:error(Error)
+ end;
+try_match_file(_,Pattern,_,Incorrect) ->
+ ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n",
+ [Pattern,Incorrect]),
+ erlang:error({error,not_matching_pattern,Pattern,Incorrect}).
+
+repeat_until_ok(Fun, N) ->
+ repeat_until_ok(Fun, 0, N, undefined).
+
+repeat_until_ok(_Fun, Stop, Stop, Reason) ->
+ {fails,Reason};
+
+repeat_until_ok(Fun, C, Stop, FirstReason) ->
+ if C > 0 -> timer:sleep(5000);
+ true -> ok
+ end,
+ try Fun() of
+ Result ->
+ {ok,{C,Result}}
+ catch
+ _:Reason:Stack ->
+ ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]),
+ if FirstReason == undefined ->
+ repeat_until_ok(Fun, C+1, Stop, {Reason,Stack});
+ true ->
+ repeat_until_ok(Fun, C+1, Stop, FirstReason)
+ end
+ end.
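+
+%% Typical use of the retry helper above (sketch with a hypothetical
+%% log file and expected line count): rerun a timing-sensitive
+%% assertion a few times before giving up, e.g.
+%%
+%%   {ok,_} = repeat_until_ok(fun() -> 3 = count_lines(Log) end, 5)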
+
+
+%%%-----------------------------------------------------------------
+%%%
+start_op_trace() ->
+ TraceFun = fun({trace,_,call,{_Mod,Func,Details}}, Pid) ->
+ Pid ! {trace_call,Func,Details},
+ Pid;
+ ({trace,_,return_from,{_Mod,Func,_},RetVal}, Pid) ->
+ Pid ! {trace_return,Func,RetVal},
+ Pid
+ end,
+ TRecvPid = spawn_link(fun() -> trace_receiver(5000) end),
+ {ok,_} = dbg:tracer(process, {TraceFun, TRecvPid}),
+
+ {ok,_} = dbg:p(whereis(h_proc_name()), [c]),
+ {ok,_} = dbg:p(self(), [c]),
+
+ MS1 = dbg:fun2ms(fun([_]) -> return_trace() end),
+ {ok,_} = dbg:tpl(logger_h_common, check_load, 1, MS1),
+
+ {ok,_} = dbg:tpl(logger_h_common, flush_log_requests, 2, []),
+
+ MS2 = dbg:fun2ms(fun([_,mode]) -> return_trace() end),
+ {ok,_} = dbg:tpl(ets, lookup, 2, MS2),
+
+ ct:pal("Tracing started!", []),
+ TRecvPid.
+
+stop_op_trace(TRecvPid) ->
+ dbg:stop_clear(),
+ unlink(TRecvPid),
+ exit(TRecvPid, kill),
+ ok.
+
+find_mode(flush, Events) ->
+ lists:any(fun({trace_call,flush_log_requests,[_,_]}) -> true;
+ (_) -> false
+ end, Events);
+find_mode(Mode, Events) ->
+ lists:keymember([{mode,Mode}], 3, Events).
+
+%% find_switch(_From, To, Events) ->
+%% try lists:foldl(fun({trace_return,check_load,{To,_,_,_}},
+%% {trace_call,check_load,[#{mode := From}]}) ->
+%% throw(match);
+%% (Event, _) ->
+%% Event
+%% end, undefined, Events) of
+%% _ -> false
+%% catch
+%% throw:match -> true
+%% end.
+
+analyse_trace(TRecvPid, TestFun) ->
+ TRecvPid ! {test,self(),TestFun},
+ receive
+ {result,TRecvPid,Result} ->
+ Result
+ after
+ 60000 ->
+ fails
+ end.
+
+trace_receiver(IdleT) ->
+ Msgs = receive_until_idle(IdleT, 5, []),
+ ct:pal("~w trace events generated", [length(Msgs)]),
+ analyse(Msgs).
+
+receive_until_idle(IdleT, WaitN, Msgs) ->
+ receive
+ Msg = {trace_call,_,_} ->
+ receive_until_idle(IdleT, 5, [Msg | Msgs]);
+ Msg = {trace_return,_,_} ->
+ receive_until_idle(IdleT, 5, [Msg | Msgs])
+ after
+ IdleT ->
+ if WaitN == 0 ->
+ Msgs;
+ true ->
+ receive_until_idle(IdleT, WaitN-1, Msgs)
+ end
+ end.
+
+analyse(Msgs) ->
+ receive
+ {test,From,TestFun} ->
+ From ! {result,self(),TestFun(Msgs)},
+ analyse(Msgs)
+ end.
+
+start_tracer(Trace,Expected) ->
+ Pid = self(),
+ FileCtrlPid = maps:get(file_ctrl_pid,
+ maps:get(handler_state,
+ maps:get(cb_state,
+ logger_olp:info(h_proc_name())))),
+ dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
+ dbg:p(whereis(h_proc_name()),[c]),
+ dbg:p(FileCtrlPid,[c]),
+ tpl(Trace),
+ ok.
+
+tpl([{M,F,A}|Trace]) ->
+ tpl([{{M,F,A},[]}|Trace]);
+tpl([{{M,F,A},MS}|Trace]) ->
+ {ok,Match} = dbg:tpl(M,F,A,MS),
+ case lists:keyfind(matched,1,Match) of
+ {_,_,1} ->
+ ok;
+ _ ->
+ dbg:stop_clear(),
+ throw({skip,"Can't trace "++atom_to_list(M)++":"++
+ atom_to_list(F)++"/"++integer_to_list(A)})
+ end,
+ tpl(Trace);
+tpl([]) ->
+ ok.
+
+tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]}},
+ {Pid,[{Mod,Func,Op}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
+tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[Data,_]}},
+ {Pid,[{Mod,Func,Data}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Data});
+tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func});
+tracer({trace,_,call,Call}, {Pid,Expected}) ->
+ ct:log("Tracer got unexpected: ~p~nExpected: ~p~n",[Call,Expected]),
+ Pid ! {tracer_got_unexpected,Call,Expected},
+ {Pid,Expected}.
+
+maybe_tracer_done(Pid,[]=Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ Pid ! {tracer_done,0},
+ {Pid,Expected};
+maybe_tracer_done(Pid,[{no_more,T}]=Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ Pid ! {tracer_done,T},
+ {Pid,Expected};
+maybe_tracer_done(Pid,Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ {Pid,Expected}.
+
+check_tracer(T) ->
+ check_tracer(T,fun() -> ct:fail({timeout,tracer}) end).
+check_tracer(T,TimeoutFun) ->
+ receive
+ {tracer_done,Delay} ->
+ %% Possibly wait Delay ms to check that no unexpected
+ %% traces are received
+ check_tracer(Delay,fun() -> ok end);
+ {tracer_got_unexpected,Got,Expected} ->
+ dbg:stop_clear(),
+ ct:fail({tracer_got_unexpected,Got,Expected})
+ after T ->
+ dbg:stop_clear(),
+ TimeoutFun()
+ end.
+
+escape([$+|Rest]) ->
+ [$\\,$+|escape(Rest)];
+escape([H|T]) ->
+ [H|escape(T)];
+escape([]) ->
+ [].
+
+h_proc_name() ->
+ h_proc_name(?MODULE).
+h_proc_name(Name) ->
+ ?name_to_reg_name(logger_std_h,Name).
+
+wait_for_process_up(T) ->
+ wait_for_process_up(?MODULE, h_proc_name(), T).
+
+wait_for_process_up(Name, RegName, T) ->
+ N = (T div 500) + 1,
+ wait_for_process_up1(Name, RegName, N).
+
+wait_for_process_up1(_Name, _RegName, 0) ->
+ error;
+wait_for_process_up1(Name, RegName, N) ->
+ timer:sleep(500),
+ case whereis(RegName) of
+ Pid when is_pid(Pid) ->
+ case logger:get_handler_config(Name) of
+ {ok,_} ->
+ %% ct:pal("Process ~p up (~p tries left)",[Name,N]),
+ {ok,Pid};
+ _ ->
+ wait_for_process_up1(Name, RegName, N-1)
+ end;
+ undefined ->
+ %% ct:pal("Waiting for process ~p (~p tries left)",[Name,N]),
+ wait_for_process_up1(Name, RegName, N-1)
+ end.
+
+filesync_rep_int() ->
+ case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of
+ true -> 5500;
+ false -> ?FILESYNC_REPEAT_INTERVAL + 500
+ end.
+
+
+file_delete(Log) ->
+ file:delete(Log).
diff --git a/lib/kernel/test/logger_stress_SUITE.erl b/lib/kernel/test/logger_stress_SUITE.erl
new file mode 100644
index 0000000000..1a278fb1b2
--- /dev/null
+++ b/lib/kernel/test/logger_stress_SUITE.erl
@@ -0,0 +1,550 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_stress_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct_event.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_h_common.hrl").
+
+-ifdef(SAVE_STATS).
+ -define(COLLECT_STATS(_All_,_Procs_),
+ ct:pal("~p",[stats(_All_,_Procs_)])).
+-else.
+  -define(COLLECT_STATS(_All_,_Procs_), ok).
+-endif.
+
+-define(TEST_DURATION,120). % seconds
+
+suite() ->
+ [{timetrap,{minutes,3}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [allow_events,
+ reject_events,
+ std_handler,
+ disk_log_handler,
+ std_handler_time,
+ std_handler_time_big,
+ disk_log_handler_time,
+ disk_log_handler_time_big,
+ emulator_events,
+ remote_events,
+ remote_to_disk_log,
+ remote_emulator_events,
+ remote_emulator_to_disk_log].
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+%%%-----------------------------------------------------------------
+%% Time from log macro call to handler callback
+allow_events(Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(Config,
+ [{logger,
+ [{handler,default,?MODULE,#{}}]},
+ {logger_level,notice}]),
+ N = 100000,
+ {T,_} = timer:tc(fun() -> rpc:call(Node,?MODULE,nlogs,[N]) end),
+ IOPS = N * 1000/T, % log events allowed per millisecond
+ ct_event:notify(#event{name = benchmark_data,
+ data = [{value,IOPS}]}),
+ {comment,io_lib:format("~.2f accepted events per millisecond",
+ [IOPS])}.
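+
+%% Note on the figure: timer:tc/1 returns T in microseconds, so
+%% N * 1000 / T is events per millisecond. With hypothetical numbers,
+%% N = 100000 events measured over T = 2000000 us gives
+%% 100000 * 1000 / 2000000 = 50 accepted events per millisecond.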
+
+%% Time from log macro call to reject (log level)
+reject_events(Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(Config,
+ [{logger,
+ [{handler,default,?MODULE,#{}}]},
+ {logger_level,error}]),
+ N = 1000000,
+ {T,_} = timer:tc(fun() -> rpc:call(Node,?MODULE,nlogs,[N]) end),
+ IOPS = N * 1000/T, % log events rejected per millisecond
+ ct_event:notify(#event{name = benchmark_data,
+ data = [{value,IOPS}]}),
+ {comment,io_lib:format("~.2f rejected events per millisecond",
+ [IOPS])}.
+
+%% Cascading failure that produces gen_server and proc_lib reports -
+%% how many of the produced log events are actually written to a log
+%% with the logger_std_h file handler.
+std_handler(Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(Config,
+ [{logger,
+ [{handler,default,logger_std_h,
+ #{config=>#{type=>{file,"default.log"}}}}]}]),
+
+ cascade({Node,{logger_backend,log_allowed,2},[]},
+ {Node,{logger_std_h,write,4},[{default,logger_std_h_default}]},
+ fun otp_cascading/0).
+std_handler(cleanup,_Config) ->
+ _ = file:delete("default.log"),
+ ok.
+
+%% Disable overload protection and just print a lot - measure time.
+%% The IOPS reported is the number of log events written per millisecond.
+std_handler_time(Config) ->
+ measure_handler_time(logger_std_h,#{type=>{file,"default.log"}},Config).
+std_handler_time(cleanup,_Config) ->
+ _ = file:delete("default.log"),
+ ok.
+
+std_handler_time_big(Config) ->
+ measure_handler_time_big(logger_std_h,#{type=>{file,"default.log"}},Config).
+std_handler_time_big(cleanup,_Config) ->
+ _ = file:delete("default.log"),
+ ok.
+
+%% Cascading failure that produces gen_server and proc_lib reports -
+%% how many of the produced log events are actually written to a log
+%% with the logger_disk_log_h wrap file handler.
+disk_log_handler(Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(Config,
+ [{logger,
+ [{handler,default,logger_disk_log_h,#{}}]}]),
+ cascade({Node,{logger_backend,log_allowed,2},[]},
+ {Node,{logger_disk_log_h,write,4},
+ [{default,logger_disk_log_h_default}]},
+ fun otp_cascading/0).
+disk_log_handler(cleanup,_Config) ->
+ Files = filelib:wildcard("default.log.*"),
+ [_ = file:delete(F) || F <- Files],
+ ok.
+
+%% Disable overload protection and just print a lot - measure time.
+%% The IOPS reported is the number of log events written per millisecond.
+disk_log_handler_time(Config) ->
+ measure_handler_time(logger_disk_log_h,#{type=>halt},Config).
+disk_log_handler_time(cleanup,_Config) ->
+ _ = file:delete("default"),
+ ok.
+
+disk_log_handler_time_big(Config) ->
+ measure_handler_time_big(logger_disk_log_h,#{type=>halt},Config).
+disk_log_handler_time_big(cleanup,_Config) ->
+ _ = file:delete("default"),
+ ok.
+
+%% Cascading failure that produces log events from the emulator - how
+%% many of the produced log events pass through the proxy.
+emulator_events(Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(Config,
+ [{logger,
+ [{handler,default,?MODULE,#{}}]}]),
+ cascade({Node,{?MODULE,producer,0},[]},
+ {Node,{?MODULE,log,2},[{proxy,logger_proxy}]},
+ fun em_cascading/0).
+
+%% Cascading failure that produces gen_server and proc_lib reports on
+%% a remote node - how many of the produced log events pass through
+%% the proxy.
+remote_events(Config) ->
+ {ok,_,Node1} =
+ logger_test_lib:setup([{postfix,1}|Config],
+ [{logger,
+ [{handler,default,?MODULE,#{}}]}]),
+ {ok,_,Node2} =
+ logger_test_lib:setup([{postfix,2}|Config],[]),
+ cascade({Node2,{logger_backend,log_allowed,2},[{remote_proxy,logger_proxy}]},
+ {Node1,{?MODULE,log,2},[{local_proxy,logger_proxy}]},
+ fun otp_cascading/0).
+
+%% Cascading failure that produces gen_server and proc_lib reports on
+%% a remote node - how many of the produced log events are actually
+%% written to a log with the logger_disk_log_h wrap file handler.
+remote_to_disk_log(Config) ->
+ {ok,_,Node1} =
+ logger_test_lib:setup([{postfix,1}|Config],
+ [{logger,
+ [{handler,default,logger_disk_log_h,#{}}]}]),
+ {ok,_,Node2} =
+ logger_test_lib:setup([{postfix,2}|Config],[]),
+ cascade({Node2,{logger_backend,log_allowed,2},[{remote_proxy,logger_proxy}]},
+ {Node1,{logger_disk_log_h,write,4},
+ [{local_proxy,logger_proxy},
+ {local_default,logger_disk_log_h_default}]},
+ fun otp_cascading/0).
+remote_to_disk_log(cleanup,_Config) ->
+ Files = filelib:wildcard("default.log.*"),
+ [_ = file:delete(F) || F <- Files],
+ ok.
+
+%% Cascading failure that produces log events from the emulator on a
+%% remote node - how many of the produced log events pass through the
+%% proxy.
+remote_emulator_events(Config) ->
+ {ok,_,Node1} =
+ logger_test_lib:setup([{postfix,1}|Config],
+ [{logger,
+ [{handler,default,?MODULE,#{}}]}]),
+ {ok,_,Node2} =
+ logger_test_lib:setup([{postfix,2}|Config],[]),
+ cascade({Node2,{?MODULE,producer,0},[{remote_proxy,logger_proxy}]},
+ {Node1,{?MODULE,log,2},[{local_proxy,logger_proxy}]},
+ fun em_cascading/0).
+
+%% Cascading failure that produces log events from the emulator on a
+%% remote node - how many of the produced log events are actually
+%% written to a log with the logger_disk_log_h wrap file handler.
+remote_emulator_to_disk_log(Config) ->
+ {ok,_,Node1} =
+ logger_test_lib:setup([{postfix,1}|Config],
+ [{logger,
+ [{handler,default,logger_disk_log_h,#{}}]}]),
+ {ok,_,Node2} =
+ logger_test_lib:setup([{postfix,2}|Config],[]),
+ cascade({Node2,{?MODULE,producer,0},[{remote_proxy,logger_proxy}]},
+ {Node1,{logger_disk_log_h,write,4},
+ [{local_proxy,logger_proxy},
+ {local_default,logger_disk_log_h_default}]},
+ fun em_cascading/0).
+remote_emulator_to_disk_log(cleanup,_Config) ->
+ Files = filelib:wildcard("default.log.*"),
+ [_ = file:delete(F) || F <- Files],
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+measure_handler_time(Module,HCfg,Config) ->
+ measure_handler_time(Module,100000,small_fa(),millisecond,HCfg,#{},Config).
+
+measure_handler_time_big(Module,HCfg,Config) ->
+ FCfg = #{chars_limit=>4096, max_size=>1024},
+ measure_handler_time(Module,100,big_fa(),second,HCfg,FCfg,Config).
+
+measure_handler_time(Module,N,FA,Unit,HCfg,FCfg,Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(
+ Config,
+ [{logger,
+ [{handler,default,Module,
+ #{formatter => {logger_formatter,
+ maps:merge(#{legacy_header=>false,
+ single_line=>true},FCfg)},
+ config=>maps:merge(#{sync_mode_qlen => N+1,
+ drop_mode_qlen => N+1,
+ flush_qlen => N+1,
+ burst_limit_enable => false,
+ filesync_repeat_interval => no_repeat},
+ HCfg)}}]}]),
+ %% HPid = rpc:call(Node,erlang,whereis,[?name_to_reg_name(Module,default)]),
+ %% {links,[_,FCPid]} = rpc:call(Node,erlang,process_info,[HPid,links]),
+ T0 = erlang:monotonic_time(millisecond),
+ ok = rpc:call(Node,?MODULE,nlogs_wait,[N,FA,Module]),
+ %% ok = rpc:call(Node,fprof,apply,[fun ?MODULE:nlogs_wait/2,[N div 10,FA,Module],[{procs,[HPid,FCPid]}]]),
+ T1 = erlang:monotonic_time(millisecond),
+ T = T1-T0,
+ M = case Unit of
+ millisecond -> 1;
+ second -> 1000
+ end,
+ IOPS = M*N/T,
+ ct:pal("N: ~p~nT: ~p~nIOPS: ~.2f events per ~w",[N,T,IOPS,Unit]),
+ %% Stats = rpc:call(Node,logger_olp,info,[?name_to_reg_name(Module,default)]),
+ %% ct:pal("Stats: ~p",[Stats]),
+ ct_event:notify(#event{name = benchmark_data,
+ data = [{value,IOPS}]}),
+ {comment,io_lib:format("~.2f events written per ~w",[IOPS,Unit])}.
+
+nlogs_wait(N,{F,A},Module) ->
+ group_leader(whereis(user),self()),
+ [?LOG_NOTICE(F,A) || _ <- lists:seq(1,N)],
+ wait(Module).
+
+wait(Module) ->
+ case Module:filesync(default) of
+ {error,handler_busy} ->
+ wait(Module);
+ ok ->
+ ok
+ end.
+
+small_fa() ->
+ Str = "\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "[\\]^_`abcdefghijklmnopqr",
+ {"~ts",[Str]}.
+
+big_fa() ->
+ {"~p",[lists:duplicate(1048576,"a")]}.
+
+nlogs(N) ->
+ group_leader(whereis(user),self()),
+ Str = "\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "[\\]^_`abcdefghijklmnopqr",
+ [?LOG_NOTICE(Str) || _ <- lists:seq(1,N)],
+ ok.
+
+%% cascade({ProducerNode,ProducerMFA,ProducerStatProcs},
+%%         {ConsumerNode,ConsumerMFA,ConsumerStatProcs}, TestFun)
+cascade({PNode,PMFA,_PStatProcs},{CNode,CMFA,_CStatProcs},TestFun) ->
+ Tab = ets:new(counter,[set,public]),
+ ets:insert(Tab,{producer,0}),
+ ets:insert(Tab,{consumer,0}),
+ dbg:tracer(process,{fun tracer/2,{Tab,PNode,CNode}}),
+ dbg:n(PNode),
+ dbg:n(CNode),
+ dbg:cn(node()),
+ dbg:p(all,[call,arity]),
+ dbg:tpl(PMFA,[]),
+ dbg:tpl(CMFA,[]),
+
+ Pid = rpc:call(CNode,?MODULE,wrap_test,[PNode,TestFun]),
+ MRef = erlang:monitor(process,Pid),
+ TO = ?TEST_DURATION*1000,
+ receive {'DOWN',MRef,_,_,Reason} ->
+ ct:fail({remote_pid_down,Reason})
+ after TO ->
+ All = ets:lookup_element(Tab,producer,2),
+ Written = ets:lookup_element(Tab,consumer,2),
+ dbg:stop_clear(),
+ ?COLLECT_STATS(All,
+ [{PNode,P,Id} || {Id,P} <- _PStatProcs] ++
+ [{CNode,P,Id} || {Id,P} <- _CStatProcs]),
+ Ratio = Written/All * 100,
+ ct_event:notify(#event{name = benchmark_data,
+ data = [{value,Ratio}]}),
+ {comment,io_lib:format("~p % (~p written, ~p produced)",
+ [round(Ratio),Written,All])}
+ end.
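+
+%% For orientation (hypothetical numbers): if the producer probe was
+%% hit 50000 times and the consumer probe 12500 times within the test
+%% window, the reported ratio is 12500 / 50000 * 100 = 25 %, i.e. a
+%% quarter of the produced events were written/forwarded.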
+
+wrap_test(Fun) ->
+ wrap_test(node(),Fun).
+wrap_test(Node,Fun) ->
+ reset(),
+ group_leader(whereis(user),self()),
+ rpc:call(Node,?MODULE,do_fun,[Fun]).
+
+do_fun(Fun) ->
+ reset(),
+ Fun().
+
+reset() ->
+ reset([logger_std_h_default, logger_disk_log_h_default, logger_proxy]).
+reset([P|Ps]) ->
+ is_pid(whereis(P)) andalso logger_olp:reset(P),
+ reset(Ps);
+reset([]) ->
+ ok.
+
+
+tracer({trace,_,call,{?MODULE,producer,_}},{Tab,_PNode,_CNode}=S) ->
+ ets:update_counter(Tab,producer,1),
+ S;
+tracer({trace,Pid,call,{logger_backend,log_allowed,_}},{Tab,PNode,_CNode}=S) when node(Pid)=:=PNode ->
+ ets:update_counter(Tab,producer,1),
+ S;
+tracer({trace,_,call,{?MODULE,log,_}},{Tab,_PNode,_CNode}=S) ->
+ ets:update_counter(Tab,consumer,1),
+ S;
+tracer({trace,_,call,{_,write,_}},{Tab,_PNode,_CNode}=S) ->
+ ets:update_counter(Tab,consumer,1),
+ S;
+tracer(_,S) ->
+ S.
+
+
+%%%-----------------------------------------------------------------
+%%% Collect statistics
+-define(STAT_KEYS,
+ [burst_drops,
+ calls,
+ casts,
+ drops,
+ flushed,
+ flushes,
+ freq,
+ last_qlen,
+ max_qlen,
+ time,
+ writes]).
+-define(EVENT_KEYS,
+ [calls,casts,flushed]).
+
+stats(All,Procs) ->
+ NI = [{Id,rpc:call(N,logger_olp,info,[P])} || {N,P,Id}<-Procs],
+ [{all,All}|[stats(Id,I,All) || {Id,I} <- NI]].
+
+stats(Id,Info,All) ->
+ S = maps:with(?STAT_KEYS,Info),
+ AllOnProc = lists:sum(maps:values(maps:with(?EVENT_KEYS,S))),
+ if All>0 ->
+ Writes = maps:get(writes,S),
+ {_,ActiveTime} = maps:get(time,S),
+ Rate = round(100*Writes/All),
+ RateOnProc =
+ if AllOnProc>0 ->
+ round(100*Writes/AllOnProc);
+ true ->
+ 0
+ end,
+ AvFreq =
+ if ActiveTime>0 ->
+ round(Writes/ActiveTime);
+ true ->
+ 0
+ end,
+ {Id,
+ {stats,S},
+ {rate,Rate},
+ {rate_on_proc,RateOnProc},
+ {av_freq,AvFreq}};
+ true ->
+ {Id,none}
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Spawn a lot of processes that crash repeatedly, causing a lot of
+%%% error reports from the emulator.
+em_cascading() ->
+ spawn(fun() -> super() end).
+
+super() ->
+ process_flag(trap_exit,true),
+ spawn_link(fun server/0),
+ [spawn_link(fun client/0) || _<-lists:seq(1,10000)],
+ super_loop().
+
+super_loop() ->
+ receive
+ {'EXIT',_,server} ->
+ spawn_link(fun server/0),
+ super_loop();
+ {'EXIT',_,_} ->
+ _L = lists:sum(lists:seq(1,10000)),
+ spawn_link(fun client/0),
+ super_loop()
+ end.
+
+client() ->
+ receive
+ after 1 ->
+ case whereis(server) of
+ Pid when is_pid(Pid) ->
+ ok;
+ undefined ->
+ producer(),
+ erlang:error(some_exception)
+ end
+ end,
+ client().
+
+server() ->
+ register(server,self()),
+ receive
+ after 3000 ->
+ exit(server)
+ end.
+
+
+%%%-----------------------------------------------------------------
+%%% Create a supervisor tree with processes that crash repeatedly,
+%%% causing a lot of supervisor reports and crash reports.
+otp_cascading() ->
+ {ok,Pid} = supervisor:start_link({local,otp_super}, ?MODULE, [otp_super]),
+ unlink(Pid),
+ Pid.
+
+otp_server_sup() ->
+ supervisor:start_link({local,otp_server_sup},?MODULE,[otp_server_sup]).
+
+otp_client_sup(N) ->
+ supervisor:start_link({local,otp_client_sup},?MODULE,[otp_client_sup,N]).
+
+otp_server() ->
+ gen_server:start_link({local,otp_server},?MODULE,[otp_server],[]).
+
+otp_client() ->
+ gen_server:start_link(?MODULE,[otp_client],[]).
+
+init([otp_super]) ->
+ {ok, {{one_for_one, 200, 10},
+ [{client_sup,
+ {?MODULE, otp_client_sup, [10000]},
+ permanent, 1000, supervisor, [?MODULE]},
+ {server_sup,
+ {?MODULE, otp_server_sup, []},
+ permanent, 1000, supervisor, [?MODULE]}
+ ]}};
+init([otp_server_sup]) ->
+ {ok, {{one_for_one, 2, 10},
+ [{server,
+ {?MODULE, otp_server, []},
+ permanent, 1000, worker, [?MODULE]}
+ ]}};
+init([otp_client_sup,N]) ->
+ spawn(fun() ->
+ [supervisor:start_child(otp_client_sup,[])
+ || _ <- lists:seq(1,N)]
+ end),
+ {ok, {{simple_one_for_one, N*10, 1},
+ [{client,
+ {?MODULE, otp_client, []},
+ permanent, 1000, worker, [?MODULE]}
+ ]}};
+init([otp_server]) ->
+ {ok, server, 10000};
+init([otp_client]) ->
+ {ok, client,1}.
+
+handle_info(timeout, client) ->
+ true = is_pid(whereis(otp_server)),
+ {noreply,client,1};
+handle_info(timeout, server) ->
+ exit(self(), some_error).
+
+%%%-----------------------------------------------------------------
+%%% Logger callbacks
+log(_LogEvent,_Config) ->
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Function to trace on for counting produced emulator messages
+producer() ->
+ ok.
diff --git a/lib/kernel/test/logger_test_lib.erl b/lib/kernel/test/logger_test_lib.erl
new file mode 100644
index 0000000000..be4bc427fb
--- /dev/null
+++ b/lib/kernel/test/logger_test_lib.erl
@@ -0,0 +1,88 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_test_lib).
+
+-include_lib("kernel/src/logger_internal.hrl").
+
+-export([setup/2, log/3, sync_and_read/3]).
+
+-export([init/2,
+ pre_init_per_suite/3, pre_init_per_testcase/4,
+ post_end_per_testcase/5, post_end_per_suite/3]).
+
+setup(Config,Vars) ->
+ Postfix = case proplists:get_value(postfix, Config) of
+ undefined -> "";
+ P -> ["_",P]
+ end,
+ FuncStr = lists:concat([proplists:get_value(suite, Config), "_",
+ proplists:get_value(tc, Config)|
+ Postfix]),
+ ConfigFileName = filename:join(proplists:get_value(priv_dir, Config), FuncStr),
+ file:write_file(ConfigFileName ++ ".config", io_lib:format("[{kernel, ~p}].",[Vars])),
+ Sname = lists:concat([proplists:get_value(tc,Config)|Postfix]),
+ case test_server:start_node(Sname, slave,
+ [{args, ["-pa ",filename:dirname(code:which(?MODULE)),
+ " -boot start_sasl -kernel start_timer true "
+ "-config ",ConfigFileName]}]) of
+ {ok, Node} ->
+ L = rpc:call(Node, logger, get_config, []),
+ ct:log("~p",[L]),
+ {ok, L, Node};
+ {error, Reason} ->
+ ct:log("Failed to start node: ~p",[Reason]),
+ error
+ end.
+
+log(Node, F, A) ->
+ log(Node, logger, F, A).
+log(Node, M, F, A) ->
+ MD = #{ gl => rpc:call(Node, erlang, whereis, [logger]) },
+ rpc:call(Node, M, F, A ++ [MD]).
+
+sync_and_read(Node,disk_log,Log) ->
+ rpc:call(Node,logger_disk_log_h,filesync,[?STANDARD_HANDLER]),
+ file:read_file(Log ++ ".1");
+sync_and_read(Node, file,Log) ->
+ ok = rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]),
+ file:read_file(Log).
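+
+%% Illustrative use of setup/2 (mirrors how the suites in this patch
+%% call it; Config is the common_test Config proplist as provided when
+%% this module is installed as a ct_hook):
+%%
+%%   {ok,_Cfg,Node} =
+%%       logger_test_lib:setup(Config,
+%%                             [{logger,
+%%                               [{handler,default,logger_std_h,
+%%                                 #{config=>#{type=>{file,"default.log"}}}}]}]),
+%%   ok = logger_test_lib:log(Node, notice, ["hello"])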
+
+
+init(_, _) ->
+ {ok, []}.
+
+pre_init_per_suite(_Suite, Config, State) ->
+ {[{nodes, nodes()} | Config], State}.
+
+pre_init_per_testcase(Suite, TC, Config, State) ->
+ cleanup(Config),
+ {[{suite, Suite}, {tc, TC} | Config], State}.
+
+post_end_per_testcase(_, _TC, Config, Res, State) ->
+ cleanup(Config),
+ {Res, State}.
+
+post_end_per_suite(_, Config, State) ->
+ cleanup(Config),
+ {Config, State}.
+
+cleanup(Config) ->
+ [test_server:stop_node(N) || N <- nodes(),
+ not lists:member(N, proplists:get_value(nodes, Config))].
diff --git a/lib/kernel/test/os_SUITE.erl b/lib/kernel/test/os_SUITE.erl
index a0bcde68db..710b9b115c 100644
--- a/lib/kernel/test/os_SUITE.erl
+++ b/lib/kernel/test/os_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -22,7 +22,8 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
init_per_testcase/2,end_per_testcase/2]).
--export([space_in_cwd/1, quoting/1, cmd_unicode/1, space_in_name/1, bad_command/1,
+-export([space_in_cwd/1, quoting/1, cmd_unicode/1,
+ null_in_command/1, space_in_name/1, bad_command/1,
find_executable/1, unix_comment_in_command/1, deep_list_command/1,
large_output_command/1, background_command/0, background_command/1,
message_leak/1, close_stdin/0, close_stdin/1, max_size_command/1,
@@ -35,7 +36,8 @@ suite() ->
{timetrap,{minutes,1}}].
all() ->
- [space_in_cwd, quoting, cmd_unicode, space_in_name, bad_command,
+ [space_in_cwd, quoting, cmd_unicode, null_in_command,
+ space_in_name, bad_command,
find_executable, unix_comment_in_command, deep_list_command,
large_output_command, background_command, message_leak,
close_stdin, max_size_command, perf_counter_api].
@@ -126,6 +128,14 @@ cmd_unicode(Config) when is_list(Config) ->
[] = receive_all(),
ok.
+null_in_command(Config) ->
+ {Ok, Error} = case os:type() of
+ {win32,_} -> {"dir", "di\0r"};
+ _ -> {"ls", "l\0s"}
+ end,
+ true = is_list(try os:cmd(Ok) catch Class0:_ -> Class0 end),
+ error = try os:cmd(Error) catch Class1:_ -> Class1 end,
+ ok.
%% Test that program with a space in its name can be executed.
space_in_name(Config) when is_list(Config) ->
@@ -217,8 +227,8 @@ find_executable(Config) when is_list(Config) ->
DataDir = proplists:get_value(data_dir, Config),
%% Smoke test.
- case lib:progname() of
- erl ->
+ case ct:get_progname() of
+ "erl" ->
ErlPath = os:find_executable("erl"),
true = is_list(ErlPath),
true = filelib:is_regular(ErlPath);
@@ -378,7 +388,7 @@ comp(Expected, Got) ->
ct:fail(failed)
end.
-%% Like lib:nonl/1, but strips \r as well as \n.
+%% Strips \n and \r\n from the end of the string.
strip_nl([$\r, $\n]) -> [];
strip_nl([$\n]) -> [];
diff --git a/lib/kernel/test/pdict_SUITE.erl b/lib/kernel/test/pdict_SUITE.erl
index d105952df9..3685e51c10 100644
--- a/lib/kernel/test/pdict_SUITE.erl
+++ b/lib/kernel/test/pdict_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -33,6 +33,7 @@
init_per_group/2,end_per_group/2,
mixed/1,
literals/1,
+ destructive/1,
simple/1, complicated/1, heavy/1, simple_all_keys/1, info/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
-export([other_process/2]).
@@ -52,6 +53,7 @@ suite() ->
all() ->
[simple, complicated, heavy, simple_all_keys, info,
literals,
+ destructive,
mixed].
groups() ->
@@ -367,6 +369,36 @@ match_keys(All) ->
ok.
+%% Test that the destructive put optimization of immediate ("immed")
+%% values does not affect get/0 or process_info.
+destructive(_Config) ->
+ Keys = lists:seq(1,100),
+ [put(Key, 17) || Key <- Keys],
+ Get1 = get(),
+ {dictionary,PI1} = process_info(self(), dictionary),
+
+ [begin
+ {Key, 17} = lists:keyfind(Key, 1, Get1),
+ {Key, 17} = lists:keyfind(Key, 1, PI1)
+ end
+ || Key <- Keys],
+
+ [17 = put(Key, 42) || Key <- Keys], % Mutate
+
+ Get2 = get(),
+ {dictionary,PI2} = process_info(self(), dictionary),
+
+ [begin
+ {Key, 17} = lists:keyfind(Key, 1, Get1),
+ {Key, 17} = lists:keyfind(Key, 1, PI1),
+ {Key, 42} = lists:keyfind(Key, 1, Get2),
+ {Key, 42} = lists:keyfind(Key, 1, PI2)
+
+ end
+ || Key <- Keys],
+
+ ok.
+
%% Do random mixed put/erase to test grow/shrink
%% Written for a temporary bug in gc during shrink
mixed(_Config) ->
diff --git a/lib/kernel/test/prim_file_SUITE.erl b/lib/kernel/test/prim_file_SUITE.erl
index e88d42788f..2f465a15bc 100644
--- a/lib/kernel/test/prim_file_SUITE.erl
+++ b/lib/kernel/test/prim_file_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,38 +21,23 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2, init_per_testcase/2, end_per_testcase/2,
read_write_file/1, free_memory/0]).
--export([cur_dir_0a/1, cur_dir_0b/1,
- cur_dir_1a/1, cur_dir_1b/1,
- make_del_dir_a/1, make_del_dir_b/1,
- pos1/1, pos2/1]).
--export([close/1,
- delete_a/1, delete_b/1]).
--export([ open1/1, modes/1]).
--export([
- file_info_basic_file_a/1, file_info_basic_file_b/1,
- file_info_basic_directory_a/1, file_info_basic_directory_b/1,
- file_info_bad_a/1, file_info_bad_b/1,
- file_info_times_a/1, file_info_times_b/1,
- file_write_file_info_a/1, file_write_file_info_b/1,
- file_read_file_info_opts/1, file_write_file_info_opts/1,
- file_write_read_file_info_opts/1
- ]).
--export([rename_a/1, rename_b/1,
- access/1, truncate/1, datasync/1, sync/1,
+-export([cur_dir_0/1, cur_dir_1/1,
+ make_del_dir/1, pos1/1, pos2/1]).
+-export([close/1, delete/1]).
+-export([open1/1, modes/1]).
+-export([file_info_basic_file/1, file_info_basic_directory/1, file_info_bad/1,
+ file_info_times/1, file_write_file_info/1,
+ file_read_file_info_opts/1, file_write_file_info_opts/1,
+ file_write_read_file_info_opts/1]).
+-export([rename/1, access/1, truncate/1, datasync/1, sync/1,
read_write/1, pread_write/1, append/1, exclusive/1]).
--export([ e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]).
+-export([e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]).
--export([ read_not_really_compressed/1,
- read_compressed/1, write_compressed/1,
- compress_errors/1]).
-
--export([
- make_link_a/1, make_link_b/1,
- read_link_info_for_non_link/1,
- symlinks_a/1, symlinks_b/1,
- list_dir_limit/1,
- list_dir_error/1,
- list_dir/1]).
+-export([make_link/1, read_link_info_for_non_link/1,
+ symlinks/1,
+ list_dir_limit/1,
+ list_dir_error/1,
+ list_dir/1]).
-export([advise/1]).
-export([large_write/1]).
@@ -67,29 +52,16 @@
-define(PRIM_FILE, prim_file).
-%% Calls ?PRIM_FILE:F with arguments A and an optional handle H
-%% as first argument, unless the handle is [], i.e no handle.
-%% This is a macro to give the compiler and thereby
-%% the cross reference tool the possibility to interprete
-%% the call, since M, F, A (or [H | A]) can all be known at
-%% compile time.
--define(PRIM_FILE_call(F, H, A),
- case H of
- [] -> apply(?PRIM_FILE, F, A);
- _ -> apply(?PRIM_FILE, F, [H | A])
- end).
-
suite() -> [].
all() ->
[read_write_file, {group, dirs}, {group, files},
- delete_a, delete_b, rename_a, rename_b, {group, errors},
- {group, compression}, {group, links}, list_dir_limit, list_dir].
+ delete, rename, {group, errors}, {group, links},
+ list_dir_limit, list_dir].
groups() ->
[{dirs, [],
- [make_del_dir_a, make_del_dir_b, cur_dir_0a, cur_dir_0b,
- cur_dir_1a, cur_dir_1b]},
+ [make_del_dir, cur_dir_0, cur_dir_1]},
{files, [],
[{group, open}, {group, pos}, {group, file_info},
truncate, sync, datasync, advise, large_write, allocate]},
@@ -98,22 +70,14 @@ groups() ->
append, exclusive]},
{pos, [], [pos1, pos2]},
{file_info, [],
- [file_info_basic_file_a, file_info_basic_file_b,
- file_info_basic_directory_a,
- file_info_basic_directory_b, file_info_bad_a,
- file_info_bad_b, file_info_times_a, file_info_times_b,
- file_write_file_info_a, file_write_file_info_b,
- file_read_file_info_opts, file_write_file_info_opts,
- file_write_read_file_info_opts
+ [file_info_basic_file,file_info_basic_directory, file_info_bad,
+ file_info_times, file_write_file_info, file_read_file_info_opts,
+ file_write_file_info_opts, file_write_read_file_info_opts
]},
{errors, [],
[e_delete, e_rename, e_make_dir, e_del_dir]},
- {compression, [],
- [read_compressed, read_not_really_compressed,
- write_compressed, compress_errors]},
{links, [],
- [make_link_a, make_link_b, read_link_info_for_non_link,
- symlinks_a, symlinks_b, list_dir_error]}].
+ [make_link, read_link_info_for_non_link, symlinks, list_dir_error]}].
init_per_testcase(large_write, Config) ->
{ok, Started} = application:ensure_all_started(os_mon),
@@ -246,39 +210,27 @@ read_write_file(Config) when is_list(Config) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-make_del_dir_a(Config) when is_list(Config) ->
- make_del_dir(Config, [], "_a").
-
-make_del_dir_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = make_del_dir(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- %% Just to make sure the state of the server makes a difference
- {error, einval} = ?PRIM_FILE_call(get_cwd, Handle, []),
- Result.
-
-make_del_dir(Config, Handle, Suffix) ->
+make_del_dir(Config) when is_list(Config) ->
RootDir = proplists:get_value(priv_dir,Config),
NewDir = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_mk-dir"++Suffix),
- ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
- {error, eexist} = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
- ok = ?PRIM_FILE_call(del_dir, Handle, [NewDir]),
- {error, enoent} = ?PRIM_FILE_call(del_dir, Handle, [NewDir]),
+ ++"_mk-dir"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
+ {error, eexist} = ?PRIM_FILE:make_dir(NewDir),
+ ok = ?PRIM_FILE:del_dir(NewDir),
+ {error, enoent} = ?PRIM_FILE:del_dir(NewDir),
%% Make sure we are not in a directory directly under test_server
%% as that would result in eacces errors when trying to delete '..',
%% because there are processes having that directory as current.
- ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
- {ok, CurrentDir} = ?PRIM_FILE_call(get_cwd, Handle, []),
+ ok = ?PRIM_FILE:make_dir(NewDir),
+ {ok, CurrentDir} = ?PRIM_FILE:get_cwd(),
case {os:type(), length(NewDir) >= 260 } of
{{win32,_}, true} ->
io:format("Skip set_cwd for windows path longer than 260 (MAX_PATH)\n", []),
io:format("\nNewDir = ~p\n", [NewDir]);
_ ->
- ok = ?PRIM_FILE_call(set_cwd, Handle, [NewDir])
+ ok = ?PRIM_FILE:set_cwd(NewDir)
end,
try
%% Check that we get an error when trying to create...
@@ -286,14 +238,14 @@ make_del_dir(Config, Handle, Suffix) ->
NewDir2 = filename:join(RootDir,
atom_to_list(?MODULE)
++"_mk-dir-noexist/foo"),
- {error, enoent} = ?PRIM_FILE_call(make_dir, Handle, [NewDir2]),
+ {error, enoent} = ?PRIM_FILE:make_dir(NewDir2),
%% a nameless directory
- {error, enoent} = ?PRIM_FILE_call(make_dir, Handle, [""]),
+ {error, enoent} = ?PRIM_FILE:make_dir(""),
%% a directory with illegal name
- {error, badarg} = ?PRIM_FILE_call(make_dir, Handle, ['mk-dir']),
+ {error, badarg} = ?PRIM_FILE:make_dir('mk-dir'),
%% a directory with illegal name, even if it's a (bad) list
- {error, badarg} = ?PRIM_FILE_call(make_dir, Handle, [[1,2,3,{}]]),
+ {error, badarg} = ?PRIM_FILE:make_dir([1,2,3,{}]),
%% Maybe this isn't an error, exactly, but worth mentioning anyway:
%% ok = ?PRIM_FILE:make_dir([$f,$o,$o,0,$b,$a,$r])),
@@ -306,125 +258,101 @@ make_del_dir(Config, Handle, Suffix) ->
%% Try deleting some bad directories
%% Deleting the parent directory to the current, sounds dangerous, huh?
%% Don't worry ;-) the parent directory should never be empty, right?
- case ?PRIM_FILE_call(del_dir, Handle, [".."]) of
+ case ?PRIM_FILE:del_dir("..") of
{error, eexist} -> ok;
{error, eacces} -> ok; %OpenBSD
{error, einval} -> ok %FreeBSD
end,
- {error, enoent} = ?PRIM_FILE_call(del_dir, Handle, [""]),
- {error, badarg} = ?PRIM_FILE_call(del_dir, Handle, [[3,2,1,{}]])
+ {error, enoent} = ?PRIM_FILE:del_dir(""),
+ {error, badarg} = ?PRIM_FILE:del_dir([3,2,1,{}])
after
- ok = ?PRIM_FILE_call(set_cwd, Handle, [CurrentDir])
+ ok = ?PRIM_FILE:set_cwd(CurrentDir)
end,
ok.
-cur_dir_0a(Config) when is_list(Config) ->
- cur_dir_0(Config, []).
-
-cur_dir_0b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = cur_dir_0(Config, Handle),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-cur_dir_0(Config, Handle) ->
+cur_dir_0(Config) when is_list(Config) ->
%% Find out the current dir, and cd to it ;-)
- {ok,BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []),
+ {ok,BaseDir} = ?PRIM_FILE:get_cwd(),
Dir1 = BaseDir ++ "", %% Check that it's a string
- ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]),
- DirName = atom_to_list(?MODULE) ++
- case Handle of
- [] ->
- "_curdir";
- _ ->
- "_curdir_h"
- end,
+ ok = ?PRIM_FILE:set_cwd(Dir1),
+ DirName = atom_to_list(?MODULE) ++ "_curdir",
%% Make a new dir, and cd to that
RootDir = proplists:get_value(priv_dir,Config),
NewDir = filename:join(RootDir, DirName),
- ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
+ ok = ?PRIM_FILE:make_dir(NewDir),
case {os:type(), length(NewDir) >= 260} of
{{win32,_}, true} ->
io:format("Skip set_cwd for windows path longer than 260 (MAX_PATH):\n"),
io:format("\nNewDir = ~p\n", [NewDir]);
_ ->
io:format("cd to ~s",[NewDir]),
- ok = ?PRIM_FILE_call(set_cwd, Handle, [NewDir]),
+ ok = ?PRIM_FILE:set_cwd(NewDir),
%% Create a file in the new current directory, and check that it
%% really is created there
UncommonName = "uncommon.fil",
{ok,Fd} = ?PRIM_FILE:open(UncommonName, [read, write]),
ok = ?PRIM_FILE:close(Fd),
- {ok,NewDirFiles} = ?PRIM_FILE_call(list_dir, Handle, ["."]),
+ {ok,NewDirFiles} = ?PRIM_FILE:list_dir("."),
true = lists:member(UncommonName,NewDirFiles),
%% Delete the directory and return to the old current directory
%% and check that the created file isn't there (too!)
expect({error, einval}, {error, eacces}, {error, eexist},
- ?PRIM_FILE_call(del_dir, Handle, [NewDir])),
- ?PRIM_FILE_call(delete, Handle, [UncommonName]),
- {ok,[]} = ?PRIM_FILE_call(list_dir, Handle, ["."]),
- ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]),
+ ?PRIM_FILE:del_dir(NewDir)),
+ ?PRIM_FILE:delete(UncommonName),
+ {ok,[]} = ?PRIM_FILE:list_dir("."),
+ ok = ?PRIM_FILE:set_cwd(Dir1),
io:format("cd back to ~s",[Dir1]),
- ok = ?PRIM_FILE_call(del_dir, Handle, [NewDir]),
- {error, enoent} = ?PRIM_FILE_call(set_cwd, Handle, [NewDir]),
- ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]),
+ ok = ?PRIM_FILE:del_dir(NewDir),
+ {error, enoent} = ?PRIM_FILE:set_cwd(NewDir),
+ ok = ?PRIM_FILE:set_cwd(Dir1),
io:format("cd back to ~s",[Dir1]),
- {ok,OldDirFiles} = ?PRIM_FILE_call(list_dir, Handle, ["."]),
+ {ok,OldDirFiles} = ?PRIM_FILE:list_dir("."),
false = lists:member(UncommonName,OldDirFiles)
end,
%% Try doing some bad things
{error, badarg} =
- ?PRIM_FILE_call(set_cwd, Handle, [{foo,bar}]),
+ ?PRIM_FILE:set_cwd({foo,bar}),
{error, enoent} =
- ?PRIM_FILE_call(set_cwd, Handle, [""]),
+ ?PRIM_FILE:set_cwd(""),
{error, enoent} =
- ?PRIM_FILE_call(set_cwd, Handle, [".......a......"]),
+ ?PRIM_FILE:set_cwd(".......a......"),
{ok,BaseDir} =
- ?PRIM_FILE_call(get_cwd, Handle, []), %% Still there?
+ ?PRIM_FILE:get_cwd(), %% Still there?
%% On Windows, there should only be slashes, no backslashes,
%% in the return value of get_cwd().
%% (The test is harmless on Unix, because filenames usually
%% don't contain backslashes.)
- {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []),
+ {ok, BaseDir} = ?PRIM_FILE:get_cwd(),
false = lists:member($\\, BaseDir),
ok.
%% Tests ?PRIM_FILE:get_cwd/1.
-cur_dir_1a(Config) when is_list(Config) ->
- cur_dir_1(Config, []).
-
-cur_dir_1b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = cur_dir_1(Config, Handle),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-cur_dir_1(Config, Handle) ->
+cur_dir_1(Config) when is_list(Config) ->
case os:type() of
{win32, _} ->
- win_cur_dir_1(Config, Handle);
+ win_cur_dir_1(Config);
_ ->
{error, enotsup} =
- ?PRIM_FILE_call(get_cwd, Handle, ["d:"])
+ ?PRIM_FILE:get_cwd("d:")
end,
ok.
-win_cur_dir_1(_Config, Handle) ->
- {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []),
+win_cur_dir_1(_Config) ->
+ {ok, BaseDir} = ?PRIM_FILE:get_cwd(),
%% Get the drive letter from the current directory,
%% and try to get current directory for that drive.
[Drive, $:|_] = BaseDir,
- {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, [[Drive, $:]]),
+ {ok, BaseDir} = ?PRIM_FILE:get_cwd([Drive, $:]),
io:format("BaseDir = ~s\n", [BaseDir]),
%% Unfortunately, there is no way to move away from the
@@ -446,12 +374,12 @@ open1(Config) when is_list(Config) ->
Name = filename:join(NewDir, "foo1.fil"),
{ok,Fd1} = ?PRIM_FILE:open(Name, [read, write]),
{ok,Fd2} = ?PRIM_FILE:open(Name, [read]),
- Str = "{a,tuple}.\n",
- Length = length(Str),
- ?PRIM_FILE:write(Fd1,Str),
+ Bin = list_to_binary("{a,tuple}.\n"),
+ Length = byte_size(Bin),
+ ?PRIM_FILE:write(Fd1,Bin),
{ok,0} = ?PRIM_FILE:position(Fd1,bof),
- {ok, Str} = ?PRIM_FILE:read(Fd1,Length),
- {ok, Str} = ?PRIM_FILE:read(Fd2,Length),
+ {ok, Bin} = ?PRIM_FILE:read(Fd1,Length),
+ {ok, Bin} = ?PRIM_FILE:read(Fd2,Length),
ok = ?PRIM_FILE:close(Fd2),
{ok,0} = ?PRIM_FILE:position(Fd1,bof),
ok = ?PRIM_FILE:truncate(Fd1),
@@ -471,13 +399,13 @@ modes(Config) when is_list(Config) ->
++"_open_modes"),
ok = ?PRIM_FILE:make_dir(NewDir),
Name1 = filename:join(NewDir, "foo1.fil"),
- Marker = "hello, world",
- Length = length(Marker),
+ Marker = <<"hello, world">>,
+ Length = byte_size(Marker),
%% write
{ok, Fd1} = ?PRIM_FILE:open(Name1, [write]),
ok = ?PRIM_FILE:write(Fd1, Marker),
- ok = ?PRIM_FILE:write(Fd1, ".\n"),
+ ok = ?PRIM_FILE:write(Fd1, <<".\n">>),
ok = ?PRIM_FILE:close(Fd1),
%% read
@@ -496,12 +424,6 @@ modes(Config) when is_list(Config) ->
{ok, Marker} = ?PRIM_FILE:read(Fd4, Length),
ok = ?PRIM_FILE:close(Fd4),
- %% read and binary
- BinaryMarker = list_to_binary(Marker),
- {ok, Fd5} = ?PRIM_FILE:open(Name1, [read, binary]),
- {ok, BinaryMarker} = ?PRIM_FILE:read(Fd5, Length),
- ok = ?PRIM_FILE:close(Fd5),
-
ok.
close(Config) when is_list(Config) ->
@@ -528,9 +450,9 @@ access(Config) when is_list(Config) ->
Name = filename:join(RootDir,
atom_to_list(?MODULE)
++"_access.fil"),
- Str = "ABCDEFGH",
+ Bin = <<"ABCDEFGH">>,
{ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
- ?PRIM_FILE:write(Fd1,Str),
+ ?PRIM_FILE:write(Fd1,Bin),
ok = ?PRIM_FILE:close(Fd1),
%% Check that we can't write when in read only mode
{ok,Fd2} = ?PRIM_FILE:open(Name, [read]),
@@ -542,7 +464,7 @@ access(Config) when is_list(Config) ->
end,
ok = ?PRIM_FILE:close(Fd2),
{ok, Fd3} = ?PRIM_FILE:open(Name, [read]),
- {ok, Str} = ?PRIM_FILE:read(Fd3,length(Str)),
+ {ok, Bin} = ?PRIM_FILE:read(Fd3,byte_size(Bin)),
ok = ?PRIM_FILE:close(Fd3),
ok.
@@ -564,7 +486,7 @@ read_write(Config) when is_list(Config) ->
ok.
read_write_test(File) ->
- Marker = "hello, world",
+ Marker = <<"hello, world">>,
ok = ?PRIM_FILE:write(File, Marker),
{ok, 0} = ?PRIM_FILE:position(File, 0),
{ok, Marker} = ?PRIM_FILE:read(File, 100),
@@ -590,15 +512,15 @@ pread_write(Config) when is_list(Config) ->
ok.
pread_write_test(File) ->
- Marker = "hello, world",
- Len = length(Marker),
+ Marker = <<"hello, world">>,
+ Len = byte_size(Marker),
ok = ?PRIM_FILE:write(File, Marker),
{ok, Marker} = ?PRIM_FILE:pread(File, 0, 100),
eof = ?PRIM_FILE:pread(File, 100, 1),
ok = ?PRIM_FILE:pwrite(File, Len, Marker),
{ok, Marker} = ?PRIM_FILE:pread(File, Len, 100),
eof = ?PRIM_FILE:pread(File, 100, 1),
- MM = Marker ++ Marker,
+ MM = <<Marker/binary,Marker/binary>>,
{ok, MM} = ?PRIM_FILE:pread(File, 0, 100),
ok = ?PRIM_FILE:close(File),
ok.
@@ -655,24 +577,24 @@ pos1(Config) when is_list(Config) ->
atom_to_list(?MODULE)
++"_pos1.fil"),
{ok, Fd1} = ?PRIM_FILE:open(Name, [write]),
- ?PRIM_FILE:write(Fd1,"ABCDEFGH"),
+ ?PRIM_FILE:write(Fd1,<<"ABCDEFGH">>),
ok = ?PRIM_FILE:close(Fd1),
{ok, Fd2} = ?PRIM_FILE:open(Name, [read]),
%% Start pos is first char
io:format("Relative positions"),
- {ok, "A"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1),
{ok, 2} = ?PRIM_FILE:position(Fd2,{cur,1}),
- {ok, "C"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"C">>} = ?PRIM_FILE:read(Fd2,1),
{ok, 0} = ?PRIM_FILE:position(Fd2,{cur,-3}),
- {ok, "A"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1),
%% Backwards from first char should be an error
{ok,0} = ?PRIM_FILE:position(Fd2,{cur,-1}),
{error, einval} = ?PRIM_FILE:position(Fd2,{cur,-1}),
%% Reset position and move again
{ok, 0} = ?PRIM_FILE:position(Fd2,0),
{ok, 2} = ?PRIM_FILE:position(Fd2,{cur,2}),
- {ok, "C"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"C">>} = ?PRIM_FILE:read(Fd2,1),
%% Go a lot forwards
{ok, 13} = ?PRIM_FILE:position(Fd2,{cur,10}),
eof = ?PRIM_FILE:read(Fd2,1),
@@ -684,27 +606,27 @@ pos1(Config) when is_list(Config) ->
{ok, 8} = ?PRIM_FILE:position(Fd2,cur),
eof = ?PRIM_FILE:read(Fd2,1),
{ok, 7} = ?PRIM_FILE:position(Fd2,7),
- {ok, "H"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"H">>} = ?PRIM_FILE:read(Fd2,1),
{ok, 0} = ?PRIM_FILE:position(Fd2,0),
- {ok, "A"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1),
{ok, 3} = ?PRIM_FILE:position(Fd2,3),
- {ok, "D"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1),
{ok, 12} = ?PRIM_FILE:position(Fd2,12),
eof = ?PRIM_FILE:read(Fd2,1),
{ok, 3} = ?PRIM_FILE:position(Fd2,3),
- {ok, "D"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1),
%% Try the {bof,X} notation
{ok, 3} = ?PRIM_FILE:position(Fd2,{bof,3}),
- {ok, "D"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1),
%% Try eof positions
io:format("EOF positions"),
{ok, 8} = ?PRIM_FILE:position(Fd2,{eof,0}),
eof = ?PRIM_FILE:read(Fd2,1),
{ok, 7} = ?PRIM_FILE:position(Fd2,{eof,-1}),
- {ok, "H"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"H">>} = ?PRIM_FILE:read(Fd2,1),
{ok, 0} = ?PRIM_FILE:position(Fd2,{eof,-8}),
- {ok, "A"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1),
{error, einval} = ?PRIM_FILE:position(Fd2,{eof,-9}),
ok.
@@ -714,7 +636,7 @@ pos2(Config) when is_list(Config) ->
atom_to_list(?MODULE)
++"_pos2.fil"),
{ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
- ?PRIM_FILE:write(Fd1,"ABCDEFGH"),
+ ?PRIM_FILE:write(Fd1,<<"ABCDEFGH">>),
ok = ?PRIM_FILE:close(Fd1),
{ok, Fd2} = ?PRIM_FILE:open(Name, [read]),
{error, einval} = ?PRIM_FILE:position(Fd2,-1),
@@ -722,35 +644,25 @@ pos2(Config) when is_list(Config) ->
%% Make sure that we still can search after an error.
{ok, 0} = ?PRIM_FILE:position(Fd2, 0),
{ok, 3} = ?PRIM_FILE:position(Fd2, {bof,3}),
- {ok, "D"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1),
io:format("DONE"),
ok.
-
-file_info_basic_file_a(Config) when is_list(Config) ->
- file_info_basic_file(Config, [], "_a").
-
-file_info_basic_file_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = file_info_basic_file(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-file_info_basic_file(Config, Handle, Suffix) ->
+file_info_basic_file(Config) when is_list(Config) ->
RootDir = proplists:get_value(priv_dir, Config),
%% Create a short file.
Name = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_basic_test"++Suffix++".fil"),
+ ++"_basic_test.fil"),
{ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
?PRIM_FILE:write(Fd1, "foo bar"),
ok = ?PRIM_FILE:close(Fd1),
%% Test that the file has the expected attributes.
%% The times are tricky, so we will save them to a separate test case.
- {ok, FileInfo} = ?PRIM_FILE_call(read_file_info, Handle, [Name]),
+ {ok, FileInfo} = ?PRIM_FILE:read_file_info(Name),
#file_info{size = Size, type = Type, access = Access,
atime = AccessTime, mtime = ModifyTime} =
FileInfo,
@@ -768,39 +680,30 @@ file_info_basic_file(Config, Handle, Suffix) ->
ok.
-file_info_basic_directory_a(Config) when is_list(Config) ->
- file_info_basic_directory(Config, []).
-
-file_info_basic_directory_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = file_info_basic_directory(Config, Handle),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-file_info_basic_directory(Config, Handle) ->
+file_info_basic_directory(Config) when is_list(Config) ->
%% Note: filename:join/1 removes any trailing slash,
%% which is essential for ?PRIM_FILE:read_file_info/1 to work on
%% platforms such as Windows95.
RootDir = filename:join([proplists:get_value(priv_dir, Config)]),
%% Test that the RootDir directory has the expected attributes.
- test_directory(RootDir, read_write, Handle),
+ test_directory(RootDir, read_write),
%% Note that on Windows file systems, "/" or "c:/" are *NOT* directories.
%% Therefore, test that ?PRIM_FILE:read_file_info/1 behaves
%% as if they were directories.
case os:type() of
{win32, _} ->
- test_directory("/", read_write, Handle),
- test_directory("c:/", read_write, Handle),
- test_directory("c:\\", read_write, Handle);
+ test_directory("/", read_write),
+ test_directory("c:/", read_write),
+ test_directory("c:\\", read_write);
_ ->
- test_directory("/", read, Handle)
+ test_directory("/", read)
end,
ok.
-test_directory(Name, ExpectedAccess, Handle) ->
- {ok, FileInfo} = ?PRIM_FILE_call(read_file_info, Handle, [Name]),
+test_directory(Name, ExpectedAccess) ->
+ {ok, FileInfo} = ?PRIM_FILE:read_file_info(Name),
#file_info{size = Size, type = Type, access = Access,
atime = AccessTime, mtime = ModifyTime} =
FileInfo,
@@ -824,45 +727,24 @@ all_integers([]) ->
%% Try something nonexistent.
-file_info_bad_a(Config) when is_list(Config) ->
- file_info_bad(Config, []).
-
-file_info_bad_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = file_info_bad(Config, Handle),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-file_info_bad(Config, Handle) ->
+file_info_bad(Config) when is_list(Config) ->
RootDir = filename:join([proplists:get_value(priv_dir, Config)]),
- {error, enoent} =
- ?PRIM_FILE_call(
- read_file_info, Handle,
- [filename:join(RootDir,
- atom_to_list(?MODULE)++"_nonexistent")]),
+ NonExistent = filename:join(RootDir, atom_to_list(?MODULE)++"_nonexistent"),
+ {error, enoent} = ?PRIM_FILE:read_file_info(NonExistent),
ok.
%% Test that the file times behave as they should.
-file_info_times_a(Config) when is_list(Config) ->
- file_info_times(Config, [], "_a").
-
-file_info_times_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = file_info_times(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-file_info_times(Config, Handle, Suffix) ->
+file_info_times(Config) when is_list(Config) ->
%% We have to try this twice, since if the test runs across the change
%% of a month the time diff calculations will fail. But it won't happen
%% if you run it twice in succession.
test_server:m_out_of_n(
1,2,
- fun() -> file_info_int(Config, Handle, Suffix) end),
+ fun() -> file_info_int(Config) end),
ok.
-file_info_int(Config, Handle, Suffix) ->
+file_info_int(Config) ->
%% Note: filename:join/1 removes any trailing slash,
%% which is essential for ?PRIM_FILE:read_file_info/1 to work on
%% platforms such as Windows95.
@@ -872,14 +754,14 @@ file_info_int(Config, Handle, Suffix) ->
Name = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_file_info"++Suffix++".fil"),
+ ++"_file_info.fil"),
{ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
?PRIM_FILE:write(Fd1,"foo"),
%% check that the file got a modify date max a few seconds away from now
{ok, #file_info{type = regular,
atime = AccTime1, mtime = ModTime1}} =
- ?PRIM_FILE_call(read_file_info, Handle, [Name]),
+ ?PRIM_FILE:read_file_info(Name),
Now = erlang:localtime(),
io:format("Now ~p",[Now]),
io:format("Open file Acc ~p Mod ~p",[AccTime1,ModTime1]),
@@ -897,7 +779,7 @@ file_info_int(Config, Handle, Suffix) ->
ok = ?PRIM_FILE:close(Fd1),
{ok, #file_info{size = Size, type = regular, access = Access,
atime = AccTime2, mtime = ModTime2}} =
- ?PRIM_FILE_call(read_file_info, Handle, [Name]),
+ ?PRIM_FILE:read_file_info(Name),
io:format("Closed file Acc ~p Mod ~p",[AccTime2,ModTime2]),
true = time_dist(ModTime1, ModTime2) >= 0,
@@ -909,7 +791,7 @@ file_info_int(Config, Handle, Suffix) ->
{ok, #file_info{size = DSize, type = directory,
access = DAccess,
atime = AccTime3, mtime = ModTime3}} =
- ?PRIM_FILE_call(read_file_info, Handle, [RootDir]),
+ ?PRIM_FILE:read_file_info(RootDir),
%% this dir was modified only a few secs ago
io:format("Dir Acc ~p; Mod ~p; Now ~p",
[AccTime3, ModTime3, Now]),
@@ -936,16 +818,7 @@ filter_atime(Atime, Config) ->
%% Test the write_file_info/2 function.
-file_write_file_info_a(Config) when is_list(Config) ->
- file_write_file_info(Config, [], "_a").
-
-file_write_file_info_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = file_write_file_info(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-file_write_file_info(Config, Handle, Suffix) ->
+file_write_file_info(Config) when is_list(Config) ->
RootDir = get_good_directory(Config),
io:format("RootDir = ~p", [RootDir]),
@@ -955,16 +828,16 @@ file_write_file_info(Config, Handle, Suffix) ->
Name = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_write_file_info_ro"++Suffix),
+ ++"_write_file_info_ro"),
ok = ?PRIM_FILE:write_file(Name, "hello"),
Time = {{1997, 01, 02}, {12, 35, 42}},
Info = #file_info{mode=8#400, atime=Time, mtime=Time, ctime=Time},
- ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, Info]),
+ ok = ?PRIM_FILE:write_file_info(Name, Info),
%% Read back the times.
{ok, ActualInfo} =
- ?PRIM_FILE_call(read_file_info, Handle, [Name]),
+ ?PRIM_FILE:read_file_info(Name),
#file_info{mode=_Mode, atime=ActAtime, mtime=Time,
ctime=ActCtime} = ActualInfo,
FilteredAtime = filter_atime(Time, Config),
@@ -980,14 +853,11 @@ file_write_file_info(Config, Handle, Suffix) ->
{error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"),
%% Make the file writable again.
-
- ?PRIM_FILE_call(write_file_info, Handle,
- [Name, #file_info{mode=8#600}]),
+ ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#600}),
ok = ?PRIM_FILE:write_file(Name, "hello again"),
%% And unwritable.
- ?PRIM_FILE_call(write_file_info, Handle,
- [Name, #file_info{mode=8#400}]),
+ ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#400}),
{error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"),
%% Write the times again.
@@ -995,9 +865,9 @@ file_write_file_info(Config, Handle, Suffix) ->
NewTime = {{1997, 02, 15}, {13, 18, 20}},
NewInfo = #file_info{atime=NewTime, mtime=NewTime, ctime=NewTime},
- ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, NewInfo]),
+ ok = ?PRIM_FILE:write_file_info(Name, NewInfo),
{ok, ActualInfo2} =
- ?PRIM_FILE_call(read_file_info, Handle, [Name]),
+ ?PRIM_FILE:read_file_info(Name),
#file_info{atime=NewActAtime, mtime=NewTime,
ctime=NewActCtime} = ActualInfo2,
NewFilteredAtime = filter_atime(NewTime, Config),
@@ -1012,14 +882,12 @@ file_write_file_info(Config, Handle, Suffix) ->
%% Make the file writeable again, so that we can remove the
%% test suites ... :-)
- ?PRIM_FILE_call(write_file_info, Handle,
- [Name, #file_info{mode=8#600}]),
+ ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#600}),
ok.
%% Test the write_file_info/3 function.
file_write_file_info_opts(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
RootDir = get_good_directory(Config),
io:format("RootDir = ~p", [RootDir]),
@@ -1028,7 +896,7 @@ file_write_file_info_opts(Config) when is_list(Config) ->
lists:foreach(fun
({FI, Opts}) ->
- ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI, Opts])
+ ok = ?PRIM_FILE:write_file_info(Name, FI, Opts)
end, [
{#file_info{ mode=8#600, atime = Time, mtime = Time, ctime = Time}, Opts} ||
Opts <- [[{time, posix}]],
@@ -1038,7 +906,7 @@ file_write_file_info_opts(Config) when is_list(Config) ->
%% REM: determine date range dependent on time_t = Uint32 | Sint32 | Sint64 | Uint64
%% Determine time_t on os:type()?
lists:foreach(fun ({FI, Opts}) ->
- ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI, Opts])
+ ok = ?PRIM_FILE:write_file_info(Name, FI, Opts)
end, [ {#file_info{ mode=8#400, atime = Time, mtime = Time, ctime = Time}, Opts} ||
Opts <- [[{time, universal}],[{time, local}]],
Time <- [
@@ -1050,11 +918,9 @@ file_write_file_info_opts(Config) when is_list(Config) ->
{{2037,2,3},{23,59,59}},
erlang:localtime()
]]),
- ok = ?PRIM_FILE:stop(Handle),
ok.
file_read_file_info_opts(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
RootDir = get_good_directory(Config),
io:format("RootDir = ~p", [RootDir]),
@@ -1063,41 +929,38 @@ file_read_file_info_opts(Config) when is_list(Config) ->
lists:foreach(fun
(Opts) ->
- {ok,_} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts])
+ {ok,_} = ?PRIM_FILE:read_file_info(Name, Opts)
end, [[{time, Type}] || Type <- [local, universal, posix]]),
- ok = ?PRIM_FILE:stop(Handle),
ok.
%% Test the write and read back *_file_info/3 functions.
file_write_read_file_info_opts(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
RootDir = get_good_directory(Config),
io:format("RootDir = ~p", [RootDir]),
Name = filename:join(RootDir, atom_to_list(?MODULE) ++"_read_write_file_info_opts"),
ok = ?PRIM_FILE:write_file(Name, "hello_opts2"),
- ok = file_write_read_file_info_opts(Handle, Name, {{1989, 04, 28}, {19,30,22}}, [{time, local}]),
- ok = file_write_read_file_info_opts(Handle, Name, {{1989, 04, 28}, {19,30,22}}, [{time, universal}]),
+ ok = file_write_read_file_info_opts(Name, {{1989, 04, 28}, {19,30,22}}, [{time, local}]),
+ ok = file_write_read_file_info_opts(Name, {{1989, 04, 28}, {19,30,22}}, [{time, universal}]),
%% will not work on platforms with unsigned time_t
- %ok = file_write_read_file_info_opts(Handle, Name, {{1930, 04, 28}, {19,30,22}}, [{time, local}]),
- %ok = file_write_read_file_info_opts(Handle, Name, {{1930, 04, 28}, {19,30,22}}, [{time, universal}]),
- ok = file_write_read_file_info_opts(Handle, Name, 1, [{time, posix}]),
+ %ok = file_write_read_file_info_opts(Name, {{1930, 04, 28}, {19,30,22}}, [{time, local}]),
+ %ok = file_write_read_file_info_opts(Name, {{1930, 04, 28}, {19,30,22}}, [{time, universal}]),
+ ok = file_write_read_file_info_opts(Name, 1, [{time, posix}]),
%% will not work on platforms with unsigned time_t
- %ok = file_write_read_file_info_opts(Handle, Name, -1, [{time, posix}]),
- %ok = file_write_read_file_info_opts(Handle, Name, -300000, [{time, posix}]),
- ok = file_write_read_file_info_opts(Handle, Name, 300000, [{time, posix}]),
- ok = file_write_read_file_info_opts(Handle, Name, 0, [{time, posix}]),
+ %ok = file_write_read_file_info_opts(Name, -1, [{time, posix}]),
+ %ok = file_write_read_file_info_opts(Name, -300000, [{time, posix}]),
+ ok = file_write_read_file_info_opts(Name, 300000, [{time, posix}]),
+ ok = file_write_read_file_info_opts(Name, 0, [{time, posix}]),
- ok = ?PRIM_FILE:stop(Handle),
ok.
-file_write_read_file_info_opts(Handle, Name, Mtime, Opts) ->
- {ok, FI} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts]),
+file_write_read_file_info_opts(Name, Mtime, Opts) ->
+ {ok, FI} = ?PRIM_FILE:read_file_info(Name, Opts),
FI2 = FI#file_info{ mtime = Mtime },
- ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI2, Opts]),
- {ok, FI3} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts]),
+ ok = ?PRIM_FILE:write_file_info(Name, FI2, Opts),
+ {ok, FI3} = ?PRIM_FILE:read_file_info(Name, Opts),
io:format("Expecting mtime = ~p, got ~p~n", [FI2#file_info.mtime, FI3#file_info.mtime]),
FI2 = FI3,
ok.
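
The hunks above exercise read_file_info/2 and write_file_info/3 with {time, posix|universal|local} options. For reference, a minimal standalone sketch of the same mtime round-trip, written against the public file module rather than the suite's ?PRIM_FILE macro (the module name time_opts_demo and the use of posix seconds are illustrative assumptions):

    -module(time_opts_demo).
    -export([mtime_roundtrip/2]).

    -include_lib("kernel/include/file.hrl").

    %% Read the file info in posix-seconds representation, set a new
    %% mtime, write it back, and read it again to confirm the round-trip.
    mtime_roundtrip(Name, MtimeSeconds) ->
        Opts = [{time, posix}],
        {ok, FI0} = file:read_file_info(Name, Opts),
        ok = file:write_file_info(Name, FI0#file_info{mtime = MtimeSeconds}, Opts),
        {ok, FI1} = file:read_file_info(Name, Opts),
        MtimeSeconds = FI1#file_info.mtime,
        ok.
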
@@ -1175,8 +1038,8 @@ advise(Config) when is_list(Config) ->
atom_to_list(?MODULE)
++"_advise.fil"),
- Line1 = "Hello\n",
- Line2 = "World!\n",
+ Line1 = <<"Hello\n">>,
+ Line2 = <<"World!\n">>,
{ok, Fd} = ?PRIM_FILE:open(Advise, [write]),
ok = ?PRIM_FILE:advise(Fd, 0, 0, normal),
@@ -1226,7 +1089,7 @@ advise(Config) when is_list(Config) ->
{ok, Fd9} = ?PRIM_FILE:open(Advise, [read]),
Offset = 0,
%% same as a 0 length in some implementations
- Length = length(Line1) + length(Line2),
+ Length = byte_size(Line1) + byte_size(Line2),
ok = ?PRIM_FILE:advise(Fd9, Offset, Length, sequential),
{ok, Line1} = ?PRIM_FILE:read_line(Fd9),
{ok, Line2} = ?PRIM_FILE:read_line(Fd9),
@@ -1250,23 +1113,18 @@ do_large_write(Name) ->
Chunk = <<0:ChunkSize/unit:8>>,
Data = zip_data(lists:duplicate(Chunks, Chunk), Interleave),
Size = Chunks * ChunkSize + Chunks, % 4 G + 32
- Wordsize = erlang:system_info(wordsize),
- case prim_file:write_file(Name, Data) of
- ok when Wordsize =:= 8 ->
- {ok,#file_info{size=Size}} = file:read_file_info(Name),
- {ok,Fd} = prim_file:open(Name, [read]),
- check_large_write(Fd, ChunkSize, 0, Interleave);
- {error,einval} when Wordsize =:= 4 ->
- ok
- end.
+ ok = ?PRIM_FILE:write_file(Name, Data),
+ {ok,#file_info{size=Size}} = file:read_file_info(Name),
+ {ok,Fd} = ?PRIM_FILE:open(Name, [read]),
+ check_large_write(Fd, ChunkSize, 0, Interleave).
check_large_write(Fd, ChunkSize, Pos, [X|Interleave]) ->
Pos1 = Pos + ChunkSize,
- {ok,Pos1} = prim_file:position(Fd, {cur,ChunkSize}),
- {ok,[X]} = prim_file:read(Fd, 1),
+ {ok,Pos1} = ?PRIM_FILE:position(Fd, {cur,ChunkSize}),
+ {ok,<<X>>} = ?PRIM_FILE:read(Fd, 1),
check_large_write(Fd, ChunkSize, Pos1+1, Interleave);
check_large_write(Fd, _, _, []) ->
- eof = prim_file:read(Fd, 1),
+ eof = ?PRIM_FILE:read(Fd, 1),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1338,71 +1196,53 @@ allocate_and_assert(Fd, Offset, Length) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-delete_a(Config) when is_list(Config) ->
- delete(Config, [], "_a").
-
-delete_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = delete(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-delete(Config, Handle, Suffix) ->
+delete(Config) when is_list(Config) ->
RootDir = proplists:get_value(priv_dir,Config),
Name = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_delete"++Suffix++".fil"),
+ ++"_delete.fil"),
{ok, Fd1} = ?PRIM_FILE:open(Name, [write]),
?PRIM_FILE:write(Fd1,"ok.\n"),
ok = ?PRIM_FILE:close(Fd1),
%% Check that the file is readable
{ok, Fd2} = ?PRIM_FILE:open(Name, [read]),
ok = ?PRIM_FILE:close(Fd2),
- ok = ?PRIM_FILE_call(delete, Handle, [Name]),
+ ok = ?PRIM_FILE:delete(Name),
%% Check that the file is not readable anymore
{error, _} = ?PRIM_FILE:open(Name, [read]),
%% Try deleting a nonexistent file
- {error, enoent} = ?PRIM_FILE_call(delete, Handle, [Name]),
+ {error, enoent} = ?PRIM_FILE:delete(Name),
ok.
-rename_a(Config) when is_list(Config) ->
- rename(Config, [], "_a").
-
-rename_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = rename(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-rename(Config, Handle, Suffix) ->
+rename(Config) when is_list(Config) ->
RootDir = proplists:get_value(priv_dir,Config),
- FileName1 = atom_to_list(?MODULE)++"_rename"++Suffix++".fil",
- FileName2 = atom_to_list(?MODULE)++"_rename"++Suffix++".ful",
+ FileName1 = atom_to_list(?MODULE)++"_rename.fil",
+ FileName2 = atom_to_list(?MODULE)++"_rename.ful",
Name1 = filename:join(RootDir, FileName1),
Name2 = filename:join(RootDir, FileName2),
{ok,Fd1} = ?PRIM_FILE:open(Name1, [write]),
ok = ?PRIM_FILE:close(Fd1),
%% Rename, and check that it really changed name
- ok = ?PRIM_FILE_call(rename, Handle, [Name1, Name2]),
+ ok = ?PRIM_FILE:rename(Name1, Name2),
{error, _} = ?PRIM_FILE:open(Name1, [read]),
{ok, Fd2} = ?PRIM_FILE:open(Name2, [read]),
ok = ?PRIM_FILE:close(Fd2),
%% Try renaming something to itself
- ok = ?PRIM_FILE_call(rename, Handle, [Name2, Name2]),
+ ok = ?PRIM_FILE:rename(Name2, Name2),
%% Try renaming something that doesn't exist
{error, enoent} =
- ?PRIM_FILE_call(rename, Handle, [Name1, Name2]),
+ ?PRIM_FILE:rename(Name1, Name2),
%% Try renaming to something else than a string
{error, badarg} =
- ?PRIM_FILE_call(rename, Handle, [Name1, foobar]),
+ ?PRIM_FILE:rename(Name1, foobar),
%% Move between directories
DirName1 = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_rename_dir"++Suffix),
+ ++"_rename_dir"),
DirName2 = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_second_rename_dir"++Suffix),
+ ++"_second_rename_dir"),
Name1foo = filename:join(DirName1, "foo.fil"),
Name2foo = filename:join(DirName2, "foo.fil"),
Name2bar = filename:join(DirName2, "bar.dir"),
@@ -1410,21 +1250,21 @@ rename(Config, Handle, Suffix) ->
%% The name has to include the full file name, path is not enough
expect(
{error, eexist}, {error, eisdir},
- ?PRIM_FILE_call(rename, Handle, [Name2, DirName1])),
+ ?PRIM_FILE:rename(Name2, DirName1)),
ok =
- ?PRIM_FILE_call(rename, Handle, [Name2, Name1foo]),
+ ?PRIM_FILE:rename(Name2, Name1foo),
%% Now rename the directory
- ok = ?PRIM_FILE_call(rename, Handle, [DirName1, DirName2]),
+ ok = ?PRIM_FILE:rename(DirName1, DirName2),
%% And check that the file is there now
{ok,Fd3} = ?PRIM_FILE:open(Name2foo, [read]),
ok = ?PRIM_FILE:close(Fd3),
%% Try some dirty things now: move the directory into itself
{error, Msg1} =
- ?PRIM_FILE_call(rename, Handle, [DirName2, Name2bar]),
+ ?PRIM_FILE:rename(DirName2, Name2bar),
io:format("Errmsg1: ~p",[Msg1]),
%% move dir into a file in itself
{error, Msg2} =
- ?PRIM_FILE_call(rename, Handle, [DirName2, Name2foo]),
+ ?PRIM_FILE:rename(DirName2, Name2foo),
io:format("Errmsg2: ~p",[Msg2]),
ok.
@@ -1460,13 +1300,14 @@ e_delete(Config) when is_list(Config) ->
case os:type() of
{win32, _} ->
%% Remove a character device.
- {error, eacces} = ?PRIM_FILE:delete("nul");
+ expect({error, eacces}, {error, einval},
+ ?PRIM_FILE:delete("nul"));
_ ->
?PRIM_FILE:write_file_info(
Base, #file_info {mode=0}),
{error, eacces} = ?PRIM_FILE:delete(Afile),
?PRIM_FILE:write_file_info(
- Base, #file_info {mode=8#600})
+ Base, #file_info {mode=8#700})
end,
ok.
@@ -1602,7 +1443,7 @@ e_make_dir(Config) when is_list(Config) ->
?PRIM_FILE:write_file_info(Base, #file_info {mode=0}),
{error, eacces} =
?PRIM_FILE:make_dir(filename:join(Base, "xxxx")),
- ?PRIM_FILE:write_file_info(Base, #file_info {mode=8#600})
+ ?PRIM_FILE:write_file_info(Base, #file_info {mode=8#700})
end,
ok.
@@ -1652,170 +1493,24 @@ e_del_dir(Config) when is_list(Config) ->
?PRIM_FILE:write_file_info(Base, #file_info {mode=0}),
{error, eacces} = ?PRIM_FILE:del_dir(ADirectory),
?PRIM_FILE:write_file_info(
- Base, #file_info {mode=8#600})
+ Base, #file_info {mode=8#700})
end,
ok.
-%% Trying reading and positioning from a compressed file.
-
-read_compressed(Config) when is_list(Config) ->
- Data = proplists:get_value(data_dir, Config),
- Real = filename:join(Data, "realmen.html.gz"),
- {ok, Fd} = ?PRIM_FILE:open(Real, [read, compressed]),
- try_read_file(Fd).
-
-%% Trying reading and positioning from an uncompressed file,
-%% but with the compressed flag given.
-
-read_not_really_compressed(Config) when is_list(Config) ->
- Data = proplists:get_value(data_dir, Config),
- Priv = proplists:get_value(priv_dir, Config),
-
- %% The file realmen.html might have got CRs added (by WinZip).
- %% Remove them, or the file positions will not be correct.
-
- Real = filename:join(Data, "realmen.html"),
- RealPriv = filename:join(Priv,
- atom_to_list(?MODULE)++"_realmen.html"),
- {ok, RealDataBin} = ?PRIM_FILE:read_file(Real),
- RealData = remove_crs(binary_to_list(RealDataBin), []),
- ok = ?PRIM_FILE:write_file(RealPriv, RealData),
- {ok, Fd} = ?PRIM_FILE:open(RealPriv, [read, compressed]),
- try_read_file(Fd).
-
-remove_crs([$\r|Rest], Result) ->
- remove_crs(Rest, Result);
-remove_crs([C|Rest], Result) ->
- remove_crs(Rest, [C|Result]);
-remove_crs([], Result) ->
- lists:reverse(Result).
-
-try_read_file(Fd) ->
- %% Seek to the current position (nothing should happen).
-
- {ok, 0} = ?PRIM_FILE:position(Fd, 0),
- {ok, 0} = ?PRIM_FILE:position(Fd, {cur, 0}),
-
- %% Read a few lines from a compressed file.
-
- ShouldBe = "<TITLE>Real Programmers Don't Use PASCAL</TITLE>\n",
- {ok, ShouldBe} = ?PRIM_FILE:read(Fd, length(ShouldBe)),
-
- %% Now seek forward.
-
- {ok, 381} = ?PRIM_FILE:position(Fd, 381),
- Back = "Back in the good old days -- the \"Golden Era\" " ++
- "of computers, it was\n",
- {ok, Back} = ?PRIM_FILE:read(Fd, length(Back)),
-
- %% Try to search forward relative to the current position.
-
- {ok, CurPos} = ?PRIM_FILE:position(Fd, {cur, 0}),
- RealPos = 4273,
- {ok, RealPos} = ?PRIM_FILE:position(Fd, {cur, RealPos-CurPos}),
- RealProg = "<LI> Real Programmers aren't afraid to use GOTOs.\n",
- {ok, RealProg} = ?PRIM_FILE:read(Fd, length(RealProg)),
-
- %% Seek backward.
-
- AfterTitle = length("<TITLE>"),
- {ok, AfterTitle} = ?PRIM_FILE:position(Fd, AfterTitle),
- Title = "Real Programmers Don't Use PASCAL</TITLE>\n",
- {ok, Title} = ?PRIM_FILE:read(Fd, length(Title)),
-
- %% Done.
-
- ?PRIM_FILE:close(Fd),
- ok.
-
-write_compressed(Config) when is_list(Config) ->
- Priv = proplists:get_value(priv_dir, Config),
- MyFile = filename:join(Priv,
- atom_to_list(?MODULE)++"_test.gz"),
-
- %% Write a file.
-
- {ok, Fd} = ?PRIM_FILE:open(MyFile, [write, compressed]),
- {ok, 0} = ?PRIM_FILE:position(Fd, 0),
- Prefix = "hello\n",
- End = "end\n",
- ok = ?PRIM_FILE:write(Fd, Prefix),
- {ok, 143} = ?PRIM_FILE:position(Fd, 143),
- ok = ?PRIM_FILE:write(Fd, End),
- ok = ?PRIM_FILE:close(Fd),
-
- %% Read the file and verify the contents.
-
- {ok, Fd1} = ?PRIM_FILE:open(MyFile, [read, compressed]),
- {ok, Prefix} = ?PRIM_FILE:read(Fd1, length(Prefix)),
- Second = lists:duplicate(143-length(Prefix), 0) ++ End,
- {ok, Second} = ?PRIM_FILE:read(Fd1, length(Second)),
- ok = ?PRIM_FILE:close(Fd1),
-
- %% Ensure that the file is compressed.
-
- TotalSize = 143 + length(End),
- case ?PRIM_FILE:read_file_info(MyFile) of
- {ok, #file_info{size=Size}} when Size < TotalSize ->
- ok;
- {ok, #file_info{size=Size}} when Size == TotalSize ->
- ct:fail(file_not_compressed)
- end,
-
- %% Write again to ensure that the file is truncated.
-
- {ok, Fd2} = ?PRIM_FILE:open(MyFile, [write, compressed]),
- NewString = "aaaaaaaaaaa",
- ok = ?PRIM_FILE:write(Fd2, NewString),
- ok = ?PRIM_FILE:close(Fd2),
- {ok, Fd3} = ?PRIM_FILE:open(MyFile, [read, compressed]),
- {ok, NewString} = ?PRIM_FILE:read(Fd3, 1024),
- ok = ?PRIM_FILE:close(Fd3),
-
- ok.
-
-compress_errors(Config) when is_list(Config) ->
- Data = proplists:get_value(data_dir, Config),
- {error, enoent} = ?PRIM_FILE:open("non_existing__",
- [compressed, read]),
- {error, einval} = ?PRIM_FILE:open("non_existing__",
- [compressed, read, write]),
-
- %% Read a corrupted .gz file.
-
- Corrupted = filename:join(Data, "corrupted.gz"),
- {ok, Fd} = ?PRIM_FILE:open(Corrupted, [read, compressed]),
- {error, eio} = ?PRIM_FILE:read(Fd, 100),
- ?PRIM_FILE:close(Fd),
-
- ok.
-
-
-%% Test creating a hard link.
-make_link_a(Config) when is_list(Config) ->
- make_link(Config, [], "_a").
-
-%% Test creating a hard link.
-make_link_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = make_link(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-make_link(Config, Handle, Suffix) ->
+make_link(Config) when is_list(Config) ->
RootDir = proplists:get_value(priv_dir, Config),
NewDir = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_make_link"++Suffix),
- ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
+ ++"_make_link"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
Name = filename:join(NewDir, "a_file"),
ok = ?PRIM_FILE:write_file(Name, "some contents\n"),
Alias = filename:join(NewDir, "an_alias"),
Result =
- case ?PRIM_FILE_call(make_link, Handle, [Name, Alias]) of
+ case ?PRIM_FILE:make_link(Name, Alias) of
{error, enotsup} ->
{skipped, "Links not supported on this platform"};
ok ->
@@ -1826,12 +1521,12 @@ make_link(Config, Handle, Suffix) ->
%% since they are not used on symbolic links.
{ok, Info} =
- ?PRIM_FILE_call(read_link_info, Handle, [Name]),
+ ?PRIM_FILE:read_link_info(Name),
{ok, Info} =
- ?PRIM_FILE_call(read_link_info, Handle, [Alias]),
+ ?PRIM_FILE:read_link_info(Alias),
#file_info{links = 2, type = regular} = Info,
{error, eexist} =
- ?PRIM_FILE_call(make_link, Handle, [Name, Alias]),
+ ?PRIM_FILE:make_link(Name, Alias),
ok
end,
@@ -1843,30 +1538,19 @@ read_link_info_for_non_link(Config) when is_list(Config) ->
{ok, #file_info{type=directory}} = ?PRIM_FILE:read_link_info("."),
ok.
-%% Test operations on symbolic links (for Unix).
-symlinks_a(Config) when is_list(Config) ->
- symlinks(Config, [], "_a").
-
-%% Test operations on symbolic links (for Unix).
-symlinks_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = symlinks(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-symlinks(Config, Handle, Suffix) ->
+symlinks(Config) when is_list(Config) ->
RootDir = proplists:get_value(priv_dir, Config),
NewDir = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_make_symlink"++Suffix),
- ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
+ ++"_make_symlink"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
Name = filename:join(NewDir, "a_plain_file"),
ok = ?PRIM_FILE:write_file(Name, "some stupid content\n"),
Alias = filename:join(NewDir, "a_symlink_alias"),
Result =
- case ?PRIM_FILE_call(make_symlink, Handle, [Name, Alias]) of
+ case ?PRIM_FILE:make_symlink(Name, Alias) of
{error, enotsup} ->
{skipped, "Links not supported on this platform"};
{error, eperm} ->
@@ -1874,20 +1558,20 @@ symlinks(Config, Handle, Suffix) ->
{skipped, "Windows user not privileged to create links"};
ok ->
{ok, Info1} =
- ?PRIM_FILE_call(read_file_info, Handle, [Name]),
+ ?PRIM_FILE:read_file_info(Name),
{ok, Info1} =
- ?PRIM_FILE_call(read_file_info, Handle, [Alias]),
+ ?PRIM_FILE:read_file_info(Alias),
{ok, Info1} =
- ?PRIM_FILE_call(read_link_info, Handle, [Name]),
+ ?PRIM_FILE:read_link_info(Name),
#file_info{links = 1, type = regular} = Info1,
{ok, Info2} =
- ?PRIM_FILE_call(read_link_info, Handle, [Alias]),
+ ?PRIM_FILE:read_link_info(Alias),
#file_info{links=1, type=symlink} = Info2,
{ok, Name} =
- ?PRIM_FILE_call(read_link, Handle, [Alias]),
+ ?PRIM_FILE:read_link(Alias),
{ok, Name} =
- ?PRIM_FILE_call(read_link_all, Handle, [Alias]),
+ ?PRIM_FILE:read_link_all(Alias),
%% If all is good, delete dir again (avoid hanging dir on windows)
rm_rf(?PRIM_FILE,NewDir),
ok
@@ -1907,10 +1591,9 @@ list_dir_limit(Config) when is_list(Config) ->
RootDir = proplists:get_value(priv_dir, Config),
NewDir = filename:join(RootDir,
atom_to_list(?MODULE)++"_list_dir_limit"),
- {ok, Handle1} = ?PRIM_FILE:start(),
- ok = ?PRIM_FILE_call(make_dir, Handle1, [NewDir]),
+ ok = ?PRIM_FILE:make_dir(NewDir),
Ref = erlang:start_timer(MaxTime*1000, self(), []),
- Result = list_dir_limit_loop(NewDir, Handle1, Ref, MaxNumber, 0),
+ Result = list_dir_limit_loop(NewDir, Ref, MaxNumber, 0),
Time = case erlang:cancel_timer(Ref) of
false -> MaxTime;
T -> MaxTime - (T div 1000)
@@ -1920,21 +1603,18 @@ list_dir_limit(Config) when is_list(Config) ->
{error, _Reason, N} -> N;
_ -> 0
end,
- {ok, Handle2} = ?PRIM_FILE:start(),
- list_dir_limit_cleanup(NewDir, Handle2, Number, 0),
- ok = ?PRIM_FILE:stop(Handle1),
- ok = ?PRIM_FILE:stop(Handle2),
+ list_dir_limit_cleanup(NewDir, Number, 0),
{ok, Number} = Result,
{comment,
"Created " ++ integer_to_list(Number) ++ " files in "
++ integer_to_list(Time) ++ " seconds."}.
-list_dir_limit_loop(Dir, Handle, _Ref, N, Cnt) when Cnt >= N ->
- list_dir_check(Dir, Handle, Cnt);
-list_dir_limit_loop(Dir, Handle, Ref, N, Cnt) ->
+list_dir_limit_loop(Dir, _Ref, N, Cnt) when Cnt >= N ->
+ list_dir_check(Dir, Cnt);
+list_dir_limit_loop(Dir, Ref, N, Cnt) ->
receive
{timeout, Ref, []} ->
- list_dir_check(Dir, Handle, Cnt)
+ list_dir_check(Dir, Cnt)
after 0 ->
Name = integer_to_list(Cnt),
case ?PRIM_FILE:write_file(filename:join(Dir, Name), Name) of
@@ -1942,23 +1622,23 @@ list_dir_limit_loop(Dir, Handle, Ref, N, Cnt) ->
Next = Cnt + 1,
case Cnt rem 100 of
0 ->
- case list_dir_check(Dir, Handle, Next) of
+ case list_dir_check(Dir, Next) of
{ok, Next} ->
list_dir_limit_loop(
- Dir, Handle, Ref, N, Next);
+ Dir, Ref, N, Next);
Other ->
Other
end;
_ ->
- list_dir_limit_loop(Dir, Handle, Ref, N, Next)
+ list_dir_limit_loop(Dir, Ref, N, Next)
end;
{error, Reason} ->
{error, Reason, Cnt}
end
end.
-list_dir_check(Dir, Handle, Cnt) ->
- case ?PRIM_FILE:list_dir(Handle, Dir) of
+list_dir_check(Dir, Cnt) ->
+ case ?PRIM_FILE:list_dir(Dir) of
{ok, ListDir} ->
case length(ListDir) of
Cnt ->
@@ -1975,18 +1655,18 @@ list_dir_check(Dir, Handle, Cnt) ->
%% Deletes N files while ignoring errors, then continues deleting
%% as long as they exist.
-list_dir_limit_cleanup(Dir, Handle, N, Cnt) when Cnt >= N ->
+list_dir_limit_cleanup(Dir, N, Cnt) when Cnt >= N ->
Name = integer_to_list(Cnt),
- case ?PRIM_FILE:delete(Handle, filename:join(Dir, Name)) of
+ case ?PRIM_FILE:delete(filename:join(Dir, Name)) of
ok ->
- list_dir_limit_cleanup(Dir, Handle, N, Cnt+1);
+ list_dir_limit_cleanup(Dir, N, Cnt+1);
_ ->
ok
end;
-list_dir_limit_cleanup(Dir, Handle, N, Cnt) ->
+list_dir_limit_cleanup(Dir, N, Cnt) ->
Name = integer_to_list(Cnt),
- ?PRIM_FILE:delete(Handle, filename:join(Dir, Name)),
- list_dir_limit_cleanup(Dir, Handle, N, Cnt+1).
+ ?PRIM_FILE:delete(filename:join(Dir, Name)),
+ list_dir_limit_cleanup(Dir, N, Cnt+1).
%%%
%%% Test list_dir() on a non-existing pathname.
@@ -1995,7 +1675,7 @@ list_dir_limit_cleanup(Dir, Handle, N, Cnt) ->
list_dir_error(Config) ->
Priv = proplists:get_value(priv_dir, Config),
NonExisting = filename:join(Priv, "non-existing-dir"),
- {error,enoent} = prim_file:list_dir(NonExisting),
+ {error,enoent} = ?PRIM_FILE:list_dir(NonExisting),
ok.
%%%
@@ -2063,7 +1743,7 @@ do_run_large_file_test(Config, Run, Name0) ->
{'DOWN',Mref,_,_,_} -> ok;
{Tester,done} -> ok
end,
- prim_file:delete(Name)
+ ?PRIM_FILE:delete(Name)
end),
%% Run the test case.
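
Throughout this suite the old ?PRIM_FILE_call(Op, Handle, Args) indirection is replaced by direct ?PRIM_FILE:Op(Args) calls. Outside the test framework, the same write_file_info/read_file_info round-trip can be sketched with the public file module (a minimal sketch; it assumes a writable directory and a non-root user on a Unix-like system, and the module name ro_roundtrip is made up):

    -module(ro_roundtrip).
    -export([demo/1]).

    -include_lib("kernel/include/file.hrl").

    %% Write a file, make it read-only (mode 8#400), check that writing
    %% fails, then restore write permission (mode 8#600) and clean up.
    demo(Dir) ->
        Name = filename:join(Dir, "ro_demo.txt"),
        ok = file:write_file(Name, <<"hello">>),
        ok = file:write_file_info(Name, #file_info{mode = 8#400}),
        {error, eacces} = file:write_file(Name, <<"hello again">>),
        ok = file:write_file_info(Name, #file_info{mode = 8#600}),
        ok = file:write_file(Name, <<"hello again">>),
        {ok, #file_info{type = regular}} = file:read_file_info(Name),
        ok = file:delete(Name).
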
diff --git a/lib/kernel/test/sendfile_SUITE.erl b/lib/kernel/test/sendfile_SUITE.erl
index bfa564c32c..ad060aa05c 100644
--- a/lib/kernel/test/sendfile_SUITE.erl
+++ b/lib/kernel/test/sendfile_SUITE.erl
@@ -23,30 +23,41 @@
-include_lib("common_test/include/ct.hrl").
-include_lib("kernel/include/file.hrl").
--compile(export_all).
-
-all() -> [{group,async_threads},
- {group,no_async_threads}].
-
-groups() ->
- [{async_threads,[],tcs()},
- {no_async_threads,[],tcs()}].
-
-tcs() ->
- [t_sendfile_small
- ,t_sendfile_big_all
- ,t_sendfile_big_size
- ,t_sendfile_many_small
- ,t_sendfile_partial
- ,t_sendfile_offset
- ,t_sendfile_sendafter
- ,t_sendfile_recvafter
- ,t_sendfile_recvafter_remoteclose
- ,t_sendfile_sendduring
- ,t_sendfile_recvduring
- ,t_sendfile_closeduring
- ,t_sendfile_crashduring
- ].
+-export([all/0, init_per_suite/1, end_per_suite/1, init_per_testcase/2]).
+
+-export([sendfile_server/2, sendfile_do_recv/2, init/1, handle_event/2]).
+
+-export(
+ [t_sendfile_small/1,
+ t_sendfile_big_all/1,
+ t_sendfile_big_size/1,
+ t_sendfile_many_small/1,
+ t_sendfile_partial/1,
+ t_sendfile_offset/1,
+ t_sendfile_sendafter/1,
+ t_sendfile_recvafter/1,
+ t_sendfile_recvafter_remoteclose/1,
+ t_sendfile_sendduring/1,
+ t_sendfile_recvduring/1,
+ t_sendfile_closeduring/1,
+ t_sendfile_crashduring/1,
+ t_sendfile_arguments/1]).
+
+all() ->
+ [t_sendfile_small,
+ t_sendfile_big_all,
+ t_sendfile_big_size,
+ t_sendfile_many_small,
+ t_sendfile_partial,
+ t_sendfile_offset,
+ t_sendfile_sendafter,
+ t_sendfile_recvafter,
+ t_sendfile_recvafter_remoteclose,
+ t_sendfile_sendduring,
+ t_sendfile_recvduring,
+ t_sendfile_closeduring,
+ t_sendfile_crashduring,
+ t_sendfile_arguments].
init_per_suite(Config) ->
case {os:type(),os:version()} of
@@ -72,28 +83,18 @@ init_per_suite(Config) ->
end_per_suite(Config) ->
file:delete(proplists:get_value(big_file, Config)).
-init_per_group(async_threads,Config) ->
- case erlang:system_info(thread_pool_size) of
- 0 ->
- {skip,"No async threads"};
- _ ->
- [{sendfile_opts,[{use_threads,true}]}|Config]
- end;
-init_per_group(no_async_threads,Config) ->
- [{sendfile_opts,[{use_threads,false}]}|Config].
-
-end_per_group(_,_Config) ->
- ok.
-
init_per_testcase(TC,Config) when TC == t_sendfile_recvduring;
TC == t_sendfile_sendduring ->
Filename = proplists:get_value(small_file, Config),
Send = fun(Sock) ->
{_Size, Data} = sendfile_file_info(Filename),
- {ok,D} = file:open(Filename, [raw,binary,read]),
- prim_file:sendfile(D, Sock, 0, 0, 0,
- [],[],[]),
+ {ok,Fd} = file:open(Filename, [raw,binary,read]),
+ %% Determine whether the driver has native support by
+ %% hitting the raw module directly; file:sendfile/5 will
+ %% land in the fallback if it doesn't.
+ RawModule = Fd#file_descriptor.module,
+ {ok, _Ignored} = RawModule:sendfile(Fd,Sock,0,0,0,[],[],[]),
Data
end,
@@ -105,9 +106,8 @@ init_per_testcase(TC,Config) when TC == t_sendfile_recvduring;
ct:log("Error: ~p",[Error]),
{skip,"Not supported"}
end;
-init_per_testcase(_Tc,Config) ->
- Config ++ [{sendfile_opts,[{use_threads,false}]}].
-
+init_per_testcase(_TC,Config) ->
+ Config.
t_sendfile_small(Config) when is_list(Config) ->
Filename = proplists:get_value(small_file, Config),
@@ -124,7 +124,7 @@ t_sendfile_small(Config) when is_list(Config) ->
t_sendfile_many_small(Config) when is_list(Config) ->
Filename = proplists:get_value(small_file, Config),
FileOpts = proplists:get_value(file_opts, Config, []),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
error_logger:add_report_handler(?MODULE,[self()]),
@@ -151,7 +151,7 @@ t_sendfile_many_small(Config) when is_list(Config) ->
t_sendfile_big_all(Config) when is_list(Config) ->
Filename = proplists:get_value(big_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{ok, #file_info{size = Size}} =
@@ -165,7 +165,7 @@ t_sendfile_big_all(Config) when is_list(Config) ->
t_sendfile_big_size(Config) ->
Filename = proplists:get_value(big_file, Config),
FileOpts = proplists:get_value(file_opts, Config, []),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
SendAll = fun(Sock) ->
{ok, #file_info{size = Size}} =
@@ -180,7 +180,7 @@ t_sendfile_big_size(Config) ->
t_sendfile_partial(Config) ->
Filename = proplists:get_value(small_file, Config),
FileOpts = proplists:get_value(file_opts, Config, []),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
SendSingle = fun(Sock) ->
{_Size, <<Data:5/binary,_/binary>>} =
@@ -217,7 +217,7 @@ t_sendfile_partial(Config) ->
t_sendfile_offset(Config) ->
Filename = proplists:get_value(small_file, Config),
FileOpts = proplists:get_value(file_opts, Config, []),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{_Size, <<_:5/binary,Data:3/binary,_/binary>> = AllData} =
@@ -233,7 +233,7 @@ t_sendfile_offset(Config) ->
t_sendfile_sendafter(Config) ->
Filename = proplists:get_value(small_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{Size, Data} = sendfile_file_info(Filename),
@@ -246,7 +246,7 @@ t_sendfile_sendafter(Config) ->
t_sendfile_recvafter(Config) ->
Filename = proplists:get_value(small_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{Size, Data} = sendfile_file_info(Filename),
@@ -279,7 +279,7 @@ t_sendfile_recvafter_remoteclose(Config) ->
t_sendfile_sendduring(Config) ->
Filename = proplists:get_value(big_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{ok, #file_info{size = Size}} =
@@ -296,7 +296,7 @@ t_sendfile_sendduring(Config) ->
t_sendfile_recvduring(Config) ->
Filename = proplists:get_value(big_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{ok, #file_info{size = Size}} =
@@ -315,7 +315,7 @@ t_sendfile_recvduring(Config) ->
t_sendfile_closeduring(Config) ->
Filename = proplists:get_value(big_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock,SFServPid) ->
spawn_link(fun() ->
@@ -341,11 +341,25 @@ t_sendfile_closeduring(Config) ->
-1
end,
- ok = sendfile_send({127,0,0,1}, Send, 0).
+ ok = sendfile_send({127,0,0,1}, Send, 0, [{active,false}]),
+ [] = flush(),
+ ok = sendfile_send({127,0,0,1}, Send, 0, [{active,true}]),
+ [] = flush(),
+ ok.
+
+flush() ->
+ lists:reverse(flush([])).
+
+flush(Acc) ->
+ receive M ->
+ flush([M | Acc])
+ after 0 ->
+ Acc
+ end.
t_sendfile_crashduring(Config) ->
Filename = proplists:get_value(big_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
error_logger:add_report_handler(?MODULE,[self()]),
@@ -373,18 +387,52 @@ t_sendfile_crashduring(Config) ->
end
end.
+t_sendfile_arguments(Config) ->
+ Filename = proplists:get_value(small_file, Config),
+
+ {ok, Listener} = gen_tcp:listen(0,
+ [{packet, 0}, {active, false}, {reuseaddr, true}]),
+ {ok, Port} = inet:port(Listener),
+
+ ErrorCheck =
+ fun(Reason, Offset, Length, Opts) ->
+ {ok, Sender} = gen_tcp:connect({127, 0, 0, 1}, Port,
+ [{packet, 0}, {active, false}]),
+ {ok, Receiver} = gen_tcp:accept(Listener),
+ {ok, Fd} = file:open(Filename, [read, raw]),
+ {error, Reason} = file:sendfile(Fd, Sender, Offset, Length, Opts),
+ gen_tcp:close(Receiver),
+ gen_tcp:close(Sender),
+ file:close(Fd)
+ end,
+
+ ErrorCheck(einval, -1, 0, []),
+ ErrorCheck(einval, 0, -1, []),
+ ErrorCheck(badarg, gurka, 0, []),
+ ErrorCheck(badarg, 0, gurka, []),
+ ErrorCheck(badarg, 0, 0, gurka),
+ ErrorCheck(badarg, 0, 0, [{chunk_size, gurka}]),
+
+ gen_tcp:close(Listener),
+
+ ok.
+
%% Generic sendfile server code
sendfile_send(Send) ->
sendfile_send({127,0,0,1},Send).
sendfile_send(Host, Send) ->
sendfile_send(Host, Send, []).
sendfile_send(Host, Send, Orig) ->
+ sendfile_send(Host, Send, Orig, [{active,false}]).
+
+sendfile_send(Host, Send, Orig, SockOpts) ->
+
SFServer = spawn_link(?MODULE, sendfile_server, [self(), Orig]),
receive
{server, Port} ->
- {ok, Sock} = gen_tcp:connect(Host, Port,
- [binary,{packet,0},
- {active,false}]),
+ Opts = [binary,{packet,0}|SockOpts],
+ io:format("connect with opts = ~p\n", [Opts]),
+ {ok, Sock} = gen_tcp:connect(Host, Port, Opts),
Data = case proplists:get_value(arity,erlang:fun_info(Send)) of
1 ->
Send(Sock);
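
The new t_sendfile_arguments case drives file:sendfile/5 directly with bad offsets, lengths and options. For comparison, a minimal happy-path sketch of the same call over an already connected gen_tcp socket (the module name sendfile_demo is illustrative; Bytes = 0 means "send to end of file"):

    -module(sendfile_demo).
    -export([send_file/2]).

    %% Send a whole file over Sock with file:sendfile/5.
    %% The file must be opened in raw mode for sendfile to be usable.
    send_file(Filename, Sock) ->
        {ok, Fd} = file:open(Filename, [read, raw, binary]),
        try
            {ok, _BytesSent} = file:sendfile(Fd, Sock, 0, 0, []),
            ok
        after
            file:close(Fd)
        end.
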
diff --git a/lib/kernel/test/seq_trace_SUITE.erl b/lib/kernel/test/seq_trace_SUITE.erl
index be23a1933f..ee8f4e94f8 100644
--- a/lib/kernel/test/seq_trace_SUITE.erl
+++ b/lib/kernel/test/seq_trace_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -25,7 +25,8 @@
-export([token_set_get/1, tracer_set_get/1, print/1,
send/1, distributed_send/1, recv/1, distributed_recv/1,
trace_exit/1, distributed_exit/1, call/1, port/1,
- match_set_seq_token/1, gc_seq_token/1]).
+ match_set_seq_token/1, gc_seq_token/1, label_capability_mismatch/1,
+ send_literal/1]).
%% internal exports
-export([simple_tracer/2, one_time_receiver/0, one_time_receiver/1,
@@ -44,10 +45,10 @@ suite() ->
{timetrap,{minutes,1}}].
all() ->
- [token_set_get, tracer_set_get, print, send,
+ [token_set_get, tracer_set_get, print, send, send_literal,
distributed_send, recv, distributed_recv, trace_exit,
distributed_exit, call, port, match_set_seq_token,
- gc_seq_token].
+ gc_seq_token, label_capability_mismatch].
groups() ->
[].
@@ -90,8 +91,8 @@ do_token_set_get(TsType) ->
%% Test that initial seq_trace is disabled
[] = seq_trace:get_token(),
%% Test setting and reading the different fields
- 0 = seq_trace:set_token(label,17),
- {label,17} = seq_trace:get_token(label),
+ 0 = seq_trace:set_token(label,{my_label,1}),
+ {label,{my_label,1}} = seq_trace:get_token(label),
false = seq_trace:set_token(print,true),
{print,true} = seq_trace:get_token(print),
false = seq_trace:set_token(send,true),
@@ -101,12 +102,12 @@ do_token_set_get(TsType) ->
false = seq_trace:set_token(TsType,true),
{TsType,true} = seq_trace:get_token(TsType),
%% Check the whole token
- {Flags,17,0,Self,0} = seq_trace:get_token(), % all flags are set
+ {Flags,{my_label,1},0,Self,0} = seq_trace:get_token(), % all flags are set
%% Test setting and reading the 'serial' field
{0,0} = seq_trace:set_token(serial,{3,5}),
{serial,{3,5}} = seq_trace:get_token(serial),
%% Check the whole token, test that a whole token can be set and get
- {Flags,17,5,Self,3} = seq_trace:get_token(),
+ {Flags,{my_label,1},5,Self,3} = seq_trace:get_token(),
seq_trace:set_token({Flags,19,7,Self,5}),
{Flags,19,7,Self,5} = seq_trace:get_token(),
%% Check that receive timeout does not reset token
@@ -158,21 +159,51 @@ do_print(TsType) ->
{0,{print,_,_,[],print3}, Ts1}] = stop_tracer(2),
check_ts(TsType, Ts0),
check_ts(TsType, Ts1).
-
+
send(Config) when is_list(Config) ->
lists:foreach(fun do_send/1, ?TIMESTAMP_MODES).
do_send(TsType) ->
+ do_send(TsType, send).
+
+do_send(TsType, Msg) ->
seq_trace:reset_trace(),
start_tracer(),
Receiver = spawn(?MODULE,one_time_receiver,[]),
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
set_token_flags([send, TsType]),
- Receiver ! send,
+ Receiver ! Msg,
Self = self(),
seq_trace:reset_trace(),
- [{0,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1),
+ [{Label,{send,_,Self,Receiver,Msg}, Ts}] = stop_tracer(1),
check_ts(TsType, Ts).
+%% This testcase tests that we do not segfault when we have a
+%% literal as the message and the message is copied onto the
+%% heap during a GC.
+send_literal(Config) when is_list(Config) ->
+ lists:foreach(fun do_send_literal/1,
+ [atom, make_ref(), ets:new(hej,[]), 1 bsl 64,
+ "gurka", {tuple,test,with,#{}}, #{}]).
+
+do_send_literal(Msg) ->
+ N = 10000,
+ seq_trace:reset_trace(),
+ start_tracer(),
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
+ set_token_flags([send, 'receive', no_timestamp]),
+ Receiver = spawn_link(fun() -> receive ok -> ok end end),
+ [Receiver ! Msg || _ <- lists:seq(1, N)],
+ erlang:garbage_collect(Receiver),
+ [Receiver ! Msg || _ <- lists:seq(1, N)],
+ erlang:garbage_collect(Receiver),
+ Self = self(),
+ seq_trace:reset_trace(),
+ [{Label,{send,_,Self,Receiver,Msg}, Ts} | _] = stop_tracer(N),
+ check_ts(no_timestamp, Ts).
+
distributed_send(Config) when is_list(Config) ->
lists:foreach(fun do_distributed_send/1, ?TIMESTAMP_MODES).
@@ -184,14 +215,19 @@ do_distributed_send(TsType) ->
seq_trace:reset_trace(),
start_tracer(),
Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
+
+ %% Make sure complex labels survive the trip.
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
set_token_flags([send,TsType]),
+
Receiver ! send,
Self = self(),
seq_trace:reset_trace(),
stop_node(Node),
- [{0,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1),
+ [{Label,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1),
check_ts(TsType, Ts).
-
+
recv(Config) when is_list(Config) ->
lists:foreach(fun do_recv/1, ?TIMESTAMP_MODES).
@@ -220,7 +256,12 @@ do_distributed_recv(TsType) ->
seq_trace:reset_trace(),
rpc:call(Node,?MODULE,start_tracer,[]),
Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
+
+ %% Make sure complex labels survive the trip.
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
set_token_flags(['receive',TsType]),
+
Receiver ! 'receive',
%% let the other process receive the message:
receive after 1 -> ok end,
@@ -229,7 +270,7 @@ do_distributed_recv(TsType) ->
Result = rpc:call(Node,?MODULE,stop_tracer,[1]),
stop_node(Node),
ok = io:format("~p~n",[Result]),
- [{0,{'receive',_,Self,Receiver,'receive'}, Ts}] = Result,
+ [{Label,{'receive',_,Self,Receiver,'receive'}, Ts}] = Result,
check_ts(TsType, Ts).
trace_exit(Config) when is_list(Config) ->
@@ -240,7 +281,12 @@ do_trace_exit(TsType) ->
start_tracer(),
Receiver = spawn_link(?MODULE, one_time_receiver, [exit]),
process_flag(trap_exit, true),
+
+ %% Make sure complex labels survive the trip.
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
set_token_flags([send, TsType]),
+
Receiver ! {before, exit},
%% let the other process receive the message:
receive
@@ -254,8 +300,8 @@ do_trace_exit(TsType) ->
Result = stop_tracer(2),
seq_trace:reset_trace(),
ok = io:format("~p~n", [Result]),
- [{0, {send, {0,1}, Self, Receiver, {before, exit}}, Ts0},
- {0, {send, {1,2}, Receiver, Self,
+ [{Label, {send, {0,1}, Self, Receiver, {before, exit}}, Ts0},
+ {Label, {send, {1,2}, Receiver, Self,
{'EXIT', Receiver, {exit, {before, exit}}}}, Ts1}] = Result,
check_ts(TsType, Ts0),
check_ts(TsType, Ts1).
@@ -291,6 +337,74 @@ do_distributed_exit(TsType) ->
{'EXIT', Receiver, {exit, {before, exit}}}}, Ts}] = Result,
check_ts(TsType, Ts).
+label_capability_mismatch(Config) when is_list(Config) ->
+ Releases = ["20_latest"],
+ Available = [Rel || Rel <- Releases, test_server:is_release_available(Rel)],
+ case Available of
+ [] -> {skipped, "No incompatible releases available"};
+ _ ->
+ lists:foreach(fun do_incompatible_labels/1, Available),
+ lists:foreach(fun do_compatible_labels/1, Available),
+ ok
+ end.
+
+do_incompatible_labels(Rel) ->
+ Cookie = atom_to_list(erlang:get_cookie()),
+ {ok, Node} = test_server:start_node(
+ list_to_atom(atom_to_list(?MODULE)++"_"++Rel), peer,
+ [{args, " -setcookie "++Cookie}, {erl, [{release, Rel}]}]),
+
+ {_,Dir} = code:is_loaded(?MODULE),
+ Mdir = filename:dirname(Dir),
+ true = rpc:call(Node,code,add_patha,[Mdir]),
+ seq_trace:reset_trace(),
+ rpc:call(Node,?MODULE,start_tracer,[]),
+ Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
+
+ %% This node does not support arbitrary labels, so it must fail with a
+ %% timeout as the token is dropped silently.
+ seq_trace:set_token(label,make_ref()),
+ seq_trace:set_token('receive',true),
+
+ Receiver ! 'receive',
+ %% let the other process receive the message:
+ receive after 10 -> ok end,
+ seq_trace:reset_trace(),
+
+ {error,timeout} = rpc:call(Node,?MODULE,stop_tracer,[1]),
+ stop_node(Node),
+ ok.
+
+do_compatible_labels(Rel) ->
+ Cookie = atom_to_list(erlang:get_cookie()),
+ {ok, Node} = test_server:start_node(
+ list_to_atom(atom_to_list(?MODULE)++"_"++Rel), peer,
+ [{args, " -setcookie "++Cookie}, {erl, [{release, Rel}]}]),
+
+ {_,Dir} = code:is_loaded(?MODULE),
+ Mdir = filename:dirname(Dir),
+ true = rpc:call(Node,code,add_patha,[Mdir]),
+ seq_trace:reset_trace(),
+ rpc:call(Node,?MODULE,start_tracer,[]),
+ Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
+
+ %% This node does not support arbitrary labels, but small integers should
+ %% still work.
+ Label = 1234,
+ seq_trace:set_token(label,Label),
+ seq_trace:set_token('receive',true),
+
+ Receiver ! 'receive',
+ %% let the other process receive the message:
+ receive after 10 -> ok end,
+ Self = self(),
+ seq_trace:reset_trace(),
+ Result = rpc:call(Node,?MODULE,stop_tracer,[1]),
+ stop_node(Node),
+ ok = io:format("~p~n",[Result]),
+ [{Label,{'receive',_,Self,Receiver,'receive'}, _}] = Result,
+ ok.
+
call(doc) ->
"Tests special forms {is_seq_trace} and {get_seq_token} "
"in trace match specs.";
@@ -698,6 +812,24 @@ do_shrink(N) ->
erlang:garbage_collect(),
do_shrink(N-1).
+%% Test that messages from a port do not clear the token
+port_clean_token(Config) when is_list(Config) ->
+ seq_trace:reset_trace(),
+ Label = make_ref(),
+ seq_trace:set_token(label, Label),
+ {label,Label} = seq_trace:get_token(label),
+
+ %% Create a port and get messages from it
+    %% We use os:cmd for convenience, since it does the open_port,
+    %% port_command and port_close calls and receives the replies.
+    %% Relying on the internal implementation of os:cmd is not ideal,
+    %% but it will have to do.
+ os:cmd("ls"),
+
+ %% Make sure that the seq_trace token is still there
+ {label,Label} = seq_trace:get_token(label),
+
+ ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
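
The new cases above rely on seq_trace labels being arbitrary terms (references) rather than small integers, and on older nodes silently dropping tokens they cannot represent. A minimal sketch of setting a reference label and observing it at the system tracer (module and function names are made up for illustration):

    -module(seq_label_demo).
    -export([run/0, tracer/0]).

    %% Use a reference as the seq_trace label and let a system tracer
    %% print the 'send' trace message that carries it.
    run() ->
        Tracer = spawn(?MODULE, tracer, []),
        seq_trace:set_system_tracer(Tracer),
        Label = make_ref(),
        seq_trace:set_token(label, Label),
        seq_trace:set_token(send, true),
        Receiver = spawn(fun() -> receive _ -> ok end end),
        Receiver ! hello,
        seq_trace:reset_trace(),
        Label.

    %% Trace messages arrive as {seq_trace, Label, TraceInfo} or, with
    %% timestamps enabled, {seq_trace, Label, TraceInfo, Timestamp}.
    tracer() ->
        receive
            {seq_trace, Label, TraceInfo} ->
                io:format("label ~p: ~p~n", [Label, TraceInfo]),
                tracer();
            {seq_trace, Label, TraceInfo, Ts} ->
                io:format("label ~p: ~p (~p)~n", [Label, TraceInfo, Ts]),
                tracer()
        end.
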
diff --git a/lib/kernel/test/wrap_log_reader_SUITE.erl b/lib/kernel/test/wrap_log_reader_SUITE.erl
index 40a016aed0..59b088ca73 100644
--- a/lib/kernel/test/wrap_log_reader_SUITE.erl
+++ b/lib/kernel/test/wrap_log_reader_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -428,13 +428,14 @@ stop() ->
ok = wrap_log_test:stop(),
dl_wait().
-%% Give disk logs opened by 'logger' and 'wlt' time to close after
+%% Give disk logs opened by 'wlr_logger' and 'wlt' time to close after
%% receiving EXIT signals.
dl_wait() ->
case disk_log:accessible_logs() of
{[], []} ->
ok;
- _ ->
+ _X ->
+ erlang:display(_X),
timer:sleep(100),
dl_wait()
end.
@@ -507,27 +508,27 @@ add_ext(Name, Ext) ->
%% disk_log.
open(Log, File, Where) ->
- logger ! {open, self(), Log, File},
+ wlr_logger ! {open, self(), Log, File},
rec1(ok, Where).
open_ext(Log, File, Where) ->
- logger ! {open_ext, self(), Log, File},
+ wlr_logger ! {open_ext, self(), Log, File},
rec1(ok, Where).
close(Log) ->
- logger ! {close, self(), Log},
+ wlr_logger ! {close, self(), Log},
rec(ok, ?LINE).
sync(Log) ->
- logger ! {sync, self(), Log},
+ wlr_logger ! {sync, self(), Log},
rec(ok, ?LINE).
log_terms(File, Terms) ->
- logger ! {log_terms, self(), File, Terms},
+ wlr_logger ! {log_terms, self(), File, Terms},
rec(ok, ?LINE).
blog_terms(File, Terms) ->
- logger ! {blog_terms, self(), File, Terms},
+ wlr_logger ! {blog_terms, self(), File, Terms},
rec(ok, ?LINE).
rec1(M, Where) ->
diff --git a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl
index 38449b6bb3..d2bac40192 100644
--- a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl
+++ b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -36,9 +36,9 @@
-endif.
init() ->
- spawn(fun() -> start(logger) end),
+ spawn(fun() -> start(wlr_logger) end),
spawn(fun() -> start2(wlt) end),
- wait_registered(logger),
+ wait_registered(wlr_logger),
wait_registered(wlt),
ok.
@@ -52,9 +52,9 @@ wait_registered(Name) ->
end.
stop() ->
- catch logger ! exit,
+ catch wlr_logger ! exit,
catch wlt ! exit,
- wait_unregistered(logger),
+ wait_unregistered(wlr_logger),
wait_unregistered(wlt),
ok.
@@ -82,47 +82,47 @@ loop() ->
{open, Pid, Name, File} ->
R = disk_log:open([{name, Name}, {type, wrap}, {file, File},
{size, {?fsize, ?fno}}]),
- ?format("logger: open ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: open ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{open_ext, Pid, Name, File} ->
R = disk_log:open([{name, Name}, {type, wrap}, {file, File},
{format, external}, {size, {?fsize, ?fno}}]),
- ?format("logger: open ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: open ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{close, Pid, Name} ->
R = disk_log:close(Name),
- ?format("logger: close ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: close ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{sync, Pid, Name} ->
R = disk_log:sync(Name),
- ?format("logger: sync ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: sync ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{log_terms, Pid, Name, Terms} ->
R = disk_log:log_terms(Name, Terms),
- ?format("logger: log_terms ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: log_terms ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{blog_terms, Pid, Name, Terms} ->
R = disk_log:blog_terms(Name, Terms),
- ?format("logger: blog_terms ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: blog_terms ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
exit ->
- ?format("Stopping logger~n", []),
+ ?format("Stopping wlr_logger~n", []),
exit(normal);
_Else ->
- ?format("logger: ignored: ~p~n", [_Else]),
+ ?format("wlr_logger: ignored: ~p~n", [_Else]),
loop()
end.
diff --git a/lib/kernel/test/zlib_SUITE.erl b/lib/kernel/test/zlib_SUITE.erl
index 26602bdcda..52ae1b3ae6 100644
--- a/lib/kernel/test/zlib_SUITE.erl
+++ b/lib/kernel/test/zlib_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -1061,32 +1061,27 @@ sub_heap_binaries(Config) when is_list(Config) ->
%% Check concurrent access to zlib driver.
smp(Config) ->
- case erlang:system_info(smp_support) of
- true ->
- NumOfProcs = lists:min([8,erlang:system_info(schedulers)]),
- io:format("smp starting ~p workers\n",[NumOfProcs]),
-
- %% Tests to run in parallel.
- Funcs =
- [zip_usage, gz_usage, compress_usage, dictionary_usage,
- crc, adler],
-
- %% We get all function arguments here to avoid repeated parallel
- %% file read access.
- UsageArgs =
- list_to_tuple([{F, ?MODULE:F({get_arg,Config})} || F <- Funcs]),
- Parent = self(),
-
- WorkerFun =
- fun() ->
- worker(rand:uniform(9999), UsageArgs, Parent)
- end,
-
- Pids = [spawn_link(WorkerFun) || _ <- lists:seq(1, NumOfProcs)],
- wait_pids(Pids);
- false ->
- {skipped,"No smp support"}
- end.
+ NumOfProcs = lists:min([8,erlang:system_info(schedulers)]),
+ io:format("smp starting ~p workers\n",[NumOfProcs]),
+
+ %% Tests to run in parallel.
+ Funcs =
+ [zip_usage, gz_usage, compress_usage, dictionary_usage,
+ crc, adler],
+
+ %% We get all function arguments here to avoid repeated parallel
+ %% file read access.
+ UsageArgs =
+ list_to_tuple([{F, ?MODULE:F({get_arg,Config})} || F <- Funcs]),
+ Parent = self(),
+
+ WorkerFun =
+ fun() ->
+ worker(rand:uniform(9999), UsageArgs, Parent)
+ end,
+
+ Pids = [spawn_link(WorkerFun) || _ <- lists:seq(1, NumOfProcs)],
+ wait_pids(Pids).
worker(Seed, FnATpl, Parent) ->
io:format("smp worker ~p, seed=~p~n",[self(),Seed]),
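
With SMP support now unconditional, smp/1 simply fans out min(8, schedulers) workers and waits for them all. Stripped of the zlib specifics, the fan-out/fan-in shape looks roughly like this (par_demo and the {done, Pid} protocol are illustrative):

    -module(par_demo).
    -export([run/1]).

    %% Run Fun in up to 8 parallel processes (bounded by the number of
    %% schedulers) and block until every worker has reported back.
    run(Fun) ->
        N = lists:min([8, erlang:system_info(schedulers)]),
        Parent = self(),
        Pids = [spawn_link(fun() -> Fun(), Parent ! {done, self()} end)
                || _ <- lists:seq(1, N)],
        [receive {done, Pid} -> ok end || Pid <- Pids],
        ok.
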
diff --git a/lib/kernel/test/zzz_SUITE.erl b/lib/kernel/test/zzz_SUITE.erl
new file mode 100644
index 0000000000..59c7fd7404
--- /dev/null
+++ b/lib/kernel/test/zzz_SUITE.erl
@@ -0,0 +1,37 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2006-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(zzz_SUITE).
+
+%% The sole purpose of this test suite is to hold things that we want to
+%% run last, just before the VM terminates.
+
+-export([all/0]).
+
+-export([lc_graph/1]).
+
+
+all() ->
+ [lc_graph].
+
+lc_graph(_Config) ->
+    %% Create an "lc_graph" file in the current working directory
+    %% if the lock checker is enabled.
+ erts_debug:lc_graph(),
+ ok.
diff --git a/lib/kernel/vsn.mk b/lib/kernel/vsn.mk
index 60a1b0bff8..b1ae513223 100644
--- a/lib/kernel/vsn.mk
+++ b/lib/kernel/vsn.mk
@@ -1 +1 @@
-KERNEL_VSN = 5.4.3
+KERNEL_VSN = 6.3.1