Diffstat (limited to 'lib/kernel')
160 files changed, 24633 insertions, 3359 deletions
diff --git a/lib/kernel/doc/src/.gitignore b/lib/kernel/doc/src/.gitignore
new file mode 100644
index 0000000000..c2813ac866
--- /dev/null
+++ b/lib/kernel/doc/src/.gitignore
@@ -0,0 +1 @@
+*.eps
\ No newline at end of file diff --git a/lib/kernel/doc/src/Makefile b/lib/kernel/doc/src/Makefile index 8976a3b800..f8867ccf25 100644 --- a/lib/kernel/doc/src/Makefile +++ b/lib/kernel/doc/src/Makefile @@ -1,7 +1,7 @@ # # %CopyrightBegin% # -# Copyright Ericsson AB 1997-2016. All Rights Reserved. +# Copyright Ericsson AB 1997-2018. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -42,6 +42,7 @@ XML_REF3_FILES = application.xml \ disk_log.xml \ erl_boot_server.xml \ erl_ddll.xml \ + erl_epmd.xml \ erl_prim_loader_stub.xml \ erlang_stub.xml \ error_handler.xml \ @@ -56,6 +57,11 @@ XML_REF3_FILES = application.xml \ inet.xml \ inet_res.xml \ init_stub.xml \ + logger.xml \ + logger_std_h.xml \ + logger_disk_log_h.xml \ + logger_filters.xml \ + logger_formatter.xml \ net_adm.xml \ net_kernel.xml \ os.xml \ @@ -70,11 +76,18 @@ XML_REF4_FILES = app.xml config.xml XML_REF6_FILES = kernel_app.xml -XML_PART_FILES = part_notes.xml part_notes_history.xml -XML_CHAPTER_FILES = notes.xml notes_history.xml +XML_PART_FILES = part.xml +XML_CHAPTER_FILES = \ + notes.xml \ + introduction_chapter.xml \ + logger_chapter.xml BOOK_FILES = book.xml +# The .png file is generated from a .dia file with target 'update_png' +IMAGE_FILES = \ + logger_arch.png + XML_FILES = \ $(BOOK_FILES) $(XML_CHAPTER_FILES) \ $(XML_PART_FILES) $(XML_REF3_FILES) $(XML_REF4_FILES)\ @@ -100,9 +113,20 @@ SPECS_FILES = $(XML_REF3_FILES:%.xml=$(SPECDIR)/specs_%.xml) TOP_SPECS_FILE = specs.xml # ---------------------------------------------------- -# FLAGS +# FIGURES +# ---------------------------------------------------- +# In order to update the figures you have to have both dia +# and imagemagick installed. +# The generated .png file must be committed. 
+ +update_png: + dia --export=logger_arch.eps logger_arch.dia + convert logger_arch.eps -resize 65% logger_arch.png + +# ---------------------------------------------------- +# FLAGS # ---------------------------------------------------- -XML_FLAGS += +XML_FLAGS += SPECS_ESRC = ../../src @@ -111,7 +135,7 @@ SPECS_FLAGS = -I../../include # ---------------------------------------------------- # Targets # ---------------------------------------------------- -$(HTMLDIR)/%.gif: %.gif +$(HTMLDIR)/%: % $(INSTALL_DATA) $< $@ docs: man pdf html @@ -120,38 +144,40 @@ $(TOP_PDF_FILE): $(XML_FILES) pdf: $(TOP_PDF_FILE) -html: gifs $(HTML_REF_MAN_FILE) +html: images $(HTML_REF_MAN_FILE) man: $(MAN3_FILES) $(MAN4_FILES) $(MAN6_FILES) -gifs: $(GIF_FILES:%=$(HTMLDIR)/%) -debug opt: +images: $(IMAGE_FILES:%=$(HTMLDIR)/%) + +debug opt: clean clean_docs: rm -rf $(HTMLDIR)/* + rm -rf $(XMLDIR) rm -f $(MAN3DIR)/* rm -f $(MAN4DIR)/* rm -f $(MAN6DIR)/* rm -f $(TOP_PDF_FILE) $(TOP_PDF_FILE:%.pdf=%.fo) rm -f $(SPECDIR)/* - rm -f errs core *~ + rm -f errs core *~ *.eps $(SPECDIR)/specs_erl_prim_loader_stub.xml: - escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \ + $(gen_verbose)escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \ -o$(dir $@) -module erl_prim_loader_stub $(SPECDIR)/specs_erlang_stub.xml: - escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \ + $(gen_verbose)escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \ -o$(dir $@) -module erlang_stub $(SPECDIR)/specs_init_stub.xml: - escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \ + $(gen_verbose)escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \ -o$(dir $@) -module init_stub $(SPECDIR)/specs_zlib_stub.xml: - escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \ + $(gen_verbose)escript $(SPECS_EXTRACTOR) $(SPECS_FLAGS) \ -o$(dir $@) -module zlib_stub # ---------------------------------------------------- # Release Target -# ---------------------------------------------------- +# ---------------------------------------------------- include $(ERL_TOP)/make/otp_release_targets.mk release_docs_spec: docs @@ -169,4 +195,3 @@ release_docs_spec: docs $(INSTALL_DATA) $(MAN6_FILES) "$(RELEASE_PATH)/man/man6" release_spec: - diff --git a/lib/kernel/doc/src/application.xml b/lib/kernel/doc/src/application.xml index 886286b76d..38c7b5acf1 100644 --- a/lib/kernel/doc/src/application.xml +++ b/lib/kernel/doc/src/application.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>1996</year><year>2016</year> + <year>1996</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -318,8 +318,13 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code> <c>{error,{not_started,App}}</c> is returned, where <c>App</c> is the name of the missing application.</p> <p>The application controller then creates an <em>application master</em> - for the application. The application master is - the group leader of all the processes in the application. + for the application. The application master becomes the + group leader of all the processes in the application. I/O is + forwarded to the previous group leader, though, this is just + a way to identify processes that belong to the application. 
+ Used for example to find itself from any process, or, + reciprocally, to kill them all when it terminates.</p> + <p> The application master starts the application by calling the application callback function <c>Module:start/2</c> as defined by the application specification key <c>mod</c>.</p> @@ -608,4 +613,3 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code> <seealso marker="app">app(4)</seealso></p> </section> </erlref> - diff --git a/lib/kernel/doc/src/book.xml b/lib/kernel/doc/src/book.xml index 81a87d126d..4b3573b9fe 100644 --- a/lib/kernel/doc/src/book.xml +++ b/lib/kernel/doc/src/book.xml @@ -4,7 +4,7 @@ <book xmlns:xi="http://www.w3.org/2001/XInclude"> <header titlestyle="normal"> <copyright> - <year>1997</year><year>2016</year> + <year>1997</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -34,6 +34,9 @@ <preamble> <contents level="2"></contents> </preamble> + <parts lift="yes"> + <xi:include href="part.xml"/> + </parts> <applications> <xi:include href="ref_man.xml"/> </applications> diff --git a/lib/kernel/doc/src/code.xml b/lib/kernel/doc/src/code.xml index c94f612c01..aff3e8133c 100644 --- a/lib/kernel/doc/src/code.xml +++ b/lib/kernel/doc/src/code.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>1996</year><year>2017</year> + <year>1996</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -34,26 +34,28 @@ <p>This module contains the interface to the Erlang <em>code server</em>, which deals with the loading of compiled code into a running Erlang runtime system.</p> - <p>The runtime system can be started in <em>embedded</em> or - <em>interactive</em> mode. Which one is decided by command-line + <p>The runtime system can be started in <em>interactive</em> or + <em>embedded</em> mode. Which one is decided by the command-line flag <c>-mode</c>:</p> <pre> % <input>erl -mode interactive</input></pre> <p>The modes are as follows:</p> <list type="bulleted"> <item> - <p>In embedded mode, all code is loaded during system startup - according to the boot script. (Code can also be loaded later - by explicitly ordering the code server to do so).</p> - </item> - <item> <p>In interactive mode, which is default, only some code is loaded - during system startup, basically the modules needed by the runtime + during system startup, basically the modules needed by the runtime system. Other code is dynamically loaded when first referenced. When a call to a function in a certain module is made, and the module is not loaded, the code server searches for and tries to load the module.</p> </item> + <item> + <p>In embedded mode, modules are not auto loaded. Trying to use + a module that has not been loaded results in an error. This mode is + recommended when the boot script loads all modules, as it is + typically done in OTP releases. (Code can still be loaded later + by explicitly ordering the code server to do so).</p> + </item> </list> <p>To prevent accidentally reloading of modules affecting the Erlang runtime system, directories <c>kernel</c>, <c>stdlib</c>, diff --git a/lib/kernel/doc/src/config.xml b/lib/kernel/doc/src/config.xml index fdb2d29f63..714af93f4d 100644 --- a/lib/kernel/doc/src/config.xml +++ b/lib/kernel/doc/src/config.xml @@ -4,7 +4,7 @@ <fileref> <header> <copyright> - <year>1997</year><year>2017</year> + <year>1997</year><year>2018</year> <holder>Ericsson AB. 
All Rights Reserved.</holder> </copyright> <legalnotice> @@ -37,10 +37,10 @@ data in the system configuration file <c>Name.config</c>.</p> <p>Configuration parameter values in the configuration file override the values in the application resource files (see - <seealso marker="app"><c>app(4)</c></seealso>. + <seealso marker="app"><c>app(4)</c></seealso>). The values in the configuration file can be overridden by command-line flags (see - <seealso marker="erts:erl"><c>erts:erl(1)</c></seealso>.</p> + <seealso marker="erts:erl"><c>erts:erl(1)</c></seealso>).</p> <p>The value of a configuration parameter is retrieved by calling <c>application:get_env/1,2</c>.</p> </description> @@ -86,8 +86,13 @@ <tag><c>File = string()</c></tag> <item>Name of another <c>.config</c> file. Extension <c>.config</c> can be omitted. It is - recommended to use absolute paths. A relative path is - relative the current working directory of the emulator.</item> + recommended to use absolute paths. If a relative path is used, + <c>File</c> is searched, first, relative from <c>sys.config</c> directory, then relative + to the current working directory of the emulator, for backward compatibility. + This allow to use a <c>sys.config</c> pointing out other <c>.config</c> files in a release + or in a node started manually using <c>-config ...</c> with same result whatever + the current working directory. + </item> </taglist> <p>When traversing the contents of <c>sys.config</c> and a filename is encountered, its contents are read and merged with the result diff --git a/lib/kernel/doc/src/disk_log.xml b/lib/kernel/doc/src/disk_log.xml index 1be28adfb8..884cb32c0c 100644 --- a/lib/kernel/doc/src/disk_log.xml +++ b/lib/kernel/doc/src/disk_log.xml @@ -972,7 +972,7 @@ <item> <p>Specifies if messages will be sent to <c>error_logger</c> on recoverable errors with - the log files. Defaults to <c>true</c>.</p> + the log files. Defaults to <c>false</c>.</p> </item> </taglist> <p><c>open/1</c> returns <c>{ok, <anno>Log</anno>}</c> if the diff --git a/lib/kernel/doc/src/erl_epmd.xml b/lib/kernel/doc/src/erl_epmd.xml new file mode 100644 index 0000000000..8b076cd2d7 --- /dev/null +++ b/lib/kernel/doc/src/erl_epmd.xml @@ -0,0 +1,104 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE erlref SYSTEM "erlref.dtd"> + +<erlref> + <header> + <copyright> + <year>2018</year><year>2018</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + </legalnotice> + + <title>erl_epmd</title> + <prepared>Timmo Verlaan</prepared> + <docno>1</docno> + <date>2018-02-19</date> + <rev>A</rev> + </header> + <module>erl_epmd</module> + <modulesummary> + Erlang interface towards epmd + </modulesummary> + <description> + <p>This module communicates with the EPMD daemon, see <seealso + marker="erts:epmd">epmd</seealso>. 
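The discovery callbacks documented further down on this page can be replaced as a whole. As a rough, hedged sketch only (the module name my_epmd, the fixed port 4370, and the concrete return values are assumptions made for the example, not part of this page), an alternative module selected with the -epmd_module flag could look like this:

<code type="none"><![CDATA[
%% Hypothetical alternative discovery module, started with
%%   erl -epmd_module my_epmd ...
%% Assumes every node listens on one fixed, well-known distribution
%% port, so no epmd daemon is consulted at all.
-module(my_epmd).
-export([start_link/0, register_node/3, port_please/3,
         address_please/3, names/1]).

-define(DIST_PORT, 4370).            %% made-up fixed distribution port

start_link() ->
    ignore.                          %% nothing needs to be supervised

register_node(_Name, _Port, _Family) ->
    {ok, 1}.                         %% pretend creation number

port_please(_Name, _Host, _Timeout) ->
    {port, ?DIST_PORT, 5}.           %% distribution protocol version 5

address_please(_Name, Host, AddressFamily) ->
    inet:getaddr(Host, AddressFamily).

names(_Host) ->
    {error, address}.                %% node listing not supported here
]]></code>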
To implement your own epmd module please + see <seealso marker="erts:alt_disco">ERTS User's Guide: How to Implement an + Alternative Service Discovery for Erlang Distribution</seealso></p> + </description> + + <funcs> + <func> + <name name="start_link" arity="0"/> + <fsummary>Callback for erl_distribution supervisor.</fsummary> + <desc> + <p>This function is invoked as this module is added as a child of the + <c>erl_distribution</c> supervisor.</p> + </desc> + </func> + + <func> + <name name="register_node" arity="2"/> + <name name="register_node" arity="3"/> + <fsummary>Registers the node with <c>epmd</c>.</fsummary> + <desc> + <p>Registers the node with <c>epmd</c> and tells epmd what port will be + used for the current node. It returns a creation number. This number is + incremented on each register to help with identifying if a node is + reconnecting to epmd.</p> + </desc> + </func> + + <func> + <name name="port_please" arity="2"/> + <name name="port_please" arity="3"/> + <fsummary>Returns the port number for a given node.</fsummary> + <desc> + <p>Requests the distribution port for the given node of an EPMD + instance. Together with the port it returns a distribution protocol + version which has been 5 since Erlang/OTP R6.</p> + </desc> + </func> + + <func> + <name name="address_please" arity="3"/> + <fsummary>Returns address and port.</fsummary> + <desc> + <p>Called by the distribution module. Resolves the <c>Host</c> to an IP + address.</p> + <p>Another epmd module may return port and distribution protocol version + as well.</p> + </desc> + </func> + + <func> + <name name="names" arity="1"/> + <fsummary>Names of Erlang nodes at a host.</fsummary> + <desc> + <p>Called by <seealso marker="net_adm"><c>net_adm:names/0</c></seealso>. + <c>Host</c> defaults to the localhost. Returns the names and associated + port numbers of the Erlang nodes that <c>epmd</c> registered at the + specified host. Returns <c>{error, address}</c> if <c>epmd</c> is not + operational.</p> + <p><em>Example:</em></p> + <pre> +(arne@dunn)1> <input>erl_epmd:names(localhost).</input> +{ok,[{"arne",40262}]}</pre> + </desc> + </func> + </funcs> + +</erlref> + diff --git a/lib/kernel/doc/src/error_logger.xml b/lib/kernel/doc/src/error_logger.xml index 91bf57cb91..c3d68fd79f 100644 --- a/lib/kernel/doc/src/error_logger.xml +++ b/lib/kernel/doc/src/error_logger.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>1996</year><year>2017</year> + <year>1996</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -31,36 +31,37 @@ <module>error_logger</module> <modulesummary>Erlang error logger.</modulesummary> <description> + + <note> + <p>In Erlang/OTP 21.0, a new API for logging was added. The + old <c>error_logger</c> module can still be used by legacy + code, but log events are redirected to the new Logger API. New + code should use the Logger API directly.</p> + <p><c>error_logger</c> is no longer started by default, but is + automatically started when an event handler is added + with <c>error_logger:add_report_handler/1,2</c>. 
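For illustration only, a legacy report handler of the kind referred to here is an ordinary gen_event callback module; the module name my_report_handler and the printed text are invented for this sketch:

<code type="none"><![CDATA[
%% Installed with error_logger:add_report_handler(my_report_handler),
%% which also starts the error_logger process and adds it as a Logger
%% handler. Events arrive in the formats listed in section Events.
-module(my_report_handler).
-behaviour(gen_event).
-export([init/1, handle_event/2, handle_call/2, handle_info/2,
         terminate/2]).

init(_Args) ->
    {ok, []}.

handle_event({error, _Gleader, {Pid, Format, Data}}, State) ->
    io:format("error event from ~p: " ++ Format ++ "~n", [Pid | Data]),
    {ok, State};
handle_event(_Other, State) ->
    {ok, State}.

handle_call(_Request, State) -> {ok, ok, State}.
handle_info(_Msg, State)     -> {ok, State}.
terminate(_Reason, _State)   -> ok.
]]></code>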
The <c>error_logger</c> + module is then also added as a handler to the new logger.</p> + <p>See <seealso marker="logger"><c>logger(3)</c></seealso> and + the <seealso marker="logger_chapter">Logging</seealso> chapter + in the User's Guide for more information.</p> + </note> + <p>The Erlang <em>error logger</em> is an event manager (see <seealso marker="doc/design_principles:des_princ">OTP Design Principles</seealso> and <seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>), - registered as <c>error_logger</c>. Errors, warnings, and info events - are sent to the error logger from the Erlang runtime system and - the different Erlang/OTP applications. The events are, by default, - logged to the terminal. Notice that an event from a process <c>P</c> is - logged at the node of the group leader of <c>P</c>. This means - that log output is directed to the node from which a process was - created, which not necessarily is the same node as where it is - executing.</p> - <p>Initially, <c>error_logger</c> has only a primitive event - handler, which buffers and prints the raw event messages. During - system startup, the Kernel application replaces this with a - <em>standard event handler</em>, by default one that writes - nicely formatted output to the terminal. Kernel can also be - configured so that events are logged to a file instead, or not logged at all, - see <seealso marker="kernel_app"><c>kernel(6)</c></seealso>.</p> - <p>Also the SASL application, if started, adds its own event - handler, which by default writes supervisor, crash, and progress - reports to the terminal. See - <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso>.</p> - <p>It is recommended that user-defined applications report - errors through the error logger to get uniform reports. - User-defined event handlers can be added to handle application-specific - events, see - <seealso marker="#add_report_handler/1"><c>add_report_handler/1,2</c></seealso>. - Also, a useful event handler is provided in STDLIB for multi-file - logging of events, see - <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso>.</p> + registered as <c>error_logger</c>.</p> + <p>Error logger is no longer started by default, but is + automatically started when an event handler is added + with <seealso marker="#add_report_handler/1"> + <c>add_report_handler/1,2</c></seealso>. The <c>error_logger</c> + module is then also added as a handler to the new logger, + causing log events to be forwarded from logger to error logger, + and consequently to all installed error logger event + handlers.</p> + <p>User-defined event handlers can be added to handle application-specific + events.</p> + <p>Existing event handlers provided by STDLIB and SASL are still + available, but are no longer used by OTP.</p> <p>Warning events were introduced in Erlang/OTP R9C and are enabled by default as from Erlang/OTP 18.0. 
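A minimal sketch of the recommended replacement for the calls on this page (the module name, the message texts, and the do_work/1 helper are invented): new code includes Kernel's logger.hrl and calls the Logger API directly.

<code type="none"><![CDATA[
-module(my_app_worker).
-export([run/1]).

-include_lib("kernel/include/logger.hrl").    %% provides the ?LOG_* macros

run(Arg) ->
    ?LOG_INFO("starting with ~p", [Arg]),     %% macro adds location metadata
    case do_work(Arg) of
        {ok, Result} ->
            Result;
        {error, Reason} ->
            %% plain function call, no macro needed
            logger:error("work failed: ~p", [Reason]),
            erlang:error(Reason)
    end.

do_work(_Arg) ->
    {ok, done}.
]]></code>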
To retain backwards compatibility with existing user-defined event handlers, the warning events can be @@ -89,6 +90,9 @@ The function returns <c>ok</c> if successful.</p> <p>The event handler must be able to handle the events in this module, see section <seealso marker="#events">Events</seealso>.</p> + <p>The first time this function is called, + <c>error_logger</c> is added as a Logger handler, and + the <c>error_logger</c> process is started.</p> </desc> </func> <func> @@ -98,37 +102,40 @@ <p>Deletes an event handler from the error logger by calling <c>gen_event:delete_handler(error_logger, <anno>Handler</anno>, [])</c>, see <seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>.</p> + <p>If no more event handlers exist after the deletion, + <c>error_logger</c> is removed as a Logger handler, and + the <c>error_logger</c> process is stopped.</p> </desc> </func> <func> <name name="error_msg" arity="1"/> <name name="error_msg" arity="2"/> <name name="format" arity="2"/> - <fsummary>Send a standard error event to the error logger.</fsummary> + <fsummary>Log a standard error event.</fsummary> <desc> - <p>Sends a standard error event to the error logger. - The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments - are the same as the arguments of + <p>Log a standard error event. The <c><anno>Format</anno></c> + and <c><anno>Data</anno></c> arguments are the same as the + arguments of <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso> - in STDLIB. - The event is handled by the standard event handler.</p> + in STDLIB.</p> + <p>Error logger forwards the event to Logger, including + metadata that allows backwards compatibility with legacy + error logger event handlers.</p> + <p>The event is handled by the default Logger handler.</p> + <p>These functions are kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_ERROR</c></seealso> macro or + <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso> + instead.</p> <p><em>Example:</em></p> <pre> -1> <input>error_logger:error_msg("An error occurred in ~p~n", [a_module]).</input> - -=ERROR REPORT==== 11-Aug-2005::14:03:19 === +1> <input>error_logger:error_msg("An error occurred in ~p", [a_module]).</input> +=ERROR REPORT==== 22-May-2018::11:18:43.376917 === An error occurred in a_module ok</pre> <warning> - <p>If called with bad arguments, this function can crash - the standard event handler, meaning no further events are - logged. When in doubt, use - <seealso marker="#error_report/1"><c>error_report/1</c></seealso> - instead.</p> - </warning> - <warning> <p>If the Unicode translation modifier (<c>t</c>) is used in - the format string, all error handlers must ensure that the + the format string, all event handlers must ensure that the formatted output is correctly encoded for the I/O device.</p> </warning> @@ -136,36 +143,51 @@ ok</pre> </func> <func> <name name="error_report" arity="1"/> - <fsummary>Send a standard error report event to the error logger.</fsummary> + <fsummary>Log a standard error event.</fsummary> <desc> - <p>Sends a standard error report event to the error logger. - The event is handled by the standard event handler.</p> + <p>Log a standard error event. 
Error logger forwards the event + to Logger, including metadata that allows backwards + compatibility with legacy error logger event handlers.</p> + <p>The event is handled by the default Logger handler.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_ERROR</c></seealso> macro or + <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso> + instead.</p> <p><em>Example:</em></p> <pre> 2> <input>error_logger:error_report([{tag1,data1},a_term,{tag2,data}]).</input> - -=ERROR REPORT==== 11-Aug-2005::13:45:41 === +=ERROR REPORT==== 22-May-2018::11:24:23.699306 === tag1: data1 a_term tag2: data ok 3> <input>error_logger:error_report("Serious error in my module").</input> - -=ERROR REPORT==== 11-Aug-2005::13:45:49 === +=ERROR REPORT==== 22-May-2018::11:24:45.972445 === Serious error in my module ok</pre> </desc> </func> <func> <name name="error_report" arity="2"/> - <fsummary>Send a user-defined error report event to the error logger.</fsummary> + <fsummary>Log a user-defined error event.</fsummary> <desc> - <p>Sends a user-defined error report event to the error logger. - An event handler to handle the event is supposed to have been - added. The event is ignored by the standard event handler.</p> + <p>Log a user-defined error event. Error logger forwards the + event to Logger, including metadata that allows backwards + compatibility with legacy error logger event handlers.</p> + <p>Error logger also adds a <c>domain</c> field with + value <c>[<anno>Type</anno>]</c> to this event's metadata, + causing the filters of the default Logger handler to discard + the event. A different Logger handler, or an error logger + event handler, must be added to handle this event.</p> <p>It is recommended that <c><anno>Report</anno></c> follows the same structure as for <seealso marker="#error_report/1"><c>error_report/1</c></seealso>.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_ERROR</c></seealso> macro or + <seealso marker="logger#error-1"><c>logger:error/1,2,3</c></seealso> + instead.</p> </desc> </func> <func> @@ -174,38 +196,48 @@ ok</pre> <c>error_logger_format_depth</c>.</fsummary> <desc> <p>Returns <c>max(10, Depth)</c>, where <c>Depth</c> is the - value of - <seealso marker="kernel:kernel_app#error_logger_format_depth"> - error_logger_format_depth</seealso> + value of <c>error_logger_format_depth</c> in the Kernel application, if Depth is an integer. Otherwise, <c>unlimited</c> is returned.</p> + <note> + <p>The <c>error_logger_format_depth</c> variable + is <seealso marker="kernel_app#deprecated-configuration-parameters"> + deprecated</seealso> since + the <seealso marker="logger">Logger API</seealso> was + introduced in Erlang/OTP 21.0. The variable, and this + function, are kept for backwards compatibility since they + still might be used by legacy report handlers.</p> + </note> </desc> </func> <func> <name name="info_msg" arity="1"/> <name name="info_msg" arity="2"/> - <fsummary>Send a standard information event to the error logger.</fsummary> + <fsummary>Log a standard information event.</fsummary> <desc> - <p>Sends a standard information event to the error logger. - The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments - are the same as the arguments of + <p>Log a standard information event. 
The <c><anno>Format</anno></c> + and <c><anno>Data</anno></c> arguments are the same as the + arguments of <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso> - in STDLIB. The event is handled by the standard event handler.</p> + in STDLIB.</p> + <p>Error logger forwards the event to Logger, including + metadata that allows backwards compatibility with legacy + error logger event handlers.</p> + <p>The event is handled by the default Logger handler.</p> + <p>These functions are kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_INFO</c></seealso> macro or + <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso> + instead.</p> <p><em>Example:</em></p> <pre> -1> <input>error_logger:info_msg("Something happened in ~p~n", [a_module]).</input> - -=INFO REPORT==== 11-Aug-2005::14:06:15 === +1> <input>error_logger:info_msg("Something happened in ~p", [a_module]).</input> +=INFO REPORT==== 22-May-2018::12:03:32.612462 === Something happened in a_module ok</pre> <warning> - <p>If called with bad arguments, this function can crash - the standard event handler, meaning no further events are - logged. When in doubt, use <c>info_report/1</c> instead.</p> - </warning> - <warning> <p>If the Unicode translation modifier (<c>t</c>) is used in - the format string, all error handlers must ensure that the + the format string, all event handlers must ensure that the formatted output is correctly encoded for the I/O device.</p> </warning> @@ -213,37 +245,52 @@ ok</pre> </func> <func> <name name="info_report" arity="1"/> - <fsummary>Send a standard information report event to the error logger.</fsummary> + <fsummary>Log a standard information event.</fsummary> <desc> - <p>Sends a standard information report event to the error - logger. The event is handled by the standard event handler.</p> + <p>Log a standard information event. Error logger forwards the + event to Logger, including metadata that allows backwards + compatibility with legacy error logger event handlers.</p> + <p>The event is handled by the default Logger handler.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_INFO</c></seealso> macro or + <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso> + instead.</p> <p><em>Example:</em></p> <pre> 2> <input>error_logger:info_report([{tag1,data1},a_term,{tag2,data}]).</input> - -=INFO REPORT==== 11-Aug-2005::13:55:09 === +=INFO REPORT==== 22-May-2018::12:06:35.994440 === tag1: data1 a_term tag2: data ok 3> <input>error_logger:info_report("Something strange happened").</input> - -=INFO REPORT==== 11-Aug-2005::13:55:36 === +=INFO REPORT==== 22-May-2018::12:06:49.066872 === Something strange happened ok</pre> </desc> </func> <func> <name name="info_report" arity="2"/> - <fsummary>Send a user-defined information report event to the error logger.</fsummary> + <fsummary>Log a user-defined information event.</fsummary> <desc> - <p>Sends a user-defined information report event to the error - logger. An event handler to handle the event is supposed to - have been added. The event is ignored by the standard event - handler.</p> + <p>Log a user-defined information event. 
Error logger forwards + the event to Logger, including metadata that allows + backwards compatibility with legacy error logger event + handlers.</p> + <p>Error logger also adds a <c>domain</c> field with + value <c>[<anno>Type</anno>]</c> to this event's metadata, + causing the filters of the default Logger handler to discard + the event. A different Logger handler, or an error logger + event handler, must be added to handle this event.</p> <p>It is recommended that <c><anno>Report</anno></c> follows the same structure as for <seealso marker="#info_report/1"><c>info_report/1</c></seealso>.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_INFO</c></seealso> macro or + <seealso marker="logger#info-1"><c>logger:info/1,2,3</c></seealso> + instead.</p> </desc> </func> <func> @@ -258,14 +305,22 @@ ok</pre> <type name="open_error"/> <desc> <p>Enables or disables printout of standard events to a file.</p> - <p>This is done by adding or deleting the standard event handler - for output to file. Thus, calling this function overrides - the value of the Kernel <c>error_logger</c> configuration - parameter.</p> - <p>Enabling file logging can be used together with calling - <c>tty(false)</c>, to have a silent system where - all standard events are logged to a file only. - Only one log file can be active at a time.</p> + <p>This is done by adding or deleting + the <c>error_logger_file_h</c> event handler, and thus + indirectly adding <c>error_logger</c> as a Logger + handler.</p> + <p>Notice that this function does not manipulate the Logger + configuration directly, meaning that if the default Logger + handler is already logging to a file, this function can + potentially cause logging to a second file.</p> + <p>This function is useful as a shortcut during development + and testing, but must not be used in a production + system. See + section <seealso marker="logger_chapter">Logging</seealso> + in the Kernel User's Guide, and + the <seealso marker="logger"><c>logger(3)</c></seealso> + manual page for information about how to configure Logger + for live systems.</p> <p><c>Request</c> is one of the following:</p> <taglist> <tag><c>{open, <anno>Filename</anno>}</c></tag> @@ -297,9 +352,14 @@ ok</pre> <p>Enables (<c><anno>Flag</anno> == true</c>) or disables (<c><anno>Flag</anno> == false</c>) printout of standard events to the terminal.</p> - <p>This is done by adding or deleting the standard event handler - for output to the terminal. Thus, calling this function overrides - the value of the Kernel <c>error_logger</c> configuration parameter.</p> + <p>This is done by manipulating the Logger configuration. The + function is useful as a shortcut during development and + testing, but must not be used in a production system. See + section <seealso marker="logger_chapter">Logging</seealso> + in the Kernel User's Guide, and + the <seealso marker="logger"><c>logger(3)</c></seealso> + manual page for information about how to configure Logger + for live systems.</p> </desc> </func> <func> @@ -342,24 +402,27 @@ ok</pre> <func> <name name="warning_msg" arity="1"/> <name name="warning_msg" arity="2"/> - <fsummary>Send a standard warning event to the error logger.</fsummary> + <fsummary>Log a standard warning event.</fsummary> <desc> - <p>Sends a standard warning event to the error logger. 
- The <c><anno>Format</anno></c> and <c><anno>Data</anno></c> arguments - are the same as the arguments of + <p>Log a standard warning event. The <c><anno>Format</anno></c> + and <c><anno>Data</anno></c> arguments are the same as the + arguments of <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso> - in STDLIB. - The event is handled by the standard event handler. It is tagged - as an error, warning, or info, see + in STDLIB.</p> + <p>Error logger forwards the event to Logger, including + metadata that allows backwards compatibility with legacy + error logger event handlers.</p> + <p>The event is handled by the default Logger handler. The log + level can be changed to error or info, see <seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p> - <warning> - <p>If called with bad arguments, this function can crash - the standard event handler, meaning no further events are - logged. When in doubt, use <c>warning_report/1</c> instead.</p> - </warning> + <p>These functions are kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_WARNING</c></seealso> macro or + <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso> + instead.</p> <warning> <p>If the Unicode translation modifier (<c>t</c>) is used in - the format string, all error handlers must ensure that the + the format string, all event handlers must ensure that the formatted output is correctly encoded for the I/O device.</p> </warning> @@ -367,24 +430,43 @@ ok</pre> </func> <func> <name name="warning_report" arity="1"/> - <fsummary>Send a standard warning report event to the error logger.</fsummary> + <fsummary>Log a standard warning event.</fsummary> <desc> - <p>Sends a standard warning report event to the error logger. - The event is handled by the standard event handler. It is - tagged as an error, warning, or info, see + <p>Log a standard warning event. Error logger forwards the event + to Logger, including metadata that allows backwards + compatibility with legacy error logger event handlers.</p> + <p>The event is handled by the default Logger handler. The log + level can be changed to error or info, see <seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_WARNING</c></seealso> macro or + <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso> + instead.</p> </desc> </func> <func> <name name="warning_report" arity="2"/> - <fsummary>Send a user-defined warning report event to the error logger.</fsummary> + <fsummary>Log a user-defined warning event.</fsummary> <desc> - <p>Sends a user-defined warning report event to the error - logger. An event handler to handle the event is supposed to - have been added. The event is ignored by the standard event - handler. It is tagged as an error, warning, or info, - depending on the value of + <p>Log a user-defined warning event. Error logger forwards the + event to Logger, including metadata that allows backwards + compatibility with legacy error logger event handlers.</p> + <p>Error logger also adds a <c>domain</c> field with + value <c>[<anno>Type</anno>]</c> to this event's metadata, + causing the filters of the default Logger handler to discard + the event. 
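One way to pick such domain-tagged events up again is sketched below; the handler id my_domain_h, the report type my_app, and the anonymous filter fun are assumptions made for the example (logger_filters:domain/2 provides a ready-made filter for the same purpose).

<code type="none"><![CDATA[
-module(domain_sketch).
-export([setup/0]).

%% Add a standard handler that accepts only events whose domain starts
%% with the report type my_app, and drops everything else it receives.
setup() ->
    Accept = fun(#{meta := #{domain := [my_app | _]}} = Event, _Extra) ->
                     Event;
                (_Event, _Extra) ->
                     ignore
             end,
    ok = logger:add_handler(my_domain_h, logger_std_h,
                            #{filter_default => stop,
                              filters => [{my_app_only, {Accept, none}}]}),
    error_logger:warning_report(my_app, [{tag1, data1}, a_term]).
]]></code>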
A different Logger handler, or an error logger + event handler, must be added to handle this event.</p> + <p>The log level can be changed to error or info, see <seealso marker="#warning_map/0"><c>warning_map/0</c></seealso>.</p> + <p>It is recommended that <c><anno>Report</anno></c> follows the same + structure as for + <seealso marker="#warning_report/1"><c>warning_report/1</c></seealso>.</p> + <p>This functions is kept for backwards compatibility and + must not be used by new code. Use the <seealso marker="logger#macros"> + <c>?LOG_WARNING</c></seealso> macro or + <seealso marker="logger#warning-1"><c>logger:warning/1,2,3</c></seealso> + instead.</p> </desc> </func> </funcs> @@ -448,8 +530,9 @@ ok</pre> <section> <title>See Also</title> <p><seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>, - <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso> - <seealso marker="kernel_app"><c>kernel(6)</c></seealso> + <seealso marker="kernel:logger"><c>logger(3)</c></seealso>, + <seealso marker="stdlib:log_mf_h"><c>log_mf_h(3)</c></seealso>, + <seealso marker="kernel_app"><c>kernel(6)</c></seealso>, <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso></p> </section> </erlref> diff --git a/lib/kernel/doc/src/fascicules.xml b/lib/kernel/doc/src/fascicules.xml deleted file mode 100644 index fadd37eefb..0000000000 --- a/lib/kernel/doc/src/fascicules.xml +++ /dev/null @@ -1,15 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE fascicules SYSTEM "fascicules.dtd"> - -<fascicules> - <fascicule file="ref_man" href="ref_man_frame.html" entry="yes"> - Reference Manual - </fascicule> - <fascicule file="part_notes" href="part_notes_frame.html" entry="no"> - Release Notes - </fascicule> - <fascicule file="" href="../../../../doc/print.html" entry="no"> - Off-Print - </fascicule> -</fascicules> - diff --git a/lib/kernel/doc/src/file.xml b/lib/kernel/doc/src/file.xml index b674b3ca93..9acaf6b41e 100644 --- a/lib/kernel/doc/src/file.xml +++ b/lib/kernel/doc/src/file.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>1996</year><year>2016</year> + <year>1996</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -33,15 +33,18 @@ <description> <p>This module provides an interface to the file system.</p> - <p>On operating systems with thread support, - file operations can be performed in threads of their own, allowing - other Erlang processes to continue executing in parallel with - the file operations. See command-line flag - <c>+A</c> in <seealso marker="erts:erl"><c>erl(1)</c></seealso>.</p> + <warning> + <p>File operations are only guaranteed to appear atomic when going + through the same file server. A NIF or other OS process may observe + intermediate steps on certain operations on some operating systems, + eg. renaming an existing file on Windows, or + <seealso marker="#write_file_info/2"><c>write_file_info/2</c> + </seealso> on any OS at the time of writing.</p> + </warning> <p>Regarding filename encoding, the Erlang VM can operate in two modes. The current mode can be queried using function - <seealso marker="#native_name_encoding"><c>native_name_encoding/0</c></seealso>. + <seealso marker="#native_name_encoding/0"><c>native_name_encoding/0</c></seealso>. It returns <c>latin1</c> or <c>utf8</c>.</p> <p>In <c>latin1</c> mode, the Erlang VM does not change the @@ -59,7 +62,7 @@ terminal supports UTF-8, otherwise <c>latin1</c>. 
The default can be overridden using <c>+fnl</c> (to force <c>latin1</c> mode) or <c>+fnu</c> (to force <c>utf8</c> mode) when starting - <seealso marker="erts:erl"><c>erts:erl</c></seealso>.</p> + <seealso marker="erts:erl"><c>erl</c></seealso>.</p> <p>On operating systems with transparent naming, files can be inconsistently named, for example, some files are encoded in UTF-8 while @@ -81,6 +84,16 @@ <p>See also section <seealso marker="stdlib:unicode_usage#notes-about-raw-filenames">Notes About Raw Filenames</seealso> in the STDLIB User's Guide.</p> + <note><p> + File operations used to accept filenames containing + null characters (integer value zero). This caused + the name to be truncated and in some cases arguments + to primitive operations to be mixed up. Filenames + containing null characters inside the filename + are now <em>rejected</em> and will cause primitive + file operations fail. + </p></note> + </description> <datatypes> @@ -96,9 +109,21 @@ </datatype> <datatype> <name name="filename"/> + <desc> + <p> + See also the documentation of the + <seealso marker="#type-name_all"><c>name_all()</c></seealso> type. + </p> + </desc> </datatype> <datatype> <name name="filename_all"/> + <desc> + <p> + See also the documentation of the + <seealso marker="#type-name_all"><c>name_all()</c></seealso> type. + </p> + </desc> </datatype> <datatype> <name name="io_device"/> @@ -112,21 +137,23 @@ <name name="name"/> <desc> <p>If VM is in Unicode filename mode, <c>string()</c> and <c>char()</c> - are allowed to be > 255. + are allowed to be > 255. See also the documentation of the + <seealso marker="#type-name_all"><c>name_all()</c></seealso> type. </p> </desc> </datatype> <datatype> <name name="name_all"/> <desc> - <p>If VM is in Unicode filename mode, <c>string()</c> and <c>char()</c> + <p>If VM is in Unicode filename mode, characters are allowed to be > 255. <c><anno>RawFilename</anno></c> is a filename not subject to Unicode translation, meaning that it can contain characters not conforming to the Unicode encoding expected from the file system (that is, non-UTF-8 characters although the VM is started - in Unicode filename mode). + in Unicode filename mode). Null characters (integer value zero) + are <em>not</em> allowed in filenames (not even at the end). </p> </desc> </datatype> @@ -954,8 +981,7 @@ f.txt: {person, "kalle", 25}. </item> <tag><c>eisdir</c></tag> <item> - <p>The named file is not a regular file. It can be a - directory, a FIFO, or a device.</p> + <p>The named file is a directory.</p> </item> <tag><c>enotdir</c></tag> <item> @@ -1407,8 +1433,12 @@ f.txt: {person, "kalle", 25}. which is 1970-01-01 00:00 UTC.</p></item> </taglist> <p>Default is <c>{time, local}</c>.</p> - <p>If the option <c>raw</c> is set, the file server is not called - and only information about local files is returned.</p> + <p>If the option <c>raw</c> is set, the file server is not called and + only information about local files is returned. Note that this will + break this module's atomicity guarantees as it can race with a + concurrent call to + <seealso marker="#write_file_info/2"><c>write_file_info/1,2</c> + </seealso></p> <note> <p>As file times are stored in POSIX time on most OS, it is faster to query file information with option <c>posix</c>.</p> @@ -1656,8 +1686,12 @@ f.txt: {person, "kalle", 25}. 
except that if <c><anno>Name</anno></c> is a symbolic link, information about the link is returned in the <c>file_info</c> record and the <c>type</c> field of the record is set to <c>symlink</c>.</p> - <p>If the option <c>raw</c> is set, the file server is not called - and only information about local files is returned.</p> + <p>If the option <c>raw</c> is set, the file server is not called and + only information about local files is returned. Note that this will + break this module's atomicity guarantees as it can race with a + concurrent call to + <seealso marker="#write_file_info/2"><c>write_file_info/1,2</c> + </seealso></p> <p>If <c><anno>Name</anno></c> is not a symbolic link, this function returns the same result as <c>read_file_info/1</c>. On platforms that do not support symbolic links, this function @@ -1795,24 +1829,16 @@ f.txt: {person, "kalle", 25}. <p>The file used must be opened using the <c>raw</c> flag, and the process calling <c>sendfile</c> must be the controlling process of the socket. See <seealso marker="gen_tcp#controlling_process-2"><c>gen_tcp:controlling_process/2</c></seealso>.</p> - <p>If the OS used does not support <c>sendfile</c>, an Erlang fallback - using - <seealso marker="#read/2"><c>read/2</c></seealso> and - <seealso marker="gen_tcp#send/2"><c>gen_tcp:send/2</c></seealso> is used.</p> + <p>If the OS used does not support non-blocking <c>sendfile</c>, an + Erlang fallback using <seealso marker="#read/2"><c>read/2</c></seealso> + and <seealso marker="gen_tcp#send/2"><c>gen_tcp:send/2</c></seealso> is + used.</p> <p>The option list can contain the following options:</p> <taglist> <tag><c>chunk_size</c></tag> <item><p>The chunk size used by the Erlang fallback to send data. If using the fallback, set this to a value that comfortably fits in the systems memory. Default is 20 MB.</p></item> - <tag><c>use_threads</c></tag> - <item><p>Instructs the emulator to use the <c>async</c> thread pool for the - <c>sendfile</c> system call. This can be useful if the OS you are running - on does not properly support non-blocking <c>sendfile</c> calls. Notice that - using <c>async</c> threads potentially makes your system vulnerable to slow - client attacks. If set to <c>true</c> and no <c>async</c> threads are available, - the <c>sendfile</c> call returns <c>{error,einval}</c>. - Introduced in Erlang/OTP 17.0. Default is <c>false</c>.</p></item> </taglist> </desc> </func> @@ -1825,7 +1851,7 @@ f.txt: {person, "kalle", 25}. <p>The functions in the module <c>file</c> usually treat binaries as raw filenames, that is, they are passed "as is" even when the encoding of the binary does not agree with - <seealso marker="#native_name_encoding"><c>native_name_encoding()</c></seealso>. + <seealso marker="#native_name_encoding/0"><c>native_name_encoding()</c></seealso>. However, this function expects binaries to be encoded according to the value returned by <c>native_name_encoding()</c>.</p> <p>Typical error reasons are:</p> @@ -2117,144 +2143,77 @@ f.txt: {person, "kalle", 25}. <section> <title>Performance</title> - <p>Some operating system file operations, for example, a - <c>sync/1</c> or <c>close/1</c> on a huge file, can block their - calling thread for seconds. 
If this affects the emulator main - thread, the response time is no longer in the order of - milliseconds, depending on the definition of "soft" in soft - real-time system.</p> - <p>If the device driver thread pool is active, file operations are - done through those threads instead, so the emulator can go on - executing Erlang processes. Unfortunately, the time for serving a - file operation increases because of the extra scheduling required - from the operating system.</p> - <p>If the device driver thread pool is disabled or of size 0, large - file reads and writes are segmented into many smaller, which - enable the emulator to serve other processes during the file - operation. This has the same effect as when using the thread - pool, but with larger overhead. Other file operations, for - example, <c>sync/1</c> or <c>close/1</c> on a huge file, still are - a problem.</p> - <p>For increased performance, raw files are recommended. Raw files - use the file system of the host machine of the node.</p> + <p>For increased performance, raw files are recommended.</p> + <p>A normal file is really a process so it can be used as an I/O + device (see <seealso marker="stdlib:io"><c>io</c></seealso>). + Therefore, when data is written to a normal file, the sending of the + data to the file process, copies all data that are not binaries. Opening + the file in binary mode and writing binaries is therefore recommended. + If the file is opened on another node, or if the file server runs as + slave to the file server of another node, also binaries are copied.</p> <note> - <p> - For normal files (non-raw), the file server is used to find the files, - and if the node is running its file server as slave to the file server - of another node, and the other node runs on some other host machine, - they can have different file systems. - However, this is seldom a problem.</p> + <p>Raw files use the file system of the host machine of the node. + For normal files (non-raw), the file server is used to find the files, + and if the node is running its file server as slave to the file server + of another node, and the other node runs on some other host machine, + they can have different file systems. + However, this is seldom a problem.</p> </note> - <p>A normal file is really a process so it can be used as an I/O - device (see - <seealso marker="stdlib:io"><c>io</c></seealso>). - Therefore, when data is written to a - normal file, the sending of the data to the file process, copies - all data that are not binaries. Opening the file in binary mode - and writing binaries is therefore recommended. If the file is - opened on another node, or if the file server runs as slave to - the file server of another node, also binaries are copied.</p> - <p>Caching data to reduce the number of file operations, or rather - the number of calls to the file driver, generally increases - performance. The following function writes 4 MBytes in 23 - seconds when tested:</p> + <p><seealso marker="#open/2"><c>open/2</c></seealso> can be given the + options <c>delayed_write</c> and <c>read_ahead</c> to turn on caching, + which will reduce the number of operating system calls and greatly + improve performance for small reads and writes. However, the overhead + won't disappear completely and it's best to keep the number of file + operations to a minimum. 
As a contrived example, the following function + writes 4MB in 2.5 seconds when tested:</p> + <code type="none"><![CDATA[ -create_file_slow(Name, N) when integer(N), N >= 0 -> - {ok, FD} = file:open(Name, [raw, write, delayed_write, binary]), - ok = create_file_slow(FD, 0, N), - ok = ?FILE_MODULE:close(FD), - ok. - -create_file_slow(FD, M, M) -> +create_file_slow(Name) -> + {ok, Fd} = file:open(Name, [raw, write, delayed_write, binary]), + create_file_slow_1(Fd, 4 bsl 20), + file:close(Fd). + +create_file_slow_1(_Fd, 0) -> ok; -create_file_slow(FD, M, N) -> - ok = file:write(FD, <<M:32/unsigned>>), - create_file_slow(FD, M+1, N).]]></code> +create_file_slow_1(Fd, M) -> + ok = file:write(Fd, <<0>>), + create_file_slow_1(Fd, M - 1).]]></code> + + <p>The following functionally equivalent code writes 128 bytes per call + to <seealso marker="#write/2"><c>write/2</c></seealso> and so does the + same work in 0.08 seconds, which is roughly 30 times faster:</p> - <p>The following, functionally equivalent, function collects 1024 - entries into a list of 128 32-byte binaries before each call to - <seealso marker="#write/2"><c>write/2</c></seealso> and so - does the same work in 0.52 seconds, - which is 44 times faster:</p> <code type="none"><![CDATA[ -create_file(Name, N) when integer(N), N >= 0 -> - {ok, FD} = file:open(Name, [raw, write, delayed_write, binary]), - ok = create_file(FD, 0, N), - ok = ?FILE_MODULE:close(FD), +create_file(Name) -> + {ok, Fd} = file:open(Name, [raw, write, delayed_write, binary]), + create_file_1(Fd, 4 bsl 20), + file:close(Fd), ok. - -create_file(FD, M, M) -> + +create_file_1(_Fd, 0) -> ok; -create_file(FD, M, N) when M + 1024 =< N -> - create_file(FD, M, M + 1024, []), - create_file(FD, M + 1024, N); -create_file(FD, M, N) -> - create_file(FD, M, N, []). - -create_file(FD, M, M, R) -> - ok = file:write(FD, R); -create_file(FD, M, N0, R) when M + 8 =< N0 -> - N1 = N0-1, N2 = N0-2, N3 = N0-3, N4 = N0-4, - N5 = N0-5, N6 = N0-6, N7 = N0-7, N8 = N0-8, - create_file(FD, M, N8, - [<<N8:32/unsigned, N7:32/unsigned, - N6:32/unsigned, N5:32/unsigned, - N4:32/unsigned, N3:32/unsigned, - N2:32/unsigned, N1:32/unsigned>> | R]); -create_file(FD, M, N0, R) -> - N1 = N0-1, - create_file(FD, M, N1, [<<N1:32/unsigned>> | R]).]]></code> +create_file_1(Fd, M) when M >= 128 -> + ok = file:write(Fd, <<0:(128)/unit:8>>), + create_file_1(Fd, M - 128); +create_file_1(Fd, M) -> + ok = file:write(Fd, <<0:(M)/unit:8>>), + create_file_1(Fd, M - 1).]]></code> - <note> - <p>Trust only your own benchmarks. If the list length in - <c>create_file/2</c> above is increased, it runs slightly - faster, but consumes more memory and causes more memory - fragmentation. How much this affects your application is - something that this simple benchmark cannot predict.</p> - <p>If the size of each binary is increased to 64 bytes, it - also runs slightly faster, but the code is then twice as clumsy. - In the current implementation, binaries larger than 64 bytes are - stored in memory common to all processes and not copied when - sent between processes, while these smaller binaries are stored - on the process heap and copied when sent like any other term.</p> - <p>So, with a binary size of 68 bytes, <c>create_file/2</c> runs - 30 percent slower than with 64 bytes, and causes much more - memory fragmentation. Notice that if the binaries were to be sent - between processes (for example, a non-raw file), the results - would probably be completely different.</p> - </note> - <p>A raw file is really a port. 
When writing data to a port, it is - efficient to write a list of binaries. It is not needed to - flatten a deep list before writing. On Unix hosts, scatter output, - which writes a set of buffers in one operation, is used when - possible. In this way <c>write(FD, [Bin1, Bin2 | Bin3])</c> - writes the contents of the binaries without copying the data - at all, except for perhaps deep down in the operating system - kernel.</p> - <p>For raw files, <c>pwrite/2</c> and <c>pread/2</c> are - efficiently implemented. The file driver is called only once for - the whole operation, and the list iteration is done in the file - driver.</p> - <p>The options <c>delayed_write</c> and <c>read_ahead</c> to - <seealso marker="#open/2"><c>open/2</c></seealso> - make the file driver cache data to reduce - the number of operating system calls. The function - <c>create_file/2</c> in the recent example takes 60 seconds - without option <c>delayed_write</c>, which is 2.6 - times slower.</p> - <p>As a bad example, <c>create_file_slow/2</c> - without options <c>raw</c>, <c>binary</c>, and <c>delayed_write</c>, - meaning it calls <c>open(Name, [write])</c>, needs - 1 min 20 seconds for the job, which is 3.5 times slower than - the first example, and 150 times slower than the optimized - <c>create_file/2</c>.</p> - <warning> - <p>If an error occurs when accessing an open file with module - <seealso marker="stdlib:io"><c>io</c></seealso>, - the process handling the file exits. The dead - file process can hang if a process tries to access it later. - This will be fixed in a future release.</p> - </warning> + <p>When writing data it's generally more efficient to write a list of + binaries rather than a list of integers. It is not needed to + flatten a deep list before writing. On Unix hosts, scatter output, + which writes a set of buffers in one operation, is used when + possible. In this way <c>write(FD, [Bin1, Bin2 | Bin3])</c> + writes the contents of the binaries without copying the data + at all, except for perhaps deep down in the operating system + kernel.</p> + <warning> + <p>If an error occurs when accessing an open file with module + <seealso marker="stdlib:io"><c>io</c></seealso>, the process + handling the file exits. The dead file process can hang if a process + tries to access it later. This will be fixed in a future release. + </p> + </warning> </section> <section> diff --git a/lib/kernel/doc/src/gen_sctp.xml b/lib/kernel/doc/src/gen_sctp.xml index 737800c6b1..1e08b25f66 100644 --- a/lib/kernel/doc/src/gen_sctp.xml +++ b/lib/kernel/doc/src/gen_sctp.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>2007</year><year>2016</year> + <year>2007</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -331,6 +331,37 @@ connect(Socket, Ip, Port>, with <anno>SockType</anno> <c>seqpacket</c>, and with reasonably large <seealso marker="inet#option-sndbuf">kernel</seealso> and driver <seealso marker="inet#option-buffer">buffers</seealso>.</p> + <p> + If the socket is in + <seealso marker="#option-active">passive</seealso> + mode data can be received through the + <seealso marker="#recv/1"><c>recv/1,2</c></seealso> + calls. 
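As a hedged sketch of that passive-mode case (the port number, buffer size, and variable names are invented for the example), data is then pulled explicitly:

<code type="none"><![CDATA[
-module(sctp_sketch).
-export([recv_one/0]).

%% Open a one-to-many SCTP socket in passive mode and fetch one message.
recv_one() ->
    {ok, Socket} = gen_sctp:open(2006, [{recbuf, 65536}, {active, false}]),
    ok = gen_sctp:listen(Socket, true),
    %% Data is a binary, a list of bytes, or an SCTP event, depending on
    %% the socket mode and on what arrived.
    {ok, {_FromIP, _FromPort, _AncData, Data}} = gen_sctp:recv(Socket, 5000),
    ok = gen_sctp:close(Socket),
    Data.
]]></code>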
+ </p> + <p> + If the socket is in + <seealso marker="#option-active">active</seealso> + mode data received data is delivered to the controlling process + as messages: + </p> + <code type="none"> +{sctp, <anno>Socket</anno>, FromIP, FromPort, {AncData, Data}} + </code> + <p> + See + <seealso marker="#recv/1"><c>recv/1,2</c></seealso> + for a description of the message fields. + </p> + <note> + <p> + This message format unfortunately differs slightly from the + <seealso marker="gen_udp#open/1"><c>gen_udp</c></seealso> + message format with ancillary data, + and from the + <seealso marker="#recv/1"><c>recv/1,2</c></seealso> + return tuple format. + </p> + </note> </desc> </func> @@ -380,6 +411,19 @@ connect(Socket, Ip, Port>, socket option <seealso marker="#option-sctp_get_peer_addr_info"><c>sctp_get_peer_addr_info</c></seealso>, but this does still not produce the stream number).</p> + <p> + <c><anno>AncData</anno></c> may also contain + <seealso marker="inet#type-ancillary_data"> + ancillary data + </seealso> + from the socket + <seealso marker="#type-option">options</seealso> + <seealso marker="inet#option-recvtos"><c>recvtos</c></seealso>, + <seealso marker="inet#option-recvtclass"><c>recvtclass</c></seealso> + or + <seealso marker="inet#option-recvttl"><c>recvttl</c></seealso>, + if that is supported by the platform for the socket. + </p> <p>The <c><anno>Data</anno></c> received can be a <c>binary()</c> or a <c>list()</c> of bytes (integers in the range 0 through 255) depending on the socket mode, or an SCTP event.</p> @@ -544,12 +588,25 @@ connect(Socket, Ip, Port>, <seealso marker="#recv/1"><c>recv</c></seealso> call to retrieve the available data from the socket.</p> </item> + <item> + <p> + If <c>true|once|N</c> (active modes) + received data or events are sent to the owning process. + See <seealso marker="#open/0"><c>open/0..2</c></seealso> + for the message format. + </p> + </item> <item> - <p>If <c>true</c> (full active mode), the pending data or events are - sent to the owning process.</p> - <p>Notice that this can cause the message queue to overflow, - as there is no way to throttle the sender in this case - (no flow control).</p> + <p> + If <c>true</c> (full active mode) there is no flow control. + </p> + <note> + <p> + Note that this can cause the message queue to overflow + causing for example the virtual machine + to run out of memory and crash. + </p> + </note> </item> <item> <p>If <c>once</c>, only one message is automatically placed diff --git a/lib/kernel/doc/src/gen_tcp.xml b/lib/kernel/doc/src/gen_tcp.xml index 070782e1f3..24d63693fd 100644 --- a/lib/kernel/doc/src/gen_tcp.xml +++ b/lib/kernel/doc/src/gen_tcp.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>1997</year><year>2017</year> + <year>1997</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -51,6 +51,7 @@ server() -> {ok, Sock} = gen_tcp:accept(LSock), {ok, Bin} = do_recv(Sock, []), ok = gen_tcp:close(Sock), + ok = gen_tcp:close(LSock), Bin. do_recv(Sock, Bs) -> @@ -69,6 +70,32 @@ do_recv(Sock, Bs) -> <name name="option"/> </datatype> <datatype> + <name name="pktoptions_value"/> + <desc> + <p> + If the platform implements the IPv4 option + <c>IP_PKTOPTIONS</c>, or the IPv6 option + <c>IPV6_PKTOPTIONS</c> or <c>IPV6_2292PKTOPTIONS</c> for the socket + this value is returned from + <seealso marker="inet#getopts/2"><c>inet:getopts/2</c></seealso> + when called with the option name + <seealso marker="#type-option_name"><c>pktoptions</c></seealso>. 
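A hedged sketch of reading that value (the module name, host and port arguments, and the exact shape of the returned list are assumptions; the option is only available on platforms implementing IP_PKTOPTIONS, mainly Linux):

<code type="none"><![CDATA[
-module(pktopts_sketch).
-export([peer_tos/2]).

%% Assumes the socket is opened with {recvtos, true} and that at least
%% one segment has been received before pktoptions is queried.
peer_tos(Host, Port) ->
    {ok, Sock} = gen_tcp:connect(Host, Port,
                                 [binary, {recvtos, true}, {active, false}]),
    {ok, _Data} = gen_tcp:recv(Sock, 0),
    {ok, [{pktoptions, AncData}]} = inet:getopts(Sock, [pktoptions]),
    ok = gen_tcp:close(Sock),
    AncData.                                   %% for example [{tos, 0}]
]]></code>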
+ </p> + <note> + <p> + This option appears to be VERY Linux specific, + and its existence in future Linux kernel versions + is also worrying since the option is part of RFC 2292 + which is since long (2003) obsoleted by RFC 3542 + that <em>explicitly</em> removes this possibility to get + packet information from a stream socket. + For comparision: it has existed in FreeBSD but is now removed, + at least since FreeBSD 10. + </p> + </note> + </desc> + </datatype> + <datatype> <name name="option_name"/> </datatype> <datatype> @@ -309,9 +336,9 @@ do_recv(Sock, Bs) -> <seealso marker="inet#setopts/2"><c>inet:setopts/2</c></seealso>. </p></item> </taglist> - <p>The returned socket <c><anno>ListenSocket</anno></c> can only be - used in calls to - <seealso marker="#accept/1"><c>accept/1,2</c></seealso>.</p> + <p>The returned socket <c><anno>ListenSocket</anno></c> should be used + in calls to <seealso marker="#accept/1"><c>accept/1,2</c></seealso> to + accept incoming connection requests.</p> <note> <p>The default values for options specified to <c>listen</c> can be affected by the Kernel configuration parameter diff --git a/lib/kernel/doc/src/gen_udp.xml b/lib/kernel/doc/src/gen_udp.xml index f79566ef71..840ca3c188 100644 --- a/lib/kernel/doc/src/gen_udp.xml +++ b/lib/kernel/doc/src/gen_udp.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>1997</year><year>2016</year> + <year>1997</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -147,7 +147,21 @@ at the opened port, if the socket is in an active mode, the packets are delivered as messages to the controlling process:</p> <code type="none"> -{udp, Socket, IP, InPortNo, Packet}</code> +{udp, Socket, IP, InPortNo, Packet} % Without ancillary data +{udp, Socket, IP, InPortNo, AncData, Packet} % With ancillary data + </code> + <p> + The message contains an <c>AncData</c> field + if any of the socket + <seealso marker="#type-option">options</seealso> + <seealso marker="inet#option-recvtos"><c>recvtos</c></seealso>, + <seealso marker="inet#option-recvtclass"><c>recvtclass</c></seealso> + or + <seealso marker="inet#option-recvttl"><c>recvttl</c></seealso> + are active, otherwise it does not. + </p> + <p> + </p> <p>If the socket is not in an active mode, data can be retrieved through the <seealso marker="#recv/2"><c>recv/2,3</c></seealso> calls. @@ -179,9 +193,22 @@ <name name="recv" arity="3"/> <fsummary>Receive a packet from a passive socket.</fsummary> <desc> - <p>Receives a packet from a socket in passive mode. Optional parameter + <p> + Receives a packet from a socket in passive mode. Optional parameter <c><anno>Timeout</anno></c> specifies a time-out in milliseconds. - Defaults to <c>infinity</c>.</p> + Defaults to <c>infinity</c>. + </p> + <p> + If any of the socket + <seealso marker="#type-option">options</seealso> + <seealso marker="inet#option-recvtos"><c>recvtos</c></seealso>, + <seealso marker="inet#option-recvtclass"><c>recvtclass</c></seealso> + or + <seealso marker="inet#option-recvttl"><c>recvttl</c></seealso> + are active, the <c><anno>RecvData</anno></c> tuple contains an + <c><anno>AncData</anno></c> field, + otherwise it does not. + </p> </desc> </func> diff --git a/lib/kernel/doc/src/heart.xml b/lib/kernel/doc/src/heart.xml index 5b5b71e521..ad1a2ffeb9 100644 --- a/lib/kernel/doc/src/heart.xml +++ b/lib/kernel/doc/src/heart.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>1996</year><year>2016</year> + <year>1996</year><year>2018</year> <holder>Ericsson AB. 
All Rights Reserved.</holder> </copyright> <legalnotice> @@ -59,8 +59,9 @@ <pre> % <input>erl -heart -env HEART_BEAT_TIMEOUT 30 ...</input></pre> <p>The value (in seconds) must be in the range 10 < X <= 65535.</p> - <p>Notice that if the system clock is adjusted with - more than <c>HEART_BEAT_TIMEOUT</c> seconds, <c>heart</c> + <p>When running on OSs lacking support for monotonic time, + <c>heart</c> is susceptible to system clock adjustments of more than + <c>HEART_BEAT_TIMEOUT</c> seconds. When this happens, <c>heart</c> times out and tries to reboot the system. This can occur, for example, if the system clock is adjusted automatically by use of the Network Time Protocol (NTP).</p> diff --git a/lib/kernel/doc/src/inet.xml b/lib/kernel/doc/src/inet.xml index b71e8a1e5d..87b08e4e36 100644 --- a/lib/kernel/doc/src/inet.xml +++ b/lib/kernel/doc/src/inet.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>1997</year><year>2017</year> + <year>1997</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -177,6 +177,100 @@ fe80::204:acff:fe17:bf38 </desc> </datatype> <datatype> + <name name="ancillary_data"/> + <desc> + <p> + Ancillary data received with the data packet + or read with the socket option + <seealso marker="gen_tcp#type-pktoptions_value"> + <c>pktoptions</c> + </seealso> + from a TCP socket. + </p> + <p> + The value(s) correspond to the currently active socket + <seealso marker="#type-socket_setopt">options</seealso> + <seealso marker="inet#option-recvtos"><c>recvtos</c></seealso>, + <seealso marker="inet#option-recvtclass"><c>recvtclass</c></seealso> + and + <seealso marker="inet#option-recvttl"><c>recvttl</c></seealso>. + </p> + </desc> + </datatype> + <datatype> + <name name="getifaddrs_ifopts"/> + <desc> + <p> + Interface address description list returned from + <seealso marker="#getifaddrs/0"><c>getifaddrs/0,1</c></seealso> + for a named interface, translated from the returned + data of the POSIX API function <c>getaddrinfo()</c>. + </p> + <p> + <c><anno>Hwaddr</anno></c> is hardware dependent, + for example, on Ethernet interfaces it is + the 6-byte Ethernet address (MAC address (EUI-48 address)). + </p> + <p> + The tuples <c>{addr,<anno>Addr</anno>}</c>, + <c>{netmask,<anno>Netmask</anno>}</c>, and possibly + <c>{broadaddr,<anno>Broadaddr</anno>}</c> or + <c>{dstaddr,<anno>Dstaddr</anno>}</c> + are repeated in the list + if the interface has got multiple addresses. + An interface may have multiple <c>{flag,_}</c> tuples + for example if it has different flags + for different address families. + Multiple <c>{hwaddr,<anno>Hwaddr</anno>}</c> tuples + is hard to say anything definite about, though. + The tuple <c>{flag,<anno>Flags</anno>}</c> is mandatory, + all others are optional. + </p> + <p> + Do not rely too much on the order + of <c><anno>Flags</anno></c> atoms + or the <c><anno>Ifopt</anno></c> tuples. + There are however some rules: + </p> + <list type="bulleted"> + <item><p> + A <c>{flag,_}</c> tuple applies to all other tuples that follow. + </p></item> + <item><p> + Immediately after <c>{addr,_}</c> follows <c>{netmask,_}</c>. + </p></item> + <item><p> + Immediately thereafter may <c>{broadaddr,_}</c> follow + if <c>broadcast</c> is member of <c><anno>Flags</anno></c>, + or <c>{dstaddr,_}</c> if <c>pointtopoint</c> + is member of <c><anno>Flags</anno></c>. + Both <c>{dstaddr,_}</c> and <c>{broadaddr,_}</c> + does not occur for the same <c>{addr,_}</c>. 
+ </p></item> + <item><p> + Any <c>{netmask,_}</c>, <c>{broadaddr,_}</c>, or + <c>{dstaddr,_}</c> tuples that follow an + <c>{addr,<anno>Addr</anno>}</c> + tuple concerns the address <c><anno>Addr</anno></c>. + </p></item> + </list> + <p> + The tuple <c>{hwaddr,_}</c> is not returned on Solaris, as the + hardware address historically belongs to the link layer + and it is not returned by the Solaris API function + <c>getaddrinfo()</c>. + </p> + <warning> + <p> + On Windows, the data is fetched from different + OS API functions, so the <c><anno>Netmask</anno></c> + and <c><anno>Broadaddr</anno></c> values may be calculated, + just as some <c><anno>Flags</anno></c> values. + </p> + </warning> + </desc> + </datatype> + <datatype> <name name="posix"/> <desc> <p>An atom that is named from the POSIX error codes used in Unix, @@ -197,6 +291,9 @@ fe80::204:acff:fe17:bf38 <datatype> <name name="address_family"/> </datatype> + <datatype> + <name name="socket_protocol"/> + </datatype> </datatypes> <funcs> @@ -222,11 +319,18 @@ fe80::204:acff:fe17:bf38 <name name="get_rc" arity="0"/> <fsummary>Return a list of IP configuration parameters.</fsummary> <desc> - <p>Returns the state of the <c>Inet</c> configuration database in + <p> + Returns the state of the <c>Inet</c> configuration database in form of a list of recorded configuration parameters. For more information, see <seealso marker="erts:inet_cfg">ERTS User's Guide: Inet Configuration</seealso>. - Only parameters with other than default values are returned.</p> + </p> + <p> + Only actual parameters with other than default values + are returned, for example not directives that specify + other sources for configuration parameters nor + directives that clear parameters. + </p> </desc> </func> @@ -267,9 +371,7 @@ fe80::204:acff:fe17:bf38 <p>Returns a <c>hostent</c> record for the host with the specified hostname.</p> <p>If resolver option <c>inet6</c> is <c>true</c>, - an IPv6 address is looked up. If that fails, - the IPv4 address is looked up and returned on - IPv6-mapped IPv4 format.</p> + an IPv6 address is looked up.</p> </desc> </func> @@ -295,38 +397,64 @@ fe80::204:acff:fe17:bf38 <name name="getifaddrs" arity="0"/> <fsummary>Return a list of interfaces and their addresses.</fsummary> <desc> - <p>Returns a list of 2-tuples containing interface names and the - interface addresses. <c><anno>Ifname</anno></c> is a Unicode string. - <c><anno>Hwaddr</anno></c> is hardware dependent, for example, on - Ethernet interfaces - it is the 6-byte Ethernet address (MAC address (EUI-48 address)).</p> - <p>The tuples <c>{addr,<anno>Addr</anno>}</c>, <c>{netmask,_}</c>, and - <c>{broadaddr,_}</c> are repeated in the result list if the interface - has multiple addresses. If you come across an interface with - multiple <c>{flag,_}</c> or <c>{hwaddr,_}</c> tuples, you have - a strange interface or possibly a bug in this function. The tuple - <c>{flag,_}</c> is mandatory, all others are optional.</p> - <p>Do not rely too much on the order of <c><anno>Flag</anno></c> atoms - or <c><anno>Ifopt</anno></c> tuples. 
There are however some rules:</p> - <list type="bulleted"> - <item><p>Immediately after - <c>{addr,_}</c> follows <c>{netmask,_}</c>.</p></item> - <item><p>Immediately thereafter follows <c>{broadaddr,_}</c> if flag - <c>broadcast</c> is <em>not</em> set and flag - <c>pointtopoint</c> <em>is</em> set.</p></item> - <item><p>Any <c>{netmask,_}</c>, <c>{broadaddr,_}</c>, or - <c>{dstaddr,_}</c> tuples that follow an <c>{addr,_}</c> - tuple concerns that address.</p></item> - </list> - <p>The tuple <c>{hwaddr,_}</c> is not returned on Solaris, as the - hardware address historically belongs to the link layer and only - the superuser can read such addresses.</p> - <warning> - <p>On Windows, the data is fetched from different OS API functions, - so the <c><anno>Netmask</anno></c> and <c><anno>Broadaddr</anno></c> - values can be calculated, just as some <c><anno>Flag</anno></c> - values. Report flagrant bugs.</p> - </warning> + <p> + Returns a list of 2-tuples containing interface names and + the interfaces' addresses. <c><anno>Ifname</anno></c> + is a Unicode string and + <c><anno>Ifopts</anno></c> is a list of + interface address description tuples. + </p> + <p> + The interface address description tuples + are documented under the type of the + <seealso marker="#type-getifaddrs_ifopts"> + <c><anno>Ifopts</anno></c> + </seealso> + value. + </p> + </desc> + </func> + + <func> + <name>getifaddrs(Opts) -> + {ok, [{Ifname, Ifopts}]} | {error, Posix} + </name> + <fsummary>Return a list of interfaces and their addresses.</fsummary> + <type> + <v> + Opts = [{netns, Namespace}] + </v> + <v> + Namespace = + <seealso marker="file#type-filename_all"> + file:filename_all() + </seealso> + </v> + <v>Ifname = string()</v> + <v> + Ifopts = + <seealso marker="#type-getifaddrs_ifopts"> + getifaddrs_ifopts() + </seealso> + </v> + <v>Posix = <seealso marker="#type-posix">posix()</seealso></v> + </type> + <desc> + <p> + The same as + <seealso marker="#getifaddrs/0"><c>getifaddrs/0</c></seealso> + but the <c>Option</c> + <c>{netns, Namespace}</c> sets a network namespace + for the OS call, on platforms that supports that feature. + </p> + <p> + See the socket option + <seealso marker="#option-netns"> + <c>{netns, Namespace}</c> + </seealso> + under + <seealso marker="#setopts/2"><c>setopts/2</c></seealso>. + </p> </desc> </func> @@ -336,7 +464,11 @@ fe80::204:acff:fe17:bf38 <desc> <p>Gets one or more options for a socket. For a list of available options, see - <seealso marker="#setopts/2"><c>setopts/2</c></seealso>.</p> + <seealso marker="#setopts/2"><c>setopts/2</c></seealso>. + See also the description for the type + <seealso marker="gen_tcp#type-pktoptions_value"> + <c>gen_tcp:pktoptions_value()</c> + </seealso>.</p> <p>The number of elements in the returned <c><anno>OptionValues</anno></c> list does not necessarily correspond to the number of options @@ -352,7 +484,7 @@ fe80::204:acff:fe17:bf38 socket options not (explicitly) supported by the emulator. 
The use of raw socket options makes the code non-portable, but allows the Erlang programmer to take advantage of unusual features - present on the current platform.</p> + present on a particular platform.</p> <p><c>RawOptReq</c> consists of tag <c>raw</c> followed by the protocol level, the option number, and either a binary or the size, in bytes, of the @@ -454,6 +586,61 @@ get_tcpi_sacked(Sock) -> </func> <func> + <name name="i" arity="0" /> + <name name="i" arity="1" /> + <name name="i" arity="2" /> + <fsummary>Displays information and statistics about sockets on the terminal</fsummary> + <desc> + <p> + Lists all TCP, UDP and SCTP sockets, including those that the Erlang runtime system uses as well as + those created by the application. + </p> + <p> + The following options are available: + </p> + + <taglist> + <tag><c>port</c></tag> + <item> + <p>The internal index of the port.</p> + </item> + <tag><c>module</c></tag> + <item> + <p>The callback module of the socket.</p> + </item> + <tag><c>recv</c></tag> + <item> + <p>Number of bytes received by the socket.</p> + </item> + <tag><c>sent</c></tag> + <item> + <p>Number of bytes sent from the socket.</p> + </item> + <tag><c>owner</c></tag> + <item> + <p>The socket owner process.</p> + </item> + <tag><c>local_address</c></tag> + <item> + <p>The local address of the socket.</p> + </item> + <tag><c>foreign_address</c></tag> + <item> + <p>The address and port of the other end of the connection.</p> + </item> + <tag><c>state</c></tag> + <item> + <p>The connection state.</p> + </item> + <tag><c>type</c></tag> + <item> + <p>STREAM or DGRAM or SEQPACKET.</p> + </item> + </taglist> + </desc> + </func> + + <func> <name name="ntoa" arity="1" /> <fsummary>Convert IPv6/IPV4 address to ASCII.</fsummary> <desc> @@ -517,6 +704,19 @@ get_tcpi_sacked(Sock) -> </func> <func> + <name name="ipv4_mapped_ipv6_address" arity="1" /> + <fsummary>Convert to and from IPv4-mapped IPv6 address.</fsummary> + <desc> + <p> + Convert an IPv4 address to an IPv4-mapped IPv6 address + or the reverse. When converting from an IPv6 address + all but the 2 low words are ignored so this function also + works on some other types of addresses than IPv4-mapped. + </p> + </desc> + </func> + + <func> <name name="parse_strict_address" arity="1" /> <fsummary>Parse an IPv4 or IPv6 address strict.</fsummary> <desc> @@ -658,22 +858,23 @@ get_tcpi_sacked(Sock) -> </item> <tag><c>{buffer, Size}</c></tag> <item> - <p>The size of the user-level software buffer used by - the driver. - Not to be confused with options <c>sndbuf</c> + <p>The size of the user-level buffer used by + the driver. Not to be confused with options <c>sndbuf</c> and <c>recbuf</c>, which correspond to the - Kernel socket buffers. It is recommended - to have <c>val(buffer) >= max(val(sndbuf),val(recbuf))</c> to + Kernel socket buffers. For TCP it is recommended + to have <c>val(buffer) >= val(recbuf)</c> to avoid performance issues because of unnecessary copying. + For UDP the same recommendation applies, but the max should + not be larger than the MTU of the network path. <c>val(buffer)</c> is automatically set to the above - maximum when values <c>sndbuf</c> or <c>recbuf</c> are set. - However, as the sizes set for <c>sndbuf</c> and <c>recbuf</c> + maximum when <c>recbuf</c> is set. 
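A small sketch of this behavior and of the getopts/2 check recommended below, assuming Sock is an already opened TCP socket (the sizes are invented and OS-dependent):

    ok = inet:setopts(Sock, [{recbuf, 131072}]),
    {ok, Values} = inet:getopts(Sock, [recbuf, buffer]),
    %% Values might be [{recbuf,131072},{buffer,131072}], but the OS may
    %% adjust the recbuf value, so inspect rather than assume.
    Values.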
+ However, as the size set for <c>recbuf</c> usually become larger, you are encouraged to use <seealso marker="#getopts/2"><c>getopts/2</c></seealso> to analyze the behavior of your operating system.</p> <p>Note that this is also the maximum amount of data that can be - received from a single recv call. If you are using higher than - normal MTU consider setting buffer higher.</p> + received from a single recv call. If you are using higher than + normal MTU consider setting buffer higher.</p> </item> <tag><c>{delay_send, Boolean}</c></tag> <item> @@ -848,20 +1049,29 @@ get_tcpi_sacked(Sock) -> </item> <tag><c>{mode, Mode :: binary | list}</c></tag> <item> - <p>Received <c>Packet</c> is delivered as defined by <c>Mode</c>. + <p> + Received <c>Packet</c> is delivered as defined by <c>Mode</c>. </p> </item> - <tag><c>{netns, Namespace :: file:filename_all()}</c></tag> + <tag> + <marker id="option-netns"/> + <c>{netns, Namespace :: file:filename_all()}</c> + </tag> <item> - <p>Sets a network namespace for the socket. Parameter + <p> + Sets a network namespace for the socket. Parameter <c>Namespace</c> is a filename defining the namespace, for example, <c>"/var/run/netns/example"</c>, typically created by command <c>ip netns add example</c>. This option must be used in a function call that creates a socket, that is, <seealso marker="gen_tcp#connect/3"><c>gen_tcp:connect/3,4</c></seealso>, <seealso marker="gen_tcp#listen/2"><c>gen_tcp:listen/2</c></seealso>, - <seealso marker="gen_udp#open/1"><c>gen_udp:open/1,2</c></seealso>, or - <seealso marker="gen_sctp#open/0"><c>gen_sctp:open/0,1,2</c></seealso>.</p> + <seealso marker="gen_udp#open/1"><c>gen_udp:open/1,2</c></seealso> + or + <seealso marker="gen_sctp#open/0"><c>gen_sctp:open/0,1,2</c></seealso>, + and also + <seealso marker="#getifaddrs/1"><c>getifaddrs/1</c></seealso>. + </p> <p>This option uses the Linux-specific syscall <c>setns()</c>, such as in Linux kernel 3.0 or later, and therefore only exists when the runtime system @@ -937,6 +1147,18 @@ setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp</code> is turned on for the socket, which means that also small amounts of data are sent immediately.</p> </item> + <tag><c>{nopush, Boolean}</c>(TCP/IP sockets)</tag> + <item> + <p>This translates to <c>TCP_NOPUSH</c> on BSD and + to <c>TCP_CORK</c> on Linux.</p> + <p>If <c>Boolean == true</c>, the corresponding option + is turned on for the socket, which means that small + amounts of data are accumulated until a full MSS-worth + of data is available or this option is turned off.</p> + <p>Note that while <c>TCP_NOPUSH</c> socket option is available on OSX, its semantics + is very different (e.g., unsetting it does not cause immediate send + of accumulated data). Hence, <c>nopush</c> option is intentionally ignored on OSX.</p> + </item> <tag><c>{packet, PacketType}</c>(TCP/IP sockets)</tag> <item> <p><marker id="packet"/>Defines the type of packets to use for a socket. @@ -1038,6 +1260,100 @@ setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp</code> the socket. 
You are encouraged to use <seealso marker="#getopts/2"><c>getopts/2</c></seealso> to retrieve the size set by your operating system.</p> + <marker id="option-recvtclass"></marker> + </item> + <tag><c>{recvtclass, Boolean}</c></tag> + <item> + <p> + If set to <c>true</c> activates returning the received + <c>TCLASS</c> value on platforms that implements + the protocol <c>IPPROTO_IPV6</c> + option <c>IPV6_RECVTCLASS</c> or <c>IPV6_2292RECVTCLASS</c> + for the socket. + The value is returned as a <c>{tclass,TCLASS}</c> tuple + regardless of if the platform returns an <c>IPV6_TCLASS</c> + or an <c>IPV6_RECVTCLASS</c> CMSG value. + </p> + <p> + For packet oriented sockets that supports receiving + ancillary data with the payload data + (<c>gen_udp</c> and <c>gen_sctp</c>), + the <c>TCLASS</c> value is returned + in an extended return tuple contained in an + <seealso marker="inet#type-ancillary_data"> + ancillary data + </seealso> + list. + For stream oriented sockets (<c>gen_tcp</c>) + the only way to get the <c>TCLASS</c> + value is if the platform supports the + <seealso marker="gen_tcp#type-pktoptions_value"> + <c>pktoptions</c> + </seealso> + option. + </p> + <marker id="option-recvtos"></marker> + </item> + <tag><c>{recvtos, Boolean}</c></tag> + <item> + <p> + If set to <c>true</c> activates returning the received + <c>TOS</c> value on platforms that implements + the protocol <c>IPPROTO_IP</c> option <c>IP_RECVTOS</c> + for the socket. + The value is returned as a <c>{tos,TOS}</c> tuple + regardless of if the platform returns an <c>IP_TOS</c> + or an <c>IP_RECVTOS</c> CMSG value. + </p> + <p> + For packet oriented sockets that supports receiving + ancillary data with the payload data + (<c>gen_udp</c> and <c>gen_sctp</c>), + the <c>TOS</c> value is returned + in an extended return tuple contained in an + <seealso marker="inet#type-ancillary_data"> + ancillary data + </seealso> + list. + For stream oriented sockets (<c>gen_tcp</c>) + the only way to get the <c>TOS</c> + value is if the platform supports the + <seealso marker="gen_tcp#type-pktoptions_value"> + <c>pktoptions</c> + </seealso> + option. + </p> + <marker id="option-recvttl"></marker> + </item> + <tag><c>{recvttl, Boolean}</c></tag> + <item> + <p> + If set to <c>true</c> activates returning the received + <c>TTL</c> value on platforms that implements + the protocol <c>IPPROTO_IP</c> option <c>IP_RECVTTL</c> + for the socket. + The value is returned as a <c>{ttl,TTL}</c> tuple + regardless of if the platform returns an <c>IP_TTL</c> + or an <c>IP_RECVTTL</c> CMSG value. + </p> + <p> + For packet oriented sockets that supports receiving + ancillary data with the payload data + (<c>gen_udp</c> and <c>gen_sctp</c>), + the <c>TTL</c> value is returned + in an extended return tuple contained in an + <seealso marker="inet#type-ancillary_data"> + ancillary data + </seealso> + list. + For stream oriented sockets (<c>gen_tcp</c>) + the only way to get the <c>TTL</c> + value is if the platform supports the + <seealso marker="gen_tcp#type-pktoptions_value"> + <c>pktoptions</c> + </seealso> + option. + </p> </item> <tag><c>{reuseaddr, Boolean}</c></tag> <item> @@ -1073,7 +1389,7 @@ setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp</code> <seealso marker="gen_tcp#recv/2"><c>gen_tcp:recv/2</c></seealso> gets <c>{error, closed}</c>. 
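For the recvtos, recvtclass, and recvttl options described above, a hedged sketch with a packet oriented socket (the port number and values are invented; platform support is required):

    {ok, S} = gen_udp:open(0, [binary, {active, true},
                               {recvtos, true}, {recvttl, true}]),
    receive
        {udp, S, FromIP, FromPort, AncData, Packet} ->
            %% AncData is an ancillary_data() list, for example [{tos,0},{ttl,64}]
            {FromIP, FromPort, AncData, Packet}
    end.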
In active mode, the controlling process receives a - <c>{tcp_close, Socket}</c> message, indicating that the + <c>{tcp_closed, Socket}</c> message, indicating that the peer has closed the connection.</p> <p>Setting this option to <c>true</c> allows you to distinguish between a connection that was closed normally, @@ -1207,7 +1523,7 @@ inet:setopts(Sock,[{raw,6,8,<<30:32/native>>}]),]]></code> For one-to-many style sockets, the special value <c>0</c> is defined to mean that the returned addresses must be without any particular association. - How different SCTP implementations interprets this varies somewhat. + How different SCTP implementations interpret this varies somewhat. </p> </desc> </func> diff --git a/lib/kernel/doc/src/inet_res.xml b/lib/kernel/doc/src/inet_res.xml index 4ada4203c0..351d86a93a 100644 --- a/lib/kernel/doc/src/inet_res.xml +++ b/lib/kernel/doc/src/inet_res.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>2009</year><year>2015</year> + <year>2009</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -130,7 +130,7 @@ dns_header() = DnsHeader inet_dns:header(DnsHeader) -> [ {id, integer()} | {qr, boolean()} - | {opcode, 'query' | iquery | status | integer()} + | {opcode, query | iquery | status | integer()} | {aa, boolean()} | {tc, boolean()} | {rd, boolean()} @@ -230,9 +230,7 @@ inet_dns:record_type(_) -> undefined.</pre> <seealso marker="#getbyname/2"><c>getbyname/2,3</c></seealso>. </p> <p>If resolver option <c>inet6</c> is <c>true</c>, - an IPv6 address is looked up. If that fails, - the IPv4 address is looked up and returned on - IPv6-mapped IPv4 format.</p> + an IPv6 address is looked up.</p> </desc> </func> diff --git a/lib/kernel/doc/src/introduction_chapter.xml b/lib/kernel/doc/src/introduction_chapter.xml new file mode 100644 index 0000000000..d02b1a2ee5 --- /dev/null +++ b/lib/kernel/doc/src/introduction_chapter.xml @@ -0,0 +1,63 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE chapter SYSTEM "chapter.dtd"> + +<chapter> + <header> + <copyright> + <year>2017</year><year>2018</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + </legalnotice> + + <title>Introduction</title> + <prepared></prepared> + <responsible></responsible> + <docno></docno> + <approved></approved> + <checked></checked> + <date></date> + <rev></rev> + <file>introduction.xml</file> + </header> + + <section> + <title>Scope</title> + <p>The Kernel application has all the code necessary to run + the Erlang runtime system: file servers, code servers, + and so on.</p> + <p>The Kernel application is the first application started. It is + mandatory in the sense that the minimal system based on + Erlang/OTP consists of Kernel and STDLIB. 
Kernel + contains the following functional areas:</p> + <list type="bulleted"> + <item>Start, stop, supervision, configuration, and distribution of applications</item> + <item>Code loading</item> + <item>Logging</item> + <item>Global name service</item> + <item>Supervision of Erlang/OTP</item> + <item>Communication with sockets</item> + <item>Operating system interface</item> + </list> + </section> + + <section> + <title>Prerequisites</title> + <p>It is assumed that the reader is familiar with the Erlang programming + language.</p> + </section> +</chapter> + + diff --git a/lib/kernel/doc/src/kernel_app.xml b/lib/kernel/doc/src/kernel_app.xml index e5ac031539..15dbdb47dc 100644 --- a/lib/kernel/doc/src/kernel_app.xml +++ b/lib/kernel/doc/src/kernel_app.xml @@ -4,7 +4,7 @@ <appref> <header> <copyright> - <year>1996</year><year>2017</year> + <year>1996</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -42,7 +42,6 @@ <item>Start, stop, supervision, configuration, and distribution of applications</item> <item>Code loading</item> <item>Logging</item> - <item>Error logging</item> <item>Global name service</item> <item>Supervision of Erlang/OTP</item> <item>Communication with sockets</item> @@ -51,10 +50,13 @@ </description> <section> - <title>Error Logger Event Handlers</title> - <p>Two standard error logger event handlers are defined in - the Kernel application. These are described in - <seealso marker="error_logger"><c>error_logger(3)</c></seealso>.</p> + <title>Logger Handlers</title> + <p>Two standard logger handlers are defined in + the Kernel application. These are described in the + <seealso marker="logger_chapter">Kernel User's Guide</seealso>, + and in the <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso> + and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c> + </seealso> manual pages.</p> </section> <section> @@ -113,26 +115,12 @@ </section> <section> + <marker id="configuration"/> <title>Configuration</title> <p>The following configuration parameters are defined for the Kernel application. For more information about configuration parameters, see file <seealso marker="app"><c>app(4)</c></seealso>.</p> <taglist> - <tag><c>browser_cmd = string() | {M,F,A}</c></tag> - <item> - <p>When pressing the <em>Help</em> button in a tool such as Debugger, - the help text (an HTML file <c>File</c>) is by default - displayed in a Netscape browser, which is required to be - operational. This parameter can be used to change the command for - how to display the help text if another browser than Netscape - is preferred, or if another platform than Unix or Windows is - used.</p> - <p>If set to a string <c>Command</c>, the command - <c>"Command File"</c> is evaluated using - <seealso marker="os#cmd/1"><c>os:cmd/1</c></seealso>.</p> - <p>If set to a module-function-args tuple, <c>{M,F,A}</c>, - the call <c>apply(M,F,[File|A])</c> is evaluated.</p> - </item> <tag><c>distributed = [Distrib]</c></tag> <item> <p>Specifies which applications that are distributed and on which @@ -176,65 +164,74 @@ <p>Permissions are described in <seealso marker="application#permit/2"><c>application:permit/2</c></seealso>.</p> </item> - <tag><c>error_logger = Value</c></tag> - <item> - <p><c>Value</c> is one of:</p> - <taglist> - <tag><c>tty</c></tag> - <item><p>Installs the standard event handler, which prints error - reports to <c>stdio</c>. 
This is the default option.</p></item> - <tag><c>{file, FileName}</c></tag> - <item><p>Installs the standard event handler, which prints error - reports to file <c>FileName</c>, where <c>FileName</c> - is a string. The file is opened with encoding UTF-8.</p></item> - <tag><c>false</c></tag> - <item> - <p>No standard event handler is installed, but - the initial, primitive event handler is kept, printing - raw event messages to <c>tty</c>.</p> - </item> - <tag><c>silent</c></tag> - <item> - <p>Error logging is turned off.</p> - </item> - </taglist> + <tag><marker id="logger"/><c>logger = [Config]</c></tag> + <item> + <p>Specifies the configuration + for <seealso marker="logger">Logger</seealso>, except the + primary log level, which is specified + with <seealso marker="#logger_level"><c>logger_level</c></seealso>, + and the compatibility + with <seealso marker="sasl:error_logging">SASL Error + Logging</seealso>, which is specified + with <seealso marker="#logger_sasl_compatible"> + <c>logger_sasl_compatible</c></seealso>.</p> + <p>The <c>logger </c> parameter is described in + section <seealso marker="logger_chapter#logger_parameter"> + Logging</seealso> in the Kernel User's Guide.</p> + </item> + <tag><marker id="logger_level"/><c>logger_level = Level</c></tag> + <item> + <p>Specifies the primary log level for Logger. Log events with + the same, or a more severe level, pass through the primary + log level check. See + section <seealso marker="logger_chapter">Logging</seealso> + in the Kernel User's Guide for more information about Logger + and log levels.</p> + <p><c>Level = emergency | alert | critical | error | warning | + notice | info | debug | all | none</c></p> + <p>To change the primary log level at runtime, use + <seealso marker="logger#set_primary_config/2"> + <c>logger:set_primary_config(level, Level)</c></seealso>.</p> + <p>Defaults to <c>notice</c>.</p> + </item> + <tag><marker id="logger_sasl_compatible"/> + <c>logger_sasl_compatible = true | false</c></tag> + <item> + <p>Specifies if Logger behaves backwards compatible with the + SASL error logging functionality from releases prior to + Erlang/OTP 21.0.</p> + <p>If this parameter is set to <c>true</c>, the default Logger + handler does not log any progress-, crash-, or supervisor + reports. If the SASL application is then started, it adds a + Logger handler named <c>sasl</c>, which logs these events + according to values of the SASL configuration + parameter <c>sasl_error_logger</c> + and <c>sasl_errlog_type</c>.</p> + <p>See section + <seealso marker="sasl:sasl_app#deprecated_error_logger_config"> + Deprecated Error Logger Event Handlers and + Configuration</seealso> in the sasl(6) manual page for + information about the SASL configuration parameters.</p> + <p>See section <seealso marker="sasl:error_logging">SASL Error + Logging</seealso> in the SASL User's Guide, and + section <seealso marker="logger_chapter#compatibility">Backwards + Compatibility with error_logger</seealso> in the Kernel + User's Guide for information about the SASL error logging + functionality, and how Logger can be backwards compatible + with this.</p> + <p>Defaults to <c>false</c>.</p> + <note> + <p>If this parameter is set to <c>true</c>, + <c>sasl_errlog_type</c> indicates that progress reports + shall be logged, and the configured primary log level + is <c>notice</c> or more severe, then SASL automatically + sets the primary log level to <c>info</c>. 
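For reference, a minimal sys.config sketch combining the two Kernel parameters described above (the chosen values are only an example):

    [{kernel,
      [{logger_level, debug},
       {logger_sasl_compatible, false}]}].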
That is, this + setting can potentially overwrite the value of the Kernel + configuration parameter <c>logger_level</c>. This is to + allow progress reports, which have log level <c>info</c>, + to be forwarded to the handlers.</p> + </note> </item> - <tag><c>error_logger_format_depth = Depth</c></tag> - <item> - <marker id="error_logger_format_depth"></marker> - <p>Can be used to limit the size of the - formatted output from the error logger event handlers.</p> - - <note><p>This configuration parameter was introduced in OTP 18.1 - and is experimental. Based on user feedback, it - can be changed or improved in future releases, for example, - to gain better control over how to limit the size of the - formatted output. We have no plans to remove this - new feature entirely, unless it turns out to be - useless.</p></note> - - <p><c>Depth</c> is a positive integer representing the maximum - depth to which terms are printed by the error logger event - handlers included in OTP. This - configuration parameter is used by the two event handlers - defined by the Kernel application and the two event - handlers in the SASL application. - (If you have implemented your own error handlers, this configuration - parameter has no effect on them.)</p> - - <p><c>Depth</c> is used as follows: Format strings - passed to the event handlers are rewritten. - The format controls <c>~p</c> and <c>~w</c> are replaced with - <c>~P</c> and <c>~W</c>, respectively, and <c>Depth</c> is - used as the depth parameter. For details, see - <seealso marker="stdlib:io#format/2"><c>io:format/2</c></seealso> - in STDLIB.</p> - - <note><p>A reasonable starting value for <c>Depth</c> is - <c>30</c>. We recommend to test crashing various processes in your - application, examine the logs from the crashes, and then - increase or decrease the value.</p></note> - </item> <tag><c>global_groups = [GroupTuple]</c></tag> <item> <marker id="global_groups"></marker> @@ -286,9 +283,8 @@ </item> <tag><c>inet_parse_error_log = silent</c></tag> <item> - <p>If set, no - <c>error_logger</c> messages are generated when erroneous - lines are found and skipped in the various Inet configuration + <p>If set, no log events are issued when erroneous lines are + found and skipped in the various Inet configuration files.</p> </item> <tag><c>inetrc = Filename</c></tag> @@ -314,24 +310,31 @@ <tag><c>net_ticktime = TickTime</c></tag> <item> <marker id="net_ticktime"></marker> - <p>Specifies the <c>net_kernel</c> tick time. <c>TickTime</c> - is specified in seconds. Once every <c>TickTime/4</c> second, all - connected nodes are ticked (if anything else is written - to a node). If nothing is received from another node - within the last four tick times, that node is considered - to be down. This ensures that nodes that are not responding, - for reasons such as hardware errors, are considered to be - down.</p> - <p>The time <c>T</c>, in which a node that is not responding is - detected, is calculated as <c><![CDATA[MinT < T < MaxT]]></c>, where:</p> + <p>Specifies the <c>net_kernel</c> tick time in seconds. This is the + approximate time a connected node may be unresponsive until it is + considered down and thereby disconnected.</p> + <p>Once every <c>TickTime/4</c> seconds, each connected node is ticked + if nothing has been sent to it during that last <c>TickTime/4</c> + interval. A tick is a small package sent on the connection. 
A connected + node is considered to be down if no ticks or payload packages have been + received during the last four <c>TickTime/4</c> intervals. This ensures + that nodes that are not responding, for reasons such as hardware errors, + are considered to be down.</p> + <p>As the availability is only checked every <c>TickTime/4</c> seconds, + the actual time <c>T</c> a node have been unresponsive when + detected may vary between <c>MinT</c> and <c>MaxT</c>, + where:</p> <code type="none"> MinT = TickTime - TickTime / 4 MaxT = TickTime + TickTime / 4</code> - <p><c>TickTime</c> defaults to <c>60</c> (seconds). Thus, - <c><![CDATA[45 < T < 75]]></c> seconds.</p> - <p>Notice that <em>all</em> communicating nodes are to have the <em>same</em> - <c>TickTime</c> value specified.</p> - <p>Normally, a terminating node is detected immediately.</p> + <p><c>TickTime</c> defaults to <c>60</c> seconds. Thus, + <c><![CDATA[45 < T < 75]]></c> seconds.</p> + <p>Notice that <em>all</em> communicating nodes are to have the + <em>same</em> <c>TickTime</c> value specified, as it determines both the + frequency of outgoing ticks and the expected frequency of incominging + ticks.</p> + <p>Normally, a terminating node is detected immediately by the transport + protocol (like TCP/IP).</p> </item> <tag><c>shutdown_timeout = integer() | infinity</c></tag> <item> @@ -469,8 +472,12 @@ MaxT = TickTime + TickTime / 4</code> <item><c>ObjSuffix = string()</c></item> <item><c>SrcSuffix = string()</c></item> </list> - <p>Specifies a list of rules for use by <c>filelib:find_file/2</c> and - <c>filelib:find_source/2</c>. If this is set to some other value + <p>Specifies a list of rules for use by + <seealso marker="stdlib:filelib#find_file/2"> + <c>filelib:find_file/2</c></seealso> + <seealso marker="stdlib:filelib#find_source/2"> + <c>filelib:find_source/2</c></seealso> + If this is set to some other value than the empty list, it replaces the default rules. Rules can be simple pairs of directory suffixes, such as <c>{"ebin", "src"}</c>, which are used by <c>filelib:find_file/2</c>, or @@ -478,8 +485,49 @@ MaxT = TickTime + TickTime / 4</code> file name extensions, for example <c>[{".beam", ".erl", [{"ebin", "src"}]}</c>, which are used by <c>filelib:find_source/2</c>. Both kinds of rules can be mixed in the list.</p> + <p>The interpretation of <c>ObjDirSuffix</c> and <c>SrcDirSuffix</c> + is as follows: if the end of the directory name where an + object is located matches <c>ObjDirSuffix</c>, then the + name created by replacing <c>ObjDirSuffix</c> with + <c>SrcDirSuffix</c> is expanded by calling + <seealso marker="stdlib:filelib#wildcard/1"> + <c>filelib:wildcard/1</c></seealso>, and the first regular + file found among the matches is the source file. + </p> + + </item> + </taglist> + </section> + + <section> + <title>Deprecated Configuration Parameters</title> + <p>In Erlang/OTP 21.0, a new API for logging was added. The + old <c>error_logger</c> event manager, and event handlers + running on this manager, still work, but they are no longer used + by default.</p> + <p>The following application configuration parameters can still be + set, but they are only used if the corresponding configuration + parameters for Logger are not set.</p> + <taglist> + <tag><c>error_logger</c></tag> + <item>Replaced by setting the type of the default + <seealso marker="logger_std_h#type"><c>logger_std_h</c></seealso> + to the same value. 
Example: + <code type="none"> +erl -kernel logger '[{handler,default,logger_std_h,#{config=>#{type=>{file,"/tmp/erlang.log"}}}}]' + </code> + </item> + <tag><c>error_logger_format_depth</c></tag> + <item>Replaced by setting the <seealso marker="logger_formatter#depth"><c>depth</c></seealso> + parameter of the default handlers formatter. Example: + <code type="none"> +erl -kernel logger '[{handler,default,logger_std_h,#{formatter=>{logger_formatter,#{legacy_header=>true,template=>[{logger_formatter,header},"\n",msg,"\n"],depth=>10}}}]' + </code> </item> </taglist> + <p>See <seealso marker="logger_chapter#compatibility">Backwards + compatibility with error_logger</seealso> for more + information.</p> </section> <section> @@ -490,12 +538,12 @@ MaxT = TickTime + TickTime / 4</code> <seealso marker="disk_log"><c>disk_log(3)</c></seealso>, <seealso marker="erl_boot_server"><c>erl_boot_server(3)</c></seealso>, <seealso marker="erl_ddll"><c>erl_ddll(3)</c></seealso>, - <seealso marker="error_logger"><c>error_logger(3)</c></seealso>, <seealso marker="file"><c>file(3)</c></seealso>, <seealso marker="global"><c>global(3)</c></seealso>, <seealso marker="global_group"><c>global_group(3)</c></seealso>, <seealso marker="heart"><c>heart(3)</c></seealso>, <seealso marker="inet"><c>inet(3)</c></seealso>, + <seealso marker="logger"><c>logger(3)</c></seealso>, <seealso marker="net_kernel"><c>net_kernel(3)</c></seealso>, <seealso marker="os"><c>os(3)</c></seealso>, <seealso marker="pg2"><c>pg2(3)</c></seealso>, diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml new file mode 100644 index 0000000000..2bcf137299 --- /dev/null +++ b/lib/kernel/doc/src/logger.xml @@ -0,0 +1,1268 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE erlref SYSTEM "erlref.dtd"> + +<erlref> + <header> + <copyright> + <year>2017</year><year>2018</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + </legalnotice> + + <title>logger</title> + <prepared></prepared> + <responsible></responsible> + <docno></docno> + <approved></approved> + <checked></checked> + <date></date> + <rev>A</rev> + <file>logger.xml</file> + </header> + <module>logger</module> + <modulesummary>API module for Logger, the standard logging facility + in Erlang/OTP.</modulesummary> + + <description> + <p>This module implements the main API for logging in + Erlang/OTP. To create a log event, use the + <seealso marker="#logging_API">API functions</seealso> or the + log + <seealso marker="#macros">macros</seealso>, for example:</p> + <code> +?LOG_ERROR("error happened because: ~p", [Reason]). % With macro +logger:error("error happened because: ~p", [Reason]). 
% Without macro + </code> + <p>To configure the Logger backend, + use <seealso marker="kernel_app#logger">Kernel configuration + parameters</seealso> + or <seealso marker="#configuration_API">configuration + functions</seealso> in the Logger API.</p> + + <p>By default, the Kernel application installs one log handler at + system start. This handler is named <c>default</c>. It receives + and processes standard log events produced by the Erlang runtime + system, standard behaviours and different Erlang/OTP + applications. The log events are by default printed to the + terminal.</p> + <p>If you want your systems logs to be printed to a file instead, + you must configure the default handler to do so. The simplest + way is to include the following in + your <seealso marker="config"><c>sys.config</c></seealso>:</p> + <code> +[{kernel, + [{logger, + [{handler, default, logger_std_h, + #{config => #{type => {file, "path/to/file.log"}}}}]}]}]. + </code> + <p> + For more information about: + </p> + <list type="bulleted"> + <item>the Logger facility in general, see + the <seealso marker="logger_chapter">User's + Guide</seealso>.</item> + <item>how to configure Logger, see + the <seealso marker="logger_chapter#configuration">Configuration</seealso> + section in the User's Guide.</item> + <item>the built-in handlers, + see <seealso marker="logger_std_h">logger_std_h</seealso> and + <seealso marker="logger_disk_log_h">logger_disk_log_h</seealso>.</item> + <item>the built-in formatter, + see <seealso marker="logger_formatter">logger_formatter</seealso>.</item> + <item>built-in filters, + see <seealso marker="logger_filters">logger_filters</seealso>.</item> + </list> + + <note> + <p>Since Logger is new in Erlang/OTP 21.0, we do reserve the right + to introduce changes to the Logger API and functionality in + patches following this release. These changes might or might not + be backwards compatible with the initial version.</p> + </note> + + </description> + + <datatypes> + <datatype> + <name name="filter"/> + <desc> + <p>A filter which can be installed as a handler filter, or as + a primary filter in Logger.</p> + </desc> + </datatype> + <datatype> + <name name="filter_arg"/> + <desc> + <p>The second argument to the filter fun.</p> + </desc> + </datatype> + <datatype> + <name name="filter_id"/> + <desc> + <p>A unique identifier for a filter.</p> + </desc> + </datatype> + <datatype> + <name name="filter_return"/> + <desc> + <p>The return value from the filter fun.</p> + </desc> + </datatype> + <datatype> + <name name="formatter_config"/> + <desc> + <p>Configuration data for the + formatter. See <seealso marker="logger_formatter"> + <c>logger_formatter(3)</c></seealso> + for an example of a formatter implementation.</p> + </desc> + </datatype> + <datatype> + <name name="handler_config"/> + <desc> + <p>Handler configuration data for Logger. 
The following + default values apply:</p> + <list> + <item><c>level => all</c></item> + <item><c>filter_default => log</c></item> + <item><c>filters => []</c></item> + <item><c>formatter => {logger_formatter, DefaultFormatterConfig</c>}</item> + </list> + <p>In addition to these, the following fields are + automatically inserted by Logger, values taken from the + two first parameters + to <seealso marker="#add_handler-3"><c>add_handler/3</c></seealso>:</p> + <list> + <item><c>id => HandlerId</c></item> + <item><c>module => Module</c></item> + </list> + <p>These are read-only and cannot be changed in runtime.</p> + <p>Handler specific configuration data is inserted by the + handler callback itself, in a sub structure associated with + the field named <c>config</c>. See + the <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso> + and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso> + manual pages for information about the specifc configuration + for these handlers.</p> + <p>See the <seealso marker="logger_formatter#type-config"> + <c>logger_formatter(3)</c></seealso> manual page for + information about the default configuration for this + formatter.</p> + </desc> + </datatype> + <datatype> + <name name="handler_id"/> + <desc> + <p>A unique identifier for a handler instance.</p> + </desc> + </datatype> + <datatype> + <name name="level"/> + <desc> + <p>The severity level for the message to be logged.</p> + </desc> + </datatype> + <datatype> + <name name="log_event"/> + <desc> + <p></p> + </desc> + </datatype> + <datatype> + <name name="metadata"/> + <desc> + <p>Metadata for the log event.</p> + <p>Logger adds the following metadata to each log event:</p> + <list> + <item><c>pid => self()</c></item> + <item><c>gl => group_leader()</c></item> + <item><c>time => erlang:system_time(microsecond)</c></item> + </list> + <p>When a log macro is used, Logger also inserts location + information:</p> + <list> + <item><c>mfa => {?MODULE, ?FUNCTION_NAME, ?FUNCTION_ARITY}</c></item> + <item><c>file => ?FILE</c></item> + <item><c>line => ?LINE</c></item> + </list> + <p>You can add custom metadata, either by specifying a map as + the last parameter to any of the log macros or the API + functions, or by setting process metadata + with <seealso marker="#set_process_metadata-1"> + <c>set_process_metadata/1</c></seealso> + or <seealso marker="#update_process_metadata-1"> + <c>update_process_metadata/1</c></seealso>.</p> + <p>Logger merges all the metadata maps before forwarding the + log event to the handlers. If the same keys occur, values + from the log call overwrite process metadata, which in turn + overwrite values set by Logger.</p> + <p>The following custom metadata keys have special meaning:</p> + <taglist> + <tag><c>domain</c></tag> + <item> + <p>The value associated with this key is used by filters + for grouping log events originating from, for example, + specific functional + areas. See <seealso marker="logger_filters#domain-2"> + <c>logger_filters:domain/2</c></seealso> + for a description of how this field can be used.</p> + </item> + <tag><c>report_cb</c></tag> + <item> + <p>If the log message is specified as + a <seealso marker="#type-report"><c>report()</c></seealso>, + the <c>report_cb</c> key can be associated with a fun + (report callback) that converts the report to a format + string and arguments, or directly to a string. 
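A sketch of such a report callback (the report keys and message are invented):

    ReportCb = fun(#{op := Op, reason := Reason}) ->
                   {"~p failed: ~p", [Op, Reason]}
               end,
    logger:error(#{op => connect, reason => timeout},
                 #{report_cb => ReportCb}).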
See the + type definition + of <seealso marker="#type-report_cb"><c>report_cb()</c></seealso>, + and + section <seealso marker="logger_chapter#log_message">Log + Message</seealso> in the User's Guide for more + information about report callbacks.</p> + </item> + </taglist> + </desc> + </datatype> + <datatype> + <name name="msg_fun"/> + <desc> + <p></p> + </desc> + </datatype> + <datatype> + <name name="primary_config"/> + <desc> + <p>Primary configuration data for Logger. The following + default values apply:</p> + <list> + <item><c>level => info</c></item> + <item><c>filter_default => log</c></item> + <item><c>filters => []</c></item> + </list> + </desc> + </datatype> + <datatype> + <name name="report"/> + <desc> + <p></p> + </desc> + </datatype> + <datatype> + <name name="report_cb"/> + <desc> + <p>A fun which converts a <seealso marker="#type-report"><c>report()</c> + </seealso> to a format string and arguments, or directly to a string. + See section <seealso marker="logger_chapter#log_message">Log + Message</seealso> in the User's Guide for more + information.</p> + </desc> + </datatype> + <datatype> + <name name="report_cb_config"/> + <desc> + <p></p> + </desc> + </datatype> + <datatype> + <name name="timestamp"/> + <desc> + <p>A timestamp produced + with <seealso marker="erts:erlang#system_time-1"> + <c>erlang:system_time(microsecond)</c></seealso>.</p> + </desc> + </datatype> + </datatypes> + + <section> + <title>Macros</title> + <p>The following macros are defined:</p> + + <list> + <item><c>?LOG_EMERGENCY(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_EMERGENCY(FunOrFormat,Args[,Metadata])</c></item> + <item><c>?LOG_ALERT(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_ALERT(FunOrFormat,Args[,Metadata])</c></item> + <item><c>?LOG_CRITICAL(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_CRITICAL(FunOrFormat,Args[,Metadata])</c></item> + <item><c>?LOG_ERROR(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_ERROR(FunOrFormat,Args[,Metadata])</c></item> + <item><c>?LOG_WARNING(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_WARNING(FunOrFormat,Args[,Metadata])</c></item> + <item><c>?LOG_NOTICE(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_NOTICE(FunOrFormat,Args[,Metadata])</c></item> + <item><c>?LOG_INFO(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_INFO(FunOrFormat,Args[,Metadata])</c></item> + <item><c>?LOG_DEBUG(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_DEBUG(FunOrFormat,Args[,Metadata])</c></item> + <item><c>?LOG(Level,StringOrReport[,Metadata])</c></item> + <item><c>?LOG(Level,FunOrFormat,Args[,Metadata])</c></item> + </list> + + <p>All macros expand to a call to Logger, where <c>Level</c> is + taken from the macro name, or from the first argument in the + case of the <c>?LOG</c> macro. 
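For instance, a hedged sketch of macro usage (the module name and messages are invented); the macros require including the Logger header file:

    -module(my_mod).
    -include_lib("kernel/include/logger.hrl").
    -export([retry/1]).

    retry(Reason) ->
        ?LOG_WARNING("retrying after: ~p", [Reason]),
        ?LOG(debug, #{event => retry, reason => Reason}).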
Location data is added to the + metadata as described under + the <seealso marker="#type-metadata"><c>metadata()</c></seealso> + type definition.</p> + + <p>The call is wrapped in a case statement and will be evaluated + only if <c>Level</c> is equal to or below the configured log + level.</p> + </section> + + <section> + <marker id="logging_API"/> + <title>Logging API functions</title> + </section> + <funcs> + <func> + <name>emergency(StringOrReport[,Metadata])</name> + <name>emergency(Format,Args[,Metadata])</name> + <name>emergency(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>emergency</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(emergency,...)</c></seealso>.</p> + </desc> + </func> + + <func> + <name>alert(StringOrReport[,Metadata])</name> + <name>alert(Format,Args[,Metadata])</name> + <name>alert(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>alert</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(alert,...)</c></seealso>.</p> + </desc> + </func> + + <func> + <name>critical(StringOrReport[,Metadata])</name> + <name>critical(Format,Args[,Metadata])</name> + <name>critical(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>critical</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(critical,...)</c></seealso>.</p> + </desc> + </func> + + <func> + <name>error(StringOrReport[,Metadata])</name> + <name>error(Format,Args[,Metadata])</name> + <name>error(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>error</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(error,...)</c></seealso>.</p> + </desc> + </func> + + <func> + <name>warning(StringOrReport[,Metadata])</name> + <name>warning(Format,Args[,Metadata])</name> + <name>warning(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>warning</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(warning,...)</c></seealso>.</p> + </desc> + </func> + + <func> + <name>notice(StringOrReport[,Metadata])</name> + <name>notice(Format,Args[,Metadata])</name> + <name>notice(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>notice</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(notice,...)</c></seealso>.</p> + </desc> + </func> + + <func> + <name>info(StringOrReport[,Metadata])</name> + <name>info(Format,Args[,Metadata])</name> + <name>info(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>info</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(info,...)</c></seealso>.</p> + </desc> + </func> + + <func> + <name>debug(StringOrReport[,Metadata])</name> + <name>debug(Format,Args[,Metadata])</name> + <name>debug(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>debug</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(debug,...)</c></seealso>.</p> + </desc> + </func> + + <func> + <name name="log" arity="2"/> + <name name="log" arity="3" clause_i="1"/> + <name name="log" arity="3" clause_i="2"/> + <name name="log" arity="3" clause_i="3"/> + <name name="log" arity="4" clause_i="1"/> + <name name="log" arity="4" clause_i="2"/> + <fsummary>Logs the given message.</fsummary> + <type variable="Level"/> + <type variable="StringOrReport" name_i="1"/> + <type variable="Format" name_i="3"/> + <type variable="Args" name_i="3"/> 
+ <type variable="Fun" name_i="4"/> + <type variable="FunArgs" name_i="4"/> + <type variable="Metadata"/> + <desc> + <p>Log the given message.</p> + </desc> + </func> + </funcs> + + <section> + <marker id="configuration_API"/> + <title>Configuration API functions</title> + </section> + <funcs> + <func> + <name name="add_handler" arity="3"/> + <fsummary>Add a handler with the given configuration.</fsummary> + <desc> + <p>Add a handler with the given configuration.</p> + <p><c><anno>HandlerId</anno></c> is a unique identifier which + must be used in all subsequent calls referring to this + handler.</p> + </desc> + </func> + + <func> + <name name="add_handler_filter" arity="3"/> + <fsummary>Add a filter to the specified handler.</fsummary> + <desc> + <p>Add a filter to the specified handler.</p> + <p>The filter fun is called with the log event as the first + parameter, and the specified <c>filter_args()</c> as the + second parameter.</p> + <p>The return value of the fun specifies if a log event is to + be discarded or forwarded to the handler callback:</p> + <taglist> + <tag><c>log_event()</c></tag> + <item> + <p>The filter <em>passed</em>. The next handler filter, if + any, is applied. If no more filters exist for this + handler, the log event is forwarded to the handler + callback.</p> + </item> + <tag><c>stop</c></tag> + <item> + <p>The filter <em>did not pass</em>, and the log event is + immediately discarded.</p> + </item> + <tag><c>ignore</c></tag> + <item> + <p>The filter has no knowledge of the log event. The next + handler filter, if any, is applied. If no more filters + exist for this handler, the value of + the <c>filter_default</c> configuration parameter for + the handler specifies if the log event shall be + discarded or forwarded to the handler callback.</p> + </item> + </taglist> + <p>See + section <seealso marker="logger_chapter#filters">Filters</seealso> + in the User's Guide for more information about filters.</p> + <p>Some built-in filters exist. These are defined in + <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p> + </desc> + </func> + + <func> + <name name="add_handlers" arity="1" clause_i="1"/> + <fsummary>Set up log handlers from the application's + configuration parameters.</fsummary> + <desc> + <p>Reads the application configuration parameter <c>logger</c> and + calls <c>add_handlers/1</c> with its contents.</p> + </desc> + </func> + + <func> + <name name="add_handlers" arity="1" clause_i="2"/> + <fsummary>Setup logger handlers.</fsummary> + <type name="config_handler"/> + <desc> + <p>This function should be used by custom Logger handlers to make + configuration consistent no matter which handler the system uses. + Normal usage is to add a call to <c>logger:add_handlers/1</c> + just after the processes that the handler needs are started, + and pass the application's <c>logger</c> configuration as the argument. + For example:</p> + <code> +-behaviour(application). +start(_, []) -> + case supervisor:start_link({local, my_sup}, my_sup, []) of + {ok, Pid} -> + ok = logger:add_handlers(my_app), + {ok, Pid, []}; + Error -> Error + end.</code> + <p>This reads the <c>logger</c> configuration parameter from + the <c>my_app</c> application and starts the configured + handlers. The contents of the configuration use the same + rules as the + <seealso marker="logger_chapter#handler-configuration">logger handler configuration</seealso>. 
+ </p> + <p>If the handler is meant to replace the default handler, the Kernel's + default handler have to be disabled before the new handler is added. + A <c>sys.config</c> file that disables the Kernel handler and adds + a custom handler could look like this:</p> + <code> +[{kernel, + [{logger, + %% Disable the default Kernel handler + [{handler, default, undefined}]}]}, + {my_app, + [{logger, + %% Enable this handler as the default + [{handler, default, my_handler, #{}}]}]}]. + </code> + </desc> + </func> + + <func> + <name name="add_primary_filter" arity="2"/> + <fsummary>Add a primary filter to Logger.</fsummary> + <desc> + <p>Add a primary filter to Logger.</p> + <p>The filter fun is called with the log event as the first + parameter, and the specified <c>filter_args()</c> as the + second parameter.</p> + <p>The return value of the fun specifies if a log event is to + be discarded or forwarded to the handlers:</p> + <taglist> + <tag><c>log_event()</c></tag> + <item> + <p>The filter <em>passed</em>. The next primary filter, if + any, is applied. If no more primary filters exist, the + log event is forwarded to the handler part of Logger, + where handler filters are applied.</p> + </item> + <tag><c>stop</c></tag> + <item> + <p>The filter <em>did not pass</em>, and the log event is + immediately discarded.</p> + </item> + <tag><c>ignore</c></tag> + <item> + <p>The filter has no knowledge of the log event. The next + primary filter, if any, is applied. If no more primary + filters exist, the value of the + primary <c>filter_default</c> configuration parameter + specifies if the log event shall be discarded or + forwarded to the handler part.</p> + </item> + </taglist> + <p>See section <seealso marker="logger_chapter#filters"> + Filters</seealso> in the User's Guide for more information + about filters.</p> + <p>Some built-in filters exist. These are defined + in <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p> + </desc> + </func> + + <func> + <name name="get_config" arity="0"/> + <fsummary>Look up the current Logger configuration</fsummary> + <desc> + <p>Look up all current Logger configuration, including primary + and handler configuration, and module level settings.</p> + </desc> + </func> + + <func> + <name name="get_handler_config" arity="0"/> + <fsummary>Look up the current configuration for all handlers.</fsummary> + <desc> + <p>Look up the current configuration for all handlers.</p> + </desc> + </func> + + <func> + <name name="get_handler_config" arity="1"/> + <fsummary>Look up the current configuration for the given + handler.</fsummary> + <desc> + <p>Look up the current configuration for the given handler.</p> + </desc> + </func> + + <func> + <name name="get_handler_ids" arity="0"/> + <fsummary>Look up the identities for all installed handlers.</fsummary> + <desc> + <p>Look up the identities for all installed handlers.</p> + </desc> + </func> + + <func> + <name name="get_primary_config" arity="0"/> + <fsummary>Look up the current primary configuration for Logger.</fsummary> + <desc> + <p>Look up the current primary configuration for Logger.</p> + </desc> + </func> + + <func> + <name name="get_module_level" arity="0"/> + <fsummary>Look up all current module levels.</fsummary> + <desc> + <p>Look up all current module levels. 
Returns a list + containing one <c>{Module,Level}</c> element for each module + for which the module level was previously set + with <seealso marker="#set_module_level-2"> + <c>set_module_level/2</c></seealso>.</p> + </desc> + </func> + + <func> + <name name="get_module_level" arity="1"/> + <fsummary>Look up the current level for the given modules.</fsummary> + <desc> + <p>Look up the current level for the given modules. Returns a + list containing one <c>{Module,Level}</c> element for each + of the given modules for which the module level was + previously set with <seealso marker="#set_module_level-2"> + <c>set_module_level/2</c></seealso>.</p> + </desc> + </func> + + <func> + <name name="get_process_metadata" arity="0"/> + <fsummary>Retrieve data set with set_process_metadata/1.</fsummary> + <desc> + <p>Retrieve data set + with <seealso marker="#set_process_metadata-1"> + <c>set_process_metadata/1</c></seealso> or + <seealso marker="#update_process_metadata-1"> + <c>update_process_metadata/1</c></seealso>.</p> + </desc> + </func> + + <func> + <name name="remove_handler" arity="1"/> + <fsummary>Remove the handler with the specified identity.</fsummary> + <desc> + <p>Remove the handler identified by <c><anno>HandlerId</anno></c>.</p> + </desc> + </func> + + <func> + <name name="remove_handler_filter" arity="2"/> + <fsummary>Remove a filter from the specified handler.</fsummary> + <desc> + <p>Remove the filter identified + by <c><anno>FilterId</anno></c> from the handler identified + by <c><anno>HandlerId</anno></c>.</p> + </desc> + </func> + + <func> + <name name="remove_primary_filter" arity="1"/> + <fsummary>Remove a primary filter from Logger.</fsummary> + <desc> + <p>Remove the primary filter identified + by <c><anno>FilterId</anno></c> from Logger.</p> + </desc> + </func> + + <func> + <name name="set_application_level" arity="2"/> + <fsummary>Set the log level for all modules in the specified application.</fsummary> + <desc> + <p>Set the log level for all the modules of the specified application.</p> + <p>This function is a convenience function that calls + <seealso marker="#set_module_level/2">logger:set_module_level/2</seealso> + for each module associated with an application.</p> + </desc> + </func> + + <func> + <name name="set_handler_config" arity="2"/> + <fsummary>Set configuration data for the specified handler.</fsummary> + <desc> + <p>Set configuration data for the specified handler. This + overwrites the current handler configuration.</p> + <p>To modify the existing configuration, + use <seealso marker="#update_handler_config-2"> + <c>update_handler_config/2</c></seealso>, or, if a more + complex merge is needed, read the current configuration + with <seealso marker="#get_handler_config-1"><c>get_handler_config/1</c> + </seealso>, then do the merge before writing the new + configuration back with this function.</p> + <p>If a key is removed compared to the current configuration, + and the key is known by Logger, the default value is used. 
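A sketch of the read-merge-write approach described above could look as follows; my_handler is a hypothetical handler identity, and the return shape of get_handler_config/1 mirrors the snippet shown for update_handler_config/2 further down:

<code type="erl">
%% Read the full handler configuration, merge in the changes, write it back.
{ok, {_, Old}} = logger:get_handler_config(my_handler),
New = maps:merge(Old, #{level => error, filter_default => stop}),
ok = logger:set_handler_config(my_handler, New).
</code>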
If + it is a custom key, then it is up to the handler + implementation if the value is removed or a default value is + inserted.</p> + </desc> + </func> + + <func> + <name name="set_handler_config" arity="3" clause_i="1"/> + <name name="set_handler_config" arity="3" clause_i="2"/> + <name name="set_handler_config" arity="3" clause_i="3"/> + <name name="set_handler_config" arity="3" clause_i="4"/> + <name name="set_handler_config" arity="3" clause_i="5"/> + <fsummary>Add or update configuration data for the specified + handler.</fsummary> + <type variable="HandlerId"/> + <type variable="Level" name_i="1"/> + <type variable="FilterDefault" name_i="2"/> + <type variable="Filters" name_i="3"/> + <type variable="Formatter" name_i="4"/> + <type variable="Config" name_i="5"/> + <type variable="Return"/> + <desc> + <p>Add or update configuration data for the specified + handler. If the given <c><anno>Key</anno></c> already + exists, its associated value will be changed + to the given value. If it does not exist, it will + be added.</p> + <p>If the value is incomplete, which for example can be the + case for the <c>config</c> key, it is up to the handler + implementation how the unspecified parts are set. For all + handlers in the Kernel application, unspecified data for + the <c>config</c> key is set to default values. To update + only specified data, and keep the existing configuration for + the rest, use <seealso marker="#update_handler_config-3"> + <c>update_handler_config/3</c></seealso>.</p> + <p>See the definition of + the <seealso marker="#type-handler_config"> + <c>handler_config()</c></seealso> type for more + information about the different parameters.</p> + </desc> + </func> + + <func> + <name name="set_primary_config" arity="1"/> + <fsummary>Set primary configuration data for Logger.</fsummary> + <desc> + <p>Set primary configuration data for Logger. This + overwrites the current configuration.</p> + <p>To modify the existing configuration, + use <seealso marker="#update_primary_config-1"> + <c>update_primary_config/1</c></seealso>, or, if a more + complex merge is needed, read the current configuration + with <seealso marker="#get_primary_config-0"><c>get_primary_config/0</c> + </seealso>, then do the merge before writing the new + configuration back with this function.</p> + <p>If a key is removed compared to the current configuration, + the default value is used.</p> + </desc> + </func> + + <func> + <name name="set_primary_config" arity="2" clause_i="1"/> + <name name="set_primary_config" arity="2" clause_i="2"/> + <name name="set_primary_config" arity="2" clause_i="3"/> + <fsummary>Add or update primary configuration data for Logger.</fsummary> + <type variable="Level" name_i="1"/> + <type variable="FilterDefault" name_i="2"/> + <type variable="Filters" name_i="3"/> + <desc> + <p>Add or update primary configuration data for Logger. If the + given <c><anno>Key</anno></c> already exists, its associated + value will be changed to the given value. If it does not + exist, it will be added.</p> + </desc> + </func> + + <func> + <name name="set_module_level" arity="2"/> + <fsummary>Set the log level for the specified modules.</fsummary> + <desc> + <p>Set the log level for the specified modules.</p> + <p>The log level for a module overrides the primary log level + of Logger for log events originating from the module in + question. 
Notice, however, that it does not override the
+ level configuration for any handler.</p>
+ <p>For example: Assume that the primary log level for Logger
+ is <c>info</c>, and there is one handler, <c>h1</c>, with
+ level <c>info</c> and one handler, <c>h2</c>, with
+ level <c>debug</c>.</p>
+ <p>With this configuration, no debug messages will be logged,
+ since they are all stopped by the primary log level.</p>
+ <p>If the level for <c>mymodule</c> is now set
+ to <c>debug</c>, then debug events from this module will be
+ logged by the handler <c>h2</c>, but not by
+ handler <c>h1</c>.</p>
+ <p>Debug events from other modules are still not logged.</p>
+ <p>To change the primary log level for Logger, use
+ <seealso marker="#set_primary_config/2">
+ <c>set_primary_config(level, Level)</c></seealso>.</p>
+ <p>To change the log level for a handler, use
+ <seealso marker="#set_handler_config/3">
+ <c>set_handler_config(HandlerId, level, Level)</c>
+ </seealso>.</p>
+ <note>
+ <p>The originating module for a log event is only detected
+ if the key <c>mfa</c> exists in the metadata, and is
+ associated with <c>{Module, Function, Arity}</c>. When log
+ macros are used, this association is automatically added
+ to all log events. If an API function is called directly,
+ without using a macro, the logging client must explicitly
+ add this information if module levels shall have any
+ effect.</p>
+ </note>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_process_metadata" arity="1"/>
+ <fsummary>Set metadata to use when logging from current process.</fsummary>
+ <desc>
+ <p>Set metadata which Logger shall automatically insert in
+ all log events produced on the current process.</p>
+ <p>Location data produced by the log macros, and/or metadata
+ given as argument to the log call (API function or macro),
+ are merged with the process metadata. If the same keys
+ occur, values from the metadata argument to the log call
+ overwrite values from the process metadata, which in turn
+ overwrite values from the location data.</p>
+ <p>Subsequent calls to this function overwrite previously set
+ data. To update existing data instead of overwriting it,
+ see <seealso marker="#update_process_metadata-1">
+ <c>update_process_metadata/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="unset_application_level" arity="1"/>
+ <fsummary>Unset the log level for all modules in the specified application.</fsummary>
+ <desc>
+ <p>Unset the log level for all the modules of the specified application.</p>
+ <p>This function is a convenience function that calls
+ <seealso marker="#unset_module_level/1">logger:unset_module_level/1</seealso>
+ for each module associated with an application.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="unset_module_level" arity="0"/>
+ <fsummary>Remove module specific log settings for all modules.</fsummary>
+ <desc>
+ <p>Remove module specific log settings. After this, the
+ primary log level is used for all modules.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="unset_module_level" arity="1"/>
+ <fsummary>Remove module specific log settings for the given
+ modules.</fsummary>
+ <desc>
+ <p>Remove module specific log settings.
After this, the + primary log level is used for the specified modules.</p> + </desc> + </func> + + <func> + <name name="unset_process_metadata" arity="0"/> + <fsummary>Delete data set with set_process_metadata/1.</fsummary> + <desc> + <p>Delete data set + with <seealso marker="#set_process_metadata-1"> + <c>set_process_metadata/1</c></seealso> or + <seealso marker="#update_process_metadata-1"> + <c>update_process_metadata/1</c></seealso>.</p> + </desc> + </func> + + <func> + <name name="update_formatter_config" arity="2"/> + <fsummary>Update the formatter configuration for the specified handler.</fsummary> + <desc> + <p>Update the formatter configuration for the specified handler.</p> + <p>The new configuration is merged with the existing formatter + configuration.</p> + <p>To overwrite the existing configuration without any merge, + use</p> + <pre> +<seealso marker="#set_handler_config-3">set_handler_config(HandlerId, formatter, + {FormatterModule, FormatterConfig})</seealso>.</pre> + </desc> + </func> + + <func> + <name name="update_formatter_config" arity="3"/> + <fsummary>Update the formatter configuration for the specified handler.</fsummary> + <desc> + <p>Update the formatter configuration for the specified handler.</p> + <p>This is equivalent to</p> + <pre> +<seealso marker="#update_formatter_config-2">update_formatter_config(<anno>HandlerId</anno>, #{<anno>Key</anno> => <anno>Value</anno>})</seealso></pre> + </desc> + </func> + + <func> + <name name="update_handler_config" arity="2"/> + <fsummary>Update configuration data for the specified handler.</fsummary> + <desc> + <p>Update configuration data for the specified handler. This function + behaves as if it was implemented as follows:</p> + <code type="erl"> +{ok, {_, Old}} = logger:get_handler_config(HandlerId), +logger:set_handler_config(HandlerId, maps:merge(Old, Config)). + </code> + <p>To overwrite the existing configuration without any merge, + use <seealso marker="#set_handler_config-2"><c>set_handler_config/2</c> + </seealso>.</p> + </desc> + </func> + + <func> + <name name="update_handler_config" arity="3" clause_i="1"/> + <name name="update_handler_config" arity="3" clause_i="2"/> + <name name="update_handler_config" arity="3" clause_i="3"/> + <name name="update_handler_config" arity="3" clause_i="4"/> + <name name="update_handler_config" arity="3" clause_i="5"/> + <fsummary>Add or update configuration data for the specified + handler.</fsummary> + <type variable="HandlerId"/> + <type variable="Level" name_i="1"/> + <type variable="FilterDefault" name_i="2"/> + <type variable="Filters" name_i="3"/> + <type variable="Formatter" name_i="4"/> + <type variable="Config" name_i="5"/> + <type variable="Return"/> + <desc> + <p>Add or update configuration data for the specified + handler. If the given <c><anno>Key</anno></c> already + exists, its associated value will be changed + to the given value. If it does not exist, it will + be added.</p> + <p>If the value is incomplete, which for example can be the + case for the <c>config</c> key, it is up to the handler + implementation how the unspecified parts are set. For all + handlers in the Kernel application, unspecified data for + the <c>config</c> key is not changed. 
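For instance, assuming the default handler is the standard logger_std_h handler with a type sub-key in its handler specific config (as in the configuration examples in the User's Guide), the following sketch changes only the destination and leaves all other config sub-keys as they are:

<code type="erl">
%% Only 'type' is given; other handler specific sub-keys keep their values.
ok = logger:update_handler_config(default, config,
                                  #{type => {file, "log/erlang.log"}}).
</code>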
To reset unspecified + data to default values, + use <seealso marker="#set_handler_config-3"> + <c>set_handler_config/3</c></seealso>.</p> + <p>See the definition of + the <seealso marker="#type-handler_config"> + <c>handler_config()</c></seealso> type for more + information about the different parameters.</p> + </desc> + </func> + + <func> + <name name="update_primary_config" arity="1"/> + <fsummary>Update primary configuration data for Logger.</fsummary> + <desc> + <p>Update primary configuration data for Logger. This function + behaves as if it was implemented as follows:</p> + <code type="erl"> +Old = logger:get_primary_config(), +logger:set_primary_config(maps:merge(Old, Config)). + </code> + <p>To overwrite the existing configuration without any merge, + use <seealso marker="#set_primary_config-1"><c>set_primary_config/1</c> + </seealso>.</p> + </desc> + </func> + + <func> + <name name="update_process_metadata" arity="1"/> + <fsummary>Set or update metadata to use when logging from + current process.</fsummary> + <desc> + <p>Set or update metadata to use when logging from current + process</p> + <p>If process metadata exists for the current process, this + function behaves as if it was implemented as follows:</p> + <code type="erl"> +logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)). + </code> + <p>If no process metadata exists, the function behaves as + <seealso marker="#set_process_metadata-1"> + <c>set_process_metadata/1</c> + </seealso>.</p> + </desc> + </func> + </funcs> + + <section> + <marker id="misc_API"/> + <title>Miscellaneous API functions</title> + </section> + <funcs> + <func> + <name name="compare_levels" arity="2"/> + <fsummary>Compare the severity of two log levels.</fsummary> + <desc> + <p>Compare the severity of two log levels. Returns <c>gt</c> + if <c>Level1</c> is more severe than + <c>Level2</c>, <c>lt</c> if <c>Level1</c> is less severe, + and <c>eq</c> if the levels are equal.</p> + </desc> + </func> + + <func> + <name name="format_report" arity="1"/> + <fsummary>Convert a log message on report form to {Format, Args}.</fsummary> + <desc> + <p>Convert a log message on report form to <c>{Format, + Args}</c>. This is the default report callback used + by <seealso marker="logger_formatter"> + <c>logger_formatter</c></seealso> when no custom report + callback is found. See + section <seealso marker="logger_chapter#log_message">Log + Message</seealso> in the Kernel User's Guide for + information about report callbacks and valid forms of log + messages.</p> + <p>The function produces lines of <c>Key: Value</c> from + key-value lists. Strings are printed with <c>~ts</c> and + other terms with <c>~tp</c>.</p> + <p>If <c><anno>Report</anno></c> is a map, it is converted to + a key-value list before formatting as such.</p> + </desc> + </func> + </funcs> + + <section> + <marker id="handler_callback_functions"/> + <title>Handler Callback Functions</title> + <p>The following functions are to be exported from a handler + callback module.</p> + </section> + + <funcs> + <func> + <name>HModule:adding_handler(Config1) -> {ok, Config2} | {error, + Reason}</name> + <fsummary>An instance of this handler is about to be added.</fsummary> + <type> + <v>Config1 = Config2 = + <seealso marker="#type-handler_config">handler_config()</seealso></v> + <v>Reason = term()</v> + </type> + <desc> + <p>This callback function is optional.</p> + <p>The function is called on a temporary process when an new + handler is about to be added. 
The purpose is to verify the
+ configuration and initiate all resources needed by the
+ handler.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>Config1</c>.</p>
+ <p>If everything succeeds, the callback function can add
+ possible default values or internal state values to the
+ configuration, and return the adjusted map
+ in <c>{ok,Config2}</c>.</p>
+ <p>If the configuration is faulty, or if the initiation fails,
+ the callback function must return <c>{error,Reason}</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>HModule:changing_config(SetOrUpdate, OldConfig, NewConfig) -> {ok, Config} | {error, Reason}</name>
+ <fsummary>The configuration for this handler is about to change.</fsummary>
+ <type>
+ <v>SetOrUpdate = set | update</v>
+ <v>OldConfig = NewConfig = Config =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called on a temporary process when the
+ configuration for a handler is about to change. The purpose
+ is to verify and act on the new configuration.</p>
+ <p><c>OldConfig</c> is the existing configuration
+ and <c>NewConfig</c> is the new configuration.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>OldConfig</c>.</p>
+ <p><c>SetOrUpdate</c> has the value <c>set</c> if the
+ configuration change originates from a call to
+ <seealso marker="#set_handler_config-2">
+ <c>set_handler_config/2,3</c></seealso>, and <c>update</c>
+ if it originates from <seealso marker="#update_handler_config-2">
+ <c>update_handler_config/2,3</c></seealso>. The handler can
+ use this parameter to decide how to update the value of
+ the <c>config</c> field, that is, the handler specific
+ configuration data. Typically, if <c>SetOrUpdate</c>
+ equals <c>set</c>, values that are not specified must be
+ given their default values. If <c>SetOrUpdate</c>
+ equals <c>update</c>, the values found in <c>OldConfig</c>
+ must be used instead.</p>
+ <p>If everything succeeds, the callback function must return a
+ possibly adjusted configuration in <c>{ok,Config}</c>.</p>
+ <p>If the configuration is faulty, the callback function must
+ return <c>{error,Reason}</c>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>HModule:filter_config(Config) -> FilteredConfig</name>
+ <fsummary>Remove internal data from configuration.</fsummary>
+ <type>
+ <v>Config = FilteredConfig =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called when one of the Logger API functions
+ for fetching the handler configuration is called, for
+ example
+ <seealso marker="#get_handler_config-1">
+ <c>logger:get_handler_config/1</c></seealso>.</p>
+ <p>It allows the handler to remove internal data fields from
+ its configuration data before it is returned to the
+ caller.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>HModule:log(LogEvent, Config) -> void()</name>
+ <fsummary>Log the given log event.</fsummary>
+ <type>
+ <v>LogEvent =
+ <seealso marker="#type-log_event">log_event()</seealso></v>
+ <v>Config =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is mandatory.</p>
+ <p>The function is called when all primary filters and all
+ handler filters for the handler in question have passed for
+ the given log event.
It is called on the client process, that
+ is, the process that issued the log event.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>Config</c>.</p>
+ <p>The handler must log the event.</p>
+ <p>The return value from this function is ignored by
+ Logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>HModule:removing_handler(Config) -> ok</name>
+ <fsummary>The given handler is about to be removed.</fsummary>
+ <type>
+ <v>Config =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called on a temporary process when a
+ handler is about to be removed. The purpose is to release
+ all resources used by the handler.</p>
+ <p>The handler identity is associated with the <c>id</c> key
+ in <c>Config</c>.</p>
+ <p>The return value is ignored by Logger.</p>
+ </desc>
+ </func>
+
+ </funcs>
+
+ <section>
+ <marker id="formatter_callback_functions"/>
+ <title>Formatter Callback Functions</title>
+ <p>The following functions are to be exported from a formatter
+ callback module.</p>
+ </section>
+
+ <funcs>
+ <func>
+ <name>FModule:check_config(FConfig) -> ok | {error, Reason}</name>
+ <fsummary>Validate the given formatter configuration.</fsummary>
+ <type>
+ <v>FConfig =
+ <seealso marker="#type-formatter_config">formatter_config()</seealso></v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called by Logger when formatter
+ configuration is set or modified. The formatter must
+ validate the given configuration and return <c>ok</c> if it
+ is correct, and <c>{error,Reason}</c> if it is faulty.</p>
+ <p>The following Logger API functions can trigger this callback:</p>
+ <list>
+ <item><seealso marker="logger#add_handler-3">
+ <c>logger:add_handler/3</c></seealso></item>
+ <item><seealso marker="logger#set_handler_config-2">
+ <c>logger:set_handler_config/2,3</c></seealso></item>
+ <item><seealso marker="logger#update_handler_config-2">
+ <c>logger:update_handler_config/2,3</c></seealso></item>
+ <item><seealso marker="logger#update_formatter_config-2">
+ <c>logger:update_formatter_config/2</c></seealso></item>
+ </list>
+ <p>See <seealso marker="logger_formatter">
+ <c>logger_formatter(3)</c></seealso>
+ for an example implementation. <c>logger_formatter</c> is the
+ default formatter used by Logger.</p>
+ </desc>
+ </func>
+ <func>
+ <name>FModule:format(LogEvent, FConfig) -> FormattedLogEntry</name>
+ <fsummary>Format the given log event.</fsummary>
+ <type>
+ <v>LogEvent =
+ <seealso marker="#type-log_event">log_event()</seealso></v>
+ <v>FConfig =
+ <seealso marker="#type-formatter_config">formatter_config()</seealso></v>
+ <v>FormattedLogEntry =
+ <seealso marker="unicode#type-chardata">unicode:chardata()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is mandatory.</p>
+ <p>The function can be called by a log handler to convert a
+ log event term to a printable string. The returned value
+ can, for example, be printed as a log entry to the console
+ or a file using <seealso marker="stdlib:io#put_chars-1">
+ <c>io:put_chars/1,2</c></seealso>.</p>
+ <p>See <seealso marker="logger_formatter">
+ <c>logger_formatter(3)</c></seealso>
+ for an example implementation.
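Purely as an illustration of the callback contract, and not of logger_formatter itself, a minimal formatter module could be sketched as follows. The module name and the printed layout are hypothetical, and the sketch only handles messages on the string form, which is an assumption beyond what is shown here:

<code type="erl">
-module(my_formatter).
-export([check_config/1, format/2]).

%% Optional callback: accept any map as configuration.
check_config(FConfig) when is_map(FConfig) -> ok;
check_config(FConfig) -> {error, {invalid_formatter_config, FConfig}}.

%% Mandatory callback: must return unicode:chardata().
format(#{level := Level, msg := {string, Chardata}}, _FConfig) ->
    io_lib:format("~p: ~ts~n", [Level, Chardata]).
</code>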
<c>logger_formatter</c> is the + default formatter used by Logger.</p> + </desc> + </func> + </funcs> + + <section> + <title>See Also</title> + <p> + <seealso marker="config"><c>config(4)</c></seealso>, + <seealso marker="erts:erlang"><c>erlang(3)</c></seealso>, + <seealso marker="stdlib:io"><c>io(3)</c></seealso>, + <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c></seealso>, + <seealso marker="logger_filters"><c>logger_filters(3)</c></seealso>, + <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>, + <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>, + <seealso marker="stdlib:unicode"><c>unicode(3)</c></seealso> + </p> + </section> +</erlref> + + diff --git a/lib/kernel/doc/src/logger_arch.dia b/lib/kernel/doc/src/logger_arch.dia Binary files differnew file mode 100644 index 0000000000..97be31856e --- /dev/null +++ b/lib/kernel/doc/src/logger_arch.dia diff --git a/lib/kernel/doc/src/logger_arch.png b/lib/kernel/doc/src/logger_arch.png Binary files differnew file mode 100644 index 0000000000..70933a5a41 --- /dev/null +++ b/lib/kernel/doc/src/logger_arch.png diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml new file mode 100644 index 0000000000..1870d2ab79 --- /dev/null +++ b/lib/kernel/doc/src/logger_chapter.xml @@ -0,0 +1,1345 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE chapter SYSTEM "chapter.dtd"> + +<chapter> + <header> + <copyright> + <year>2017</year><year>2018</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + </legalnotice> + + <title>Logging</title> + <prepared></prepared> + <docno></docno> + <date></date> + <rev></rev> + <file>logger_chapter.xml</file> + </header> + + <p>Erlang/OTP 21.0 provides a standard API for logging + through <c>Logger</c>, which is part of the Kernel + application. Logger consists of the API for issuing log events, + and a customizable backend where log handlers, filters and + formatters can be plugged in.</p> + <p>By default, the Kernel application installs one log handler at + system start. This handler is named <c>default</c>. It receives + and processes standard log events produced by the Erlang runtime + system, standard behaviours and different Erlang/OTP + applications. The log events are by default written to the + terminal.</p> + <p>You can also configure the system so that the default handler + prints log events to a single file, or to a set of wrap logs + via <seealso marker="disk_log"><c>disk_log</c></seealso>.</p> + <p>By configuration, you can also modify or disable the default + handler, replace it by a custom handler, and install additional + handlers.</p> + + <note> + <p>Since Logger is new in Erlang/OTP 21.0, we do reserve the right + to introduce changes to the Logger API and functionality in + patches following this release. 
These changes might or might not
+ be backwards compatible with the initial version.</p>
+ </note>
+
+ <section>
+ <title>Overview</title>
+ <p>A <em>log event</em> consists of a <em>log level</em>, the
+ <em>message</em> to be logged, and <em>metadata</em>.</p>
+ <p>The Logger backend forwards log events from the API, first
+ through a set of <em>primary filters</em>, then through a set of
+ secondary filters attached to each log handler. The secondary
+ filters are, in the following, referred to as <em>handler filters</em>.</p>
+ <p>Each filter set consists of a <em>log level check</em>,
+ followed by zero or more <em>filter functions</em>.</p>
+ <p>The following figure shows a conceptual overview of Logger. The
+ figure shows two log handlers, but any number of handlers can be
+ installed.</p>
+
+ <!-- The image is edited with dia in logger_arch.dia file,
+ and .png file generated with make target 'png'. -->
+ <image file="logger_arch.png">
+ <icaption>Conceptual Overview</icaption>
+ </image>
+
+ <p>Log levels are expressed as atoms. Internally in Logger, the
+ atoms are mapped to integer values, and a log event passes the
+ log level check if the integer value of its log level is less
+ than or equal to the currently configured log level. That is,
+ the check passes if the event is equally or more severe than the
+ configured level. See section <seealso marker="#log_level">Log
+ Level</seealso> for a listing and description of all log
+ levels.</p>
+ <p>The primary log level can be overridden by a log level
+ configured per module. This allows, for instance, more
+ verbose logging from a specific part of the system.</p>
+ <p>Filter functions can be used for more sophisticated filtering
+ than the log level check provides. A filter function can stop or
+ pass a log event, based on any of the event's contents. It can
+ also modify all parts of the log event. See
+ section <seealso marker="#filters">Filters</seealso> for more
+ details.</p>
+ <p>If a log event passes through all primary filters and all
+ handler filters for a specific handler, Logger forwards the
+ event to the <em>handler callback</em>. The handler formats and
+ prints the event to its destination. See
+ section <seealso marker="#handlers">Handlers</seealso> for more
+ details.</p>
+ <p>Everything up to and including the call to the handler
+ callbacks is executed on the client process, that is, the
+ process where the log event was issued. It is up to the handler
+ implementation if other processes are involved or not.</p>
+ <p>The handlers are called in sequence, and the order is not
+ defined.</p>
+ </section>
+ <section>
+ <marker id="logger_api"/>
+ <title>Logger API</title>
+ <p>The API for logging consists of a set
+ of <seealso marker="logger#macros">macros</seealso>, and a set
+ of functions of the form <c>logger:Level/1,2,3</c>, which are
+ all shortcuts
+ for <seealso marker="logger#log-2">
+ <c>logger:log(Level,Arg1[,Arg2[,Arg3]])</c></seealso>.</p>
+ <p>The difference between using the macros and the exported
+ functions is that macros add location (originator) information
+ to the metadata, and perform lazy evaluation by wrapping the
+ logger call in a case statement, so it is only evaluated if the
+ log level of the event passes the primary log level check.</p>
+ <section>
+ <marker id="log_level"/>
+ <title>Log Level</title>
+ <p>The log level indicates the severity of an event.
In + accordance with the Syslog protocol, + <url href="https://www.ietf.org/rfc/rfc5424.txt">RFC + 5424</url>, eight log levels can be specified. The following + table lists all possible log levels by name (atom), integer + value, and description:</p> + + <table align="left"> + <row> + <cell><strong>Level</strong></cell> + <cell align="center"><strong>Integer</strong></cell> + <cell><strong>Description</strong></cell> + </row> + <row> + <cell>emergency</cell> + <cell align="center">0</cell> + <cell>system is unusable</cell> + </row> + <row> + <cell>alert</cell> + <cell align="center">1</cell> + <cell>action must be taken immediately</cell> + </row> + <row> + <cell>critical</cell> + <cell align="center">2</cell> + <cell>critical conditions</cell> + </row> + <row> + <cell>error</cell> + <cell align="center">3</cell> + <cell>error conditions</cell> + </row> + <row> + <cell>warning</cell> + <cell align="center">4</cell> + <cell>warning conditions</cell> + </row> + <row> + <cell>notice</cell> + <cell align="center">5</cell> + <cell>normal but significant conditions</cell> + </row> + <row> + <cell>info</cell> + <cell align="center">6</cell> + <cell>informational messages</cell> + </row> + <row> + <cell>debug</cell> + <cell align="center">7</cell> + <cell>debug-level messages</cell> + </row> + <tcaption>Log Levels</tcaption> + </table> + <p>Notice that the integer value is only used internally in + Logger. In the API, you must always use the atom. To compare + the severity of two log levels, + use <seealso marker="logger#compare_levels-2"> + <c>logger:compare_levels/2</c></seealso>.</p> + </section> + <section> + <marker id="log_message"/> + <title>Log Message</title> + <p>The log message contains the information to be logged. The + message can consist of a format string and arguments (given as + two separate parameters in the Logger API), a string or a + report. The latter, which is either a map or a key-value list, + can be accompanied by a <em>report callback</em> specified in + the log event's <seealso marker="#metadata">metadata</seealso>. + The report callback is a convenience function that + the <seealso marker="#formatters">formatter</seealso> can use + to convert the report to a format string and arguments, or + directly to a string. The + formatter can also use its own conversion function, if no + callback is provided, or if a customized formatting is + desired.</p> + <p>The report callback must be a fun with one or two + arguments. If it takes one argument, this is the report + itself, and the fun returns a format string and arguments:</p> + <pre>fun((<seealso marker="logger#type-report"><c>logger:report()</c></seealso>) -> {<seealso marker="stdlib:io#type-format"><c>io:format()</c></seealso>,[term()]})</pre> + <p>If it takes two arguments, the first is the report, and the + second is a map containing extra data that allows direct + coversion to a string:</p> + <pre>fun((<seealso marker="logger#type-report"><c>logger:report()</c></seealso>,<seealso marker="logger#type-report_cb_config"><c>logger:report_cb_config()</c></seealso>) -> <seealso marker="stdlib:unicode#type-chardata"><c>unicode:chardata()</c></seealso>) + </pre> + <p>The fun must obey the <c>depth</c> and <c>chars_limit</c> + parameters provided in the second argument, as the formatter can + not do anything useful of these parameters with the returned + string. The extra data also contains a field named + <c>single_line</c>, indicating if the printed log message may + contain line breaks or not. 
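A two-argument report callback could, as a sketch, look like the following. The function name is hypothetical, and the sketch assumes that the depth and chars_limit values are positive integers or the atom unlimited, as in logger_formatter:

<code type="erl">
my_report_cb(Report, #{depth := Depth, chars_limit := Limit, single_line := Single}) ->
    %% Field width 0 suppresses line breaks when a single line is requested.
    {Format, Args} =
        case {Single, Depth} of
            {true,  unlimited} -> {"~0tp", [Report]};
            {true,  _}         -> {"~0tP", [Report, Depth]};
            {false, unlimited} -> {"~tp",  [Report]};
            {false, _}         -> {"~tP",  [Report, Depth]}
        end,
    Opts = case Limit of unlimited -> []; _ -> [{chars_limit, Limit}] end,
    io_lib:format(Format, Args, Opts).
</code>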
This variant is used when the + formatting of the report depends on the size or single line + parameters.</p> + <p>Example, format string and arguments:</p> + <code>logger:error("The file does not exist: ~ts",[Filename])</code> + <p>Example, string:</p> + <code>logger:notice("Something strange happened!")</code> + <p>Example, report, and metadata with report callback:</p> + <code> +logger:debug(#{got => connection_request, id => Id, state => State}, + #{report_cb => fun(R) -> {"~p",[R]} end})</code> + <p>The log message can also be provided through a fun for lazy + evaluation. The fun is only evaluated if the primary log level + check passes, and is therefore recommended if it is expensive + to generate the message. The lazy fun must return a string, a + report, or a tuple with format string and arguments.</p> + </section> + <section> + <title>Metadata</title> + <p>Metadata contains additional data associated with a log + message. Logger inserts some metadata fields by default, and + the client can add custom metadata in two different ways:</p> + <taglist> + <tag>Set process metadata</tag> + <item> + <p>Process metadata is set and updated + with <seealso marker="logger#set_process_metadata-1"> + <c>logger:set_process_metadata/1</c></seealso> + and <seealso marker="logger#update_process_metadata-1"> + <c>logger:update_process_metadata/1</c></seealso>, + respectively. This metadata applies to the process on + which these calls are made, and Logger adds the metadata + to all log events issued on that process.</p> + </item> + <tag>Add metadata to a specific log event</tag> + <item> + <p>Metadata associated with one specific log event is given + as the last parameter to the log macro or Logger API + function when the event is issued. For example:</p> + <code>?LOG_ERROR("Connection closed",#{context => server})</code> + </item> + </taglist> + <p>See the description of + the <seealso marker="logger#type-metadata"> + <c>logger:metadata()</c></seealso> type for information + about which default keys Logger inserts, and how the different + metadata maps are merged.</p> + </section> + </section> + <section> + <marker id="filter"/> + <title>Filters</title> + <p>Filters can be primary, or attached to a specific + handler. Logger calls the primary filters first, and if they all + pass, it calls the handler filters for each handler. Logger + calls the handler callback only if all filters attached to the + handler in question also pass.</p> + <p>A filter is defined as:</p> + <pre>{FilterFun, Extra}</pre> + <p>where <c>FilterFun</c> is a function of arity 2, + and <c>Extra</c> is any term. When applying the filter, Logger + calls the function with the log event as the first argument, + and the value of <c>Extra</c> as the second + argument. See <seealso marker="logger#type-filter"> + <c>logger:filter()</c></seealso> for type definitions.</p> + <p>The filter function can return <c>stop</c>, <c>ignore</c> or + the (possibly modified) log event.</p> + <p>If <c>stop</c> is returned, the log event is immediately + discarded. If the filter is primary, no handler filters or + callbacks are called. If it is a handler filter, the + corresponding handler callback is not called, but the log event + is forwarded to filters attached to the next handler, if + any.</p> + <p>If the log event is returned, the next filter function is + called with the returned value as the first argument. That is, + if a filter function modifies the log event, the next filter + function receives the modified event. 
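To illustrate, a primary filter that stops debug events from one particular module and ignores all other events could be written and installed as follows; the module name my_noisy_module and the filter id are hypothetical:

<code type="erl">
%% Stop debug events from a hypothetical module, ignore everything else.
Fun = fun(#{level := debug, meta := #{mfa := {my_noisy_module, _, _}}}, _Extra) ->
              stop;
         (_LogEvent, _Extra) ->
              ignore
      end,
ok = logger:add_primary_filter(stop_noisy_debug, {Fun, []}).
</code>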
The value returned from + the last filter function is the value that the handler callback + receives.</p> + <p>If the filter function returns <c>ignore</c>, it means that it + did not recognize the log event, and thus leaves to other + filters to decide the event's destiny.</p> + <p>The configuration option <c>filter_default</c> specifies the + behaviour if all filter functions return <c>ignore</c>, or if no + filters exist. <c>filter_default</c> is by default set + to <c>log</c>, meaning that if all existing filters ignore a log + event, Logger forwards the event to the handler + callback. If <c>filter_default</c> is set to <c>stop</c>, Logger + discards such events.</p> + <p>Primary filters are added + with <seealso marker="logger#add_primary_filter-2"> + <c>logger:add_primary_filter/2</c></seealso> + and removed + with <seealso marker="logger#remove_primary_filter-1"> + <c>logger:remove_primary_filter/1</c></seealso>. They can also + be added at system start via the Kernel configuration + parameter <seealso marker="#logger_parameter"><c>logger</c></seealso>.</p> + <p>Handler filters are added + with <seealso marker="logger#add_handler_filter-3"> + <c>logger:add_handler_filter/3</c></seealso> + and removed + with <seealso marker="logger#remove_handler_filter-2"> + <c>logger:remove_handler_filter/2</c></seealso>. They can also + be specified directly in the configuration when adding a handler + with <seealso marker="logger#add_handler/3"> + <c>logger:add_handler/3</c></seealso> + or via the Kernel configuration + parameter <seealso marker="#logger_parameter"><c>logger</c></seealso>.</p> + + <p>To see which filters are currently installed in the system, + use <seealso marker="logger#get_config-0"> + <c>logger:get_config/0</c></seealso>, + or <seealso marker="logger#get_primary_config-0"> + <c>logger:get_primary_config/0</c></seealso> + and <seealso marker="logger#get_handler_config-1"> + <c>logger:get_handler_config/1</c></seealso>. Filters are + listed in the order they are applied, that is, the first + filter in the list is applied first, and so on.</p> + + <p>For convenience, the following built-in filters exist:</p> + + <taglist> + <tag><seealso marker="logger_filters#domain-2"> + <c>logger_filters:domain/2</c></seealso></tag> + <item> + <p>Provides a way of filtering log events based on a + <c>domain</c> field in <c>Metadata</c>.</p> + </item> + <tag><seealso marker="logger_filters#level-2"> + <c>logger_filters:level/2</c></seealso></tag> + <item> + <p>Provides a way of filtering log events based on the log + level.</p> + </item> + <tag><seealso marker="logger_filters#progress-2"> + <c>logger_filters:progress/2</c></seealso></tag> + <item> + <p>Stops or allows progress reports from <c>supervisor</c> + and <c>application_controller</c>.</p> + </item> + <tag><seealso marker="logger_filters#remote_gl-2"> + <c>logger_filters:remote_gl/2</c></seealso></tag> + <item> + <p>Stops or allows log events originating from a process + that has its group leader on a remote node.</p> + </item> + </taglist> + </section> + + <section> + <marker id="handlers"/> + <title>Handlers</title> + <p>A handler is defined as a module exporting at least the + following callback function:</p> + + <pre><seealso marker="logger#HModule:log-2">log(LogEvent, Config) -> void()</seealso></pre> + + <p>This function is called when a log event has passed through all + primary filters, and all handler filters attached to the handler + in question. 
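A minimal handler module fulfilling this contract could, as a sketch, look like the following; the module name is hypothetical:

<code type="erl">
-module(my_console_h).
-export([log/2]).

%% Mandatory callback: format the event with the configured formatter and
%% print it to standard output on the client process.
log(LogEvent, #{formatter := {FModule, FConfig}}) ->
    io:put_chars(FModule:format(LogEvent, FConfig)).
</code>

It could then be installed with logger:add_handler(my_console, my_console_h, #{}); if no formatter is specified, Logger uses logger_formatter as default, as described in section Formatters below.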
The function call is executed on the client + process, and it is up to the handler implementation if other + processes are involved or not.</p> + + <p>Logger allows adding multiple instances of a handler + callback. That is, if a callback module implementation allows + it, you can add multiple handler instances using the same + callback module. The different instances are identified by + unique handler identities.</p> + + <p>In addition to the mandatory callback function <c>log/2</c>, a + handler module can export the optional callback + functions <c>adding_handler/1</c>, <c>changing_config/3</c>, + <c>filter_config/1</c>, and <c>removing_handler/1</c>. See + section <seealso marker="logger#handler_callback_functions">Handler + Callback Functions</seealso> in the logger(3) manual page for + more information about these function.</p> + + <p>The following built-in handlers exist:</p> + + <taglist> + <tag><c>logger_std_h</c></tag> + <item> + <p>This is the default handler used by OTP. Multiple instances + can be started, and each instance will write log events to a + given destination, terminal or file.</p> + </item> + + <tag><c>logger_disk_log_h</c></tag> + <item> + <p>This handler behaves much like <c>logger_std_h</c>, except it uses + <seealso marker="disk_log"><c>disk_log</c></seealso> as its + destination.</p> + </item> + + <tag><marker id="ErrorLoggerManager"/><c>error_logger</c></tag> + <item> + <p>This handler is provided for backwards compatibility + only. It is not started by default, but will be + automatically started the first time an <c>error_logger</c> + event handler is added + with <seealso marker="error_logger#add_report_handler-1"> + <c>error_logger:add_report_handler/1,2</c></seealso>.</p> + + <p>The old <c>error_logger</c> event handlers in STDLIB and + SASL still exist, but they are not added by Erlang/OTP 21.0 + or later.</p> + </item> + </taglist> + </section> + + <section> + <marker id="formatters"/> + <title>Formatters</title> + <p>A formatter can be used by the handler implementation to do the + final formatting of a log event, before printing to the + handler's destination. The handler callback receives the + formatter information as part of the handler configuration, + which is passed as the second argument + to <seealso marker="logger#HModule:log-2"> + <c>HModule:log/2</c></seealso>.</p> + <p>The formatter information consist of a formatter + module, <c>FModule</c> and its + configuration, <c>FConfig</c>. <c>FModule</c> must export the + following function, which can be called by the handler:</p> + <pre><seealso marker="logger#FModule:format-2">format(LogEvent,FConfig) + -> FormattedLogEntry</seealso></pre> + <p>The formatter information for a handler is set as a part of its + configuration when the handler is added. 
It can also be changed + during runtime + with <seealso marker="logger#set_handler_config-3"> + <c>logger:set_handler_config(HandlerId,formatter,{FModule,FConfig})</c> + </seealso>, which overwrites the current formatter information, + or with <seealso marker="logger#update_formatter_config-2"> + <c>logger:update_formatter_config/2,3</c></seealso>, which + only modifies the formatter configuration.</p> + <p>If the formatter module exports the optional callback + function <seealso marker="logger#FModule:check_config-1"> + <c>check_config(FConfig)</c></seealso>, Logger calls this + function when the formatter information is set or modified, to + verify the validity of the formatter configuration.</p> + <p>If no formatter information is specified for a handler, Logger + uses <c>logger_formatter</c> as default. See + the <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso> + manual page for more information about this module.</p> + </section> + + <section> + <title>Configuration</title> + + <p>At system start, Logger is configured through Kernel + configuration parameters. The parameters that apply to Logger + are described in + section <seealso marker="#kernel_config_params">Kernel + Configuration Parameters</seealso>. Examples are found in + section <seealso marker="#config_examples">Configuration + Examples</seealso>.</p> + <p>During runtime, Logger configuration is changed via API + functions. See + section <seealso marker="logger#configuration_API">Configuration + API Functions</seealso> in the <c>logger(3)</c> manual page.</p> + + <section> + <title>Primary Logger Configuration</title> + <p>Logger API functions that apply to the primary Logger + configuration are:</p> + <list> + <item><seealso marker="logger#get_primary_config-0"> + <c>get_primary_config/0</c></seealso></item> + <item><seealso marker="logger#set_primary_config-1"> + <c>set_primary_config/1,2</c></seealso></item> + <item><seealso marker="logger#update_primary_config-1"> + <c>update_primary_config/1</c></seealso></item> + <item><seealso marker="logger#add_primary_filter-2"> + <c>add_primary_filter/2</c></seealso></item> + <item><seealso marker="logger#remove_primary_filter-1"> + <c>remove_primary_filter/1</c></seealso></item> + </list> + <p>The primary Logger configuration is a map with the following + keys:</p> + <taglist> + <tag><marker id="primary_level"/> + <c>level = </c><seealso marker="logger#type-level"> + <c>logger:level()</c></seealso><c> | all | none</c></tag> + <item> + <p>Specifies the primary log level, that is, log event that + are equally or more severe than this level, are forwarded + to the primary filters. Less severe log events are + immediately discarded.</p> + <p>See section <seealso marker="#log_level">Log + Level</seealso> for a listing and description of + possible log levels.</p> + <p>The initial value of this option is set by the Kernel + configuration parameter <seealso marker="#logger_level"> + <c>logger_level</c></seealso>. 
It is changed during + runtime with <seealso marker="logger#set_primary_config-2"> + <c>logger:set_primary_config(level,Level)</c></seealso>.</p> + <p>Defaults to <c>notice</c>.</p> + </item> + <tag><c>filters = [{FilterId,Filter}]</c></tag> + <item> + <p>Specifies the primary filters.</p> + <list> + <item><c>FilterId = </c><seealso marker="logger#type-filter_id"> + <c>logger:filter_id()</c></seealso></item> + <item><c>Filter = </c><seealso marker="logger#type-filter"> + <c>logger:filter()</c></seealso></item> + </list> + <p>The initial value of this option is set by the Kernel + configuration + parameter <seealso marker="#logger_parameter"><c>logger</c></seealso>. + During runtime, primary filters are added and removed with + <seealso marker="logger#add_primary_filter-2"> + <c>logger:add_primary_filter/2</c></seealso> and + <seealso marker="logger#remove_primary_filter-1"> + <c>logger:remove_primary_filter/1</c></seealso>, + respectively.</p> + <p>See section <seealso marker="#filters">Filters</seealso> + for more detailed information.</p> + <p>Defaults to <c>[]</c>.</p> + </item> + <tag><c>filter_default = log | stop</c></tag> + <item> + <p>Specifies what happens to a log event if all filters + return <c>ignore</c>, or if no filters exist.</p> + <p>See section <seealso marker="#filters">Filters</seealso> + for more information about how this option is used.</p> + <p>Defaults to <c>log</c>.</p> + </item> + </taglist> + </section> + + <section> + <marker id="handler_configuration"/> + <title>Handler Configuration</title> + <p>Logger API functions that apply to handler configuration + are:</p> + <list> + <item><seealso marker="logger#get_handler_config-0"> + <c>get_handler_config/0,1</c></seealso></item> + <item><seealso marker="logger#set_handler_config-2"> + <c>set_handler_config/2,3</c></seealso></item> + <item><seealso marker="logger#update_handler_config-2"> + <c>update_handler_config/2,3</c></seealso></item> + <item><seealso marker="logger#add_handler_filter-3"> + <c>add_handler_filter/3</c></seealso></item> + <item><seealso marker="logger#remove_handler_filter-2"> + <c>remove_handler_filter/2</c></seealso></item> + <item><seealso marker="logger#update_formatter_config-2"> + <c>update_formatter_config/2,3</c></seealso></item> + </list> + <p>The configuration for a handler is a map with the following keys:</p> + <taglist> + <tag><c>id = </c><seealso marker="logger#type-handler_id"> + <c>logger:handler_id()</c></seealso></tag> + <item> + <p>Automatically inserted by Logger. The value is the same + as the <c>HandlerId</c> specified when adding the handler, + and it cannot be changed.</p> + </item> + <tag><c>module = module()</c></tag> + <item> + <p>Automatically inserted by Logger. The value is the same + as the <c>Module</c> specified when adding the handler, + and it cannot be changed.</p> + </item> + <tag><c>level = </c><seealso marker="logger#type-level"> + <c>logger:level()</c></seealso><c> | all | none</c></tag> + <item> + <p>Specifies the log level for the handler, that is, log + events that are equally or more severe than this level, + are forwarded to the handler filters for this + handler.</p> + <p>See section <seealso marker="#log_level">Log + Level</seealso> for a listing and description of + possible log levels.</p> + <p>The log level is specified when adding the handler, or + changed during runtime with, for + instance, <seealso marker="logger#set_handler_config/3"> + <c>logger:set_handler_config(HandlerId,level,Level)</c></seealso>. 
+ </p> + <p>Defaults to <c>all</c>.</p> + </item> + <tag><c>filters = [{FilterId,Filter}]</c></tag> + <item> + <p>Specifies the handler filters.</p> + <list> + <item><c>FilterId = </c><seealso marker="logger#type-filter_id"> + <c>logger:filter_id()</c></seealso></item> + <item><c>Filter = </c><seealso marker="logger#type-filter"> + <c>logger:filter()</c></seealso></item> + </list> + <p>Handler filters are specified when adding the handler, + or added or removed during runtime with + <seealso marker="logger#add_handler_filter-3"> + <c>logger:add_handler_filter/3</c></seealso> and + <seealso marker="logger#remove_handler_filter-2"> + <c>logger:remove_handler_filter/2</c></seealso>, + respectively.</p> + <p>See <seealso marker="#filters">Filters</seealso> for more + detailed information.</p> + <p>Defaults to <c>[]</c>.</p> + </item> + <tag><c>filter_default = log | stop</c></tag> + <item> + <p>Specifies what happens to a log event if all filters + return <c>ignore</c>, or if no filters exist.</p> + <p>See section <seealso marker="#filters">Filters</seealso> + for more information about how this option is used.</p> + <p>Defaults to <c>log</c>.</p> + </item> + <tag><c>formatter = {FormatterModule,FormatterConfig}</c></tag> + <item> + <p>Specifies a formatter that the handler can use for + converting the log event term to a printable string.</p> + <list> + <item><c>FormatterModule = module()</c></item> + <item><c>FormatterConfig = </c> + <seealso marker="logger#type-formatter_config"> + <c>logger:formatter_config()</c></seealso></item> + </list> + <p>The formatter information is specified when adding the + handler. The formatter configuration can be changed during + runtime + with <seealso marker="logger#update_formatter_config-2"> + <c>logger:update_formatter_config/2,3</c></seealso>, + or the complete formatter information can be overwritten + with, for + instance, <seealso marker="logger#set_handler_config-3"> + <c>logger:set_handler_config/3</c></seealso>.</p> + <p>See + section <seealso marker="#formatters">Formatters</seealso> + for more detailed information.</p> + <p>Defaults + to <c>{logger_formatter,DefaultFormatterConfig}</c>. 
See + the <seealso marker="logger_formatter"> + <c>logger_formatter(3)</c></seealso> manual page for + information about this formatter and its default + configuration.</p> + </item> + <tag><c>config = term()</c></tag> + <item> + <p>Handler specific configuration, that is, configuration + data related to a specific handler implementation.</p> + <p>The configuration for the built-in handlers is described + in + the <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso> + and + <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c> + </seealso> manual pages.</p> + </item> + </taglist> + + <p>Notice that <c>level</c> and <c>filters</c> are obeyed by + Logger itself before forwarding the log events to each + handler, while <c>formatter</c> and all handler specific + options are left to the handler implementation.</p> + </section> + + <section> + <marker id="kernel_config_params"/> + <title>Kernel Configuration Parameters</title> + + <p>The following Kernel configuration parameters apply to + Logger:</p> + <taglist> + <tag><marker id="logger_parameter"/><c>logger = [Config]</c></tag> + <item> + <p>Specifies the configuration + for <seealso marker="logger">Logger</seealso>, except the + primary log level, which is specified + with <seealso marker="#logger_level"><c>logger_level</c></seealso>, + and the compatibility + with <seealso marker="sasl:error_logging">SASL Error + Logging</seealso>, which is specified + with <seealso marker="#logger_sasl_compatible"> + <c>logger_sasl_compatible</c></seealso>.</p> + <p>With this parameter, you can modify or disable the default + handler, add custom handlers and primary logger filters, and + set log levels per module.</p> + <p><c>Config</c> is any (zero or more) of the following:</p> + <taglist> + <tag><c>{handler, default, undefined}</c></tag> + <item> + <p>Disables the default handler. This allows another + application to add its own default handler.</p> + <p>Only one entry of this type is allowed.</p> + </item> + <tag><c>{handler, HandlerId, Module, HandlerConfig}</c></tag> + <item> + <p>If <c>HandlerId</c> is <c>default</c>, then this entry + modifies the default handler, equivalent to calling</p> + <pre><seealso marker="logger#remove_handler-1"> + logger:remove_handler(default) + </seealso></pre> + <p>followed by</p> + <pre><seealso marker="logger#add_handler-3"> + logger:add_handler(default, Module, HandlerConfig) + </seealso></pre> + <p>For all other values of <c>HandlerId</c>, this entry + adds a new handler, equivalent to calling</p> + <pre><seealso marker="logger:add_handler/3"> + logger:add_handler(HandlerId, Module, HandlerConfig) + </seealso></pre> + <p>Multiple entries of this type are allowed.</p></item> + <tag><c>{filters, FilterDefault, [Filter]}</c></tag> + <item> + <p>Adds the specified primary filters.</p> + <list> + <item><c>FilterDefault = log | stop</c></item> + <item><c>Filter = {FilterId, {FilterFun, FilterConfig}}</c></item> + </list> + <p>Equivalent to calling</p> + <pre><seealso marker="logger#add_primary_filter/2"> + logger:add_primary_filter(FilterId, {FilterFun, FilterConfig}) + </seealso></pre> + <p>for each <c>Filter</c>.</p> + <p><c>FilterDefault</c> specifies the behaviour if all + primary filters return <c>ignore</c>, see + section <seealso marker="#filters">Filters</seealso>.</p> + <p>Only one entry of this type is allowed.</p> + </item> + <tag><c>{module_level, Level, [Module]}</c></tag> + <item> + <p>Sets module log level for the given modules. 
Equivalent + to calling</p> + <pre><seealso marker="logger#set_module_level/2"> + logger:set_module_level(Module, Level)</seealso></pre> + <p>for each <c>Module</c>.</p> + <p>Multiple entries of this type are allowed.</p> + </item> + </taglist> + <p>See + section <seealso marker="#config_examples">Configuration + Examples</seealso> for examples using the <c>logger</c> + parameter for system configuration.</p> + </item> + <tag><marker id="logger_level"/> + <c>logger_level = Level</c></tag> + <item> + <p>Specifies the primary log level. See + the <seealso marker="kernel_app#logger_level"><c>kernel(6)</c></seealso> + manual page for more information about this parameter.</p> + </item> + <tag><marker id="logger_sasl_compatible"/> + <c>logger_sasl_compatible = true | false</c></tag> + <item> + <p>Specifies Logger's compatibility + with <seealso marker="sasl:error_logging">SASL Error + Logging</seealso>. See + the <seealso marker="kernel_app#logger_sasl_compatible"> + <c>kernel(6)</c></seealso> manual page for more + information about this parameter.</p> + </item> + </taglist> + </section> + + <section> + <marker id="config_examples"/> + <title>Configuration Examples</title> + <p>The value of the Kernel configuration parameter <c>logger</c> + is a list of tuples. It is possible to write the term on the + command line when starting an erlang node, but as the term + grows, a better approach is to use the system configuration + file. See + the <seealso marker="config"><c>config(4)</c></seealso> manual + page for more information about this file.</p> + <p>Each of the following examples shows a simple system + configuration file that configures Logger according to the + description.</p> + <p>Modify the default handler to print to a file instead of + <c>standard_io</c>:</p> + <code> +[{kernel, + [{logger, + [{handler, default, logger_std_h, % {handler, HandlerId, Module, + #{config => #{type => {file,"log/erlang.log"}}}} % Config} + ]}]}]. + </code> + <p>Modify the default handler to print each log event as a + single line:</p> + <code> +[{kernel, + [{logger, + [{handler, default, logger_std_h, + #{formatter => {logger_formatter, #{single_line => true}}}} + ]}]}]. + </code> + <p>Modify the default handler to print the pid of the logging + process for each log event:</p> + <code> +[{kernel, + [{logger, + [{handler, default, logger_std_h, + #{formatter => {logger_formatter, + #{template => [time," ",pid," ",msg,"\n"]}}}} + ]}]}]. + </code> + <p>Modify the default handler to only print errors and more + severe log events to "log/erlang.log", and add another handler + to print all log events to "log/debug.log".</p> + <code> +[{kernel, + [{logger, + [{handler, default, logger_std_h, + #{level => error, + config => #{type => {file, "log/erlang.log"}}}}, + {handler, info, logger_std_h, + #{level => debug, + config => #{type => {file, "log/debug.log"}}}} + ]}]}]. + </code> + </section> + + </section> + + <section> + <marker id="compatibility"/> + <title>Backwards Compatibility with error_logger</title> + <p>Logger provides backwards compatibility with + <c>error_logger</c> in the following ways:</p> + + <taglist> + <tag>API for Logging</tag> + <item> + <p>The <c>error_logger</c> API still exists, but should only + be used by legacy code. 
It will be removed in a later
+        release.</p>
+      <p>Calls
+        to <seealso marker="error_logger#error_report-1">
+        <c>error_logger:error_report/1,2</c></seealso>,
+        <seealso marker="error_logger#error_msg-1">
+        <c>error_logger:error_msg/1,2</c></seealso>, and
+        corresponding functions for warning and info messages, are
+        all forwarded to Logger as calls
+        to <seealso marker="logger#log-3">
+        <c>logger:log(Level,Report,Metadata)</c></seealso>.</p>
+      <p><c>Level = error | warning | info</c>, and is taken
+        from the function name. <c>Report</c> contains the actual
+        log message, and <c>Metadata</c> contains additional
+        information which can be used for creating backwards
+        compatible events for legacy <c>error_logger</c> event
+        handlers, see
+        section <seealso marker="#legacy_event_handlers">Legacy
+        Event Handlers</seealso>.</p>
+    </item>
+    <tag>Output Format</tag>
+    <item>
+      <p>To get log events in the same format as produced
+        by <c>error_logger_tty_h</c> and <c>error_logger_file_h</c>,
+        use the default formatter, <c>logger_formatter</c>, with
+        configuration parameter <c>legacy_header</c> set
+        to <c>true</c>. This is the default configuration of
+        the <c>default</c> handler started by Kernel.</p>
+    </item>
+    <tag>Default Format of Log Events from OTP</tag>
+    <item>
+      <p>By default, all log events originating from within OTP,
+        except the former so-called "SASL reports", look the same as
+        before.</p>
+    </item>
+    <tag><marker id="sasl_reports"/>SASL Reports</tag>
+    <item>
+      <p>By SASL reports we mean supervisor reports, crash reports,
+        and progress reports.</p>
+      <p>Prior to Erlang/OTP 21.0, these reports were only logged
+        when the SASL application was running, and they were printed
+        through SASL's own event handlers <c>sasl_report_tty_h</c>
+        and <c>sasl_report_file_h</c>.</p>
+      <p>The destination of these log events was configured by
+        <seealso marker="sasl:sasl_app#deprecated_error_logger_config">SASL
+        configuration parameters</seealso>.</p>
+      <p>Because of these specific event handlers, the output format
+        differed slightly from that of other log events.</p>
+      <p>As of Erlang/OTP 21.0, the concept of SASL reports is
+        removed, meaning that the default behaviour is as
+        follows:</p>
+      <list>
+        <item>Supervisor reports, crash reports, and progress reports
+          are no longer connected to the SASL application.</item>
+        <item>Supervisor reports and crash reports are issued
+          as <c>error</c> level log events, and are logged through
+          the default handler started by Kernel.</item>
+        <item>Progress reports are issued as <c>info</c> level log
+          events, and since the default primary log level
+          is <c>notice</c>, these are not logged by default. To
+          enable printing of progress reports, set
+          the <seealso marker="#primary_level">primary log
+          level</seealso> to <c>info</c>.</item>
+        <item>The output format is the same for all log
+          events.</item>
+      </list>
+      <p>If the old behaviour is preferred, the Kernel configuration
+        parameter <seealso marker="kernel_app#logger_sasl_compatible">
+        <c>logger_sasl_compatible</c></seealso> can be set
+        to <c>true</c>. The
+        <seealso marker="sasl:sasl_app#deprecated_error_logger_config">SASL
+        configuration parameters</seealso> can then be used as
+        before, and the SASL reports are only printed if the
+        SASL application is running, through a second log handler
+        named <c>sasl</c>.</p>
+      <p>All SASL reports have a metadata field <c>domain</c> which
+        is set to <c>[otp,sasl]</c>.
This field can be + used by filters to stop or allow the log events.</p> + <p>See section <seealso marker="sasl:error_logging">SASL User's + Guide</seealso> for more information about the old SASL + error logging functionality.</p> + </item> + <tag><marker id="legacy_event_handlers"/>Legacy Event Handlers</tag> + <item> + <p>To use event handlers written for <c>error_logger</c>, just + add your event handler with</p> + <code> +error_logger:add_report_handler/1,2. + </code> + <p>This automatically starts the error logger event manager, + and adds <c>error_logger</c> as a handler to Logger, with + the following configuration:</p> +<code> +#{level => info, + filter_default => log, + filters => []}. +</code> + <note> + <p>This handler ignores events that do not originate from + the <c>error_logger</c> API, or from within OTP. This + means that if your code uses the Logger API for logging, + then your log events will be discarded by this + handler.</p> + <p>The handler is not overload protected.</p> + </note> + </item> + </taglist> + </section> + + + <section> + <title>Error Handling</title> + <p>Logger does, to a certain extent, check its input data before + forwarding a log event to filters and handlers. It does, + however, not evaluate report callbacks, or check the validity of + format strings and arguments. This means that all filters and + handlers must be careful when formatting the data of a log + event, making sure that it does not crash due to bad input data + or faulty callbacks.</p> + <p>If a filter or handler still crashes, Logger will remove the + filter or handler in question from the configuration, and print + a short error message to the terminal. A debug event containing + the crash reason and other details is also issued.</p> + <p>See section <seealso marker="#log_message">Log + Message</seealso> for more information about report callbacks + and valid forms of log messages.</p> + </section> + + <section> + <title>Example: Add a handler to log info events to file</title> + <p>When starting an Erlang node, the default behaviour is that all + log events on level <c>notice</c> or more severe, are logged to + the terminal via the default handler. To also log info events, + you can either change the primary log level to <c>info</c>:</p> + <pre> +1> <input>logger:set_primary_config(level, info).</input> +ok</pre> + <p>or set the level for one or a few modules only:</p> + <pre> +2> <input>logger:set_module_level(mymodule, info).</input> +ok</pre> + <p>This allows info events to pass through to the default handler, + and be printed to the terminal as well. If there are many info + events, it can be useful to print these to a file instead.</p> + <p>First, set the log level of the default handler + to <c>notice</c>, preventing it from printing info events to the + terminal:</p> + <pre> +3> <input>logger:set_handler_config(default, level, notice).</input> +ok</pre> + <p>Then, add a new handler which prints to file. You can use the + handler + module <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>, + and specify type <c>{file,File}</c>.:</p> + <pre> +4> <input>Config = #{config => #{type => {file,"./info.log"}}, level => info}.</input> +#{config => #{type => {file,"./info.log"}},level => info} +5> <input>logger:add_handler(myhandler, logger_std_h, Config).</input> +ok</pre> + <p>Since <c>filter_default</c> defaults to <c>log</c>, this + handler now receives all log events. If you want info events + only in the file, you must add a filter to stop all non-info + events. 
The built-in + filter <seealso marker="logger_filters#level-2"> + <c>logger_filters:level/2</c></seealso> + can do this:</p> + <pre> +6> <input>logger:add_handler_filter(myhandler, stop_non_info, + {fun logger_filters:level/2, {stop, neq, info}}).</input> +ok</pre> + <p>See section <seealso marker="#filters">Filters</seealso> for + more information about the filters and the <c>filter_default</c> + configuration parameter.</p> + + </section> + + <section> + <title>Example: Implement a handler</title> + <p>Section <seealso marker="logger#handler_callback_functions">Handler + Callback Functions</seealso> in the logger(3) manual page + describes the callback functions that can be implemented for a + Logger handler.</p> + <p>A handler callback module must export:</p> + <list> + <item><c>log(Log, Config)</c></item> + </list> + <p>It can optionally also export some, or all, of the following:</p> + <list> + <item><c>adding_handler(Config)</c></item> + <item><c>removing_handler(Config)</c></item> + <item><c>changing_config(SetOrUpdate, OldConfig, NewConfig)</c></item> + <item><c>filter_config(Config)</c></item> + </list> + <p>When a handler is added, by for example a call + to <seealso marker="logger#add_handler-3"> + <c>logger:add_handler(Id, HModule, Config)</c></seealso>, + Logger first calls <c>HModule:adding_handler(Config)</c>. If + this function returns <c>{ok,Config1}</c>, Logger + writes <c>Config1</c> to the configuration database, and + the <c>logger:add_handler/3</c> call returns. After this, the + handler is installed and must be ready to receive log events as + calls to <c>HModule:log/2</c>.</p> + <p>A handler can be removed by calling + <seealso marker="logger#remove_handler-1"> + <c>logger:remove_handler(Id)</c></seealso>. Logger calls + <c>HModule:removing_handler(Config)</c>, and removes the + handler's configuration from the configuration database.</p> + <p>When <seealso marker="logger#set_handler_config-2"> + <c>logger:set_handler_config/2,3</c></seealso> + or <seealso marker="logger#update_handler_config/2"> + <c>logger:update_handler_config/2,3</c></seealso> is called, + Logger + calls <c>HModule:changing_config(SetOrUpdate, OldConfig, NewConfig)</c>. If + this function returns <c>{ok,NewConfig1}</c>, Logger + writes <c>NewConfig1</c> to the configuration database.</p> + <p>When <seealso marker="logger#get_config-0"> + <c>logger:get_config/0</c></seealso> or + <seealso marker="logger#get_handler_config-0"> + <c>logger:get_handler_config/0,1</c></seealso> is called, + Logger calls <c>HModule:filter_config(Config)</c>. This function + must return the handler configuration where internal data is + removed.</p> + + <p>A simple handler that prints to the terminal can be implemented + as follows:</p> + <code> +-module(myhandler1). +-export([log/2]). + +log(LogEvent, #{formatter := {FModule, FConfig}}) -> + io:put_chars(FModule:format(LogEvent, FConfig)). 
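+
+%% A minimal usage sketch (the handler id my_term_h is only an example):
+%%
+%%   ok = logger:add_handler(my_term_h, myhandler1, #{}).
+%%
+%% Logger then calls myhandler1:log/2 in the client process for each log
+%% event, with the default formatter present in the handler configuration.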
+ </code> + + <p>Notice that the above handler does not have any overload + protection, and all log events are printed directly from the + client process.</p> + <p>For information and examples of overload protection, please + refer to + section <seealso marker="#overload_protection">Protecting the + Handler from Overload</seealso>, and the implementation + of <seealso marker="logger_std_h"><c>logger_std_h</c></seealso> + and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c> + </seealso>.</p> + <p>The following is a simpler example of a handler which logs to a + file through one single process:</p> + <code> +-module(myhandler2). +-export([adding_handler/1, removing_handler/1, log/2]). +-export([init/1, handle_call/3, handle_cast/2, terminate/2]). + +adding_handler(Config) -> + MyConfig = maps:get(config,Config,#{file => "myhandler2.log"}), + {ok, Pid} = gen_server:start(?MODULE, MyConfig, []), + {ok, Config#{config => MyConfig#{pid => Pid}}}. + +removing_handler(#{config := #{pid := Pid}}) -> + gen_server:stop(Pid). + +log(LogEvent,#{config := #{pid := Pid}} = Config) -> + gen_server:cast(Pid, {log, LogEvent, Config}). + +init(#{file := File}) -> + {ok, Fd} = file:open(File, [append, {encoding, utf8}]), + {ok, #{file => File, fd => Fd}}. + +handle_call(_, _, State) -> + {reply, {error, bad_request}, State}. + +handle_cast({log, LogEvent, Config}, #{fd := Fd} = State) -> + do_log(Fd, LogEvent, Config), + {noreply, State}. + +terminate(_Reason, #{fd := Fd}) -> + _ = file:close(Fd), + ok. + +do_log(Fd, LogEvent, #{formatter := {FModule, FConfig}}) -> + String = FModule:format(LogEvent, FConfig), + io:put_chars(Fd, String). + </code> + </section> + + <section> + <marker id="overload_protection"/> + <title>Protecting the Handler from Overload</title> + <p>The default handlers, <seealso marker="logger_std_h"> + <c>logger_std_h</c></seealso> and <seealso marker="logger_disk_log_h"> + <c>logger_disk_log_h</c></seealso>, feature an overload protection + mechanism, which makes it possible for the handlers to survive, + and stay responsive, during periods of high load (when huge + numbers of incoming log requests must be handled). + The mechanism works as follows:</p> + + <section> + <title>Message Queue Length</title> + <p>The handler process keeps track of the length of its message + queue and takes some form of action when the current length exceeds a + configurable threshold. The purpose is to keep the handler in, or to + as quickly as possible get the handler into, a state where it can + keep up with the pace of incoming log events. The memory use of the + handler must never grow larger and larger, since that will eventually + cause the handler to crash. These three thresholds, with associated + actions, exist:</p> + + <taglist> + <tag><c>sync_mode_qlen</c></tag> + <item> + <p>As long as the length of the message queue is lower than this + value, all log events are handled asynchronously. This means that + the client process sending the log event, by calling a log function + in the <seealso marker="logger_chapter#logger_api">Logger API</seealso>, + does not wait for a response from the handler but continues + executing immediately after the event is sent. It is not affected + by the time it takes the handler to print the event to the log + device. If the message queue grows larger than this value, + the handler starts handling log events synchronously instead, + meaning that the client process sending the event must wait for a + response. 
When the handler reduces the message queue to a + level below the <c>sync_mode_qlen</c> threshold, asynchronous + operation is resumed. The switch from asynchronous to synchronous + mode can slow down the logging tempo of one, or a few, busy senders, + but cannot protect the handler sufficiently in a situation of many + busy concurrent senders.</p> + <p>Defaults to <c>10</c> messages.</p> + </item> + <tag><c>drop_mode_qlen</c></tag> + <item> + <p>When the message queue grows larger than this threshold, the + handler switches to a mode in which it drops all new events that + senders want to log. Dropping an event in this mode means that the + call to the log function never results in a message being sent to + the handler, but the function returns without taking any action. + The handler keeps logging the events that are already in its message + queue, and when the length of the message queue is reduced to a level + below the threshold, synchronous or asynchronous mode is resumed. + Notice that when the handler activates or deactivates drop mode, + information about it is printed in the log.</p> + <p>Defaults to <c>200</c> messages.</p> + </item> + <tag><c>flush_qlen</c></tag> + <item> + <p>If the length of the message queue grows larger than this threshold, + a flush (delete) operation takes place. To flush events, the handler + discards the messages in the message queue by receiving them in a + loop without logging. Client processes waiting for a response from a + synchronous log request receive a reply from the handler indicating + that the request is dropped. The handler process increases its + priority during the flush loop to make sure that no new events + are received during the operation. Notice that after the flush operation + is performed, the handler prints information in the log about how many + events have been deleted.</p> + <p>Defaults to <c>1000</c> messages.</p> + </item> + </taglist> + + <p>For the overload protection algorithm to work properly, it is + required that:</p> + + <p><c>sync_mode_qlen =< drop_mode_qlen =< flush_qlen</c></p> + + <p>and that:</p> + + <p><c>drop_mode_qlen > 1</c></p> + + <p>To disable certain modes, do the following:</p> + <list> + <item>If <c>sync_mode_qlen</c> is set to <c>0</c>, all log events are handled + synchronously. That is, asynchronous logging is disabled.</item> + <item>If <c>sync_mode_qlen</c> is set to the same value as + <c>drop_mode_qlen</c>, synchronous mode is disabled. That is, the handler + always runs in asynchronous mode, unless dropping or flushing is invoked.</item> + <item>If <c>drop_mode_qlen</c> is set to the same value as <c>flush_qlen</c>, + drop mode is disabled and can never occur.</item> + </list> + + <p>During high load scenarios, the length of the handler message queue + rarely grows in a linear and predictable way. Instead, whenever the + handler process is scheduled in, it can have an almost arbitrary number + of messages waiting in the message queue. It is for this reason that the overload + protection mechanism is focused on acting quickly, and quite drastically, + such as immediately dropping or flushing messages, when a large queue length + is detected.</p> + + <p>The values of the previously listed thresholds can be specified by the user. + This way, a handler can be configured to, for example, not drop or flush + messages unless the message queue length of the handler process grows extremely + large. Notice that large amounts of memory can be required for the node under such + circumstances. 
Another example of user configuration is when, for performance + reasons, the client processes must never be blocked by synchronous log requests. + It is possible, perhaps, that dropping or flushing events is still acceptable, since + it does not affect the performance of the client processes sending the log events.</p> + + <p>A configuration example:</p> + <code type="none"> +logger:add_handler(my_standard_h, logger_std_h, + #{config => #{type => {file,"./system_info.log"}, + sync_mode_qlen => 100, + drop_mode_qlen => 1000, + flush_qlen => 2000}}). + </code> + </section> + + <section> + <title>Controlling Bursts of Log Requests</title> + <p>Large bursts of log events - many events received by the handler + under a short period of time - can potentially cause problems, such as:</p> + <list> + <item>Log files grow very large, very quickly.</item> + <item>Circular logs wrap too quickly so that important data is overwritten.</item> + <item>Write buffers grow large, which slows down file sync operations.</item> + </list> + + <p>For this reason, both built-in handlers offer the possibility to specify the + maximum number of events to be handled within a certain time frame. + With this burst control feature enabled, the handler can avoid choking the log with + massive amounts of printouts. The configuration parameters are:</p> + <taglist> + <tag><c>burst_limit_enable</c></tag> + <item> + <p>Value <c>true</c> enables burst control and <c>false</c> disables it.</p> + <p>Defaults to <c>true</c>.</p> + </item> + <tag><c>burst_limit_max_count</c></tag> + <item> + <p>This is the maximum number of events to handle within a + <c>burst_limit_window_time</c> time frame. After the limit is + reached, successive events are dropped until the end of the time frame.</p> + <p>Defaults to <c>500</c> events.</p> + </item> + <tag><c>burst_limit_window_time</c></tag> + <item> + <p>See the previous description of <c>burst_limit_max_count</c>.</p> + <p>Defaults to <c>1000</c> milliseconds.</p> + </item> + </taglist> + + <p>A configuration example:</p> + <code type="none"> +logger:add_handler(my_disk_log_h, logger_disk_log_h, + #{config => #{file => "./my_disk_log", + burst_limit_enable => true, + burst_limit_max_count => 20, + burst_limit_window_time => 500}}). + </code> + </section> + + <section> + <title>Terminating an Overloaded Handler</title> + <p>It is possible that a handler, even if it can successfully manage peaks + of high load without crashing, can build up a large message queue, or use a + large amount of memory. The overload protection mechanism includes an + automatic termination and restart feature for the purpose of guaranteeing + that a handler does not grow out of bounds. The feature is configured + with the following parameters:</p> + <taglist> + <tag><c>overload_kill_enable</c></tag> + <item> + <p>Value <c>true</c> enables the feature and <c>false</c> disables it.</p> + <p>Defaults to <c>false</c>.</p> + </item> + <tag><c>overload_kill_qlen</c></tag> + <item> + <p>This is the maximum allowed queue length. If the message queue grows + larger than this, the handler process is terminated.</p> + <p>Defaults to <c>20000</c> messages.</p> + </item> + <tag><c>overload_kill_mem_size</c></tag> + <item> + <p>This is the maximum memory size that the handler process is allowed to use. 
+ If the handler grows larger than this, the process is terminated.</p> + <p>Defaults to <c>3000000</c> bytes.</p> + </item> + <tag><c>overload_kill_restart_after</c></tag> + <item> + <p>If the handler is terminated, it restarts automatically after a + delay specified in milliseconds. The value <c>infinity</c> prevents + restarts.</p> + <p>Defaults to <c>5000</c> milliseconds.</p> + </item> + </taglist> + <p>If the handler process is terminated because of overload, it prints + information about it in the log. It also prints information about when a + restart has taken place, and the handler is back in action.</p> + <note> + <p>The sizes of the log events affect the memory needs of the handler. + For information about how to limit the size of log events, see the + <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso> + manual page.</p> + </note> + </section> + </section> + + <section> + <title>See Also</title> + <p> + <seealso marker="disk_log"><c>disk_log(3)</c></seealso>, + <seealso marker="error_logger"><c>error_logger(3)</c></seealso>, + <seealso marker="logger"><c>logger(3)</c></seealso>, + <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c></seealso>, + <seealso marker="logger_filters"><c>logger_filters(3)</c></seealso>, + <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>, + <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>, + <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso></p> + </section> +</chapter> diff --git a/lib/kernel/doc/src/logger_disk_log_h.xml b/lib/kernel/doc/src/logger_disk_log_h.xml new file mode 100644 index 0000000000..d9b941a0a9 --- /dev/null +++ b/lib/kernel/doc/src/logger_disk_log_h.xml @@ -0,0 +1,168 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE erlref SYSTEM "erlref.dtd"> + +<erlref> + <header> + <copyright> + <year>2017</year><year>2018</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + </legalnotice> + + <title>logger_disk_log_h</title> + <prepared></prepared> + <responsible></responsible> + <docno></docno> + <approved></approved> + <checked></checked> + <date></date> + <rev>A</rev> + <file>logger_disk_log_h.xml</file> + </header> + <module>logger_disk_log_h</module> + <modulesummary>A disk_log based handler for Logger</modulesummary> + + <description> + <p>This is a handler for Logger that offers circular + (wrapped) logs by using <seealso marker="disk_log"><c>disk_log</c></seealso>. + Multiple instances of this handler can be added to Logger, and each instance + prints to its own disk log file, created with the name and settings specified + in the handler configuration.</p> + <p>The default standard handler, + <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>, can be + replaced by a disk_log handler at startup of the Kernel application. 
+ See an example of this below.</p> + <p>The handler has an overload protection mechanism that keeps the handler + process and the Kernel application alive during high loads of log + events. How overload protection works, and how to configure it, is + described in the + <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c> + </seealso>.</p> + <p>To add a new instance of the disk_log handler, use + <seealso marker="logger#add_handler-3"><c>logger:add_handler/3</c> + </seealso>. The handler configuration argument is a map which can contain + general configuration parameters, as documented in the + <seealso marker="logger_chapter#handler_configuration"><c>User's Guide</c> + </seealso>, and handler specific parameters. The specific data + is stored in a sub map with the key <c>config</c>, and can contain the + following parameters:</p> + <taglist> + <tag><c>file</c></tag> + <item> + <p>This is the full name of the disk log file. The option + corresponds to the <c>name</c> property in the + <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso> + datatype.</p> + <p>The value is set when the handler is added, and it can not + be changed in runtime.</p> + <p>Defaults to the same name as the handler identity, in the + current directory.</p> + </item> + <tag><c>type</c></tag> + <item> + <p>This is the disk log type, <c>wrap</c> or <c>halt</c>. The option + corresponds to the <c>type</c> property in the + <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso> + datatype.</p> + <p>The value is set when the handler is added, and it can not + be changed in runtime.</p> + <p>Defaults to <c>wrap</c>.</p> + </item> + <tag><c>max_no_files</c></tag> + <item> + <p>This is the maximum number of files that disk_log uses + for its circular logging. The option + corresponds to the <c>MaxNoFiles</c> element in the <c>size</c> property in the + <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso> + datatype.</p> + <p>The value is set when the handler is added, and it can not + be changed in runtime.</p> + <p>Defaults to <c>10</c>.</p> + <p>The setting has no effect on a halt log.</p> + </item> + <tag><c>max_no_bytes</c></tag> + <item> + <p>This is the maximum number of bytes that is written to + a log file before disk_log proceeds with the next file in order, or + generates an error in case of a full halt log. The option + corresponds to the <c>MaxNoBytes</c> element in the <c>size</c> property in the + <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso> + datatype.</p> + <p>The value is set when the handler is added, and it can not + be changed in runtime.</p> + <p>Defaults to <c>1048576</c> bytes for a wrap log, and + <c>infinity</c> for a halt log.</p> + </item> + <tag><c>filesync_repeat_interval</c></tag> + <item> + <p>This value, in milliseconds, specifies how often the handler does + a disk_log sync operation to write buffered data to disk. The handler attempts + the operation repeatedly, but only performs a new sync if something has + actually been logged.</p> + <p>Defaults to <c>5000</c> milliseconds.</p> + <p>If <c>no_repeat</c> is set as value, the repeated sync operation + is disabled. The user can also call the + <seealso marker="logger_disk_log_h#filesync-1"><c>filesync/1</c> + </seealso> function to perform a disk_log sync.</p> + </item> + </taglist> + <p>Other configuration parameters exist, to be used for customizing + the overload protection behaviour. 
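</p>
+    <p>For example, automatic termination and restart of an overloaded
+      handler instance can be enabled through the same <c>config</c> sub
+      map. This is only a sketch; the handler id and the queue length
+      threshold are chosen for illustration:</p>
+    <code type="none">
+logger:add_handler(my_overload_h, logger_disk_log_h,
+                   #{config => #{file => "./my_overload_log",
+                                 overload_kill_enable => true,
+                                 overload_kill_qlen => 10000}}).
+    </code>
+    <p>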
The same parameters are used both in the
+    standard handler and the disk_log handler, and are documented in the
+    <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+    </seealso>.</p>
+    <p>Notice that when changing the configuration of the handler in runtime, the
+    disk_log options (<c>file</c>, <c>type</c>, <c>max_no_files</c>,
+    <c>max_no_bytes</c>) must not be modified.</p>
+    <p>Example of adding a disk_log handler:</p>
+    <code type="none">
+logger:add_handler(my_disk_log_h, logger_disk_log_h,
+                   #{config => #{file => "./my_disk_log",
+                                 type => wrap,
+                                 max_no_files => 4,
+                                 max_no_bytes => 10000,
+                                 filesync_repeat_interval => 1000}}).
+    </code>
+    <p>To use the disk_log handler instead of the default standard
+    handler when starting an Erlang node, change the Kernel default logger to
+    use <c>logger_disk_log_h</c>. Example:</p>
+    <code type="none">
+erl -kernel logger '[{handler,default,logger_disk_log_h,
+                      #{config => #{file => "./system_disk_log"}}}]'
+    </code>
+  </description>
+
+  <funcs>
+
+    <func>
+      <name name="filesync" arity="1" clause_i="1"/>
+      <fsummary>Writes buffered data to disk.</fsummary>
+      <desc>
+        <p>Write buffered data to disk.</p>
+      </desc>
+    </func>
+
+  </funcs>
+
+  <section>
+    <title>See Also</title>
+    <p><seealso marker="logger"><c>logger(3)</c></seealso>,
+      <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>,
+      <seealso marker="disk_log"><c>disk_log(3)</c></seealso></p>
+  </section>
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_filters.xml b/lib/kernel/doc/src/logger_filters.xml
new file mode 100644
index 0000000000..90f1fcc270
--- /dev/null
+++ b/lib/kernel/doc/src/logger_filters.xml
@@ -0,0 +1,254 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+  <header>
+    <copyright>
+      <year>2018</year>
+      <holder>Ericsson AB. All Rights Reserved.</holder>
+    </copyright>
+    <legalnotice>
+      Licensed under the Apache License, Version 2.0 (the "License");
+      you may not use this file except in compliance with the License.
+      You may obtain a copy of the License at
+
+          http://www.apache.org/licenses/LICENSE-2.0
+
+      Unless required by applicable law or agreed to in writing, software
+      distributed under the License is distributed on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+      See the License for the specific language governing permissions and
+      limitations under the License.
+
+    </legalnotice>
+
+    <title>logger_filters</title>
+    <prepared></prepared>
+    <responsible></responsible>
+    <docno></docno>
+    <approved></approved>
+    <checked></checked>
+    <date></date>
+    <rev>A</rev>
+    <file>logger_filters.xml</file>
+  </header>
+  <module>logger_filters</module>
+  <modulesummary>Filters to use with Logger.</modulesummary>
+
+  <description>
+    <p>All functions exported from this module can be used as primary
+      or handler
+      filters.
See <seealso marker="logger#add_primary_filter-2">
+      <c>logger:add_primary_filter/2</c></seealso>
+      and <seealso marker="logger#add_handler_filter-3">
+      <c>logger:add_handler_filter/3</c></seealso> for more information
+      about how filters are added.</p>
+    <p>Filters are removed with <seealso marker="logger#remove_primary_filter-1">
+      <c>logger:remove_primary_filter/1</c></seealso>
+      and <seealso marker="logger#remove_handler_filter-2">
+      <c>logger:remove_handler_filter/2</c></seealso>.</p>
+  </description>
+
+  <funcs>
+    <func>
+      <name name="domain" arity="2"/>
+      <fsummary>Filter log events based on the domain field in
+        metadata.</fsummary>
+      <desc>
+        <p>This filter provides a way of filtering log events based on a
+          <c>domain</c> field in <c>Metadata</c>. This field is
+          optional, and the purpose of using it is to group log events
+          from, for example, a specific functional area. This allows
+          filtering or other specialized treatment in a Logger
+          handler.</p>
+
+        <p>A domain field must be a list of atoms, creating smaller
+          and more specialized domains as the list grows longer. The
+          greatest domain is <c>[]</c>, which comprises all possible
+          domains.</p>
+
+        <p>For example, consider the following domains:</p>
+        <pre>
+D1 = [otp]
+D2 = [otp, sasl]</pre>
+
+        <p><c>D1</c> is the greater of the two, and is said to be a
+          super-domain of <c>D2</c>. <c>D2</c> is a
+          sub-domain of <c>D1</c>. Both <c>D1</c> and <c>D2</c> are
+          sub-domains of <c>[]</c>.</p>
+
+        <p>The above domains are used for logs originating from
+          Erlang/OTP. D1 specifies that the log event comes from
+          Erlang/OTP in general, and D2 indicates that the log event
+          is a so-called <seealso marker="logger_chapter#sasl_reports">SASL
+          report</seealso>.</p>
+
+        <p>The <c><anno>Extra</anno></c> parameter to
+          the <c>domain/2</c> function is specified when adding the
+          filter via <seealso marker="logger#add_primary_filter-2">
+          <c>logger:add_primary_filter/2</c></seealso>
+          or <seealso marker="logger#add_handler_filter-3">
+          <c>logger:add_handler_filter/3</c></seealso>.</p>
+
+        <p>The filter compares the value of the <c>domain</c> field in
+          the log event's metadata (<c>Domain</c>) against
+          <c><anno>MatchDomain</anno></c>. The filter matches if the
+          value of <c>Compare</c> is:</p>
+
+        <taglist>
+          <tag><c>sub</c></tag>
+          <item>
+            <p>and <c>Domain</c> is equal to or a sub-domain
+              of <c>MatchDomain</c>, that is, if <c>MatchDomain</c> is
+              a prefix of <c>Domain</c>.</p>
+          </item>
+          <tag><c>super</c></tag>
+          <item>
+            <p>and <c>Domain</c> is equal to or a super-domain
+              of <c>MatchDomain</c>, that is, if <c>Domain</c> is a
+              prefix of <c>MatchDomain</c>.</p>
+          </item>
+          <tag><c>equal</c></tag>
+          <item>
+            <p>and <c>Domain</c> is equal to <c>MatchDomain</c>.</p>
+          </item>
+          <tag><c>not_equal</c></tag>
+          <item>
+            <p>and <c>Domain</c> differs from <c>MatchDomain</c>, or
+              if there is no domain field in metadata.</p>
+          </item>
+          <tag><c>undefined</c></tag>
+          <item>
+            <p>and there is no domain field in metadata. In this
+              case <c><anno>MatchDomain</anno></c> must be set
+              to <c>[]</c>.</p>
+          </item>
+        </taglist>
+
+        <p>If the filter matches and <c><anno>Action</anno></c> is
+          <c>log</c>, the log event is allowed.
If the filter matches + and <c><anno>Action</anno></c> is <c>stop</c>, the log event + is stopped.</p> + + <p>If the filter does not match, it returns <c>ignore</c>, + meaning that other filters, or the value of the + configuration parameter <c>filter_default</c>, decide if the + event is allowed or not.</p> + + <p>Log events that do not contain any domain field, match only + when <c><anno>Compare</anno></c> is equal + to <c>undefined</c> or <c>not_equal</c>.</p> + + <p>Example: stop all events with domain <c>[otp, + sasl | _]</c></p> + + <code> +logger:set_handler_config(h1, filter_default, log). % this is the default +Filter = {fun logger_filters:domain/2, {stop, sub, [otp, sasl]}}. +logger:add_handler_filter(h1, no_sasl, Filter). +ok</code> + </desc> + </func> + + <func> + <name name="level" arity="2"/> + <fsummary>Filter log events based on the log level.</fsummary> + <desc> + <p>This filter provides a way of filtering log events based + on the log level. It matches log events by comparing the + log level with a specified <c>MatchLevel</c></p> + + <p>The <c><anno>Extra</anno></c> parameter is specified when + adding the filter + via <seealso marker="logger#add_primary_filter-2"> + <c>logger:add_primary_filter/2</c></seealso> + or <seealso marker="logger#add_handler_filter-3"> + <c>logger:add_handler_filter/3</c></seealso>.</p> + + <p>The filter compares the value of the event's log level + (<c>Level</c>) to <c><anno>MatchLevel</anno></c> by + calling <seealso marker="logger#compare_levels-2"> + <c>logger:compare_levels(Level, MatchLevel)</c></seealso>. + The filter matches if the value + of <c><anno>Operator</anno></c> is:</p> + + <taglist> + <tag><c>neq</c></tag> + <item><p>and the compare function returns <c>lt</c> + or <c>gt</c>.</p></item> + <tag><c>eq</c></tag> + <item><p>and the compare function returns <c>eq</c>.</p></item> + <tag><c>lt</c></tag> + <item><p>and the compare function returns <c>lt</c>.</p></item> + <tag><c>gt</c></tag> + <item><p>and the compare function returns <c>gt</c>.</p></item> + <tag><c>lteq</c></tag> + <item><p>and the compare function returns <c>lt</c> + or <c>eq</c>.</p></item> + <tag><c>gteq</c></tag> + <item><p>and the compare function returns <c>gt</c> + or <c>eq</c>.</p></item> + </taglist> + + <p>If the filter matches and <c><anno>Action</anno></c> is + <c>log</c>, the log event is allowed. If the filter + matches and <c><anno>Action</anno></c> is <c>stop</c>, the + log event is stopped.</p> + + <p>If the filter does not match, it returns <c>ignore</c>, + meaning that other filters, or the value of the + configuration parameter <c>filter_default</c>, will decide + if the event is allowed or not.</p> + + <p>Example: only allow debug level log events</p> + + <code> +logger:set_handler_config(h1, filter_default, stop). +Filter = {fun logger_filters:level/2, {log, eq, debug}}. +logger:add_handler_filter(h1, debug_only, Filter). +ok</code> + </desc> + </func> + + <func> + <name name="progress" arity="2"/> + <fsummary>Filter progress reports from supervisor and application_controller.</fsummary> + <desc> + <p>This filter matches all progress reports + from <c>supervisor</c> and <c>application_controller</c>.</p> + + <p>If <c><anno>Extra</anno></c> is <c>log</c>, the progress + reports are allowed. 
If <c><anno>Extra</anno></c> + is <c>stop</c>, the progress reports are stopped.</p> + + <p>The filter returns <c>ignore</c> for all other log events.</p> + </desc> + </func> + + <func> + <name name="remote_gl" arity="2"/> + <fsummary>Filter events with group leader on remote node.</fsummary> + <desc> + <p>This filter matches all events originating from a process + that has its group leader on a remote node.</p> + + <p>If <c><anno>Extra</anno></c> is <c>log</c>, the matching + events are allowed. If <c><anno>Extra</anno></c> + is <c>stop</c>, the matching events are stopped.</p> + + <p>The filter returns <c>ignore</c> for all other log events.</p> + </desc> + </func> + + </funcs> + + <section> + <title>See Also</title> + <p> + <seealso marker="logger"><c>logger(3)</c></seealso> + </p> + </section> +</erlref> + + diff --git a/lib/kernel/doc/src/logger_formatter.xml b/lib/kernel/doc/src/logger_formatter.xml new file mode 100644 index 0000000000..24772fd6c4 --- /dev/null +++ b/lib/kernel/doc/src/logger_formatter.xml @@ -0,0 +1,354 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE erlref SYSTEM "erlref.dtd"> + +<erlref> + <header> + <copyright> + <year>2017</year><year>2018</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + </legalnotice> + + <title>logger_formatter</title> + <prepared></prepared> + <responsible></responsible> + <docno></docno> + <approved></approved> + <checked></checked> + <date></date> + <rev>A</rev> + <file>logger_formatter.xml</file> + </header> + <module>logger_formatter</module> + <modulesummary>Default formatter for Logger.</modulesummary> + + <description> + <p>Each Logger handler has a configured formatter specified as a + module and a configuration term. The purpose of the formatter is + to translate the log events to a final printable string + (<seealso marker="stdlib:unicode#type-chardata"><c>unicode:chardata()</c> + </seealso>) which can be written to the output device of the + handler. See + sections <seealso marker="logger_chapter#handlers">Handlers</seealso> + and <seealso marker="logger_chapter#formatters">Formatters</seealso> + in the Kernel User's Guide for more information.</p> + <p><c>logger_formatter</c> is the default formatter used by + Logger.</p> + </description> + + + <datatypes> + <datatype> + <name name="config"/> + <desc> + <p>The configuration term for <c>logger_formatter</c> is a + <seealso marker="stdlib:maps">map</seealso>, and the + following keys can be set as configuration parameters:</p> + <taglist> + <tag><marker id="chars_limit"/> + <c>chars_limit = integer() > 0 | unlimited</c></tag> + <item> + <p>A positive integer representing the value of the option + with the same name to be used when calling + <seealso marker="stdlib:io_lib#format-3"> + <c>io_lib:format/3</c></seealso>. + This value limits the total number of characters printed + for each log event. Notice that this is a soft limit. 
For a
+            hard truncation limit, see option <c>max_size</c>.</p>
+          <p>Defaults to <c>unlimited</c>.</p>
+        </item>
+        <tag><marker id="depth"/><c>depth = integer() > 0 | unlimited</c></tag>
+        <item>
+          <p>A positive integer representing the maximum depth to
+            which terms shall be printed by this formatter. Format
+            strings passed to this formatter are rewritten. The
+            format controls ~p and ~w are replaced with ~P and ~W,
+            respectively, and the value is used as the depth
+            parameter. For details, see
+            <seealso marker="stdlib:io#format-2"><c>io:format/2,3</c></seealso>
+            in STDLIB.</p>
+          <p>Defaults to <c>unlimited</c>.</p>
+        </item>
+        <tag><c>legacy_header = boolean()</c></tag>
+        <item>
+          <p>If set to <c>true</c>, a header field is added to
+            logger_formatter's part of <c>Metadata</c>. The value of
+            this field is a string similar to the header created by
+            the
+            old <seealso marker="error_logger"><c>error_logger</c></seealso>
+            event handlers. It can be included in the log event by
+            adding the list <c>[logger_formatter,header]</c> to the
+            template. See the description of
+            the <seealso marker="#type-template"><c>template()</c></seealso>
+            type for more information.</p>
+          <p>Defaults to <c>false</c>.</p>
+        </item>
+        <tag><marker id="max_size"/>
+          <c>max_size = integer() > 0 | unlimited</c></tag>
+        <item>
+          <p>A positive integer representing the absolute maximum size a
+            string returned from this formatter can have. If the
+            formatted string is longer, after possibly being limited
+            by <c>chars_limit</c> or <c>depth</c>, it is truncated.</p>
+          <p>Defaults to <c>unlimited</c>.</p>
+        </item>
+        <tag><c>report_cb = </c><seealso marker="logger#type-report_cb">
+          <c>logger:report_cb()</c></seealso></tag>
+        <item>
+          <p>A report callback is used by the formatter to transform
+            log messages on report form to a format string and
+            arguments. The report callback can be specified in the
+            metadata for the log event. If no report callback exists
+            in metadata, <c>logger_formatter</c> uses
+            <seealso marker="logger#format_report-1">
+            <c>logger:format_report/1</c></seealso> as the default
+            callback.</p>
+          <p>If this configuration parameter is set, it replaces
+            both the default report callback, and any report
+            callback found in metadata. That is, all reports are
+            converted by this configured function.</p>
+        </item>
+        <tag><c>single_line = boolean()</c></tag>
+        <item>
+          <p>If set to <c>true</c>, each log event is printed as a
+            single line. To achieve this, <c>logger_formatter</c>
+            sets the field width to <c>0</c> for all <c>~p</c>
+            and <c>~P</c> control sequences in the format string
+            (see <seealso marker="stdlib:io#format-2">
+            <c>io:format/2</c></seealso>), and replaces all
+            newlines in the message with <c>", "</c>. White space
+            following directly after newlines is removed. Notice
+            that newlines added by the <c>template</c> parameter are
+            not replaced.</p>
+          <p>Defaults to <c>true</c>.</p>
+        </item>
+        <tag><marker id="template"/>
+          <c>template = </c><seealso marker="#type-template"><c>template()</c>
+          </seealso></tag>
+        <item>
+          <p>The template describes how the formatted string is
+            composed by combining different data values from the log
+            event. See the description of
+            the <seealso marker="#type-template"><c>template()</c></seealso>
+            type for more information about this.</p>
+        </item>
+        <tag><c>time_designator = byte()</c></tag>
+        <item>
+          <p>Timestamps are formatted according to RFC3339, and the
+            time designator is the character used as date and time
+            separator.</p>
+          <p>Defaults to <c>$T</c>.</p>
+          <p>The value of this parameter is used as
+            the <c>time_designator</c> option
+            to <seealso marker="stdlib:calendar#system_time_to_rfc3339-2">
+            <c>calendar:system_time_to_rfc3339/2</c></seealso>.</p>
+        </item>
+        <tag><c>time_offset = integer() | [byte()]</c></tag>
+        <item>
+          <p>The time offset, either a string or an integer, to be
+            used when formatting the timestamp.</p>
+          <p>An empty string is interpreted as local time. The
+            values <c>"Z"</c>, <c>"z"</c> or <c>0</c> are
+            interpreted as Universal Coordinated Time (UTC).</p>
+          <p>Strings, other than <c>"Z"</c>, <c>"z"</c>,
+            or <c>""</c>, must be of the form <c>±[hh]:[mm]</c>, for
+            example <c>"-02:00"</c> or <c>"+00:00"</c>.</p>
+          <p>Integers must be in microseconds, meaning that the
+            offset <c>7200000000</c> is equivalent
+            to <c>"+02:00"</c>.</p>
+          <p>Defaults to an empty string, meaning that timestamps
+            are displayed in local time. However, for backwards
+            compatibility, if the SASL configuration
+            parameter <seealso marker="sasl:sasl_app#utc_log">
+            <c>utc_log</c></seealso><c>=true</c>, the default is
+            changed to <c>"Z"</c>, meaning that timestamps are displayed
+            in UTC.</p>
+          <p>The value of this parameter is used as
+            the <c>offset</c> option
+            to <seealso marker="stdlib:calendar#system_time_to_rfc3339-2">
+            <c>calendar:system_time_to_rfc3339/2</c></seealso>.</p>
+        </item>
+      </taglist>
+    </desc>
+  </datatype>
+  <datatype>
+    <name name="metakey"/>
+    <desc>
+      <p></p>
+    </desc>
+  </datatype>
+  <datatype>
+    <name name="template"/>
+    <desc>
+      <p>The template is a list of atoms, atom lists, tuples, and strings.
+        The atoms <c>level</c> and <c>msg</c> are treated as
+        placeholders for the severity level and the log message,
+        respectively. Other atoms or atom lists are interpreted as
+        placeholders for metadata, where atoms are expected to match
+        top level keys, and atom lists represent paths to sub keys when
+        the metadata is a nested map. For example, the
+        list <c>[key1,key2]</c> is replaced by the value of
+        the <c>key2</c> field in the nested map below. The
+        atom <c>key1</c> on its own is replaced by the complete
+        value of the <c>key1</c> field. The values are converted to
+        strings.</p>
+
+      <code>
+#{key1 => #{key2 => my_value,
+            ...}
+  ...}</code>
+
+      <p>Tuples in the template express if-exist tests for metadata
+        keys. For example, the following tuple says that
+        if <c>key1</c> exists in the metadata map,
+        print <c>"key1=Value"</c>, where <c>Value</c> is the value
+        that <c>key1</c> is associated with in the metadata map. If
+        <c>key1</c> does not exist, print nothing.</p>
+      <code>
+{key1, ["key1=",key1], []}</code>
+
+      <p>Strings in the template are printed literally.</p>
+      <p>The default value for the <c>template</c> configuration
+        parameter depends on the value of the <c>single_line</c>
+        and <c>legacy_header</c> configuration parameters as
+        follows.</p>
+
+      <p>The log event used in the examples is:</p>
+      <code>
+?LOG_ERROR("name: ~p~nexit_reason: ~p", [my_name, "It crashed"])</code>
+
+      <taglist>
+        <tag><c>legacy_header = true, single_line = false</c></tag>
+        <item>
+          <p>Default
+            template: <c>[[logger_formatter,header],"\n",msg,"\n"]</c></p>
+
+          <p>Example log entry:</p>
+          <code type="none">
+=ERROR REPORT==== 17-May-2018::18:30:19.453447 ===
+name: my_name
+exit_reason: "It crashed"</code>
+
+          <p>Notice that all eight levels can occur in the heading,
+            not only <c>ERROR</c>, <c>WARNING</c> or <c>INFO</c> as
+            <seealso marker="error_logger"><c>error_logger</c></seealso>
+            produces. Also, microseconds are added at the end of the
+            timestamp.</p>
+        </item>
+
+        <tag><c>legacy_header = true, single_line = true</c></tag>
+        <item>
+          <p>Default
+            template: <c>[[logger_formatter,header],"\n",msg,"\n"]</c></p>
+
+          <p>Notice that the template here is the same as
+            for <c>single_line=false</c>, but the resulting log entry
+            differs in that there is only one line after the
+            heading:</p>
+          <code type="none">
+=ERROR REPORT==== 17-May-2018::18:31:06.952665 ===
+name: my_name, exit_reason: "It crashed"</code>
+        </item>
+
+        <tag><c>legacy_header = false, single_line = true</c></tag>
+        <item>
+          <p>Default template: <c>[time," ",level,": ",msg,"\n"]</c></p>
+
+          <p>Example log entry:</p>
+          <code type="none">
+2018-05-17T18:31:31.152864+02:00 error: name: my_name, exit_reason: "It crashed"</code>
+        </item>
+
+        <tag><c>legacy_header = false, single_line = false</c></tag>
+        <item>
+          <p>Default template: <c>[time," ",level,":\n",msg,"\n"]</c></p>
+
+          <p>Example log entry:</p>
+          <code type="none">
+2018-05-17T18:32:20.105422+02:00 error:
+name: my_name
+exit_reason: "It crashed"</code>
+        </item>
+      </taglist>
+    </desc>
+  </datatype>
+  </datatypes>
+
+  <funcs>
+    <func>
+      <name name="check_config" arity="1"/>
+      <fsummary>Validates the given formatter configuration.</fsummary>
+      <desc>
+        <p>The function is called by Logger when the formatter
+          configuration for a handler is set or modified. It
+          returns <c>ok</c> if the configuration is valid,
+          and <c>{error,term()}</c> if it is faulty.</p>
+        <p>The following Logger API functions can trigger this callback:</p>
+        <list>
+          <item><seealso marker="logger#add_handler-3">
+            <c>logger:add_handler/3</c></seealso></item>
+          <item><seealso marker="logger#set_handler_config-2">
+            <c>logger:set_handler_config/2,3</c></seealso></item>
+          <item><seealso marker="logger#update_handler_config-2">
+            <c>logger:update_handler_config/2</c></seealso></item>
+          <item><seealso marker="logger#update_formatter_config-2">
+            <c>logger:update_formatter_config/2</c></seealso></item>
+        </list>
+      </desc>
+    </func>
+    <func>
+      <name name="format" arity="2"/>
+      <fsummary>Formats the given message.</fsummary>
+      <desc>
+        <p>This is the formatter callback function to be called from
+          handlers. The log event is processed as follows:</p>
+        <list>
+          <item>If the message is on report form, it is converted to
+            <c>{Format,Args}</c> by calling the report callback.
See + section <seealso marker="logger_chapter#log_message">Log + Message</seealso> in the Kernel User's Guide for more + information about report callbacks and valid forms of log + messages.</item> + <item>The message size is limited according to the values of + configuration parameters <seealso marker="#chars_limit"> + <c>chars_limit</c></seealso> + and <seealso marker="#depth"><c>depth</c></seealso>.</item> + <item>The full log entry is composed according to + the <seealso marker="#template"><c>template</c></seealso>.</item> + <item>If the final string is too long, it is truncated + according to the value of configuration + parameter <seealso marker="#max_size"><c>max_size</c></seealso>.</item> + </list> + </desc> + </func> + </funcs> + + <section> + <title>See Also</title> + <p> + <seealso marker="stdlib:calendar"><c>calendar(3)</c></seealso>, + <seealso marker="error_logger"><c>error_logger(3)</c></seealso>, + <seealso marker="stdlib:io"><c>io(3)</c></seealso>, + <seealso marker="stdlib:io_lib"><c>io_lib(3)</c></seealso>, + <seealso marker="logger"><c>logger(3)</c></seealso>, + <seealso marker="stdlib:maps"><c>maps(3)</c></seealso>, + <seealso marker="sasl:sasl_app"><c>sasl(6)</c></seealso>, + <seealso marker="stdlib:unicode"><c>unicode(3)</c></seealso> + </p> + </section> +</erlref> + + diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml new file mode 100644 index 0000000000..e156f5719b --- /dev/null +++ b/lib/kernel/doc/src/logger_std_h.xml @@ -0,0 +1,141 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE erlref SYSTEM "erlref.dtd"> + +<erlref> + <header> + <copyright> + <year>2017</year><year>2018</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + </legalnotice> + + <title>logger_std_h</title> + <prepared></prepared> + <responsible></responsible> + <docno></docno> + <approved></approved> + <checked></checked> + <date></date> + <rev>A</rev> + <file>logger_std_h.xml</file> + </header> + <module>logger_std_h</module> + <modulesummary>Standard handler for Logger.</modulesummary> + + <description> + <p>This is the standard handler for Logger. + Multiple instances of this handler can be added to + Logger, and each instance prints logs to <c>standard_io</c>, + <c>standard_error</c>, or to file.</p> + <p>The handler has an overload protection mechanism that keeps the handler + process and the Kernel application alive during high loads of log + events. How overload protection works, and how to configure it, is + described in the + <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c> + </seealso>.</p> + <p>To add a new instance of the standard handler, use + <seealso marker="logger#add_handler-3"><c>logger:add_handler/3</c> + </seealso>. 
The handler configuration argument is a map which can contain + general configuration parameters, as documented in the + <seealso marker="logger_chapter#handler_configuration"><c>User's Guide</c> + </seealso>, and handler specific parameters. The specific data + is stored in a sub map with the key <c>config</c>, and can contain the + following parameters:</p> + <taglist> + <tag><marker id="type"/><c>type</c></tag> + <item> + <p>This has the value <c>standard_io</c>, <c>standard_error</c>, + <c>{file,LogFileName}</c>, or <c>{file,LogFileName,LogFileOpts}</c>.</p> + <p>If <c>LogFileOpts</c> is specified, it replaces the default + list of options used when opening the log file. The default + list is <c>[raw,append,delayed_write]</c>. One reason to do + so can be to change <c>append</c> to, for + example, <c>write</c>, ensuring that the old log is + truncated when a node is restarted. See the reference manual + for <seealso marker="file#open-2"><c>file:open/2</c></seealso> + for more information about file options.</p> + <p>Log files are always UTF-8 encoded. The encoding can not be + changed by setting the option <c>{encoding,Encoding}</c> + in <c>LogFileOpts</c>.</p> + <p>Notice that the standard handler does not have support for + circular logging. Use the disk_log handler, + <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>, + for this.</p> + <p>The value is set when the handler is added, and it can not + be changed in runtime.</p> + <p>Defaults to <c>standard_io</c>.</p> + </item> + <tag><c>filesync_repeat_interval</c></tag> + <item> + <p>This value, in milliseconds, specifies how often the handler does + a file sync operation to write buffered data to disk. The handler attempts + the operation repeatedly, but only performs a new sync if something has + actually been logged.</p> + <p>If <c>no_repeat</c> is set as value, the repeated file sync operation + is disabled, and it is the operating system settings that determine + how quickly or slowly data is written to disk. The user can also call + the <seealso marker="logger_std_h#filesync-1"><c>filesync/1</c></seealso> + function to perform a file sync.</p> + <p>Defaults to <c>5000</c> milliseconds.</p> + </item> + </taglist> + <p>Other configuration parameters exist, to be used for customizing + the overload protection behaviour. The same parameters are used both in the + standard handler and the disk_log handler, and are documented in the + <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c> + </seealso>.</p> + <p>Notice that if changing the configuration of the handler in runtime, + the <c>type</c> parameter must not be modified.</p> + <p>Example of adding a standard handler:</p> + <code type="none"> +logger:add_handler(my_standard_h, logger_std_h, + #{config => #{type => {file,"./system_info.log"}, + filesync_repeat_interval => 1000}}). + </code> + <p>To set the default handler, that starts initially with + the Kernel application, to log to file instead of <c>standard_io</c>, + change the Kernel default logger configuration. 
Example:</p> + <code type="none"> +erl -kernel logger '[{handler,default,logger_std_h, + #{config => #{type => {file,"./log.log"}}}}]' + </code> + <p>An example of how to replace the standard handler with a disk_log handler + at startup is found in the + <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso> + manual.</p> + </description> + + <funcs> + + <func> + <name name="filesync" arity="1" clause_i="1"/> + <fsummary>Writes buffered data to disk.</fsummary> + <desc> + <p>Write buffered data to disk.</p> + </desc> + </func> + + </funcs> + + <section> + <title>See Also</title> + <p><seealso marker="logger"><c>logger(3)</c></seealso>, + <seealso marker="logger_disk_log_h"> + <c>logger_disk_log_h(3)</c></seealso></p> + </section> +</erlref> + + diff --git a/lib/kernel/doc/src/net_kernel.xml b/lib/kernel/doc/src/net_kernel.xml index 0b94fc0fa6..bfbe7a6470 100644 --- a/lib/kernel/doc/src/net_kernel.xml +++ b/lib/kernel/doc/src/net_kernel.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>1996</year><year>2017</year> + <year>1996</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -56,7 +56,7 @@ $ <input>erl -sname foobar</input></pre> <p>Normally, connections are established automatically when another node is referenced. This functionality can be disabled by setting Kernel configuration parameter - <c>dist_auto_connect</c> to <c>false</c>, see + <c>dist_auto_connect</c> to <c>never</c>, see <seealso marker="kernel_app"><c>kernel(6)</c></seealso>. In this case, connections must be established explicitly by calling <seealso marker="#connect_node/1"><c>connect_node/1</c></seealso>.</p> @@ -102,8 +102,10 @@ $ <input>erl -sname foobar</input></pre> <fsummary>Establish a connection to a node.</fsummary> <desc> <p>Establishes a connection to <c><anno>Node</anno></c>. Returns - <c>true</c> if successful, <c>false</c> if not, and <c>ignored</c> - if the local node is not alive.</p> + <c>true</c> if a connection was established or was already + established or if <c><anno>Node</anno></c> is the local node + itself. Returns <c>false</c> if the connection attempt failed, and + <c>ignored</c> if the local node is not alive.</p> </desc> </func> @@ -230,7 +232,12 @@ $ <input>erl -sname foobar</input></pre> <item> <p>The tuple <c>{nodedown_reason, Reason}</c> is included in <c>InfoList</c> in <c>nodedown</c> messages.</p> - <p><c>Reason</c> can be any of the following:</p> + <p> + <c>Reason</c> can, depending on which + distribution module or process that is used be any term, + but for the standard TCP distribution module it is + any of the following: + </p> <taglist> <tag><c>connection_setup_failed</c></tag> <item><p>The connection setup failed (after <c>nodeup</c> diff --git a/lib/kernel/doc/src/notes.xml b/lib/kernel/doc/src/notes.xml index e1cf45109d..8188ede6a2 100644 --- a/lib/kernel/doc/src/notes.xml +++ b/lib/kernel/doc/src/notes.xml @@ -4,7 +4,7 @@ <chapter> <header> <copyright> - <year>2004</year><year>2017</year> + <year>2004</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -31,6 +31,569 @@ </header> <p>This document describes the changes made to the Kernel application.</p> +<section><title>Kernel 6.1</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + The values <c>all</c> and <c>none</c> are documented as + valid value for the Kernel configuration parameter + <c>logger_level</c>, but would cause a crash during node + start. 
This is now corrected.</p> + <p> + Own Id: OTP-15143</p> + </item> + <item> + <p> + Fix some potential buggy behavior in how ticks are sent + on inter node distribution connections. Tick is now sent + to c-node even if there are unsent buffered data, as + c-nodes need ticks in order to send reply ticks. The + amount of sent data was also calculated wrongly when + ticks were suppressed due to unsent buffered data.</p> + <p> + Own Id: OTP-15162 Aux Id: ERIERL-191 </p> + </item> + <item> + <p> + Non semantic change in dist_util.erl to silence dialyzer + warning.</p> + <p> + Own Id: OTP-15170</p> + </item> + <item> + <p> + Fixed <c>net_kernel:connect_node(node())</c> to return + <c>true</c> (and do nothing) as it always has before + OTP-21.0. Also documented this successful "self connect" + as the expected behavior.</p> + <p> + Own Id: OTP-15182 Aux Id: ERL-643 </p> + </item> + <item> + <p> + The single_line option on logger_formatter would in some + cases add an unwanted comma after the association arrows + in a map. This is now corrected.</p> + <p> + Own Id: OTP-15228</p> + </item> + <item> + <p> + Improved robustness of distribution connection setup. In + OTP-21.0 a truly asynchronous connection setup was + introduced. This is further improvement on that work to + make the emulator more robust and also be able to recover + in cases when involved Erlang processes misbehave.</p> + <p> + Own Id: OTP-15297 Aux Id: OTP-15279, OTP-15280 </p> + </item> + </list> + </section> + + + <section><title>Improvements and New Features</title> + <list> + <item> + <p> + A new macro, <c>?LOG(Level,...)</c>, is added. This is + equivalent to the existing <c>?LOG_<LEVEL>(...)</c> + macros.</p> + <p> + A new variant of Logger report callback is added, which + takes an extra argument containing options for size + limiting and line breaks. Module <c>proc_lib</c> in + <c>STDLIB</c> uses this for crash reports.</p> + <p> + Logger configuration is now checked a bit more for + errors.</p> + <p> + Own Id: OTP-15132</p> + </item> + <item> + <p> + The socket options <c>recvtos</c>, <c>recvttl</c>, + <c>recvtclass</c> and <c>pktoptions</c> have been + implemented in the socket modules. See the documentation + for the <c>gen_tcp</c>, <c>gen_udp</c> and <c>inet</c> + modules. Note that support for these in the runtime + system is platform dependent. Especially for + <c>pktoptions</c> which is very Linux specific and + obsoleted by the RFCs that defined it.</p> + <p> + Own Id: OTP-15145 Aux Id: ERIERL-187 </p> + </item> + <item> + <p> + Add <c>logger:set_application_level/2</c> for setting the + logger level of all modules in one application.</p> + <p> + Own Id: OTP-15146</p> + </item> + </list> + </section> + +</section> + +<section><title>Kernel 6.0.1</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + Fixed bug in <c>net_kernel</c> that could cause an + emulator crash if certain connection attempts failed. Bug + exists since kernel-6.0 (OTP-21.0).</p> + <p> + Own Id: OTP-15280 Aux Id: ERIERL-226, OTP-15279 </p> + </item> + </list> + </section> + +</section> + +<section><title>Kernel 6.0</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> Clarify the documentation of <c>rpc:multicall/5</c>. 
+ </p> + <p> + Own Id: OTP-10551</p> + </item> + <item> + <p> + The DNS resolver when getting econnrefused from a server + retained an invalid socket so look up towards the next + server(s) also failed.</p> + <p> + Own Id: OTP-13133 Aux Id: PR-1557 </p> + </item> + <item> + <p> + No resolver backend returns V4Mapped IPv6 addresses any + more. This was inconsistent before, some did, some did + not. To facilitate working with such addresses a new + function <c>inet:ipv4_mapped_ipv6_address/1</c> has been + added.</p> + <p> + *** POTENTIAL INCOMPATIBILITY ***</p> + <p> + Own Id: OTP-13761 Aux Id: ERL-503 </p> + </item> + <item> + <p> + The type specifications for <c>file:posix/0</c> and + <c>inet:posix/0</c> have been updated according to which + errors file and socket operations should be able to + return.</p> + <p> + Own Id: OTP-14019 Aux Id: ERL-550 </p> + </item> + <item> + <p> + Fix name resolving in IPv6 only environments when doing + the initial distributed connection.</p> + <p> + Own Id: OTP-14501</p> + </item> + <item> + <p> File operations used to accept <seealso + marker="kernel:file#type-name_all">filenames</seealso> + containing null characters (integer value zero). This + caused the name to be truncated and in some cases + arguments to primitive operations to be mixed up. + Filenames containing null characters inside the filename + are now <em>rejected</em> and will cause primitive file + operations to fail. </p> <p> Also environment variable + operations used to accept <seealso + marker="kernel:os#type-env_var_name">names</seealso> and + <seealso + marker="kernel:os#type-env_var_value">values</seealso> of + environment variables containing null characters (integer + value zero). This caused operations to silently produce + erroneous results. Environment variable names and values + containing null characters inside the name or value are + now <em>rejected</em> and will cause environment variable + operations to fail. </p> <p>Primitive environment + variable operations also used to accept the <c>$=</c> + character in environment variable names causing various + problems. <c>$=</c> characters in environment variable + names are now also <em>rejected</em>. </p> <p>Also + <seealso + marker="kernel:os#cmd/1"><c>os:cmd/1</c></seealso> now + reject null characters inside its <seealso + marker="kernel:os#type-os_command">command</seealso>. + </p> <p><seealso + marker="erts:erlang#open_port/2"><c>erlang:open_port/2</c></seealso> + will also reject null characters inside the port name + from now on.</p> + <p> + *** POTENTIAL INCOMPATIBILITY ***</p> + <p> + Own Id: OTP-14543 Aux Id: ERL-370 </p> + </item> + <item> + <p><c>os:putenv</c> and <c>os:getenv</c> no longer access + the process environment directly and instead work on a + thread-safe emulation. The only observable difference is + that it's <em>not</em> kept in sync with libc + <c>getenv(3)</c> / <c>putenv(3)</c>, so those who relied + on that behavior in drivers or NIFs will need to add + manual synchronization.</p> <p>On Windows this means that + you can no longer resolve DLL dependencies by modifying + the <c>PATH</c> just before loading the driver/NIF. 
To + make this less of a problem, the emulator now adds the + target DLL's folder to the DLL search path.</p> + <p> + *** POTENTIAL INCOMPATIBILITY ***</p> + <p> + Own Id: OTP-14666</p> + </item> + <item> + <p> + Fixed connection tick toward primitive hidden nodes + (erl_interface) that could cause faulty tick timeout in + rare cases when payload data is sent to hidden node but + not received.</p> + <p> + Own Id: OTP-14681</p> + </item> + <item> + <p> + Make group react immediately on an EXIT-signal from shell + in e.g ssh.</p> + <p> + Own Id: OTP-14991 Aux Id: PR1705 </p> + </item> + <item> + <p> + Calls to <c>gen_tcp:send/2</c> on closed sockets now + returns <c>{error, closed}</c> instead of + <c>{error,enotconn}</c>.</p> + <p> + Own Id: OTP-15001</p> + </item> + <item> + <p> + The <c>included_applications</c> key are no longer + duplicated as application environment variable. Earlier, + the included applications could be read both with + <c>application:get[_all]_env(...)</c> and + <c>application:get[_all]_key(...)</c> functions. Now, it + can only be read with + <c>application:get[_all]_key(...)</c>.</p> + <p> + *** POTENTIAL INCOMPATIBILITY ***</p> + <p> + Own Id: OTP-15071</p> + </item> + <item> + <p>Owner and group changes through + <c>file:write_file_info</c>, <c>file:change_owner</c>, + and <c>file:change_group</c> will no longer report + success on permission errors.</p> + <p> + *** POTENTIAL INCOMPATIBILITY ***</p> + <p> + Own Id: OTP-15118</p> + </item> + </list> + </section> + + + <section><title>Improvements and New Features</title> + <list> + <item> + <p>A new logging API is added to Erlang/OTP, see the + <seealso + marker="kernel:logger"><c>logger(3)</c></seealso> manual + page, and section <seealso + marker="kernel:logger_chapter">Logging</seealso> in the + Kernel User's Guide.</p> + <p>Calls to <c>error_logger</c> are automatically + redirected to the new API, and legacy error logger event + handlers can still be used. It is, however, recommended + to use the Logger API directly when writing new code.</p> + <p>Notice the following potential incompatibilities:</p> + <list> <item><p>Kernel configuration parameters + <c>error_logger</c> still works, but is overruled if the + default handler's output destination is configured with + Kernel configuration parameter <c>logger</c>.</p> <p>In + general, parameters for configuring error logger are + overwritten by new parameters for configuring + Logger.</p></item> <item><p>The concept of SASL error + logging is deprecated, meaning that by default the SASL + application does not affect which log events are + logged.</p> <p>By default, supervisor reports and crash + reports are logged by the default Logger handler started + by Kernel, and end up at the same destination (terminal + or file) as other standard log event from Erlang/OTP.</p> + <p>Progress reports are not logged by default, but can be + enabled by setting the primary log level to info, for + example with the Kernel configuration parameter + <c>logger_level</c>.</p> <p>To obtain backwards + compatibility with the SASL error logging functionality + from earlier releases, set Kernel configuration parameter + <c>logger_sasl_compatible</c> to <c>true</c>. This + prevents the default Logger handler from logging any + supervisor-, crash-, or progress reports. Instead, SASL + adds a separate Logger handler during application start, + which takes care of these log events. 
The SASL + configuration parameters <c>sasl_error_logger</c> and + <c>sasl_errlog_type</c> specify the destination (terminal + or file) and severity level to log for these + events.</p></item></list> + <p> + Since Logger is new in Erlang/OTP 21.0, we do reserve the + right to introduce changes to the Logger API and + functionality in patches following this release. These + changes might or might not be backwards compatible with + the initial version.</p> + <p> + *** POTENTIAL INCOMPATIBILITY ***</p> + <p> + Own Id: OTP-13295</p> + </item> + <item> + <p> + The function <c>inet:i/0</c> has been documented.</p> + <p> + Own Id: OTP-13713 Aux Id: PR-1645 </p> + </item> + <item> + <p> + Typespecs for <c>netns</c> and <c>bind_to_device</c> + options have been added to <c>gen_tcp</c>, <c>gen_udp</c> + and <c>gen_sctp</c> functions.</p> + <p> + Own Id: OTP-14359 Aux Id: PR-1816 </p> + </item> + <item> + <p> + New functionality for implementation of alternative + carriers for the Erlang distribution has been introduced. + This mainly consists of support for usage of distribution + controller processes (previously only ports could be used + as distribution controllers). For more information see + <seealso marker="erts:alt_dist#distribution_module">ERTS + User's Guide ➜ How to implement an Alternative Carrier + for the Erlang Distribution ➜ Distribution + Module</seealso>.</p> + <p> + Own Id: OTP-14459</p> + </item> + <item> + <p><c>seq_trace</c> labels may now be any erlang + term.</p> + <p> + Own Id: OTP-14899</p> + </item> + <item> + <p> + The SSL distribution protocol <c>-proto inet_tls</c> has + stopped setting the SSL option + <c>server_name_indication</c>. New verify funs for client + and server in <c>inet_tls_dist</c> has been added, not + documented yet, that checks node name if present in peer + certificate. Usage is still also yet to be documented.</p> + <p> + Own Id: OTP-14969 Aux Id: OTP-14465, ERL-598 </p> + </item> + <item> + <p> + Changed timeout of <c>gen_server</c> calls to <c>auth</c> + server from default 5 seconds to <c>infinity</c>.</p> + <p> + Own Id: OTP-15009 Aux Id: ERL-601 </p> + </item> + <item> + <p>The callback module passed as <c>-epmd_module</c> to + erl has been expanded to be able to do name and port + resolving.</p> <p>Documentation has also been added in + the <seealso + marker="kernel:erl_epmd"><c>erl_epmd</c></seealso> + reference manual and ERTS User's Guide <seealso + marker="erts:alt_disco">How to Implement an Alternative + Service Discovery for Erlang Distribution</seealso>.</p> + <p> + Own Id: OTP-15086 Aux Id: PR-1694 </p> + </item> + <item> + <p> + Included config file specified with relative path in + sys.config are now first searched for relative to the + directory of sys.config itself. If not found, it is also + searched for relative to the current working directory. + The latter is for backwards compatibility.</p> + <p> + Own Id: OTP-15137 Aux Id: PR-1838 </p> + </item> + </list> + </section> + +</section> + +<section><title>Kernel 5.4.3.2</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + Non semantic change in dist_util.erl to silence dialyzer + warning.</p> + <p> + Own Id: OTP-15170</p> + </item> + </list> + </section> + +</section> + +<section><title>Kernel 5.4.3.1</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + Fix some potential buggy behavior in how ticks are sent + on inter node distribution connections. 
Tick is now sent + to c-node even if there are unsent buffered data, as + c-nodes need ticks in order to send reply ticks. The + amount of sent data was calculated wrongly when ticks + where suppressed due to unsent buffered data.</p> + <p> + Own Id: OTP-15162 Aux Id: ERIERL-191 </p> + </item> + </list> + </section> + +</section> + +<section><title>Kernel 5.4.3</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> Correct a few contracts. </p> + <p> + Own Id: OTP-14889</p> + </item> + <item> + <p> + Reject loading modules with names containing directory + separators ('/' or '\' on Windows).</p> + <p> + Own Id: OTP-14933 Aux Id: ERL-564, PR-1716 </p> + </item> + <item> + <p> + Fix bug in handling of os:cmd/2 option max_size on + windows.</p> + <p> + Own Id: OTP-14940</p> + </item> + </list> + </section> + +</section> + +<section><title>Kernel 5.4.2</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + Add <c>os:cmd/2</c> that takes an options map as the + second argument.</p> + <p> + Add <c>max_size</c> as an option to <c>os:cmd/2</c> that + control the maximum size of the result that + <c>os:cmd/2</c> will return.</p> + <p> + Own Id: OTP-14823</p> + </item> + </list> + </section> + +</section> + +<section><title>Kernel 5.4.1</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + Refactored an internal API.</p> + <p> + Own Id: OTP-14784</p> + </item> + </list> + </section> + +</section> + +<section><title>Kernel 5.4</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + Processes which did output after switching jobs (Ctrl+G) + could be left forever stuck in the io request.</p> + <p> + Own Id: OTP-14571 Aux Id: ERL-472 </p> + </item> + </list> + </section> + + + <section><title>Improvements and New Features</title> + <list> + <item> + <p>Lock counting can now be fully toggled at runtime in + the lock counting emulator (<c>-emu_type lcnt</c>). + Everything is enabled by default to match the old + behavior, but specific categories can be toggled at will + with minimal runtime overhead when disabled. Refer to the + documentation on <c>lcnt:rt_mask/1</c> for details.</p> + <p> + Own Id: OTP-13170</p> + </item> + <item> + <p><c>lcnt:collect</c> and <c>lcnt:clear</c> will no + longer block all other threads in the runtime system.</p> + <p> + Own Id: OTP-14412</p> + </item> + <item> + <p> + General Unicode improvements.</p> + <p> + Own Id: OTP-14462</p> + </item> + </list> + </section> + +</section> + +<section><title>Kernel 5.3.1</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p>The documentation for the 'quiet' option in + disk_log:open/1 had an incorrect default value.</p> + <p> + Own Id: OTP-14498</p> + </item> + </list> + </section> + +</section> + <section><title>Kernel 5.3</title> <section><title>Fixed Bugs and Malfunctions</title> diff --git a/lib/kernel/doc/src/os.xml b/lib/kernel/doc/src/os.xml index 0e9add4161..c95e615c6b 100644 --- a/lib/kernel/doc/src/os.xml +++ b/lib/kernel/doc/src/os.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>1997</year><year>2017</year> + <year>1997</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -36,19 +36,116 @@ only run on a specific platform. 
 On the other hand, with careful use,
    these functions can be of help in enabling a program
    to run on most platforms.</p>
+
+    <note>
+      <p>
+	File operations used to accept filenames containing
+	null characters (integer value zero). This caused
+	the name to be truncated and in some cases arguments
+	to primitive operations to be mixed up. Filenames
+	containing null characters inside the filename
+	are now <em>rejected</em> and will cause primitive
+	file operations to fail.
+      </p>
+      <p>
+	Also environment variable operations used to accept
+	names and values of environment variables containing
+	null characters (integer value zero). This caused
+	operations to silently produce erroneous results.
+	Environment variable names and values containing
+	null characters inside the name or value are now
+	<em>rejected</em> and will cause environment variable
+	operations to fail.
+      </p>
+    </note>
   </description>
+  <datatypes>
+    <datatype>
+      <name name="env_var_name"/>
+      <desc>
+        <p>A string containing valid characters on the specific
+           OS for environment variable names using
+           <seealso marker="file#native_name_encoding/0"><c>file:native_name_encoding()</c></seealso>
+           encoding. Note that specifically null characters (integer
+           value zero) and <c>$=</c> characters are not allowed.
+           However, note that not all invalid characters will
+           necessarily cause the primitive operations to fail;
+           some may instead produce invalid results.
+        </p>
+      </desc>
+    </datatype>
+    <datatype>
+      <name name="env_var_value"/>
+      <desc>
+        <p>A string containing valid characters on the specific
+           OS for environment variable values using
+           <seealso marker="file#native_name_encoding/0"><c>file:native_name_encoding()</c></seealso>
+           encoding. Note that specifically null characters (integer
+           value zero) are not allowed. However, note that not all
+           invalid characters will necessarily cause the primitive
+           operations to fail; some may instead produce invalid results.
+        </p>
+      </desc>
+    </datatype>
+    <datatype>
+      <name name="env_var_name_value"/>
+      <desc>
+        <p>
+          Assuming that environment variables have been correctly
+          set, a string containing valid characters on the specific
+          OS for environment variable names and values using
+          <seealso marker="file#native_name_encoding/0"><c>file:native_name_encoding()</c></seealso>
+          encoding. The first <c>$=</c> character appearing in
+          the string separates the environment variable name (on the
+          left) from the environment variable value (on the right).
+        </p>
+      </desc>
+    </datatype>
+    <datatype>
+      <name name="os_command"/>
+      <desc>
+        <p>All characters need to be valid characters on the
+           specific OS using
+           <seealso marker="file#native_name_encoding/0"><c>file:native_name_encoding()</c></seealso>
+           encoding. Note that specifically null characters (integer
+           value zero) are not allowed. However, note that not all
+           invalid characters will necessarily cause
+           <seealso marker="#cmd/1"><c>os:cmd/1</c></seealso>
+           to fail; some may instead produce invalid results.
+        </p>
+      </desc>
+    </datatype>
+    <datatype>
+      <name name="os_command_opts"/>
+      <desc>
+        <p>Options for <seealso marker="#cmd/2"><c>os:cmd/2</c></seealso>.</p>
+        <taglist>
+          <tag><c>max_size</c></tag>
+          <item>
+            <p>The maximum size of the data returned by the <c>os:cmd</c> call.
+ See the <seealso marker="#cmd/2"><c>os:cmd/2</c></seealso> + documentation for more details.</p> + </item> + </taglist> + </desc> + </datatype> + </datatypes> + <funcs> <func> <name name="cmd" arity="1"/> + <name name="cmd" arity="2"/> <fsummary>Execute a command in a shell of the target OS.</fsummary> <desc> <p>Executes <c><anno>Command</anno></c> in a command shell of the - target OS, - captures the standard output of the command, and returns this - result as a string. This function is a replacement of - the previous function <c>unix:cmd/1</c>; they are equivalent on a - Unix platform.</p> + target OS, captures the standard output of the command, + and returns this result as a string.</p> + <warning><p>Previous implementation used to allow all characters + as long as they were integer values greater than or equal to zero. + This sometimes lead to unwanted results since null characters + (integer value zero) often are interpreted as string termination. The + current implementation rejects these.</p></warning> <p><em>Examples:</em></p> <code type="none"> LsOut = os:cmd("ls"), % on unix platform @@ -57,6 +154,21 @@ DirOut = os:cmd("dir"), % on Win32 platform</code> called from another program (for example, <c>os:cmd/1</c>) can differ, compared with the standard output of the command when called directly from an OS command shell.</p> + <p><c>os:cmd/2</c> was added in kernel-5.5 (OTP-20.2.1). It makes it + possible to pass an options map as the second argument in order to + control the behaviour of <c>os:cmd</c>. The possible options are: + </p> + <taglist> + <tag><c>max_size</c></tag> + <item> + <p>The maximum size of the data returned by the <c>os:cmd</c> call. + This option is a safety feature that should be used when the command + executed can return a very large, possibly infinite, result.</p> + <code type="none"> +> os:cmd("cat /dev/zero", #{ max_size => 20 }). +[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]</code> + </item> + </taglist> </desc> </func> @@ -152,6 +264,15 @@ DirOut = os:cmd("dir"), % on Win32 platform</code> <p>On Unix platforms, the environment is set using UTF-8 encoding if Unicode filename translation is in effect. On Windows, the environment is set using wide character interfaces.</p> + <note> + <p> + <c><anno>VarName</anno></c> is not allowed to contain + an <c>$=</c> character. Previous implementations used + to just let the <c>$=</c> character through which + silently caused erroneous results. Current implementation + will instead throw a <c>badarg</c> exception. + </p> + </note> </desc> </func> diff --git a/lib/kernel/doc/src/part_notes.xml b/lib/kernel/doc/src/part.xml index 5e849039ee..fa7e92835f 100644 --- a/lib/kernel/doc/src/part_notes.xml +++ b/lib/kernel/doc/src/part.xml @@ -4,7 +4,7 @@ <part xmlns:xi="http://www.w3.org/2001/XInclude"> <header> <copyright> - <year>2004</year><year>2016</year> + <year>2018</year> <holder>Ericsson AB. 
All Rights Reserved.</holder> </copyright> <legalnotice> @@ -22,19 +22,16 @@ </legalnotice> - <title>Kernel Release Notes</title> - <prepared></prepared> + <title>Kernel User's Guide</title> + <prepared>OTP Team</prepared> <docno></docno> - <date></date> - <rev></rev> + <date>2018-06-06</date> + <file>part.xml</file> </header> <description> - <p>The <em>Kernel</em> application has all the code necessary to run - the Erlang runtime system itself; File servers and code servers - etc.</p> - <p>For information about older versions, see - <url href="part_notes_history_frame.html">Release Notes History</url>.</p> + <p></p> </description> - <xi:include href="notes.xml"/> + <xi:include href="introduction_chapter.xml"/> + <xi:include href="logger_chapter.xml"/> </part> diff --git a/lib/kernel/doc/src/part_notes_history.xml b/lib/kernel/doc/src/part_notes_history.xml deleted file mode 100644 index 0cf7b793da..0000000000 --- a/lib/kernel/doc/src/part_notes_history.xml +++ /dev/null @@ -1,40 +0,0 @@ -<?xml version="1.0" encoding="utf-8" ?> -<!DOCTYPE part SYSTEM "part.dtd"> - -<part> - <header> - <copyright> - <year>2006</year> - <year>2016</year> - <holder>Ericsson AB, All Rights Reserved</holder> - </copyright> - <legalnotice> - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - The Initial Developer of the Original Code is Ericsson AB. - </legalnotice> - - <title>Kernel Release Notes History</title> - <prepared></prepared> - <docno></docno> - <date></date> - <rev></rev> - </header> - <description> - <p>The <em>Kernel</em> application has all the code necessary to run - the Erlang runtime system itself; File servers and code servers - etc.</p> - </description> - <include file="notes_history"></include> -</part> - diff --git a/lib/kernel/doc/src/ref_man.xml b/lib/kernel/doc/src/ref_man.xml index 5cd77e0f6f..d3b947527f 100644 --- a/lib/kernel/doc/src/ref_man.xml +++ b/lib/kernel/doc/src/ref_man.xml @@ -4,7 +4,7 @@ <application xmlns:xi="http://www.w3.org/2001/XInclude"> <header> <copyright> - <year>1996</year><year>2016</year> + <year>1996</year><year>2018</year> <holder>Ericsson AB. 
All Rights Reserved.</holder> </copyright> <legalnotice> @@ -32,12 +32,15 @@ </description> <xi:include href="kernel_app.xml"/> + <xi:include href="app.xml"/> <xi:include href="application.xml"/> <xi:include href="auth.xml"/> <xi:include href="code.xml"/> + <xi:include href="config.xml"/> <xi:include href="disk_log.xml"/> <xi:include href="erl_boot_server.xml"/> <xi:include href="erl_ddll.xml"/> + <xi:include href="erl_epmd.xml"/> <xi:include href="erl_prim_loader_stub.xml"/> <xi:include href="erlang_stub.xml"/> <xi:include href="error_handler.xml"/> @@ -52,6 +55,11 @@ <xi:include href="inet.xml"/> <xi:include href="inet_res.xml"/> <xi:include href="init_stub.xml"/> + <xi:include href="logger.xml"/> + <xi:include href="logger_filters.xml"/> + <xi:include href="logger_formatter.xml"/> + <xi:include href="logger_std_h.xml"/> + <xi:include href="logger_disk_log_h.xml"/> <xi:include href="net_adm.xml"/> <xi:include href="net_kernel.xml"/> <xi:include href="os.xml"/> @@ -61,6 +69,4 @@ <xi:include href="user.xml"/> <xi:include href="wrap_log_reader.xml"/> <xi:include href="zlib_stub.xml"/> - <xi:include href="app.xml"/> - <xi:include href="config.xml"/> </application> diff --git a/lib/kernel/doc/src/rpc.xml b/lib/kernel/doc/src/rpc.xml index adec2d9520..fab616e630 100644 --- a/lib/kernel/doc/src/rpc.xml +++ b/lib/kernel/doc/src/rpc.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>1996</year><year>2016</year> + <year>1996</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -217,7 +217,7 @@ <list type="bulleted"> <item>A list of the nodes that do not exist</item> <item>A list of the nodes where the server does not exist</item> - <item>A list of the nodes where the server terminatd before sending + <item>A list of the nodes where the server terminated before sending any reply.</item> </list> </desc> @@ -268,8 +268,9 @@ on the specified nodes and collects the answers. It returns <c>{<anno>ResL</anno>, <anno>BadNodes</anno>}</c>, where <c><anno>BadNodes</anno></c> is a list - of the nodes that terminated or timed out during computation, - and <c><anno>ResL</anno></c> is a list of the return values. + of the nodes that do not exist, + and <c><anno>ResL</anno></c> is a list of the return values, + or <c>{badrpc, <anno>Reason</anno>}</c> for failing calls. <c><anno>Timeout</anno></c> is a time (integer) in milliseconds, or <c>infinity</c>.</p> <p>The following example is useful when new object code is to @@ -347,7 +348,7 @@ <func> <name name="pmap" arity="3"/> - <fsummary>Parallell evaluation of mapping a function over a + <fsummary>Parallel evaluation of mapping a function over a list.</fsummary> <desc> <p>Evaluates <c>apply(<anno>Module</anno>, <anno>Function</anno>, diff --git a/lib/kernel/doc/src/seq_trace.xml b/lib/kernel/doc/src/seq_trace.xml index 197851021f..1a4a74419a 100644 --- a/lib/kernel/doc/src/seq_trace.xml +++ b/lib/kernel/doc/src/seq_trace.xml @@ -4,7 +4,7 @@ <erlref> <header> <copyright> - <year>1998</year><year>2017</year> + <year>1998</year><year>2018</year> <holder>Ericsson AB. All Rights Reserved.</holder> </copyright> <legalnotice> @@ -80,13 +80,18 @@ seq_trace:set_token(OldToken), % activate the trace token again <p>Sets the individual <c><anno>Component</anno></c> of the trace token to <c><anno>Val</anno></c>. 
Returns the previous value of the component.</p> <taglist> - <tag><c>set_token(label, <anno>Integer</anno>)</c></tag> + <tag><c>set_token(label, <anno>Label</anno>)</c></tag> <item> - <p>The <c>label</c> component is an integer which + <p>The <c>label</c> component is a term which identifies all events belonging to the same sequential trace. If several sequential traces can be active simultaneously, <c>label</c> is used to identify the separate traces. Default is 0.</p> + <warning> + <p>Labels were restricted to small signed integers (28 bits) + prior to OTP 21. The trace token will be silenty dropped if it + crosses over to a node that does not support the label.</p> + </warning> </item> <tag><c>set_token(serial, SerialValue)</c></tag> <item> diff --git a/lib/kernel/doc/src/specs.xml b/lib/kernel/doc/src/specs.xml index 29d52f23bb..b8c25ca53b 100644 --- a/lib/kernel/doc/src/specs.xml +++ b/lib/kernel/doc/src/specs.xml @@ -6,6 +6,7 @@ <xi:include href="../specs/specs_disk_log.xml"/> <xi:include href="../specs/specs_erl_boot_server.xml"/> <xi:include href="../specs/specs_erl_ddll.xml"/> + <xi:include href="../specs/specs_erl_epmd.xml"/> <xi:include href="../specs/specs_erl_prim_loader_stub.xml"/> <xi:include href="../specs/specs_erlang_stub.xml"/> <xi:include href="../specs/specs_error_handler.xml"/> @@ -20,6 +21,11 @@ <xi:include href="../specs/specs_inet.xml"/> <xi:include href="../specs/specs_inet_res.xml"/> <xi:include href="../specs/specs_init_stub.xml"/> + <xi:include href="../specs/specs_logger.xml"/> + <xi:include href="../specs/specs_logger_filters.xml"/> + <xi:include href="../specs/specs_logger_formatter.xml"/> + <xi:include href="../specs/specs_logger_std_h.xml"/> + <xi:include href="../specs/specs_logger_disk_log_h.xml"/> <xi:include href="../specs/specs_net_adm.xml"/> <xi:include href="../specs/specs_net_kernel.xml"/> <xi:include href="../specs/specs_os.xml"/> diff --git a/lib/kernel/doc/src/user_guide.gif b/lib/kernel/doc/src/user_guide.gif Binary files differdeleted file mode 100644 index e6275a803d..0000000000 --- a/lib/kernel/doc/src/user_guide.gif +++ /dev/null diff --git a/lib/kernel/examples/Makefile b/lib/kernel/examples/Makefile index 26ec58f571..f86e662838 100644 --- a/lib/kernel/examples/Makefile +++ b/lib/kernel/examples/Makefile @@ -45,7 +45,7 @@ RELSYSDIR = $(RELEASE_PATH)/lib/kernel-$(KERNEL_VSN)/examples # Pack and install the complete directory structure from # here (CWD) and down, for all examples. 
-EXAMPLES = uds_dist
+EXAMPLES = uds_dist gen_tcp_dist
 
 release_spec:
 	$(INSTALL_DIR) "$(RELSYSDIR)"
diff --git a/lib/kernel/examples/gen_tcp_dist/Makefile b/lib/kernel/examples/gen_tcp_dist/Makefile
new file mode 100644
index 0000000000..65513a1729
--- /dev/null
+++ b/lib/kernel/examples/gen_tcp_dist/Makefile
@@ -0,0 +1,20 @@
+RM=rm -f
+CP=cp
+EBIN=ebin
+ERLC=erlc
+# Works if building in open source source tree
+KERNEL_INCLUDE=$(ERL_TOP)/lib/kernel/include
+ERLCFLAGS+= -W -I$(KERNEL_INCLUDE)
+
+MODULES=gen_tcp_dist
+
+TARGET_FILES=$(MODULES:%=$(EBIN)/%.beam)
+
+opt: $(TARGET_FILES)
+
+$(EBIN)/%.beam: src/%.erl
+	$(ERLC) $(ERLCFLAGS) -o$(EBIN) $<
+
+clean:
+	$(RM) $(TARGET_FILES)
+
diff --git a/lib/kernel/examples/gen_tcp_dist/ebin/.gitignore b/lib/kernel/examples/gen_tcp_dist/ebin/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/kernel/examples/gen_tcp_dist/ebin/.gitignore
diff --git a/lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl b/lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl
new file mode 100644
index 0000000000..98554ed805
--- /dev/null
+++ b/lib/kernel/examples/gen_tcp_dist/src/gen_tcp_dist.erl
@@ -0,0 +1,781 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%%     http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(gen_tcp_dist).
+
+%%
+%% This is an example of how to plug in an arbitrary distribution
+%% carrier for Erlang using distribution processes.
+%%
+%% This example uses gen_tcp for transportation of data, but
+%% you can use whatever underlying protocol you want as long
+%% as your implementation reliably delivers data chunks to the
+%% receiving VM in the order they were sent from the sending
+%% VM.
+%%
+%% This code is a rewrite of the lib/kernel/src/inet_tcp_dist.erl
+%% distribution implementation for TCP used by default. That
+%% implementation uses distribution ports instead of distribution
+%% processes and is more efficient compared to this implementation,
+%% since this implementation more or less just puts the distribution
+%% processes in between the VM and the ports without any specific
+%% gain.
+%%
+
+-export([listen/1, accept/1, accept_connection/5,
+	 setup/5, close/1, select/1, is_node_name/1]).
+
+%% Optional
+-export([setopts/2, getopts/2]).
+
+%% internal exports
+
+-export([dist_cntrlr_setup/1, dist_cntrlr_input_setup/3,
+	 dist_cntrlr_tick_handler/1]).
+
+-export([accept_loop/2,do_accept/6,do_setup/6]).
+
+-import(error_logger,[error_msg/2]).
+
+-include("net_address.hrl").
+
+-include("dist.hrl").
+-include("dist_util.hrl").
+
+%% ------------------------------------------------------------
+%% Select this protocol based on node name
+%% select(Node) => Bool
+%% ------------------------------------------------------------
+
+select(Node) ->
+    case split_node(atom_to_list(Node), $@, []) of
+	[_, Host] ->
+	    case inet:getaddr(Host, inet) of
+		{ok,_} -> true;
+		_ -> false
+	    end;
+	_ -> false
+    end.
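+
+%% Usage sketch (added for illustration, not part of the original
+%% example): assuming the module has been compiled and its ebin
+%% directory is available early enough on the code path, a node could
+%% be started with this carrier roughly like
+%%
+%%   erl -pa path/to/gen_tcp_dist/ebin -proto_dist gen_tcp -sname foo
+%%
+%% where -proto_dist gen_tcp makes the runtime pick gen_tcp_dist as
+%% its distribution module.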
+ +%% ------------------------------------------------------------ +%% Create the listen socket, i.e. the port that this erlang +%% node is accessible through. +%% ------------------------------------------------------------ + +listen(Name) -> + case do_listen([binary, {active, false}, {packet,2}, {reuseaddr, true}]) of + {ok, Socket} -> + TcpAddress = get_tcp_address(Socket), + {_,Port} = TcpAddress#net_address.address, + ErlEpmd = net_kernel:epmd_module(), + case ErlEpmd:register_node(Name, Port) of + {ok, Creation} -> + {ok, {Socket, TcpAddress, Creation}}; + Error -> + Error + end; + Error -> + Error + end. + +do_listen(Options) -> + {First,Last} = case application:get_env(kernel,inet_dist_listen_min) of + {ok,N} when is_integer(N) -> + case application:get_env(kernel, + inet_dist_listen_max) of + {ok,M} when is_integer(M) -> + {N,M}; + _ -> + {N,N} + end; + _ -> + {0,0} + end, + do_listen(First, Last, listen_options([{backlog,128}|Options])). + +do_listen(First,Last,_) when First > Last -> + {error,eaddrinuse}; +do_listen(First,Last,Options) -> + case gen_tcp:listen(First, Options) of + {error, eaddrinuse} -> + do_listen(First+1,Last,Options); + Other -> + Other + end. + +listen_options(Opts0) -> + Opts1 = + case application:get_env(kernel, inet_dist_use_interface) of + {ok, Ip} -> + [{ip, Ip} | Opts0]; + _ -> + Opts0 + end, + case application:get_env(kernel, inet_dist_listen_options) of + {ok,ListenOpts} -> + ListenOpts ++ Opts1; + _ -> + Opts1 + end. + + +%% ------------------------------------------------------------ +%% Accepts new connection attempts from other Erlang nodes. +%% ------------------------------------------------------------ + +accept(Listen) -> + spawn_opt(?MODULE, accept_loop, [self(), Listen], [link, {priority, max}]). + +accept_loop(Kernel, Listen) -> + ?trace("~p~n",[{?MODULE, accept_loop, self()}]), + case gen_tcp:accept(Listen) of + {ok, Socket} -> + DistCtrl = spawn_dist_cntrlr(Socket), + ?trace("~p~n",[{?MODULE, accept_loop, accepted, Socket, DistCtrl, self()}]), + flush_controller(DistCtrl, Socket), + gen_tcp:controlling_process(Socket, DistCtrl), + flush_controller(DistCtrl, Socket), + Kernel ! {accept,self(),DistCtrl,inet,tcp}, + receive + {Kernel, controller, Pid} -> + call_ctrlr(DistCtrl, {supervisor, Pid}), + Pid ! {self(), controller}; + {Kernel, unsupported_protocol} -> + exit(unsupported_protocol) + end, + accept_loop(Kernel, Listen); + Error -> + exit(Error) + end. + +flush_controller(Pid, Socket) -> + receive + {tcp, Socket, Data} -> + Pid ! {tcp, Socket, Data}, + flush_controller(Pid, Socket); + {tcp_closed, Socket} -> + Pid ! {tcp_closed, Socket}, + flush_controller(Pid, Socket) + after 0 -> + ok + end. + +%% ------------------------------------------------------------ +%% Accepts a new connection attempt from another Erlang node. +%% Performs the handshake with the other side. +%% ------------------------------------------------------------ + +accept_connection(AcceptPid, DistCtrl, MyNode, Allowed, SetupTime) -> + spawn_opt(?MODULE, do_accept, + [self(), AcceptPid, DistCtrl, MyNode, Allowed, SetupTime], + [link, {priority, max}]). 
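+
+%% Configuration sketch (an assumption added for illustration): the
+%% port range and extra socket options read by do_listen/1 and
+%% listen_options/1 above are ordinary Kernel parameters, so the
+%% listen port of a node using this module could be restricted with,
+%% for example,
+%%
+%%   erl -proto_dist gen_tcp -pa path/to/ebin \
+%%       -kernel inet_dist_listen_min 4370 inet_dist_listen_max 4380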
+ +do_accept(Kernel, AcceptPid, DistCtrl, MyNode, Allowed, SetupTime) -> + ?trace("~p~n",[{?MODULE, do_accept, self(), MyNode}]), + receive + {AcceptPid, controller} -> + Timer = dist_util:start_timer(SetupTime), + case check_ip(DistCtrl) of + true -> + HSData0 = hs_data_common(DistCtrl), + HSData = HSData0#hs_data{kernel_pid = Kernel, + this_node = MyNode, + socket = DistCtrl, + timer = Timer, + this_flags = 0, + allowed = Allowed}, + dist_util:handshake_other_started(HSData); + {false,IP} -> + error_msg("** Connection attempt from " + "disallowed IP ~w ** ~n", [IP]), + ?shutdown(no_node) + end + end. + +%% we may not always want the nodelay behaviour +%% for performance reasons + +nodelay() -> + case application:get_env(kernel, dist_nodelay) of + undefined -> + {nodelay, true}; + {ok, true} -> + {nodelay, true}; + {ok, false} -> + {nodelay, false}; + _ -> + {nodelay, true} + end. + +%% ------------------------------------------------------------ +%% Setup a new connection to another Erlang node. +%% Performs the handshake with the other side. +%% ------------------------------------------------------------ + +setup(Node, Type, MyNode, LongOrShortNames,SetupTime) -> + spawn_opt(?MODULE, do_setup, + [self(), Node, Type, MyNode, LongOrShortNames, SetupTime], + [link, {priority, max}]). + +do_setup(Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) -> + ?trace("~p~n",[{?MODULE, do_setup, self(), Node}]), + [Name, Address] = splitnode(Node, LongOrShortNames), + case inet:getaddr(Address, inet) of + {ok, Ip} -> + Timer = dist_util:start_timer(SetupTime), + ErlEpmd = net_kernel:epmd_module(), + case ErlEpmd:port_please(Name, Ip) of + {port, TcpPort, Version} -> + ?trace("port_please(~p) -> version ~p~n", + [Node,Version]), + dist_util:reset_timer(Timer), + case + gen_tcp:connect( + Ip, TcpPort, + connect_options([binary, {active, false}, {packet, 2}])) + of + {ok, Socket} -> + DistCtrl = spawn_dist_cntrlr(Socket), + call_ctrlr(DistCtrl, {supervisor, self()}), + flush_controller(DistCtrl, Socket), + gen_tcp:controlling_process(Socket, DistCtrl), + flush_controller(DistCtrl, Socket), + HSData0 = hs_data_common(DistCtrl), + HSData = HSData0#hs_data{kernel_pid = Kernel, + other_node = Node, + this_node = MyNode, + socket = DistCtrl, + timer = Timer, + this_flags = 0, + other_version = Version, + request_type = Type}, + dist_util:handshake_we_started(HSData); + _ -> + %% Other Node may have closed since + %% port_please ! + ?trace("other node (~p) " + "closed since port_please.~n", + [Node]), + ?shutdown(Node) + end; + _ -> + ?trace("port_please (~p) " + "failed.~n", [Node]), + ?shutdown(Node) + end; + _Other -> + ?trace("inet_getaddr(~p) " + "failed (~p).~n", [Node,_Other]), + ?shutdown(Node) + end. + +connect_options(Opts) -> + case application:get_env(kernel, inet_dist_connect_options) of + {ok,ConnectOpts} -> + ConnectOpts ++ Opts; + _ -> + Opts + end. + +%% +%% Close a socket. +%% +close(Listen) -> + gen_tcp:close(Listen). + + +%% If Node is illegal terminate the connection setup!! 
+splitnode(Node, LongOrShortNames) -> + case split_node(atom_to_list(Node), $@, []) of + [Name|Tail] when Tail =/= [] -> + Host = lists:append(Tail), + case split_node(Host, $., []) of + [_] when LongOrShortNames =:= longnames -> + case inet:parse_address(Host) of + {ok, _} -> + [Name, Host]; + _ -> + error_msg("** System running to use " + "fully qualified " + "hostnames **~n" + "** Hostname ~ts is illegal **~n", + [Host]), + ?shutdown(Node) + end; + L when length(L) > 1, LongOrShortNames =:= shortnames -> + error_msg("** System NOT running to use fully qualified " + "hostnames **~n" + "** Hostname ~ts is illegal **~n", + [Host]), + ?shutdown(Node); + _ -> + [Name, Host] + end; + [_] -> + error_msg("** Nodename ~p illegal, no '@' character **~n", + [Node]), + ?shutdown(Node); + _ -> + error_msg("** Nodename ~p illegal **~n", [Node]), + ?shutdown(Node) + end. + +split_node([Chr|T], Chr, Ack) -> [lists:reverse(Ack)|split_node(T, Chr, [])]; +split_node([H|T], Chr, Ack) -> split_node(T, Chr, [H|Ack]); +split_node([], _, Ack) -> [lists:reverse(Ack)]. + +%% ------------------------------------------------------------ +%% Fetch local information about a Socket. +%% ------------------------------------------------------------ +get_tcp_address(Socket) -> + {ok, Address} = inet:sockname(Socket), + {ok, Host} = inet:gethostname(), + #net_address { + address = Address, + host = Host, + protocol = tcp, + family = inet + }. + +%% ------------------------------------------------------------ +%% Do only accept new connection attempts from nodes at our +%% own LAN, if the check_ip environment parameter is true. +%% ------------------------------------------------------------ +check_ip(DistCtrl) -> + case application:get_env(check_ip) of + {ok, true} -> + case get_ifs(DistCtrl) of + {ok, IFs, IP} -> + check_ip(IFs, IP); + _ -> + ?shutdown(no_node) + end; + _ -> + true + end. + +get_ifs(DistCtrl) -> + Socket = call_ctrlr(DistCtrl, socket), + case inet:peername(Socket) of + {ok, {IP, _}} -> + case inet:getif(Socket) of + {ok, IFs} -> {ok, IFs, IP}; + Error -> Error + end; + Error -> + Error + end. + +check_ip([{OwnIP, _, Netmask}|IFs], PeerIP) -> + case {inet_tcp:mask(Netmask, PeerIP), inet_tcp:mask(Netmask, OwnIP)} of + {M, M} -> true; + _ -> check_ip(IFs, PeerIP) + end; +check_ip([], PeerIP) -> + {false, PeerIP}. + +is_node_name(Node) when is_atom(Node) -> + case split_node(atom_to_list(Node), $@, []) of + [_, _Host] -> true; + _ -> false + end; +is_node_name(_Node) -> + false. + +hs_data_common(DistCtrl) -> + TickHandler = call_ctrlr(DistCtrl, tick_handler), + Socket = call_ctrlr(DistCtrl, socket), + #hs_data{f_send = send_fun(), + f_recv = recv_fun(), + f_setopts_pre_nodeup = setopts_pre_nodeup_fun(), + f_setopts_post_nodeup = setopts_post_nodeup_fun(), + f_getll = getll_fun(), + f_handshake_complete = handshake_complete_fun(), + f_address = address_fun(), + mf_setopts = setopts_fun(DistCtrl, Socket), + mf_getopts = getopts_fun(DistCtrl, Socket), + mf_getstat = getstat_fun(DistCtrl, Socket), + mf_tick = tick_fun(DistCtrl, TickHandler)}. + +%%% ------------------------------------------------------------ +%%% Distribution controller processes +%%% ------------------------------------------------------------ + +%% +%% There will be five parties working together when the +%% connection is up: +%% - The gen_tcp socket. Providing a tcp/ip connection +%% to the other node. +%% - The output handler. It will dispatch all outgoing +%% traffic from the VM to the gen_tcp socket. 
This +%% process is registered as distribution controller +%% for this channel with the VM. +%% - The input handler. It will dispatch all incoming +%% traffic from the gen_tcp socket to the VM. This +%% process is also the socket owner and receives +%% incoming traffic using active-N. +%% - The tick handler. Dispatches asynchronous tick +%% requests to the socket. It executes on max priority +%% since it is important to get ticks through to the +%% other end. +%% - The channel supervisor (provided by dist_util). It +%% monitors traffic. Issue tick requests to the tick +%% handler when no outgoing traffic is seen and bring +%% the connection down if no incoming traffic is seen. +%% This process also executes on max priority. +%% +%% These parties are linked togheter so should one +%% of them fail, all of them are terminated and the +%% connection is taken down. +%% + +%% In order to avoid issues with lingering signal binaries +%% we enable off-heap message queue data as well as fullsweep +%% after 0. The fullsweeps will be cheap since we have more +%% or less no live data. +-define(DIST_CNTRL_COMMON_SPAWN_OPTS, + [{message_queue_data, off_heap}, + {fullsweep_after, 0}]). + +tick_fun(DistCtrl, TickHandler) -> + fun (Ctrl) when Ctrl == DistCtrl -> + TickHandler ! tick + end. + +getstat_fun(DistCtrl, Socket) -> + fun (Ctrl) when Ctrl == DistCtrl -> + case inet:getstat(Socket, [recv_cnt, send_cnt, send_pend]) of + {ok, Stat} -> + split_stat(Stat,0,0,0); + Error -> + Error + end + end. + +split_stat([{recv_cnt, R}|Stat], _, W, P) -> + split_stat(Stat, R, W, P); +split_stat([{send_cnt, W}|Stat], R, _, P) -> + split_stat(Stat, R, W, P); +split_stat([{send_pend, P}|Stat], R, W, _) -> + split_stat(Stat, R, W, P); +split_stat([], R, W, P) -> + {ok, R, W, P}. + +setopts_fun(DistCtrl, Socket) -> + fun (Ctrl, Opts) when Ctrl == DistCtrl -> + setopts(Socket, Opts) + end. + +getopts_fun(DistCtrl, Socket) -> + fun (Ctrl, Opts) when Ctrl == DistCtrl -> + getopts(Socket, Opts) + end. + +setopts(S, Opts) -> + case [Opt || {K,_}=Opt <- Opts, + K =:= active orelse K =:= deliver orelse K =:= packet] of + [] -> inet:setopts(S,Opts); + Opts1 -> {error, {badopts,Opts1}} + end. + +getopts(S, Opts) -> + inet:getopts(S, Opts). + +send_fun() -> + fun (Ctrlr, Packet) -> + call_ctrlr(Ctrlr, {send, Packet}) + end. + +recv_fun() -> + fun (Ctrlr, Length, Timeout) -> + case call_ctrlr(Ctrlr, {recv, Length, Timeout}) of + {ok, Bin} when is_binary(Bin) -> + {ok, binary_to_list(Bin)}; + Other -> + Other + end + end. + +getll_fun() -> + fun (Ctrlr) -> + call_ctrlr(Ctrlr, getll) + end. + +address_fun() -> + fun (Ctrlr, Node) -> + case call_ctrlr(Ctrlr, {address, Node}) of + {error, no_node} -> %% No '@' or more than one '@' in node name. + ?shutdown(no_node); + Res -> + Res + end + end. + +setopts_pre_nodeup_fun() -> + fun (Ctrlr) -> + call_ctrlr(Ctrlr, pre_nodeup) + end. + +setopts_post_nodeup_fun() -> + fun (Ctrlr) -> + call_ctrlr(Ctrlr, post_nodeup) + end. + +handshake_complete_fun() -> + fun (Ctrlr, Node, DHandle) -> + call_ctrlr(Ctrlr, {handshake_complete, Node, DHandle}) + end. + +call_ctrlr(Ctrlr, Msg) -> + Ref = erlang:monitor(process, Ctrlr), + Ctrlr ! {Ref, self(), Msg}, + receive + {Ref, Res} -> + erlang:demonitor(Ref, [flush]), + Res; + {'DOWN', Ref, process, Ctrlr, Reason} -> + exit({dist_controller_exit, Reason}) + end. + +%% +%% The tick handler process writes a tick to the +%% socket when it receives a 'tick' message from +%% the connection supervisor. 
+%% +%% We are not allowed to block the connection +%% superviser when writing a tick and we also want +%% the tick to go through even during a heavily +%% loaded system. gen_tcp does not have a +%% non-blocking send operation exposed in its API +%% and we don't want to run the distribution +%% controller under high priority. Therefore this +%% sparate process with max prio that dispatches +%% ticks. +%% +dist_cntrlr_tick_handler(Socket) -> + receive + tick -> + %% May block due to busy port... + sock_send(Socket, ""); + _ -> + ok + end, + dist_cntrlr_tick_handler(Socket). + +spawn_dist_cntrlr(Socket) -> + spawn_opt(?MODULE, dist_cntrlr_setup, [Socket], + [{priority, max}] ++ ?DIST_CNTRL_COMMON_SPAWN_OPTS). + +dist_cntrlr_setup(Socket) -> + TickHandler = spawn_opt(?MODULE, dist_cntrlr_tick_handler, + [Socket], + [link, {priority, max}] + ++ ?DIST_CNTRL_COMMON_SPAWN_OPTS), + dist_cntrlr_setup_loop(Socket, TickHandler, undefined). + +%% +%% During the handshake phase we loop in dist_cntrlr_setup(). +%% When the connection is up we spawn an input handler and +%% continue as output handler. +%% +dist_cntrlr_setup_loop(Socket, TickHandler, Sup) -> + receive + {tcp_closed, Socket} -> + exit(connection_closed); + + {Ref, From, {supervisor, Pid}} -> + Res = link(Pid), + From ! {Ref, Res}, + dist_cntrlr_setup_loop(Socket, TickHandler, Pid); + + {Ref, From, tick_handler} -> + From ! {Ref, TickHandler}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, socket} -> + From ! {Ref, Socket}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, {send, Packet}} -> + Res = gen_tcp:send(Socket, Packet), + From ! {Ref, Res}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, {recv, Length, Timeout}} -> + Res = gen_tcp:recv(Socket, Length, Timeout), + From ! {Ref, Res}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, getll} -> + From ! {Ref, {ok, self()}}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, {address, Node}} -> + Res = case inet:peername(Socket) of + {ok, Address} -> + case split_node(atom_to_list(Node), $@, []) of + [_,Host] -> + #net_address{address=Address,host=Host, + protocol=tcp, family=inet}; + _ -> + {error, no_node} + end + end, + From ! {Ref, Res}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, pre_nodeup} -> + Res = inet:setopts(Socket, + [{active, false}, + {packet, 4}, + nodelay()]), + From ! {Ref, Res}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, post_nodeup} -> + Res = inet:setopts(Socket, + [{active, false}, + {packet, 4}, + nodelay()]), + From ! {Ref, Res}, + dist_cntrlr_setup_loop(Socket, TickHandler, Sup); + + {Ref, From, {handshake_complete, _Node, DHandle}} -> + From ! {Ref, ok}, + %% Handshake complete! Begin dispatching traffic... + + %% We use separate process for dispatching input. This + %% is not necessary, but it enables parallel execution + %% of independent work loads at the same time as it + %% simplifies the the implementation... + InputHandler = spawn_opt(?MODULE, dist_cntrlr_input_setup, + [DHandle, Socket, Sup], + [link] ++ ?DIST_CNTRL_COMMON_SPAWN_OPTS), + + flush_controller(InputHandler, Socket), + gen_tcp:controlling_process(Socket, InputHandler), + flush_controller(InputHandler, Socket), + + ok = erlang:dist_ctrl_input_handler(DHandle, InputHandler), + + InputHandler ! 
DHandle, + + %% From now on we execute on normal priority + process_flag(priority, normal), + erlang:dist_ctrl_get_data_notification(DHandle), + dist_cntrlr_output_loop(DHandle, Socket) + end. + +%% We use active 10 for good throughput while still +%% maintaining back-pressure if the input controller +%% isn't able to handle all incoming messages... +-define(ACTIVE_INPUT, 10). + +dist_cntrlr_input_setup(DHandle, Socket, Sup) -> + link(Sup), + %% Ensure we don't try to put data before registerd + %% as input handler... + receive + DHandle -> + dist_cntrlr_input_loop(DHandle, Socket, 0) + end. + +dist_cntrlr_input_loop(DHandle, Socket, N) when N =< ?ACTIVE_INPUT/2 -> + inet:setopts(Socket, [{active, ?ACTIVE_INPUT - N}]), + dist_cntrlr_input_loop(DHandle, Socket, ?ACTIVE_INPUT); +dist_cntrlr_input_loop(DHandle, Socket, N) -> + receive + {tcp_closed, Socket} -> + %% Connection to remote node terminated... + exit(connection_closed); + + {tcp, Socket, Data} -> + %% Incoming data from remote node... + try erlang:dist_ctrl_put_data(DHandle, Data) + catch _ : _ -> death_row() + end, + dist_cntrlr_input_loop(DHandle, Socket, N-1); + + _ -> + %% Ignore... + dist_cntrlr_input_loop(DHandle, Socket, N) + end. + +dist_cntrlr_send_data(DHandle, Socket) -> + case erlang:dist_ctrl_get_data(DHandle) of + none -> + erlang:dist_ctrl_get_data_notification(DHandle); + Data -> + sock_send(Socket, Data), + dist_cntrlr_send_data(DHandle, Socket) + end. + + +dist_cntrlr_output_loop(DHandle, Socket) -> + receive + dist_data -> + %% Outgoing data from this node... + try dist_cntrlr_send_data(DHandle, Socket) + catch _ : _ -> death_row() + end, + dist_cntrlr_output_loop(DHandle, Socket); + + {send, From, Ref, Data} -> + %% This is for testing only! + %% + %% Needed by some OTP distribution + %% test suites... + sock_send(Socket, Data), + From ! {Ref, ok}, + dist_cntrlr_output_loop(DHandle, Socket); + + _ -> + %% Drop garbage message... + dist_cntrlr_output_loop(DHandle, Socket) + + end. + +sock_send(Socket, Data) -> + try gen_tcp:send(Socket, Data) of + ok -> ok; + {error, Reason} -> death_row({send_error, Reason}) + catch + Type : Reason -> death_row({send_error, {Type, Reason}}) + end. + +death_row() -> + death_row(connection_closed). + +death_row(normal) -> + %% We do not want to exit with normal + %% exit reason since it wont bring down + %% linked processes... + death_row(); +death_row(Reason) -> + %% When the connection is on its way down operations + %% begin to fail. We catch the failures and call + %% this function waiting for termination. We should + %% be terminated by one of our links to the other + %% involved parties that began bringing the + %% connection down. By waiting for termination we + %% avoid altering the exit reason for the connection + %% teardown. We however limit the wait to 5 seconds + %% and bring down the connection ourselves if not + %% terminated... + receive after 5000 -> exit(Reason) end. diff --git a/lib/kernel/include/dist.hrl b/lib/kernel/include/dist.hrl index d6bccdf474..003852f1b0 100644 --- a/lib/kernel/include/dist.hrl +++ b/lib/kernel/include/dist.hrl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1999-2016. All Rights Reserved. +%% Copyright Ericsson AB 1999-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -40,3 +40,8 @@ -define(DFLAG_UTF8_ATOMS, 16#10000). -define(DFLAG_MAP_TAG, 16#20000). -define(DFLAG_BIG_CREATION, 16#40000). 
+-define(DFLAG_SEND_SENDER, 16#80000). +-define(DFLAG_BIG_SEQTRACE_LABELS, 16#100000). + +%% Also update dflag2str() in ../src/dist_util.erl +%% when adding flags... diff --git a/lib/kernel/include/dist_util.hrl b/lib/kernel/include/dist_util.hrl index e3d2fe0eb6..56f775f060 100644 --- a/lib/kernel/include/dist_util.hrl +++ b/lib/kernel/include/dist_util.hrl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1999-2016. All Rights Reserved. +%% Copyright Ericsson AB 1999-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -29,9 +29,9 @@ -endif. -ifdef(dist_trace). --define(trace(Fmt,Args), io:format("~p ~p:~s",[erlang:timestamp(),node(),lists:flatten(io_lib:format(Fmt, Args))])). +-define(trace(Fmt,Args), io:format("~p ~p:~s",[erlang:convert_time_unit(erlang:monotonic_time()-erlang:system_info(start_time), native, microsecond),node(),lists:flatten(io_lib:format(Fmt, Args))])). % Use the one below for config-file (early boot) connection tracing -%-define(trace(Fmt,Args), erlang:display([erlang:now(),node(),lists:flatten(io_lib:format(Fmt, Args))])). +%-define(trace(Fmt,Args), erlang:display([erlang:convert_time_unit(erlang:monotonic_time()-erlang:system_info(start_time), native, microsecond),node(),lists:flatten(io_lib:format(Fmt, Args))])). -define(trace_factor,8). -else. -define(trace(Fmt,Args), ok). @@ -78,7 +78,13 @@ %% New in kernel-5.1 (OTP 19.1): mf_setopts, %% netkernel:setopts on active connection - mf_getopts %% netkernel:getopts on active connection + mf_getopts, %% netkernel:getopts on active connection + + %% New in kernel-6.0 (OTP 21.0) + f_handshake_complete, %% Notify handshake complete + add_flags, %% dflags to add + reject_flags, %% dflags not to use (not all can be rejected) + require_flags %% dflags that are required }). diff --git a/lib/kernel/include/logger.hrl b/lib/kernel/include/logger.hrl new file mode 100644 index 0000000000..b09977e0f2 --- /dev/null +++ b/lib/kernel/include/logger.hrl @@ -0,0 +1,53 @@ +-ifndef(LOGGER_HRL). +-define(LOGGER_HRL,true). +-define(LOG_EMERGENCY(A),?DO_LOG(emergency,[A])). +-define(LOG_EMERGENCY(A,B),?DO_LOG(emergency,[A,B])). +-define(LOG_EMERGENCY(A,B,C),?DO_LOG(emergency,[A,B,C])). + +-define(LOG_ALERT(A),?DO_LOG(alert,[A])). +-define(LOG_ALERT(A,B),?DO_LOG(alert,[A,B])). +-define(LOG_ALERT(A,B,C),?DO_LOG(alert,[A,B,C])). + +-define(LOG_CRITICAL(A),?DO_LOG(critical,[A])). +-define(LOG_CRITICAL(A,B),?DO_LOG(critical,[A,B])). +-define(LOG_CRITICAL(A,B,C),?DO_LOG(critical,[A,B,C])). + +-define(LOG_ERROR(A),?DO_LOG(error,[A])). +-define(LOG_ERROR(A,B),?DO_LOG(error,[A,B])). +-define(LOG_ERROR(A,B,C),?DO_LOG(error,[A,B,C])). + +-define(LOG_WARNING(A),?DO_LOG(warning,[A])). +-define(LOG_WARNING(A,B),?DO_LOG(warning,[A,B])). +-define(LOG_WARNING(A,B,C),?DO_LOG(warning,[A,B,C])). + +-define(LOG_NOTICE(A),?DO_LOG(notice,[A])). +-define(LOG_NOTICE(A,B),?DO_LOG(notice,[A,B])). +-define(LOG_NOTICE(A,B,C),?DO_LOG(notice,[A,B,C])). + +-define(LOG_INFO(A),?DO_LOG(info,[A])). +-define(LOG_INFO(A,B),?DO_LOG(info,[A,B])). +-define(LOG_INFO(A,B,C),?DO_LOG(info,[A,B,C])). + +-define(LOG_DEBUG(A),?DO_LOG(debug,[A])). +-define(LOG_DEBUG(A,B),?DO_LOG(debug,[A,B])). +-define(LOG_DEBUG(A,B,C),?DO_LOG(debug,[A,B,C])). + +-define(LOG(L,A),?DO_LOG(L,[A])). +-define(LOG(L,A,B),?DO_LOG(L,[A,B])). +-define(LOG(L,A,B,C),?DO_LOG(L,[A,B,C])). 
+ +-define(LOCATION,#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY}, + line=>?LINE, + file=>?FILE}). + +%%%----------------------------------------------------------------- +%%% Internal, i.e. not intended for direct use in code - use above +%%% macros instead! +-define(DO_LOG(Level,Args), + case logger:allow(Level,?MODULE) of + true -> + apply(logger,macro_log,[?LOCATION,Level|Args]); + false -> + ok + end). +-endif. diff --git a/lib/kernel/src/Makefile b/lib/kernel/src/Makefile index 5946620f0f..57f17defc8 100644 --- a/lib/kernel/src/Makefile +++ b/lib/kernel/src/Makefile @@ -1,7 +1,7 @@ # # %CopyrightBegin% # -# Copyright Ericsson AB 1996-2017. All Rights Reserved. +# Copyright Ericsson AB 1996-2018. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -106,8 +106,21 @@ MODULES = \ inet_sctp \ kernel \ kernel_config \ + kernel_refc \ local_udp \ local_tcp \ + logger \ + logger_backend \ + logger_config \ + logger_handler_watcher \ + logger_std_h \ + logger_disk_log_h \ + logger_h_common \ + logger_filters \ + logger_formatter \ + logger_server \ + logger_simple_h \ + logger_sup \ net \ net_adm \ net_kernel \ @@ -120,17 +133,25 @@ MODULES = \ user \ user_drv \ user_sup \ + raw_file_io \ + raw_file_io_compressed \ + raw_file_io_inflate \ + raw_file_io_deflate \ + raw_file_io_delayed \ + raw_file_io_list \ + raw_file_io_raw \ wrap_log_reader HRL_FILES= ../include/file.hrl ../include/inet.hrl ../include/inet_sctp.hrl \ ../include/dist.hrl ../include/dist_util.hrl \ - ../include/net_address.hrl + ../include/net_address.hrl ../include/logger.hrl INTERNAL_HRL_FILES= application_master.hrl disk_log.hrl \ - erl_epmd.hrl hipe_ext_format.hrl \ + erl_epmd.hrl file_int.hrl hipe_ext_format.hrl \ inet_dns.hrl inet_res.hrl \ inet_boot.hrl inet_config.hrl inet_int.hrl \ - inet_dns_record_adts.hrl + inet_dns_record_adts.hrl \ + logger_internal.hrl logger_h_common.hrl ERL_FILES= $(MODULES:%=%.erl) @@ -215,7 +236,7 @@ release_docs_spec: # Include dependencies -- list below added by Kostis Sagonas -$(EBIN)/application_controller.beam: application_master.hrl +$(EBIN)/application_controller.beam: application_master.hrl ../include/logger.hrl $(EBIN)/application_master.beam: application_master.hrl $(EBIN)/auth.beam: ../include/file.hrl $(EBIN)/code.beam: ../include/file.hrl @@ -226,7 +247,9 @@ $(EBIN)/disk_log_server.beam: disk_log.hrl $(EBIN)/dist_util.beam: ../include/dist_util.hrl ../include/dist.hrl $(EBIN)/erl_boot_server.beam: inet_boot.hrl $(EBIN)/erl_epmd.beam: inet_int.hrl erl_epmd.hrl -$(EBIN)/file.beam: ../include/file.hrl +$(EBIN)/error_logger.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/file.beam: ../include/file.hrl file_int.hrl +$(EBIN)/file_io_server.beam: ../include/file.hrl file_int.hrl $(EBIN)/gen_tcp.beam: inet_int.hrl $(EBIN)/gen_udp.beam: inet_int.hrl $(EBIN)/gen_sctp.beam: ../include/inet_sctp.hrl @@ -250,7 +273,24 @@ $(EBIN)/inet_udp.beam: inet_int.hrl $(EBIN)/inet_sctp.beam: inet_int.hrl ../include/inet_sctp.hrl $(EBIN)/local_udp.beam: inet_int.hrl $(EBIN)/local_tcp.beam: inet_int.hrl +$(EBIN)/logger.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/logger_backend.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/logger_config.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/logger_disk_log_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl +$(EBIN)/logger_filters.beam: logger_internal.hrl ../include/logger.hrl 
+$(EBIN)/logger_formatter.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/logger_server.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/logger_simple_h.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/logger_std_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl +$(EBIN)/logger_h_common.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl $(EBIN)/net_kernel.beam: ../include/net_address.hrl $(EBIN)/os.beam: ../include/file.hrl $(EBIN)/ram_file.beam: ../include/file.hrl $(EBIN)/wrap_log_reader.beam: disk_log.hrl ../include/file.hrl +$(EBIN)/raw_file_io.beam: ../include/file.hrl file_int.hrl +$(EBIN)/raw_file_io_compressed.beam: ../include/file.hrl file_int.hrl +$(EBIN)/raw_file_io_inflate.beam: ../include/file.hrl file_int.hrl +$(EBIN)/raw_file_io_deflate.beam: ../include/file.hrl file_int.hrl +$(EBIN)/raw_file_io_delayed.beam: ../include/file.hrl file_int.hrl +$(EBIN)/raw_file_io_list.beam: ../include/file.hrl file_int.hrl +$(EBIN)/raw_file_io_raw.beam: ../include/file.hrl file_int.hrl diff --git a/lib/kernel/src/application_controller.erl b/lib/kernel/src/application_controller.erl index 3b642f5873..a074d2e74b 100644 --- a/lib/kernel/src/application_controller.erl +++ b/lib/kernel/src/application_controller.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -44,6 +44,7 @@ keyfind/3, keydelete/3, keyreplace/4]). -include("application_master.hrl"). +-include("logger.hrl"). -define(AC, ?MODULE). % Name of process @@ -1271,9 +1272,7 @@ load(S, {ApplData, ApplEnv, IncApps, Descr, Id, Vsn, Apps}) -> NewEnv = merge_app_env(ApplEnv, ConfEnv), CmdLineEnv = get_cmd_env(Name), NewEnv2 = merge_app_env(NewEnv, CmdLineEnv), - NewEnv3 = keyreplaceadd(included_applications, 1, NewEnv2, - {included_applications, IncApps}), - add_env(Name, NewEnv3), + add_env(Name, NewEnv2), Appl = #appl{name = Name, descr = Descr, id = Id, vsn = Vsn, appl_data = ApplData, inc_apps = IncApps, apps = Apps}, ets:insert(ac_tab, {{loaded, Name}, Appl}), @@ -1291,7 +1290,7 @@ load(S, {ApplData, ApplEnv, IncApps, Descr, Id, Vsn, Apps}) -> {ok, NewS}. unload(AppName, S) -> - {ok, IncApps} = get_env(AppName, included_applications), + {ok, IncApps} = get_key(AppName, included_applications), del_env(AppName), ets:delete(ac_tab, {loaded, AppName}), foldl(fun(App, S1) -> @@ -1546,9 +1545,8 @@ do_change_apps(Applications, Config, OldAppls) -> %% Report errors, but do not terminate %% (backwards compatible behaviour) lists:foreach(fun({error, {SysFName, Line, Str}}) -> - Str2 = lists:flatten(io_lib:format("~tp: ~w: ~ts~n", - [SysFName, Line, Str])), - error_logger:format(Str2, []) + ?LOG_ERROR("~tp: ~w: ~ts~n",[SysFName, Line, Str], + #{error_logger=>#{tag=>error}}) end, Errors), @@ -1583,13 +1581,9 @@ do_change_appl({ok, {ApplData, Env, IncApps, Descr, Id, Vsn, Apps}}, CmdLineEnv = get_cmd_env(AppName), NewEnv2 = merge_app_env(NewEnv1, CmdLineEnv), - %% included_apps is made into an env parameter as well - NewEnv3 = keyreplaceadd(included_applications, 1, NewEnv2, - {included_applications, IncApps}), - %% Update ets table with new application env del_env(AppName), - add_env(AppName, NewEnv3), + add_env(AppName, NewEnv2), OldAppl#appl{appl_data=ApplData, descr=Descr, @@ -1631,8 +1625,9 @@ make_term(Str) -> end. 
handle_make_term_error(Mod, Reason, Str) -> - error_logger:format("application_controller: ~ts: ~ts~n", - [Mod:format_error(Reason), Str]), + ?LOG_ERROR("application_controller: ~ts: ~ts~n", + [Mod:format_error(Reason), Str], + #{error_logger=>#{tag=>error}}), throw({error, {bad_environment_value, Str}}). get_env_i(Name, #state{conf_data = ConfData}) when is_list(ConfData) -> @@ -1819,8 +1814,9 @@ check_conf() -> %% Therefore read and merge contents. if BFName =:= "sys" -> + DName = filename:dirname(FName), {ok, SysEnv, Errors} = - check_conf_sys(NewEnv), + check_conf_sys(NewEnv, [], [], DName), %% Report first error, if any, and %% terminate @@ -1842,20 +1838,31 @@ check_conf() -> end. check_conf_sys(Env) -> - check_conf_sys(Env, [], []). + check_conf_sys(Env, [], [], []). -check_conf_sys([File|T], SysEnv, Errors) when is_list(File) -> +check_conf_sys([File|T], SysEnv, Errors, DName) when is_list(File),is_list(DName) -> BFName = filename:basename(File, ".config"), FName = filename:join(filename:dirname(File), BFName ++ ".config"), - case load_file(FName) of + LName = case filename:pathtype(FName) of + relative when (DName =/= []) -> + % Check if relative to sys.config dir otherwise use legacy mode, + % i.e relative to cwd. + RName = filename:join(DName, FName), + case erl_prim_loader:read_file_info(RName) of + {ok, _} -> RName ; + error -> FName + end; + _ -> FName + end, + case load_file(LName) of {ok, NewEnv} -> - check_conf_sys(T, merge_env(SysEnv, NewEnv), Errors); + check_conf_sys(T, merge_env(SysEnv, NewEnv), Errors, DName); {error, {Line, _Mod, Str}} -> - check_conf_sys(T, SysEnv, [{error, {FName, Line, Str}}|Errors]) + check_conf_sys(T, SysEnv, [{error, {LName, Line, Str}}|Errors], DName) end; -check_conf_sys([Tuple|T], SysEnv, Errors) -> - check_conf_sys(T, merge_env(SysEnv, [Tuple]), Errors); -check_conf_sys([], SysEnv, Errors) -> +check_conf_sys([Tuple|T], SysEnv, Errors, DName) -> + check_conf_sys(T, merge_env(SysEnv, [Tuple]), Errors, DName); +check_conf_sys([], SysEnv, Errors, _) -> {ok, SysEnv, lists:reverse(Errors)}. load_file(File) -> @@ -1913,19 +1920,25 @@ config_error() -> "configuration file must contain ONE list ended by <dot>"}}. %%----------------------------------------------------------------- -%% Info messages sent to error_logger +%% Info messages sent to logger %%----------------------------------------------------------------- info_started(Name, Node) -> - Rep = [{application, Name}, - {started_at, Node}], - error_logger:info_report(progress, Rep). + ?LOG_INFO(#{label=>{application_controller,progress}, + report=>[{application, Name}, + {started_at, Node}]}, + #{domain=>[otp,sasl], + report_cb=>fun logger:format_otp_report/1, + logger_formatter=>#{title=>"PROGRESS REPORT"}, + error_logger=>#{tag=>info_report,type=>progress}}). info_exited(Name, Reason, Type) -> - Rep = [{application, Name}, - {exited, Reason}, - {type, Type}], - error_logger:info_report(Rep). - + ?LOG_NOTICE(#{label=>{application_controller,exit}, + report=>[{application, Name}, + {exited, Reason}, + {type, Type}]}, + #{domain=>[otp], + report_cb=>fun logger:format_otp_report/1, + error_logger=>#{tag=>info_report,type=>std_info}}). %%----------------------------------------------------------------- %% Reply to all processes waiting this application to be started. @@ -2012,5 +2025,5 @@ to_string(Term) -> true -> Term; false -> - lists:flatten(io_lib:format("~134217728p", [Term])) + lists:flatten(io_lib:format("~0p", [Term])) end. 
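The application_controller hunks above are among the first in-tree users of the new kernel/include/logger.hrl macros: ?DO_LOG only calls logger:macro_log after logger:allow(Level, ?MODULE) confirms the event would pass, so the message and its arguments are not evaluated for disabled levels. A minimal usage sketch (the module name and messages are made up for illustration; the include path is the one installed by the Makefile change above):

-module(my_app_worker).                      %% hypothetical module, illustration only
-include_lib("kernel/include/logger.hrl").   %% provides ?LOG_ERROR, ?LOG_NOTICE, ?LOG_DEBUG, ...

-export([handle_result/1]).

handle_result({error, Reason}) ->
    %% Expands to a logger:allow/2 check guarding a logger:macro_log call,
    %% so the arguments below are only evaluated when 'error' is enabled.
    ?LOG_ERROR("operation failed: ~tp", [Reason]);
handle_result(ok) ->
    ?LOG_DEBUG("operation succeeded").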
diff --git a/lib/kernel/src/application_master.erl b/lib/kernel/src/application_master.erl index 5da2b0b06c..8697143dfb 100644 --- a/lib/kernel/src/application_master.erl +++ b/lib/kernel/src/application_master.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -118,6 +118,10 @@ init(Parent, Starter, ApplData, Type) -> link(Parent), process_flag(trap_exit, true), OldGleader = group_leader(), + %% We become the group leader, but forward all I/O to OldGleader. + %% This is just a way to identify processes that belong to the + %% application. Used for example to find ourselves from any + %% process, or, reciprocally, to kill them all when we terminate. group_leader(self(), self()), %% Insert ourselves as master for the process. This ensures that %% the processes in the application can use get_env/1 at startup. diff --git a/lib/kernel/src/auth.erl b/lib/kernel/src/auth.erl index 40feee6bf0..4d18daf9e4 100644 --- a/lib/kernel/src/auth.erl +++ b/lib/kernel/src/auth.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -107,7 +107,7 @@ get_cookie() -> get_cookie(_Node) when node() =:= nonode@nohost -> nocookie; get_cookie(Node) -> - gen_server:call(auth, {get_cookie, Node}). + gen_server:call(auth, {get_cookie, Node}, infinity). -spec set_cookie(Cookie :: cookie()) -> 'true'. @@ -119,12 +119,12 @@ set_cookie(Cookie) -> set_cookie(_Node, _Cookie) when node() =:= nonode@nohost -> erlang:error(distribution_not_started); set_cookie(Node, Cookie) -> - gen_server:call(auth, {set_cookie, Node, Cookie}). + gen_server:call(auth, {set_cookie, Node, Cookie}, infinity). -spec sync_cookie() -> any(). sync_cookie() -> - gen_server:call(auth, sync_cookie). + gen_server:call(auth, sync_cookie, infinity). -spec print(Node :: node(), Format :: string(), Args :: [_]) -> 'ok'. diff --git a/lib/kernel/src/code.erl b/lib/kernel/src/code.erl index 9969021a6c..7faef93609 100644 --- a/lib/kernel/src/code.erl +++ b/lib/kernel/src/code.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -149,8 +149,11 @@ load_file(Mod) when is_atom(Mod) -> -spec ensure_loaded(Module) -> {module, Module} | {error, What} when Module :: module(), What :: embedded | badfile | nofile | on_load_failure. -ensure_loaded(Mod) when is_atom(Mod) -> - call({ensure_loaded,Mod}). +ensure_loaded(Mod) when is_atom(Mod) -> + case erlang:module_loaded(Mod) of + true -> {module, Mod}; + false -> call({ensure_loaded,Mod}) + end. %% XXX File as an atom is allowed only for backwards compatibility. -spec load_abs(Filename) -> load_ret() when @@ -627,7 +630,7 @@ do_par_recv(N, Good, Bad) -> call(Req) -> code_server:call(Req). --spec start_link() -> {'ok', pid()} | {'error', 'crash'}. +-spec start_link() -> {'ok', pid()}. start_link() -> do_start(). 
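The code:ensure_loaded/1 change above adds a fast path: when the module is already present, the answer comes straight from the erlang:module_loaded/1 BIF and the code server is never called, which matters for callers that invoke ensure_loaded defensively on hot paths. A sketch of the same pattern as a standalone helper (the function name is hypothetical; OTP does this inside code:ensure_loaded/1 itself):

%% Check locally before asking the server.
ensure_loaded_fast(Mod) when is_atom(Mod) ->
    case erlang:module_loaded(Mod) of
        true  -> {module, Mod};           %% already in the code table: no server round trip
        false -> code:ensure_loaded(Mod)  %% slow path: ask the code server to load it
    end.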
diff --git a/lib/kernel/src/code_server.erl b/lib/kernel/src/code_server.erl index 418b0c50e1..1b4a67ecb7 100644 --- a/lib/kernel/src/code_server.erl +++ b/lib/kernel/src/code_server.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2016. All Rights Reserved. +%% Copyright Ericsson AB 1998-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -340,8 +340,7 @@ handle_call(all_loaded, _From, S) -> {reply,all_loaded(Db),S}; handle_call({get_object_code,Mod}, _From, St) when is_atom(Mod) -> - Path = St#state.path, - case mod_to_bin(Path, Mod) of + case get_object_code(St, Mod) of {_,Bin,FName} -> {reply,{Mod,Bin,FName},St}; Error -> {reply,Error,St} end; @@ -1182,19 +1181,28 @@ load_file(Mod, From, St0) -> end, handle_pending_on_load(Action, Mod, From, St0). -load_file_1(Mod, From, #state{path=Path}=St) -> - case mod_to_bin(Path, Mod) of +load_file_1(Mod, From, St) -> + case get_object_code(St, Mod) of error -> {reply,{error,nofile},St}; {Mod,Binary,File} -> try_load_module_1(File, Mod, Binary, From, St) end. -mod_to_bin([Dir|Tail], Mod) -> - File = filename:append(Dir, atom_to_list(Mod) ++ objfile_extension()), +get_object_code(#state{path=Path}, Mod) when is_atom(Mod) -> + ModStr = atom_to_list(Mod), + case erl_prim_loader:is_basename(ModStr) of + true -> + mod_to_bin(Path, Mod, ModStr ++ objfile_extension()); + false -> + error + end. + +mod_to_bin([Dir|Tail], Mod, ModFile) -> + File = filename:append(Dir, ModFile), case erl_prim_loader:get_file(File) of error -> - mod_to_bin(Tail, Mod); + mod_to_bin(Tail, Mod, ModFile); {ok,Bin,_} -> case filename:pathtype(File) of absolute -> @@ -1203,10 +1211,9 @@ mod_to_bin([Dir|Tail], Mod) -> {Mod,Bin,absname(File)} end end; -mod_to_bin([], Mod) -> +mod_to_bin([], Mod, ModFile) -> %% At last, try also erl_prim_loader's own method - File = to_list(Mod) ++ objfile_extension(), - case erl_prim_loader:get_file(File) of + case erl_prim_loader:get_file(ModFile) of error -> error; % No more alternatives ! {ok,Bin,FName} -> @@ -1427,14 +1434,20 @@ all_loaded(Db) -> -spec error_msg(io:format(), [term()]) -> 'ok'. error_msg(Format, Args) -> - Msg = {notify,{error, group_leader(), {self(), Format, Args}}}, - error_logger ! Msg, + logger ! {log,error,Format,Args, + #{pid=>self(), + gl=>group_leader(), + time=>erlang:system_time(microsecond), + error_logger=>#{tag=>error}}}, ok. -spec info_msg(io:format(), [term()]) -> 'ok'. info_msg(Format, Args) -> - Msg = {notify,{info_msg, group_leader(), {self(), Format, Args}}}, - error_logger ! Msg, + logger ! {log,info,Format,Args, + #{pid=>self(), + gl=>group_leader(), + time=>erlang:system_time(microsecond), + error_logger=>#{tag=>info_msg}}}, ok. objfile_extension() -> diff --git a/lib/kernel/src/disk_log.erl b/lib/kernel/src/disk_log.erl index 70cbf1c87c..99ea8dc384 100644 --- a/lib/kernel/src/disk_log.erl +++ b/lib/kernel/src/disk_log.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -266,7 +266,7 @@ inc_wrap_file(Log) -> Size :: dlog_size(), Reason :: no_such_log | nonode | {read_only_mode, Log} | {blocked_log, Log} - | {new_size_too_small, CurrentSize :: pos_integer()} + | {new_size_too_small, Log, CurrentSize :: pos_integer()} | {badarg, size} | {file_error, file:filename(), file_error()}. change_size(Log, NewSize) -> diff --git a/lib/kernel/src/disk_log_1.erl b/lib/kernel/src/disk_log_1.erl index 93856aa7b3..41ef33c613 100644 --- a/lib/kernel/src/disk_log_1.erl +++ b/lib/kernel/src/disk_log_1.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -630,7 +630,7 @@ is_head(Bin) when is_binary(Bin) -> %% Writes MaxB bytes on each file. %% Creates a file called Name.idx in the Dir. This %% file contains the last written FileName as one byte, and -%% follwing that, the sizes of each file (size 0 number of items). +%% following that, the sizes of each file (size 0 number of items). %% On startup, this file is read, and the next available %% filename is used as first log file. %% Reports can be browsed with Report Browser Tool (rb), or diff --git a/lib/kernel/src/dist_util.erl b/lib/kernel/src/dist_util.erl index b3507e5d13..ecc022b28d 100644 --- a/lib/kernel/src/dist_util.erl +++ b/lib/kernel/src/dist_util.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1999-2017. All Rights Reserved. +%% Copyright Ericsson AB 1999-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -27,8 +27,10 @@ %%-compile(export_all). -export([handshake_we_started/1, handshake_other_started/1, + strict_order_flags/0, start_timer/1, setup_timer/2, reset_timer/1, cancel_timer/1, + is_node_name/1, split_node/1, is_allowed/2, shutdown/3, shutdown/4]). -import(error_logger,[error_msg/2]). @@ -74,22 +76,52 @@ ticked = 0 }). -remove_flag(Flag, Flags) -> - case Flags band Flag of - 0 -> - Flags; - _ -> - Flags - Flag - end. +dflag2str(?DFLAG_PUBLISHED) -> + "PUBLISHED"; +dflag2str(?DFLAG_ATOM_CACHE) -> + "ATOM_CACHE"; +dflag2str(?DFLAG_EXTENDED_REFERENCES) -> + "EXTENDED_REFERENCES"; +dflag2str(?DFLAG_DIST_MONITOR) -> + "DIST_MONITOR"; +dflag2str(?DFLAG_FUN_TAGS) -> + "FUN_TAGS"; +dflag2str(?DFLAG_DIST_MONITOR_NAME) -> + "DIST_MONITOR_NAME"; +dflag2str(?DFLAG_HIDDEN_ATOM_CACHE) -> + "HIDDEN_ATOM_CACHE"; +dflag2str(?DFLAG_NEW_FUN_TAGS) -> + "NEW_FUN_TAGS"; +dflag2str(?DFLAG_EXTENDED_PIDS_PORTS) -> + "EXTENDED_PIDS_PORTS"; +dflag2str(?DFLAG_EXPORT_PTR_TAG) -> + "EXPORT_PTR_TAG"; +dflag2str(?DFLAG_BIT_BINARIES) -> + "BIT_BINARIES"; +dflag2str(?DFLAG_NEW_FLOATS) -> + "NEW_FLOATS"; +dflag2str(?DFLAG_UNICODE_IO) -> + "UNICODE_IO"; +dflag2str(?DFLAG_DIST_HDR_ATOM_CACHE) -> + "DIST_HDR_ATOM_CACHE"; +dflag2str(?DFLAG_SMALL_ATOM_TAGS) -> + "SMALL_ATOM_TAGS"; +dflag2str(?DFLAG_UTF8_ATOMS) -> + "UTF8_ATOMS"; +dflag2str(?DFLAG_MAP_TAG) -> + "MAP_TAG"; +dflag2str(?DFLAG_BIG_CREATION) -> + "BIG_CREATION"; +dflag2str(?DFLAG_SEND_SENDER) -> + "SEND_SENDER"; +dflag2str(?DFLAG_BIG_SEQTRACE_LABELS) -> + "BIG_SEQTRACE_LABELS"; +dflag2str(_) -> + "UNKNOWN". 
+ adjust_flags(ThisFlags, OtherFlags) -> - case (?DFLAG_PUBLISHED band ThisFlags) band OtherFlags of - 0 -> - {remove_flag(?DFLAG_PUBLISHED, ThisFlags), - remove_flag(?DFLAG_PUBLISHED, OtherFlags)}; - _ -> - {ThisFlags, OtherFlags} - end. + ThisFlags band OtherFlags. publish_flag(hidden, _) -> 0; @@ -101,38 +133,56 @@ publish_flag(_, OtherNode) -> 0 end. -make_this_flags(RequestType, OtherNode) -> - publish_flag(RequestType, OtherNode) bor - %% The parenthesis below makes the compiler generate better code. - (?DFLAG_EXPORT_PTR_TAG bor - ?DFLAG_EXTENDED_PIDS_PORTS bor - ?DFLAG_EXTENDED_REFERENCES bor - ?DFLAG_DIST_MONITOR bor - ?DFLAG_FUN_TAGS bor - ?DFLAG_DIST_MONITOR_NAME bor - ?DFLAG_HIDDEN_ATOM_CACHE bor - ?DFLAG_NEW_FUN_TAGS bor - ?DFLAG_BIT_BINARIES bor - ?DFLAG_NEW_FLOATS bor - ?DFLAG_UNICODE_IO bor - ?DFLAG_DIST_HDR_ATOM_CACHE bor - ?DFLAG_SMALL_ATOM_TAGS bor - ?DFLAG_UTF8_ATOMS bor - ?DFLAG_MAP_TAG bor - ?DFLAG_BIG_CREATION). - -handshake_other_started(#hs_data{request_type=ReqType}=HSData0) -> + +%% Sync with dist.c +-record(erts_dflags, { + default, % flags erts prefers + mandatory, % flags erts needs + addable, % flags local dist implementation is allowed to add + rejectable, % flags local dist implementation is allowed to reject + strict_order % flags for features needing strict order delivery +}). + +-spec strict_order_flags() -> integer(). +strict_order_flags() -> + EDF = erts_internal:get_dflags(), + EDF#erts_dflags.strict_order. + +make_this_flags(RequestType, AddFlags, RejectFlags, OtherNode, + #erts_dflags{}=EDF) -> + case RejectFlags band (bnot EDF#erts_dflags.rejectable) of + 0 -> ok; + Rerror -> exit({"Rejecting non rejectable flags", Rerror}) + end, + case AddFlags band (bnot EDF#erts_dflags.addable) of + 0 -> ok; + Aerror -> exit({"Adding non addable flags", Aerror}) + end, + Flgs0 = EDF#erts_dflags.default, + Flgs1 = Flgs0 bor publish_flag(RequestType, OtherNode), + Flgs2 = Flgs1 bor AddFlags, + Flgs2 band (bnot RejectFlags). 
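adjust_flags/2 is now a plain band: the handshake keeps exactly the capabilities both nodes advertise, and make_this_flags/5 only lets a distribution implementation add or reject bits within what erts_internal:get_dflags/0 reports as addable or rejectable. A toy negotiation, assuming the ?DFLAG_* macros from include/dist.hrl are in scope (the function name is illustrative only, not OTP code):

negotiated_flags_example() ->
    ThisFlags   = ?DFLAG_EXTENDED_REFERENCES bor ?DFLAG_UTF8_ATOMS bor ?DFLAG_SEND_SENDER,
    OtherFlags  = ?DFLAG_EXTENDED_REFERENCES bor ?DFLAG_UTF8_ATOMS,
    ChosenFlags = ThisFlags band OtherFlags,
    %% SEND_SENDER is cleared because the peer did not offer it; the two
    %% common bits survive, and the same value ends up in both this_flags
    %% and other_flags of #hs_data{} (see handshake_other_started/1 below).
    ChosenFlags.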
+ +handshake_other_started(#hs_data{request_type=ReqType, + add_flags=AddFlgs0, + reject_flags=RejFlgs0, + require_flags=ReqFlgs0}=HSData0) -> + AddFlgs = convert_flags(AddFlgs0), + RejFlgs = convert_flags(RejFlgs0), + ReqFlgs = convert_flags(ReqFlgs0), {PreOtherFlags,Node,Version} = recv_name(HSData0), - PreThisFlags = make_this_flags(ReqType, Node), - {ThisFlags, OtherFlags} = adjust_flags(PreThisFlags, - PreOtherFlags), - HSData = HSData0#hs_data{this_flags=ThisFlags, - other_flags=OtherFlags, + EDF = erts_internal:get_dflags(), + PreThisFlags = make_this_flags(ReqType, AddFlgs, RejFlgs, Node, EDF), + ChosenFlags = adjust_flags(PreThisFlags, PreOtherFlags), + HSData = HSData0#hs_data{this_flags=ChosenFlags, + other_flags=ChosenFlags, other_version=Version, other_node=Node, - other_started=true}, - check_dflags(HSData), - is_allowed(HSData), + other_started=true, + add_flags=AddFlgs, + reject_flags=RejFlgs, + require_flags=ReqFlgs}, + check_dflags(HSData, EDF), ?debug({"MD5 connection from ~p (V~p)~n", [Node, HSData#hs_data.other_version]}), mark_pending(HSData), @@ -150,38 +200,16 @@ handshake_other_started(OldHsData) when element(1,OldHsData) =:= hs_data -> %% -%% check if connecting node is allowed to connect -%% with allow-node-scheme -%% -is_allowed(#hs_data{other_node = Node, - allowed = Allowed} = HSData) -> - case lists:member(Node, Allowed) of - false when Allowed =/= [] -> - send_status(HSData, not_allowed), - error_msg("** Connection attempt from " - "disallowed node ~w ** ~n", [Node]), - ?shutdown2(Node, {is_allowed, not_allowed}); - _ -> true - end. - -%% -%% Check that both nodes can handle the same types of extended -%% node containers. If they can not, abort the connection. +%% Check mandatory flags... %% check_dflags(#hs_data{other_node = Node, other_flags = OtherFlags, - other_started = OtherStarted} = HSData) -> - - Mandatory = [{?DFLAG_EXTENDED_REFERENCES, "EXTENDED_REFERENCES"}, - {?DFLAG_EXTENDED_PIDS_PORTS, "EXTENDED_PIDS_PORTS"}, - {?DFLAG_UTF8_ATOMS, "UTF8_ATOMS"}], - Missing = lists:filtermap(fun({Bit, Str}) -> - case Bit band OtherFlags of - Bit -> false; - 0 -> {true, Str} - end - end, - Mandatory), + other_started = OtherStarted, + require_flags = RequiredFlags} = HSData, + #erts_dflags{}=EDF) -> + + Mandatory = (EDF#erts_dflags.mandatory bor RequiredFlags), + Missing = check_mandatory(Mandatory, OtherFlags, []), case Missing of [] -> ok; @@ -201,6 +229,21 @@ check_dflags(#hs_data{other_node = Node, ?shutdown2(Node, {check_dflags_failed, Missing}) end. +check_mandatory(0, _OtherFlags, Missing) -> + Missing; +check_mandatory(Mandatory, OtherFlags, Missing) -> + Left = Mandatory band (Mandatory - 1), % clear lowest set bit + DFlag = Mandatory bxor Left, % only lowest set bit + NewMissing = case DFlag band OtherFlags of + 0 -> + %% Mandatory and missing... + [dflag2str(DFlag) | Missing]; + _ -> + %% Mandatory and present... + Missing + end, + check_mandatory(Left, OtherFlags, NewMissing). + %% No nodedown will be sent if we fail before this process has %% succeeded to mark the node as pending. @@ -314,17 +357,27 @@ flush_down() -> end. 
handshake_we_started(#hs_data{request_type=ReqType, - other_node=Node}=PreHSData) -> - PreThisFlags = make_this_flags(ReqType, Node), - HSData = PreHSData#hs_data{this_flags=PreThisFlags}, + other_node=Node, + add_flags=AddFlgs0, + reject_flags=RejFlgs0, + require_flags=ReqFlgs0}=PreHSData) -> + AddFlgs = convert_flags(AddFlgs0), + RejFlgs = convert_flags(RejFlgs0), + ReqFlgs = convert_flags(ReqFlgs0), + EDF = erts_internal:get_dflags(), + PreThisFlags = make_this_flags(ReqType, AddFlgs, RejFlgs, Node, EDF), + HSData = PreHSData#hs_data{this_flags = PreThisFlags, + add_flags = AddFlgs, + reject_flags = RejFlgs, + require_flags = ReqFlgs}, send_name(HSData), recv_status(HSData), {PreOtherFlags,ChallengeA} = recv_challenge(HSData), - {ThisFlags,OtherFlags} = adjust_flags(PreThisFlags, PreOtherFlags), - NewHSData = HSData#hs_data{this_flags = ThisFlags, - other_flags = OtherFlags, + ChosenFlags = adjust_flags(PreThisFlags, PreOtherFlags), + NewHSData = HSData#hs_data{this_flags = ChosenFlags, + other_flags = ChosenFlags, other_started = false}, - check_dflags(NewHSData), + check_dflags(NewHSData, EDF), MyChallenge = gen_challenge(), {MyCookie,HisCookie} = get_cookies(Node), send_challenge_reply(NewHSData,MyChallenge, @@ -336,15 +389,16 @@ handshake_we_started(#hs_data{request_type=ReqType, handshake_we_started(OldHsData) when element(1,OldHsData) =:= hs_data -> handshake_we_started(convert_old_hsdata(OldHsData)). -convert_old_hsdata({hs_data, KP, ON, TN, S, T, TF, A, OV, OF, OS, FS, FR, - FS_PRE, FS_POST, FG, FA, MFT, MFG, RT}) -> - #hs_data{ - kernel_pid = KP, other_node = ON, this_node = TN, socket = S, timer = T, - this_flags = TF, allowed = A, other_version = OV, other_flags = OF, - other_started = OS, f_send = FS, f_recv = FR, f_setopts_pre_nodeup = FS_PRE, - f_setopts_post_nodeup = FS_POST, f_getll = FG, f_address = FA, - mf_tick = MFT, mf_getstat = MFG, request_type = RT}. +convert_old_hsdata(OldHsData) -> + OHSDL = tuple_to_list(OldHsData), + NoMissing = tuple_size(#hs_data{}) - tuple_size(OldHsData), + true = NoMissing > 0, + list_to_tuple(OHSDL ++ lists:duplicate(NoMissing, undefined)). +convert_flags(Flags) when is_integer(Flags) -> + Flags; +convert_flags(_Undefined) -> + 0. %% -------------------------------------------------------------- %% The connection has been established. @@ -359,15 +413,20 @@ connection(#hs_data{other_node = Node, PType = publish_type(HSData#hs_data.other_flags), case FPreNodeup(Socket) of ok -> - do_setnode(HSData), % Succeeds or exits the process. + DHandle = do_setnode(HSData), % Succeeds or exits the process. 
Address = FAddress(Socket,Node), mark_nodeup(HSData,Address), case FPostNodeup(Socket) of ok -> + case HSData#hs_data.f_handshake_complete of + undefined -> ok; + HsComplete -> HsComplete(Socket, Node, DHandle) + end, con_loop({HSData#hs_data.kernel_pid, Node, Socket, PType, + DHandle, HSData#hs_data.mf_tick, HSData#hs_data.mf_getstat, HSData#hs_data.mf_setopts, @@ -425,18 +484,16 @@ do_setnode(#hs_data{other_node = Node, socket = Socket, [Node, Port, {publish_type(Flags), '(', Flags, ')', Version}]), - case (catch - erlang:setnode(Node, Port, - {Flags, Version, '', ''})) of - {'EXIT', {system_limit, _}} -> + try + erlang:setnode(Node, Port, {Flags, Version, '', ''}) + catch + error:system_limit -> error_msg("** Distribution system limit reached, " "no table space left for node ~w ** ~n", [Node]), ?shutdown(Node); - {'EXIT', Other} -> - exit(Other); - _Else -> - ok + error:Other:Stacktrace -> + exit({Other, Stacktrace}) end; _ -> error_msg("** Distribution connection error, " @@ -468,7 +525,13 @@ mark_nodeup(#hs_data{kernel_pid = Kernel, ?shutdown(Node) end. -con_loop({Kernel, Node, Socket, Type, MFTick, MFGetstat, MFSetOpts, MFGetOpts}=ConData, +getstat(DHandle, _Socket, undefined) -> + erlang:dist_get_stat(DHandle); +getstat(_DHandle, Socket, MFGetstat) -> + MFGetstat(Socket). + +con_loop({Kernel, Node, Socket, Type, DHandle, MFTick, MFGetstat, + MFSetOpts, MFGetOpts}=ConData, Tick) -> receive {tcp_closed, Socket} -> @@ -476,15 +539,15 @@ con_loop({Kernel, Node, Socket, Type, MFTick, MFGetstat, MFSetOpts, MFGetOpts}=C {Kernel, disconnect} -> ?shutdown2(Node, disconnected); {Kernel, aux_tick} -> - case MFGetstat(Socket) of + case getstat(DHandle, Socket, MFGetstat) of {ok, _, _, PendWrite} -> - send_tick(Socket, PendWrite, MFTick); + send_aux_tick(Type, Socket, PendWrite, MFTick); _ -> ignore_it end, con_loop(ConData, Tick); {Kernel, tick} -> - case send_tick(Socket, Tick, Type, + case send_tick(DHandle, Socket, Tick, Type, MFTick, MFGetstat) of {ok, NewTick} -> con_loop(ConData, NewTick); @@ -497,7 +560,7 @@ con_loop({Kernel, Node, Socket, Type, MFTick, MFGetstat, MFSetOpts, MFGetOpts}=C ?shutdown2(Node, send_net_tick_failed) end; {From, get_status} -> - case MFGetstat(Socket) of + case getstat(DHandle, Socket, MFGetstat) of {ok, Read, Write, _} -> From ! {self(), get_status, {ok, Read, Write}}, con_loop(ConData, Tick); @@ -564,33 +627,130 @@ send_challenge_ack(#hs_data{socket = Socket, f_send = FSend}, %% tcp_drv.c which used it to detect simultaneous connection %% attempts). %% -recv_name(#hs_data{socket = Socket, f_recv = Recv}) -> +recv_name(#hs_data{socket = Socket, f_recv = Recv} = HSData) -> case Recv(Socket, 0, infinity) of - {ok,Data} -> - get_name(Data); + {ok, + [$n,VersionA, VersionB, Flag1, Flag2, Flag3, Flag4 + | OtherNode] = Data} -> + case is_node_name(OtherNode) of + true -> + Flags = ?u32(Flag1, Flag2, Flag3, Flag4), + Version = ?u16(VersionA,VersionB), + is_allowed(HSData, Flags, OtherNode, Version); + false -> + ?shutdown(Data) + end; _ -> ?shutdown(no_node) end. -get_name([$n,VersionA, VersionB, Flag1, Flag2, Flag3, Flag4 | OtherNode] = Data) -> - case is_valid_name(OtherNode) of +is_node_name(OtherNodeName) -> + case string:split(OtherNodeName, "@", all) of + [Name,Host] -> + (not string:is_empty(Name)) + andalso (not string:is_empty(Host)); + _ -> + false + end. 
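recv_name/1 above now validates the incoming node name inline through is_node_name/1, which splits on "@" and requires exactly one non-empty name part and one non-empty host part. A few illustrative calls against the exported function (results follow directly from the clauses above; the wrapper name is made up):

node_name_examples() ->
    true  = dist_util:is_node_name("node@host.example.org"), %% one name, one host
    false = dist_util:is_node_name("node@"),                 %% empty host part
    false = dist_util:is_node_name("@host"),                 %% empty name part
    false = dist_util:is_node_name("plainname"),             %% no "@" at all
    false = dist_util:is_node_name("a@b@c"),                 %% more than one "@"
    ok.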
+ +split_node(Node) -> + Split = string:split(listify(Node), "@", all), + case Split of + [Name,Host] -> + case string:is_empty(Name) of + true -> + Split; + false -> + case string:is_empty(Host) of + true -> + {name,Name}; + false -> + {node,Name,Host} + end + end; + [Host] -> + case string:is_empty(Host) of + true -> + Split; + false -> + {host,Host} + end + end. + +%% Check if connecting node is allowed to connect +%% with allow-node-scheme. An empty allowed list +%% allows all nodes. +%% +is_allowed(#hs_data{allowed = []}, Flags, Node, Version) -> + {Flags,list_to_atom(Node),Version}; +is_allowed(#hs_data{allowed = Allowed} = HSData, Flags, Node, Version) -> + case is_allowed(Node, Allowed) of true -> - {?u32(Flag1, Flag2, Flag3, Flag4), list_to_atom(OtherNode), - ?u16(VersionA,VersionB)}; + {Flags,list_to_atom(Node),Version}; false -> - ?shutdown(Data) - end; -get_name(Data) -> - ?shutdown(Data). - -is_valid_name(OtherNodeName) -> - case string:lexemes(OtherNodeName,"@") of - [_OtherNodeName,_OtherNodeHost] -> - true; - _else -> - false + send_status(HSData#hs_data{other_node = Node}, not_allowed), + error_msg("** Connection attempt from " + "disallowed node ~s ** ~n", [Node]), + ?shutdown2(Node, {is_allowed, not_allowed}) + end. + +%% The allowed list can contain node names, host names +%% or names before '@', in atom or list form: +%% [[email protected], "host.example.org", "node@"]. +%% An empty allowed list allows no nodes. +%% +%% Allow a node that matches any entry in the allowed list. +%% Also allow allowed entries as node to match, not from +%% this module; here the node has to be a valid name. +%% +is_allowed(_Node, []) -> + false; +is_allowed(Node, [Node|_Allowed]) -> + %% Just an optimization + true; +is_allowed(Node, [AllowedNode|Allowed]) -> + case split_node(AllowedNode) of + {node,AllowedName,AllowedHost} -> + %% Allowed node name + case split_node(Node) of + {node,AllowedName,AllowedHost} -> + true; + _ -> + is_allowed(Node, Allowed) + end; + {host,AllowedHost} -> + %% Allowed host name + case split_node(Node) of + {node,_,AllowedHost} -> + %% Matching Host part + true; + {host,AllowedHost} -> + %% Host matches Host + true; + _ -> + is_allowed(Node, Allowed) + end; + {name,AllowedName} -> + %% Allowed name before '@' + case split_node(Node) of + {node,AllowedName,_} -> + %% Matching Name part + true; + {name,AllowedName} -> + %% Name matches Name + true; + _ -> + is_allowed(Node, Allowed) + end; + _ -> + is_allowed(Node, Allowed) end. +listify(Atom) when is_atom(Atom) -> + atom_to_list(Atom); +listify(Node) when is_list(Node) -> + Node. + publish_type(Flags) -> case Flags band ?DFLAG_PUBLISHED of 0 -> @@ -731,51 +891,57 @@ send_status(#hs_data{socket = Socket, other_node = Node, %% The detection time interval is thus, by default, 45s < DT < 75s -%% A HIDDEN node is always (if not a pending write) ticked if -%% we haven't read anything as a hidden node only ticks when it receives -%% a TICK !! +%% A HIDDEN node is always ticked if we haven't read anything +%% as a (primitive) hidden node only ticks when it receives a TICK !! 
-send_tick(Socket, Tick, Type, MFTick, MFGetstat) -> +send_tick(DHandle, Socket, Tick, Type, MFTick, MFGetstat) -> #tick{tick = T0, read = Read, write = Write, - ticked = Ticked} = Tick, + ticked = Ticked0} = Tick, T = T0 + 1, T1 = T rem 4, - case MFGetstat(Socket) of - {ok, Read, _, _} when Ticked =:= T -> + case getstat(DHandle, Socket, MFGetstat) of + {ok, Read, _, _} when Ticked0 =:= T -> {error, not_responding}; - {ok, Read, W, Pend} when Type =:= hidden -> - send_tick(Socket, Pend, MFTick), - {ok, Tick#tick{write = W + 1, - tick = T1}}; - {ok, Read, Write, Pend} -> - send_tick(Socket, Pend, MFTick), - {ok, Tick#tick{write = Write + 1, - tick = T1}}; - {ok, R, Write, Pend} -> - send_tick(Socket, Pend, MFTick), - {ok, Tick#tick{write = Write + 1, - read = R, - tick = T1, - ticked = T}}; - {ok, Read, W, _} -> - {ok, Tick#tick{write = W, - tick = T1}}; - {ok, R, W, _} -> - {ok, Tick#tick{write = W, - read = R, - tick = T1, - ticked = T}}; + + {ok, R, W1, Pend} -> + RDiff = R - Read, + W2 = case need_to_tick(Type, RDiff, W1-Write, Pend) of + true -> + MFTick(Socket), + W1 + 1; + false -> + W1 + end, + + Ticked1 = case RDiff of + 0 -> Ticked0; + _ -> T + end, + + {ok, Tick#tick{write = W2, + tick = T1, + read = R, + ticked = Ticked1}}; + Error -> Error end. -send_tick(Socket, 0, MFTick) -> - MFTick(Socket); -send_tick(_, _Pend, _) -> - %% Dont send tick if pending write. - ok. +need_to_tick(_, _, 0, 0) -> % nothing written and empty send queue + true; +need_to_tick(_, _, 0, false) -> % nothing written and empty send queue + true; +need_to_tick(hidden, 0, _, _) -> % nothing read from hidden + true; +need_to_tick(_, _, _, _) -> + false. + +send_aux_tick(normal, _, Pend, _) when Pend /= false, Pend /= 0 -> + ok; %% Dont send tick if pending write. +send_aux_tick(_Type, Socket, _Pend, MFTick) -> + MFTick(Socket). %% ------------------------------------------------------------ %% Connection setup timeout timer. diff --git a/lib/kernel/src/erl_boot_server.erl b/lib/kernel/src/erl_boot_server.erl index ac81cc9689..4ac945ce01 100644 --- a/lib/kernel/src/erl_boot_server.erl +++ b/lib/kernel/src/erl_boot_server.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -58,13 +58,11 @@ -define(single_addr_mask, {255, 255, 255, 255}). --type ip4_address() :: {0..255,0..255,0..255,0..255}. - --spec start(Slaves) -> {'ok', Pid} | {'error', What} when +-spec start(Slaves) -> {'ok', Pid} | {'error', Reason} when Slaves :: [Host], - Host :: atom(), + Host :: inet:ip_address() | inet:hostname(), Pid :: pid(), - What :: any(). + Reason :: {'badarg', Slaves}. start(Slaves) -> case check_arg(Slaves) of @@ -74,11 +72,11 @@ start(Slaves) -> {error, {badarg, Slaves}} end. --spec start_link(Slaves) -> {'ok', Pid} | {'error', What} when +-spec start_link(Slaves) -> {'ok', Pid} | {'error', Reason} when Slaves :: [Host], - Host :: atom(), + Host :: inet:ip_address() | inet:hostname(), Pid :: pid(), - What :: any(). + Reason :: {'badarg', Slaves}. start_link(Slaves) -> case check_arg(Slaves) of @@ -104,10 +102,10 @@ check_arg([], Result) -> check_arg(_, _Result) -> error. --spec add_slave(Slave) -> 'ok' | {'error', What} when +-spec add_slave(Slave) -> 'ok' | {'error', Reason} when Slave :: Host, - Host :: atom(), - What :: any(). 
+ Host :: inet:ip_address() | inet:hostname(), + Reason :: {'badarg', Slave}. add_slave(Slave) -> case inet:getaddr(Slave, inet) of @@ -117,10 +115,10 @@ add_slave(Slave) -> {error, {badarg, Slave}} end. --spec delete_slave(Slave) -> 'ok' | {'error', What} when +-spec delete_slave(Slave) -> 'ok' | {'error', Reason} when Slave :: Host, - Host :: atom(), - What :: any(). + Host :: inet:ip_address() | inet:hostname(), + Reason :: {'badarg', Slave}. delete_slave(Slave) -> case inet:getaddr(Slave, inet) of @@ -130,7 +128,7 @@ delete_slave(Slave) -> {error, {badarg, Slave}} end. --spec add_subnet(Mask :: ip4_address(), Addr :: ip4_address()) -> +-spec add_subnet(Netmask :: inet:ip_address(), Addr :: inet:ip_address()) -> 'ok' | {'error', any()}. add_subnet(Mask, Addr) when is_tuple(Mask), is_tuple(Addr) -> @@ -141,14 +139,15 @@ add_subnet(Mask, Addr) when is_tuple(Mask), is_tuple(Addr) -> {error, empty_subnet} end. --spec delete_subnet(Mask :: ip4_address(), Addr :: ip4_address()) -> 'ok'. +-spec delete_subnet(Netmask :: inet:ip_address(), + Addr :: inet:ip_address()) -> 'ok'. delete_subnet(Mask, Addr) when is_tuple(Mask), is_tuple(Addr) -> gen_server:call(boot_server, {delete, {Mask, Addr}}). -spec which_slaves() -> Slaves when - Slaves :: [Host], - Host :: atom(). + Slaves :: [Slave], + Slave :: {Netmask :: inet:ip_address(), Address :: inet:ip_address()}. which_slaves() -> gen_server:call(boot_server, which). @@ -253,9 +252,9 @@ handle_info({udp, U, IP, Port, Data}, S0) -> "~w is not a valid address ** ~n", [IP]), {noreply,S0}; {true,_,_} -> - case catch string:substr(Data, 1, length(?EBOOT_REQUEST)) of + case catch string:slice(Data, 0, length(?EBOOT_REQUEST)) of ?EBOOT_REQUEST -> - Vsn = string:substr(Data, length(?EBOOT_REQUEST)+1, length(Data)), + Vsn = string:slice(Data, length(?EBOOT_REQUEST), length(Data)), error_logger:error_msg("** Illegal boot server connection attempt: " "client version is ~s ** ~n", [Vsn]); _ -> diff --git a/lib/kernel/src/erl_epmd.erl b/lib/kernel/src/erl_epmd.erl index 7bc9e2ede3..b7e8868911 100644 --- a/lib/kernel/src/erl_epmd.erl +++ b/lib/kernel/src/erl_epmd.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2016. All Rights Reserved. +%% Copyright Ericsson AB 1998-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -29,10 +29,20 @@ -define(port_please_failure2(Term), noop). -endif. +-ifndef(erlang_daemon_port). +-define(erlang_daemon_port, 4369). +-endif. +-ifndef(epmd_dist_high). +-define(epmd_dist_high, 4370). +-endif. +-ifndef(epmd_dist_low). +-define(epmd_dist_low, 4370). +-endif. + %% External exports -export([start/0, start_link/0, stop/0, port_please/2, port_please/3, names/0, names/1, - register_node/2, register_node/3, open/0, open/1, open/2]). + register_node/2, register_node/3, address_please/3, open/0, open/1, open/2]). %% gen_server callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, @@ -53,7 +63,7 @@ start() -> gen_server:start({local, erl_epmd}, ?MODULE, [], []). - +-spec start_link() -> {ok, pid()} | ignore | {error,term()}. start_link() -> gen_server:start_link({local, erl_epmd}, ?MODULE, [], []). @@ -66,9 +76,22 @@ stop() -> %% return {port, P, Version} | noport %% +-spec port_please(Name, Host) -> {ok, Port, Version} | noport when + Name :: string(), + Host :: inet:ip_address(), + Port :: non_neg_integer(), + Version :: non_neg_integer(). 
+ port_please(Node, Host) -> port_please(Node, Host, infinity). +-spec port_please(Name, Host, Timeout) -> {ok, Port, Version} | noport when + Name :: string(), + Host :: inet:ip_address(), + Timeout :: non_neg_integer() | infinity, + Port :: non_neg_integer(), + Version :: non_neg_integer(). + port_please(Node,HostName, Timeout) when is_atom(HostName) -> port_please1(Node,atom_to_list(HostName), Timeout); port_please(Node,HostName, Timeout) when is_list(HostName) -> @@ -79,17 +102,34 @@ port_please(Node, EpmdAddr, Timeout) -> port_please1(Node,HostName, Timeout) -> - case inet:gethostbyname(HostName, inet, Timeout) of + Family = case inet_db:res_option(inet6) of + true -> + inet6; + false -> + inet + end, + case inet:gethostbyname(HostName, Family, Timeout) of {ok,{hostent, _Name, _ , _Af, _Size, [EpmdAddr | _]}} -> get_port(Node, EpmdAddr, Timeout); Else -> Else end. +-spec names() -> {ok, [{Name, Port}]} | {error, Reason} when + Name :: string(), + Port :: non_neg_integer(), + Reason :: address | file:posix(). + names() -> {ok, H} = inet:gethostname(), names(H). +-spec names(Host) -> {ok, [{Name, Port}]} | {error, Reason} when + Host :: atom() | string() | inet:ip_address(), + Name :: string(), + Port :: non_neg_integer(), + Reason :: address | file:posix(). + names(HostName) when is_atom(HostName); is_list(HostName) -> case inet:gethostbyname(HostName) of {ok,{hostent, _Name, _ , _Af, _Size, [EpmdAddr | _]}} -> @@ -100,9 +140,22 @@ names(HostName) when is_atom(HostName); is_list(HostName) -> names(EpmdAddr) -> get_names(EpmdAddr). +-spec register_node(Name, Port) -> Result when + Name :: string(), + Port :: non_neg_integer(), + Creation :: non_neg_integer(), + Result :: {ok, Creation} | {error, already_registered} | term(). register_node(Name, PortNo) -> - register_node(Name, PortNo, inet). + register_node(Name, PortNo, inet). + +-spec register_node(Name, Port, Driver) -> Result when + Name :: string(), + Port :: non_neg_integer(), + Driver :: inet_tcp | inet6_tcp | inet | inet6, + Creation :: non_neg_integer(), + Result :: {ok, Creation} | {error, already_registered} | term(). + register_node(Name, PortNo, inet_tcp) -> register_node(Name, PortNo, inet); register_node(Name, PortNo, inet6_tcp) -> @@ -110,6 +163,17 @@ register_node(Name, PortNo, inet6_tcp) -> register_node(Name, PortNo, Family) -> gen_server:call(erl_epmd, {register, Name, PortNo, Family}, infinity). +-spec address_please(Name, Host, AddressFamily) -> Success | {error, term()} when + Name :: string(), + Host :: string() | inet:ip_address(), + AddressFamily :: inet | inet6, + Port :: non_neg_integer(), + Version :: non_neg_integer(), + Success :: {ok, inet:ip_address()} | {ok, inet:ip_address(), Port, Version}. + +address_please(_Name, Host, AddressFamily) -> + inet:getaddr(Host, AddressFamily). + %%%---------------------------------------------------------------------- %%% Callback functions from gen_server %%%---------------------------------------------------------------------- diff --git a/lib/kernel/src/erl_reply.erl b/lib/kernel/src/erl_reply.erl index e1e046cbb4..e1c4ffe839 100644 --- a/lib/kernel/src/erl_reply.erl +++ b/lib/kernel/src/erl_reply.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2016. All Rights Reserved. +%% Copyright Ericsson AB 1997-2017. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -42,7 +42,7 @@ reply(_) -> %% convert ip number to tuple ip_string_to_tuple(Ip) -> - [Ip1,Ip2,Ip3,Ip4] = string:tokens(Ip,"."), + [Ip1,Ip2,Ip3,Ip4] = string:lexemes(Ip,"."), {list_to_integer(Ip1), list_to_integer(Ip2), list_to_integer(Ip3), diff --git a/lib/kernel/src/erl_signal_handler.erl b/lib/kernel/src/erl_signal_handler.erl index 22f235d4e4..5be905d8ae 100644 --- a/lib/kernel/src/erl_signal_handler.erl +++ b/lib/kernel/src/erl_signal_handler.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% The contents of this file are subject to the Erlang Public License, %% Version 1.1, (the "License"); you may not use this file except in @@ -19,12 +19,21 @@ -module(erl_signal_handler). -behaviour(gen_event). --export([init/1, format_status/2, +-export([start/0, init/1, format_status/2, handle_event/2, handle_call/2, handle_info/2, terminate/2, code_change/3]). -record(state,{}). +start() -> + %% add signal handler + case whereis(erl_signal_server) of + %% in case of minimal mode + undefined -> ok; + _ -> + gen_event:add_handler(erl_signal_server, erl_signal_handler, []) + end. + init(_Args) -> {ok, #state{}}. diff --git a/lib/kernel/src/error_handler.erl b/lib/kernel/src/error_handler.erl index 59ca8e690d..a89ef83261 100644 --- a/lib/kernel/src/error_handler.erl +++ b/lib/kernel/src/error_handler.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -106,8 +106,8 @@ crash(M, F, A) -> crash(Tuple) -> try erlang:error(undef) catch - error:undef -> - Stk = [Tuple|tl(erlang:get_stacktrace())], + error:undef:Stacktrace -> + Stk = [Tuple|tl(Stacktrace)], erlang:raise(error, undef, Stk) end. diff --git a/lib/kernel/src/error_logger.erl b/lib/kernel/src/error_logger.erl index 9bf8547745..e324be5290 100644 --- a/lib/kernel/src/error_logger.erl +++ b/lib/kernel/src/error_logger.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -19,22 +19,23 @@ %% -module(error_logger). --export([start/0,start_link/0,format/2,error_msg/1,error_msg/2,error_report/1, +-include("logger_internal.hrl"). + +-export([start/0,start_link/0,stop/0, + format/2,error_msg/1,error_msg/2,error_report/1, error_report/2,info_report/1,info_report/2,warning_report/1, warning_report/2,error_info/1, info_msg/1,info_msg/2,warning_msg/1,warning_msg/2, - logfile/1,tty/1,swap_handler/1, + logfile/1,tty/1, add_report_handler/1,add_report_handler/2, - delete_report_handler/1]). + delete_report_handler/1, + which_report_handlers/0]). --export([init/1, - handle_event/2, handle_call/2, handle_info/2, - terminate/2]). +%% logger callbacks +-export([adding_handler/1, removing_handler/1, log/2]). -export([get_format_depth/0, limit_term/1]). --define(buffer_size, 10). - %%----------------------------------------------------------------- %% Types used in this file %%----------------------------------------------------------------- @@ -43,8 +44,6 @@ | 'info' | 'info_msg' | 'info_report' | 'warning_msg' | 'warning_report'. 
--type state() :: {non_neg_integer(), non_neg_integer(), [term()]}. - %%% BIF -export([warning_map/0]). @@ -59,26 +58,164 @@ warning_map() -> %%----------------------------------------------------------------- --spec start() -> {'ok', pid()} | {'error', any()}. +%%%----------------------------------------------------------------- +%%% Start the event manager process under logger_sup, which is part of +%%% the kernel application's supervision tree. +-spec start() -> 'ok' | {'error', any()}. start() -> - case gen_event:start({local, error_logger}) of - {ok, Pid} -> - simple_logger(?buffer_size), - {ok, Pid}; - Error -> Error + case whereis(?MODULE) of + undefined -> + ErrorLogger = + #{id => ?MODULE, + start => {?MODULE, start_link, []}, + restart => transient, + shutdown => 2000, + type => worker, + modules => dynamic}, + case supervisor:start_child(logger_sup, ErrorLogger) of + {ok,Pid} -> + ok = logger_handler_watcher:register_handler(?MODULE,Pid); + Error -> + Error + end; + _ -> + ok end. +%%%----------------------------------------------------------------- +%%% Start callback specified in child specification to supervisor, see start/0 -spec start_link() -> {'ok', pid()} | {'error', any()}. start_link() -> - case gen_event:start_link({local, error_logger}) of - {ok, Pid} -> - simple_logger(?buffer_size), - {ok, Pid}; - Error -> Error + gen_event:start_link({local, ?MODULE}, + [{spawn_opt,[{message_queue_data, off_heap}]}]). + +%%%----------------------------------------------------------------- +%%% Stop the event manager +-spec stop() -> ok. +stop() -> + case whereis(?MODULE) of + undefined -> + ok; + _Pid -> + _ = gen_event:stop(?MODULE,{shutdown,stopped},infinity), + _ = supervisor:delete_child(logger_sup,?MODULE), + ok + end. + +%%%----------------------------------------------------------------- +%%% Callbacks for logger +-spec adding_handler(logger:handler_config()) -> + {ok,logger:handler_config()} | {error,term()}. +adding_handler(#{id:=?MODULE}=Config) -> + case start() of + ok -> + {ok,Config}; + Error -> + Error end. +-spec removing_handler(logger:handler_config()) -> ok. +removing_handler(#{id:=?MODULE}) -> + stop(), + ok. + +-spec log(logger:log_event(),logger:handler_config()) -> ok. +log(#{level:=Level,msg:=Msg,meta:=Meta},_Config) -> + do_log(Level,Msg,Meta). + +do_log(Level,{report,Msg},#{?MODULE:=#{tag:=Tag,type:=Type}}=Meta) -> + %% From error_logger:*_report/1,2, or logger call which added + %% error_logger data to obtain backwards compatibility with + %% error_logger:*_report/1,2 + Report = + case Msg of + #{label:=_,report:=R} -> R; + _ -> Msg + end, + notify(Level,Tag,Type,Report,Meta); +do_log(Level,{report,Msg},#{?MODULE:=#{tag:=Tag}}=Meta) -> + {Format,Args} = + case Msg of + #{label:=_,format:=F,args:=A} -> + %% From error_logger:*_msg/1,2. + %% In order to be backwards compatible with handling + %% of faulty parameters to error_logger:*_msg/1,2, + %% don't use report_cb here. 
+ {F,A}; + _ -> + %% From logger call which added error_logger data to + %% obtain backwards compatibility with error_logger:*_msg/1,2 + case maps:get(report_cb,Meta,fun logger:format_report/1) of + RCBFun when is_function(RCBFun,1) -> + try RCBFun(Msg) of + {F,A} when is_list(F), is_list(A) -> + {F,A}; + Other -> + {"REPORT_CB ERROR: ~tp; Returned: ~tp",[Msg,Other]} + catch C:R -> + {"REPORT_CB CRASH: ~tp; Reason: ~tp",[Msg,{C,R}]} + end; + RCBFun when is_function(RCBFun,2) -> + try RCBFun(Msg,#{depth=>get_format_depth(), + chars_limit=>unlimited, + single_line=>false}) of + Chardata when ?IS_STRING(Chardata) -> + {"~ts",[Chardata]}; + Other -> + {"REPORT_CB ERROR: ~tp; Returned: ~tp",[Msg,Other]} + catch C:R -> + {"REPORT_CB CRASH: ~tp; Reason: ~tp",[Msg,{C,R}]} + end + end + end, + notify(Level,Tag,Format,Args,Meta); +do_log(Level,{Format,Args},#{?MODULE:=#{tag:=Tag}}=Meta) + when is_list(Format), is_list(Args) -> + %% From logger call which added error_logger data to obtain + %% backwards compatibility with error_logger:*_msg/1,2 + notify(Level,Tag,Format,Args,Meta); +do_log(_Level,_Msg,_Meta) -> + %% Ignore the rest - i.e. to get backwards compatibility with + %% error_logger, you must use the error_logger API for logging. + %% Some modules within OTP go around this by adding an + %% error_logger field to its metadata. This is done only to allow + %% complete backwards compatibility for log events originating + %% from within OTP, while still using the new logger interface. + ok. + +-spec notify(logger:level(), msg_tag(), any(), any(), map()) -> 'ok'. +notify(Level,Tag0,FormatOrType0,ArgsOrReport,#{pid:=Pid0,gl:=GL,?MODULE:=My}) -> + {Tag,FormatOrType} = maybe_map_warnings(Level,Tag0,FormatOrType0), + Pid = case maps:get(emulator,My,false) of + true -> emulator; + _ -> Pid0 + end, + gen_event:notify(?MODULE,{Tag,GL,{Pid,FormatOrType,ArgsOrReport}}). + +%% For backwards compatibility with really old even handlers, check +%% the warning map and update tag and type. +maybe_map_warnings(warning,Tag,FormatOrType) -> + case error_logger:warning_map() of + warning -> + {Tag,FormatOrType}; + Level -> + {fix_warning_tag(Level,Tag),fix_warning_type(Level,FormatOrType)} + end; +maybe_map_warnings(_,Tag,FormatOrType) -> + {Tag,FormatOrType}. + +fix_warning_tag(error,warning_msg) -> error; +fix_warning_tag(error,warning_report) -> error_report; +fix_warning_tag(info,warning_msg) -> info_msg; +fix_warning_tag(info,warning_report) -> info_report; +fix_warning_tag(_,Tag) -> Tag. + +fix_warning_type(error,std_warning) -> std_error; +fix_warning_type(info,std_warning) -> std_info; +fix_warning_type(_,Type) -> Type. + %%----------------------------------------------------------------- %% These two simple old functions generate events tagged 'error' %% Used for simple messages; error or information. @@ -95,14 +232,18 @@ error_msg(Format) -> Data :: list(). error_msg(Format, Args) -> - notify({error, group_leader(), {self(), Format, Args}}). + logger:log(error, + #{label=>{?MODULE,error_msg}, + format=>Format, + args=>Args}, + meta(error)). -spec format(Format, Data) -> 'ok' when Format :: string(), Data :: list(). format(Format, Args) -> - notify({error, group_leader(), {self(), Format, Args}}). + error_msg(Format, Args). %%----------------------------------------------------------------- %% This functions should be used for error reports. Events @@ -124,7 +265,10 @@ error_report(Report) -> Report :: report(). error_report(Type, Report) -> - notify({error_report, group_leader(), {self(), Type, Report}}). 
+ logger:log(error, + #{label=>{?MODULE,error_report}, + report=>Report}, + meta(error_report,Type)). %%----------------------------------------------------------------- %% This function should be used for warning reports. @@ -146,25 +290,10 @@ warning_report(Report) -> Report :: report(). warning_report(Type, Report) -> - {Tag, NType} = case error_logger:warning_map() of - info -> - if - Type =:= std_warning -> - {info_report, std_info}; - true -> - {info_report, Type} - end; - warning -> - {warning_report, Type}; - error -> - if - Type =:= std_warning -> - {error_report, std_error}; - true -> - {error_report, Type} - end - end, - notify({Tag, group_leader(), {self(), NType, Report}}). + logger:log(warning, + #{label=>{?MODULE,warning_report}, + report=>Report}, + meta(warning_report,Type)). %%----------------------------------------------------------------- %% This function provides similar functions as error_msg for @@ -183,15 +312,11 @@ warning_msg(Format) -> Data :: list(). warning_msg(Format, Args) -> - Tag = case error_logger:warning_map() of - warning -> - warning_msg; - info -> - info_msg; - error -> - error - end, - notify({Tag, group_leader(), {self(), Format, Args}}). + logger:log(warning, + #{label=>{?MODULE,warning_msg}, + format=>Format, + args=>Args}, + meta(warning_msg)). %%----------------------------------------------------------------- %% This function should be used for information reports. Events @@ -210,7 +335,10 @@ info_report(Report) -> Report :: report(). info_report(Type, Report) -> - notify({info_report, group_leader(), {self(), Type, Report}}). + logger:log(notice, + #{label=>{?MODULE,info_report}, + report=>Report}, + meta(info_report,Type)). %%----------------------------------------------------------------- %% This function provides similar functions as error_msg for @@ -228,7 +356,11 @@ info_msg(Format) -> Data :: list(). info_msg(Format, Args) -> - notify({info_msg, group_leader(), {self(), Format, Args}}). + logger:log(notice, + #{label=>{?MODULE,info_msg}, + format=>Format, + args=>Args}, + meta(info_msg)). %%----------------------------------------------------------------- %% Used by the init process. Events are tagged 'info'. @@ -236,38 +368,75 @@ info_msg(Format, Args) -> -spec error_info(Error :: any()) -> 'ok'. +%% unused? error_info(Error) -> - notify({info, group_leader(), {self(), Error, []}}). - --spec notify({msg_tag(), pid(), {pid(), any(), any()}}) -> 'ok'. - -notify(Msg) -> - gen_event:notify(error_logger, Msg). - --type swap_handler_type() :: 'false' | 'silent' | 'tty' | {'logfile', string()}. --spec swap_handler(Type :: swap_handler_type()) -> any(). - -swap_handler(tty) -> - R = gen_event:swap_handler(error_logger, {error_logger, swap}, - {error_logger_tty_h, []}), - ok = simple_logger(), - R; -swap_handler({logfile, File}) -> - R = gen_event:swap_handler(error_logger, {error_logger, swap}, - {error_logger_file_h, File}), - ok = simple_logger(), - R; -swap_handler(silent) -> - _ = gen_event:delete_handler(error_logger, error_logger, delete), - ok = simple_logger(); -swap_handler(false) -> - ok. % keep primitive event handler as-is + {Format,Args} = + case string_p(Error) of + true -> {Error,[]}; + false -> {"~p",[Error]} + end, + MyMeta = #{tag=>info,type=>Error}, + logger:log(notice, Format, Args, #{?MODULE=>MyMeta,domain=>[Error]}). + +%%----------------------------------------------------------------- +%% Create metadata +meta(Tag) -> + meta(Tag,undefined). +meta(Tag,Type) -> + meta(Tag,Type,#{report_cb=>fun report_to_format/1}). 
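%% A minimal sketch of what the forwarding above amounts to (editor's
%% illustration, not taken from the patch; the worker_1 atom is invented,
%% and meta/1 additionally attaches a local report_cb fun that is omitted
%% here for brevity):
%%
%%     %% Legacy API call, unchanged for callers ...
%%     ok = error_logger:error_msg("~p crashed", [worker_1]),
%%     %% ... which error_msg/2 above now turns into roughly:
%%     logger:log(error,
%%                #{label => {error_logger, error_msg},
%%                  format => "~p crashed",
%%                  args => [worker_1]},
%%                #{error_logger => #{tag => error}}).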
+meta(Tag,undefined,Meta0) -> + Meta0#{?MODULE=>#{tag=>Tag}}; +meta(Tag,Type,Meta0) -> + maybe_add_domain(Tag,Type,Meta0#{?MODULE=>#{tag=>Tag,type=>Type}}). + +%% This is to prevent events of non standard type from being printed +%% with the standard logger. Similar to how error_logger_tty_h +%% discards events of non standard type. +maybe_add_domain(error_report,std_error,Meta) -> Meta; +maybe_add_domain(info_report,std_info,Meta) -> Meta; +maybe_add_domain(warning_report,std_warning,Meta) -> Meta; +maybe_add_domain(_,Type,Meta) -> Meta#{domain=>[Type]}. + +%% ----------------------------------------------------------------- +%% Report formatting - i.e. Term => {Format,Args} +%% This was earlier done in the event handler (error_logger_tty_h, etc) +%% ----------------------------------------------------------------- +report_to_format(#{label:={?MODULE,_}, + report:=Report}) when is_map(Report) -> + %% logger:format_otp_report does maps:to_list, and for backwards + %% compatibility reasons we don't want that. + {"~tp\n",[Report]}; +report_to_format(#{label:={?MODULE,_}, + format:=Format, + args:=Args}) -> + %% This is not efficient, but needed for backwards compatibility + %% in giving faulty arguments to the *_msg functions. + try io_lib:scan_format(Format,Args) of + _ -> {Format,Args} + catch _:_ -> + {"ERROR: ~tp - ~tp",[Format,Args]} + end; +report_to_format(Term) -> + logger:format_otp_report(Term). + +string_p(List) when is_list(List) -> + string_p1(lists:flatten(List)); +string_p(_) -> + false. +string_p1([]) -> + false; +string_p1(FlatList) -> + io_lib:printable_list(FlatList). + +%% ----------------------------------------------------------------- +%% Stuff directly related to the event manager +%% ----------------------------------------------------------------- -spec add_report_handler(Handler) -> any() when Handler :: module(). add_report_handler(Module) when is_atom(Module) -> - gen_event:add_handler(error_logger, Module, []). + add_report_handler(Module, []). -spec add_report_handler(Handler, Args) -> Result when Handler :: module(), @@ -275,24 +444,37 @@ add_report_handler(Module) when is_atom(Module) -> Result :: gen_event:add_handler_ret(). add_report_handler(Module, Args) when is_atom(Module) -> - gen_event:add_handler(error_logger, Module, Args). + _ = logger:add_handler(?MODULE,?MODULE,#{level=>info,filter_default=>log}), + gen_event:add_handler(?MODULE, Module, Args). -spec delete_report_handler(Handler) -> Result when Handler :: module(), Result :: gen_event:del_handler_ret(). delete_report_handler(Module) when is_atom(Module) -> - gen_event:delete_handler(error_logger, Module, []). - -%% Start the lowest level error_logger handler with Buffer. - -simple_logger(Buffer_size) when is_integer(Buffer_size) -> - gen_event:add_handler(error_logger, error_logger, Buffer_size). - -%% Start the lowest level error_logger handler without Buffer. + case whereis(?MODULE) of + Pid when is_pid(Pid) -> + Return = gen_event:delete_handler(?MODULE, Module, []), + case gen_event:which_handlers(?MODULE) of + [] -> + %% Don't want a lot of logs here if it's not needed + _ = logger:remove_handler(?MODULE), + ok; + _ -> + ok + end, + Return; + _ -> + ok + end. -simple_logger() -> - gen_event:add_handler(error_logger, error_logger, []). +which_report_handlers() -> + case whereis(?MODULE) of + Pid when is_pid(Pid) -> + gen_event:which_handlers(?MODULE); + undefined -> + [] + end. %% Log all errors to File for all eternity @@ -308,26 +490,35 @@ simple_logger() -> FilenameReason :: no_log_file. 
logfile({open, File}) -> - case lists:member(error_logger_file_h, - gen_event:which_handlers(error_logger)) of + case lists:member(error_logger_file_h,which_report_handlers()) of true -> {error, allready_have_logfile}; _ -> - gen_event:add_handler(error_logger, error_logger_file_h, File) + add_report_handler(error_logger_file_h, File) end; logfile(close) -> - case gen_event:delete_handler(error_logger, error_logger_file_h, normal) of - {error,Reason} -> - {error,Reason}; - _ -> - ok + case whereis(?MODULE) of + Pid when is_pid(Pid) -> + case gen_event:delete_handler(?MODULE, error_logger_file_h, normal) of + {error,Reason} -> + {error,Reason}; + _ -> + ok + end; + _ -> + {error,module_not_found} end; logfile(filename) -> - case gen_event:call(error_logger, error_logger_file_h, filename) of - {error,_} -> - {error, no_log_file}; - Val -> - Val + case whereis(?MODULE) of + Pid when is_pid(Pid) -> + case gen_event:call(?MODULE, error_logger_file_h, filename) of + {error,_} -> + {error, no_log_file}; + Val -> + Val + end; + _ -> + {error, no_log_file} end. %% Possibly turn off all tty printouts, maybe we only want the errors @@ -337,190 +528,38 @@ logfile(filename) -> Flag :: boolean(). tty(true) -> - Hs = gen_event:which_handlers(error_logger), - case lists:member(error_logger_tty_h, Hs) of - false -> - gen_event:add_handler(error_logger, error_logger_tty_h, []); - true -> - ignore - end, + _ = case lists:member(error_logger_tty_h, which_report_handlers()) of + false -> + case logger:get_handler_config(default) of + {ok,#{module:=logger_std_h,config:=#{type:=standard_io}}} -> + logger:remove_handler_filter(default, + error_logger_tty_false); + _ -> + logger:add_handler(error_logger_tty_true,logger_std_h, + #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS( + [otp]), + formatter=>{?DEFAULT_FORMATTER, + ?DEFAULT_FORMAT_CONFIG}, + config=>#{type=>standard_io}}) + end; + true -> + ok + end, ok; tty(false) -> - gen_event:delete_handler(error_logger, error_logger_tty_h, []), + delete_report_handler(error_logger_tty_h), + _ = logger:remove_handler(error_logger_tty_true), + _ = case logger:get_handler_config(default) of + {ok,#{module:=logger_std_h,config:=#{type:=standard_io}}} -> + logger:add_handler_filter(default,error_logger_tty_false, + {fun(_,_) -> stop end, ok}); + _ -> + ok + end, ok. - -%%% --------------------------------------------------- -%%% This is the default error_logger handler. -%%% --------------------------------------------------- - --spec init(term()) -> {'ok', state() | []}. - -init(Max) when is_integer(Max) -> - {ok, {Max, 0, []}}; -%% This one is called if someone took over from us, and now wants to -%% go back. -init({go_back, _PostState}) -> - {ok, {?buffer_size, 0, []}}; -init(_) -> - %% The error logger process may receive a huge amount of - %% messages. Make sure that they are stored off heap to - %% avoid exessive GCs. - process_flag(message_queue_data, off_heap), - {ok, []}. - --spec handle_event(term(), state()) -> {'ok', state()}. - -handle_event({Type, GL, Msg}, State) when node(GL) =/= node() -> - gen_event:notify({error_logger, node(GL)},{Type, GL, Msg}), - %% handle_event2({Type, GL, Msg}, State); %% Shall we do something - {ok, State}; %% at this node too ??? -handle_event({info_report, _, {_, Type, _}}, State) when Type =/= std_info -> - {ok, State}; %% Ignore other info reports here -handle_event(Event, State) -> - handle_event2(Event, State). - --spec handle_info(term(), state()) -> {'ok', state()}. 
- -handle_info({emulator, GL, Chars}, State) when node(GL) =/= node() -> - {error_logger, node(GL)} ! {emulator, GL, add_node(Chars,self())}, - {ok, State}; -handle_info({emulator, GL, Chars}, State) -> - handle_event2({emulator, GL, Chars}, State); -handle_info(_, State) -> - {ok, State}. - --spec handle_call(term(), state()) -> {'ok', {'error', 'bad_query'}, state()}. - -handle_call(_Query, State) -> {ok, {error, bad_query}, State}. - --spec terminate(term(), state()) -> {'error_logger', [term()]}. - -terminate(swap, {_, 0, Buff}) -> - {error_logger, Buff}; -terminate(swap, {_, Lost, Buff}) -> - Myevent = {info, group_leader(), {self(), {lost_messages, Lost}, []}}, - {error_logger, [tag_event(Myevent)|Buff]}; -terminate(_, _) -> - {error_logger, []}. - -handle_event2(Event, {1, Lost, Buff}) -> - display(tag_event(Event)), - {ok, {1, Lost+1, Buff}}; -handle_event2(Event, {N, Lost, Buff}) -> - Tagged = tag_event(Event), - display(Tagged), - {ok, {N-1, Lost, [Tagged|Buff]}}; -handle_event2(_, State) -> - {ok, State}. - -tag_event(Event) -> - {erlang:localtime(), Event}. - -display({Tag,{error,_,{_,Format,Args}}}) -> - display2(Tag,Format,Args); -display({Tag,{error_report,_,{_,Type,Report}}}) -> - display2(Tag,Type,Report); -display({Tag,{info_report,_,{_,Type,Report}}}) -> - display2(Tag,Type,Report); -display({Tag,{info,_,{_,Error,_}}}) -> - display2(Tag,Error,[]); -display({Tag,{info_msg,_,{_,Format,Args}}}) -> - display2(Tag,Format,Args); -display({Tag,{warning_report,_,{_,Type,Report}}}) -> - display2(Tag,Type,Report); -display({Tag,{warning_msg,_,{_,Format,Args}}}) -> - display2(Tag,Format,Args); -display({Tag,{emulator,_,Chars}}) -> - display2(Tag,Chars,[]). - -add_node(X, Pid) when is_atom(X) -> - add_node(atom_to_list(X), Pid); -add_node(X, Pid) -> - lists:concat([X,"** at node ",node(Pid)," **~n"]). - -%% Can't do io_lib:format - -display2({{_Y,_Mo,_D},{_H,_Mi,_S}} = Date, F, A) -> - display_date(Date), - display3(string_p(F), F, A). - -display_date({{Y,Mo,D},{H,Mi,S}}) -> - erlang:display_string( - integer_to_list(Y) ++ "-" ++ - two_digits(Mo) ++ "-" ++ - two_digits(D) ++ " " ++ - two_digits(H) ++ ":" ++ - two_digits(Mi) ++ ":" ++ - two_digits(S) ++ " "). - -two_digits(N) when 0 =< N, N =< 9 -> - [$0, $0 + N]; -two_digits(N) -> - integer_to_list(N). - -display3(true, F, A) -> - %% Format string with arguments - erlang:display_string(F ++ "\n"), - [begin - erlang:display_string("\t"), - erlang:display(Arg) - end || Arg <- A], - ok; -display3(false, Atom, A) when is_atom(Atom) -> - %% The widest atom seems to be 'supervisor_report' at 17. - ColumnWidth = 20, - AtomString = atom_to_list(Atom), - AtomLength = length(AtomString), - Padding = lists:duplicate(ColumnWidth - AtomLength, $\s), - erlang:display_string(AtomString ++ Padding), - display4(A); -display3(_, F, A) -> - erlang:display({F, A}). - -display4([A, []]) -> - %% Not sure why crash reports look like this. - display4(A); -display4(A = [_|_]) -> - case lists:all(fun({Key,_Value}) -> is_atom(Key); (_) -> false end, A) of - true -> - erlang:display_string("\n"), - lists:foreach( - fun({Key, Value}) -> - erlang:display_string( - " " ++ - atom_to_list(Key) ++ - ": "), - erlang:display(Value) - end, A); - false -> - erlang:display(A) - end; -display4(A) -> - erlang:display(A). - -string_p([]) -> - false; -string_p(Term) -> - string_p1(Term). 
- -string_p1([H|T]) when is_integer(H), H >= $\s, H < 255 -> - string_p1(T); -string_p1([$\n|T]) -> string_p1(T); -string_p1([$\r|T]) -> string_p1(T); -string_p1([$\t|T]) -> string_p1(T); -string_p1([$\v|T]) -> string_p1(T); -string_p1([$\b|T]) -> string_p1(T); -string_p1([$\f|T]) -> string_p1(T); -string_p1([$\e|T]) -> string_p1(T); -string_p1([H|T]) when is_list(H) -> - case string_p1(H) of - true -> string_p1(T); - _ -> false - end; -string_p1([]) -> true; -string_p1(_) -> false. - +%%%----------------------------------------------------------------- -spec limit_term(term()) -> term(). limit_term(Term) -> @@ -535,6 +574,8 @@ get_format_depth() -> case application:get_env(kernel, error_logger_format_depth) of {ok, Depth} when is_integer(Depth) -> max(10, Depth); + {ok, unlimited} -> + unlimited; undefined -> unlimited end. diff --git a/lib/kernel/src/erts_debug.erl b/lib/kernel/src/erts_debug.erl index ad92aafc2f..1270de4144 100644 --- a/lib/kernel/src/erts_debug.erl +++ b/lib/kernel/src/erts_debug.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1999-2016. All Rights Reserved. +%% Copyright Ericsson AB 1999-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -21,7 +21,7 @@ %% Low-level debugging support. EXPERIMENTAL! --export([size/1,df/1,df/2,df/3,ic/1]). +-export([size/1,df/1,df/2,df/3,dis_to_file/2,ic/1]). %% This module contains the following *experimental* BIFs: %% disassemble/1 @@ -32,11 +32,11 @@ %%% BIFs -export([breakpoint/2, disassemble/1, display/1, dist_ext_to_term/2, - dump_monitors/1, dump_links/1, flat_size/1, - get_internal_state/1, instructions/0, lock_counters/1, + flat_size/1, get_internal_state/1, instructions/0, map_info/1, same/2, set_internal_state/2, - size_shared/1, copy_shared/1, dirty_cpu/2, dirty_io/2, - dirty/3]). + size_shared/1, copy_shared/1, dirty_cpu/2, dirty_io/2, dirty/3, + lcnt_control/1, lcnt_control/2, lcnt_collect/0, lcnt_clear/0, + lc_graph/0, lc_graph_to_dot/2, lc_graph_merge/2]). -spec breakpoint(MFA, Flag) -> non_neg_integer() when MFA :: {Module :: module(), @@ -70,18 +70,6 @@ display(_) -> dist_ext_to_term(_, _) -> erlang:nif_error(undef). --spec dump_monitors(Id) -> true when - Id :: pid() | atom(). - -dump_monitors(_) -> - erlang:nif_error(undef). - --spec dump_links(Id) -> true when - Id :: pid() | port() | atom(). - -dump_links(_) -> - erlang:nif_error(undef). - -spec flat_size(Term) -> non_neg_integer() when Term :: term(). @@ -142,12 +130,31 @@ ic(F) when is_function(F) -> io:format("Total: ~w~n",[lists:sum([C||{_I,C}<-Is])]), R. --spec lock_counters(info) -> term(); - (clear) -> ok; - ({copy_save, boolean()}) -> boolean(); - ({process_locks, boolean()}) -> boolean(). +-spec lcnt_control + (copy_save, boolean()) -> ok; + (mask, list(atom())) -> ok. -lock_counters(_) -> +lcnt_control(_Option, _Value) -> + erlang:nif_error(undef). + +-spec lcnt_control + (copy_save) -> boolean(); + (mask) -> list(atom()). + +lcnt_control(_Option) -> + erlang:nif_error(undef). + +-type lcnt_lock_info() :: {atom(), term(), atom(), term()}. + +-spec lcnt_collect() -> + list({duration, {non_neg_integer(), non_neg_integer()}} | + {locks, list(lcnt_lock_info())}). + +lcnt_collect() -> + erlang:nif_error(undef). + +-spec lcnt_clear() -> ok. +lcnt_clear() -> erlang:nif_error(undef). 
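%% A rough usage sketch for the lcnt_* BIFs introduced above, which replace
%% the removed lock_counters/1 (editor's illustration; it assumes an emulator
%% built with lock counting enabled, and the variable names are invented):
%%
%%     ok = erts_debug:lcnt_clear(),
%%     ok = erts_debug:lcnt_control(copy_save, true),
%%     %% ... run the workload to be profiled ...
%%     Data = erts_debug:lcnt_collect(),
%%     {duration, {Secs, MicroSecs}} = lists:keyfind(duration, 1, Data),
%%     {locks, Locks} = lists:keyfind(locks, 1, Data).
%%
%% The lcnt tool normally wraps these calls; they are shown here only to
%% illustrate the shape of the new interface.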
-spec same(Term1, Term2) -> boolean() when @@ -359,16 +366,21 @@ df(Mod, Func, Arity) when is_atom(Mod), is_atom(Func) -> catch _:_ -> {undef,Mod} end. -dff(File, Fs) when is_pid(File), is_list(Fs) -> - lists:foreach(fun(Mfa) -> - disassemble_function(File, Mfa), - io:nl(File) - end, Fs); -dff(Name, Fs) when is_list(Name) -> - case file:open(Name, [write]) of +-spec dis_to_file(module(), file:filename()) -> df_ret(). + +dis_to_file(Mod, Name) when is_atom(Mod) -> + try Mod:module_info(functions) of + Fs0 when is_list(Fs0) -> + Fs = [{Mod,Func,Arity} || {Func,Arity} <- Fs0], + dff(Name, Fs) + catch _:_ -> {undef,Mod} + end. + +dff(Name, Fs) -> + case file:open(Name, [write,raw,delayed_write]) of {ok,F} -> try - dff(F, Fs) + dff_1(F, Fs) after _ = file:close(F) end; @@ -376,12 +388,18 @@ dff(Name, Fs) when is_list(Name) -> {error,{badopen,Reason}} end. +dff_1(File, Fs) -> + lists:foreach(fun(Mfa) -> + disassemble_function(File, Mfa), + file:write(File, "\n") + end, Fs). + disassemble_function(File, {_,_,_}=MFA) -> cont_dis(File, erts_debug:disassemble(MFA), MFA). cont_dis(_, false, _) -> ok; cont_dis(File, {Addr,Str,MFA}, MFA) -> - io:put_chars(File, binary_to_list(Str)), + ok = file:write(File, Str), cont_dis(File, erts_debug:disassemble(Addr), MFA); cont_dis(_, {_,_,_}, _) -> ok. @@ -390,3 +408,90 @@ cont_dis(_, {_,_,_}, _) -> ok. map_info(_) -> erlang:nif_error(undef). + +%% Create file "lc_graph.<pid>" with all actual lock dependencies +%% recorded so far by the VM. +%% Needs debug VM or --enable-lock-checking config, returns 'notsup' otherwise. +lc_graph() -> + erts_debug:set_internal_state(available_internal_state, true), + erts_debug:get_internal_state(lc_graph). + +%% Convert "lc_graph.<pid>" file to https://www.graphviz.org dot format. +lc_graph_to_dot(OutFile, InFile) -> + {ok, [LL0]} = file:consult(InFile), + + [{"NO LOCK",0} | LL] = LL0, + Map = maps:from_list([{Id, Name} || {Name, Id, _, _} <- LL]), + + case file:open(OutFile, [exclusive]) of + {ok, Out} -> + ok = file:write(Out, "digraph G {\n"), + + [dot_print_lock(Out, Lck, Map) || Lck <- LL], + + ok = file:write(Out, "}\n"), + ok = file:close(Out); + + {error,eexist} -> + {"File already exists", OutFile} + end. + +dot_print_lock(Out, {_Name, Id, Lst, _}, Map) -> + [dot_print_edge(Out, From, Id, Map) || From <- Lst], + ok. + +dot_print_edge(_, 0, _, _) -> + ignore; % "NO LOCK" +dot_print_edge(Out, From, To, Map) -> + io:format(Out, "~p -> ~p;\n", [maps:get(From,Map), maps:get(To,Map)]). + + +%% Merge several "lc_graph" files into one file. +lc_graph_merge(OutFile, InFiles) -> + LLs = lists:map(fun(InFile) -> + {ok, [LL]} = file:consult(InFile), + LL + end, + InFiles), + + Res = lists:foldl(fun(A, B) -> lcg_merge(A, B) end, + hd(LLs), + tl(LLs)), + case file:open(OutFile, [exclusive]) of + {ok, Out} -> + try + lcg_print(Out, Res) + after + file:close(Out) + end, + ok; + {error, eexist} -> + {"File already exists", OutFile} + end. + +lcg_merge(A, B) -> + lists:zipwith(fun(LA, LB) -> lcg_merge_locks(LA, LB) end, + A, B). + +lcg_merge_locks(L, L) -> + L; +lcg_merge_locks({Name, Id, DA, IA}, {Name, Id, DB, IB}) -> + Direct = lists:umerge(DA, DB), + Indirect = lists:umerge(IA, IB), + {Name, Id, Direct, Indirect -- Direct}. + + +lcg_print(Out, LL) -> + io:format(Out, "[", []), + lcg_print_locks(Out, LL), + io:format(Out, "].\n", []), + ok. 
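%% Editor's sketch of how the lock-checker helpers above fit together (file
%% names are examples only; lc_graph/0 yields data only on a debug build or
%% one configured with --enable-lock-checking). The new dis_to_file/2 is
%% unrelated to these: erts_debug:dis_to_file(lists, "lists.dis") would write
%% the disassembly of every function in the module to the given file.
%%
%%     erts_debug:lc_graph(),                      %% writes "lc_graph.<pid>"
%%     erts_debug:lc_graph_merge("lc_graph.merged",
%%                               ["lc_graph.11111", "lc_graph.22222"]),
%%     erts_debug:lc_graph_to_dot("lc_graph.dot", "lc_graph.merged"),
%%     %% then render with: dot -Tsvg lc_graph.dot -o lc_graph.svg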
+ +lcg_print_locks(Out, [{_,_}=NoLock | Rest]) -> + io:format(Out, "~p,\n", [NoLock]), + lcg_print_locks(Out, Rest); +lcg_print_locks(Out, [LastLock]) -> + io:format(Out, "~w", [LastLock]); +lcg_print_locks(Out, [Lock | Rest]) -> + io:format(Out, "~w,\n", [Lock]), + lcg_print_locks(Out, Rest). diff --git a/lib/kernel/src/file.erl b/lib/kernel/src/file.erl index 933f2d5f65..1d4e37196c 100644 --- a/lib/kernel/src/file.erl +++ b/lib/kernel/src/file.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -69,10 +69,10 @@ %% Types that can be used from other modules -- alphabetically ordered. -export_type([date_time/0, fd/0, file_info/0, filename/0, filename_all/0, - io_device/0, name/0, name_all/0, posix/0]). + io_device/0, mode/0, name/0, name_all/0, posix/0]). %%% Includes and defines --include("file.hrl"). +-include("file_int.hrl"). -define(FILE_IO_SERVER_TABLE, file_io_servers). @@ -101,14 +101,25 @@ -type deep_list() :: [char() | atom() | deep_list()]. -type name() :: string() | atom() | deep_list(). -type name_all() :: string() | atom() | deep_list() | (RawFilename :: binary()). --type posix() :: 'eacces' | 'eagain' | 'ebadf' | 'ebusy' | 'edquot' - | 'eexist' | 'efault' | 'efbig' | 'eintr' | 'einval' - | 'eio' | 'eisdir' | 'eloop' | 'emfile' | 'emlink' - | 'enametoolong' - | 'enfile' | 'enodev' | 'enoent' | 'enomem' | 'enospc' - | 'enotblk' | 'enotdir' | 'enotsup' | 'enxio' | 'eperm' - | 'epipe' | 'erofs' | 'espipe' | 'esrch' | 'estale' - | 'exdev'. +-type posix() :: + 'eacces' | 'eagain' | + 'ebadf' | 'ebadmsg' | 'ebusy' | + 'edeadlk' | 'edeadlock' | 'edquot' | + 'eexist' | + 'efault' | 'efbig' | 'eftype' | + 'eintr' | 'einval' | 'eio' | 'eisdir' | + 'eloop' | + 'emfile' | 'emlink' | 'emultihop' | + 'enametoolong' | 'enfile' | + 'enobufs' | 'enodev' | 'enolck' | 'enolink' | 'enoent' | + 'enomem' | 'enospc' | 'enosr' | 'enostr' | 'enosys' | + 'enotblk' | 'enotdir' | 'enotsup' | 'enxio' | + 'eopnotsupp' | 'eoverflow' | + 'eperm' | 'epipe' | + 'erange' | 'erofs' | + 'espipe' | 'esrch' | 'estale' | + 'etxtbsy' | + 'exdev'. -type date_time() :: calendar:datetime(). -type posix_file_advise() :: 'normal' | 'sequential' | 'random' | 'no_reuse' | 'will_need' | 'dont_need'. @@ -454,41 +465,23 @@ raw_write_file_info(Name, #file_info{} = Info) -> Reason :: posix() | badarg | system_limit. 
open(Item, ModeList) when is_list(ModeList) -> - case lists:member(raw, ModeList) of - %% Raw file, use ?PRIM_FILE to handle this file - true -> + case {lists:member(raw, ModeList), lists:member(ram, ModeList)} of + {false, false} -> + %% File server file Args = [file_name(Item) | ModeList], case check_args(Args) of ok -> [FileName | _] = Args, - %% We rely on the returned Handle (in {ok, Handle}) - %% being a pid() or a #file_descriptor{} - ?PRIM_FILE:open(FileName, ModeList); + call(open, [FileName, ModeList]); Error -> Error - end; - false -> - case lists:member(ram, ModeList) of - %% RAM file, use ?RAM_FILE to handle this file - true -> - case check_args(ModeList) of - ok -> - ?RAM_FILE:open(Item, ModeList); - Error -> - Error - end; - %% File server file - false -> - Args = [file_name(Item) | ModeList], - case check_args(Args) of - ok -> - [FileName | _] = Args, - call(open, [FileName, ModeList]); - Error -> - Error - end - end + end; + {true, _Either} -> + raw_file_io:open(file_name(Item), ModeList); + {false, true} -> + ram_file:open(Item, ModeList) end; + %% Old obsolete mode specification in atom or 2-tuple format open(Item, Mode) -> open(Item, mode_list(Mode)). @@ -1254,15 +1247,18 @@ sendfile(File, _Sock, _Offet, _Bytes, _Opts) when is_pid(File) -> sendfile(File, Sock, Offset, Bytes, []) -> sendfile(File, Sock, Offset, Bytes, ?MAX_CHUNK_SIZE, [], [], []); sendfile(File, Sock, Offset, Bytes, Opts) -> - ChunkSize0 = proplists:get_value(chunk_size, Opts, ?MAX_CHUNK_SIZE), - ChunkSize = if ChunkSize0 > ?MAX_CHUNK_SIZE -> - ?MAX_CHUNK_SIZE; - true -> ChunkSize0 - end, - %% Support for headers, trailers and options has been removed because the - %% Darwin and BSD API for using it does not play nice with - %% non-blocking sockets. See unix_efile.c for more info. - sendfile(File, Sock, Offset, Bytes, ChunkSize, [], [], Opts). + try proplists:get_value(chunk_size, Opts, ?MAX_CHUNK_SIZE) of + ChunkSize0 when is_integer(ChunkSize0) -> + ChunkSize = erlang:min(ChunkSize0, ?MAX_CHUNK_SIZE), + %% Support for headers, trailers and options has been removed + %% because the Darwin and BSD API for using it does not play nice + %% with non-blocking sockets. See unix_efile.c for more info. + sendfile(File, Sock, Offset, Bytes, ChunkSize, [], [], Opts); + _Other -> + {error, badarg} + catch + error:_ -> {error, badarg} + end. %% sendfile/2 -spec sendfile(Filename, Socket) -> @@ -1397,8 +1393,8 @@ eval_stream2({ok,Form,EndLine}, Fd, H, Last, E, Bs0) -> try erl_eval:exprs(Form, Bs0) of {value,V,Bs} -> eval_stream(Fd, H, EndLine, {V}, E, Bs) - catch Class:Reason -> - Error = {EndLine,?MODULE,{Class,Reason,erlang:get_stacktrace()}}, + catch Class:Reason:StackTrace -> + Error = {EndLine,?MODULE,{Class,Reason,StackTrace}}, eval_stream(Fd, H, EndLine, Last, [Error|E], Bs0) end; eval_stream2({error,What,EndLine}, Fd, H, Last, E, Bs) -> diff --git a/lib/kernel/src/file_int.hrl b/lib/kernel/src/file_int.hrl new file mode 100644 index 0000000000..bafc330c04 --- /dev/null +++ b/lib/kernel/src/file_int.hrl @@ -0,0 +1,33 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% + +%% +%% Internal definitions for the 'file' module and friends. +%% + +-ifndef(FILE_INTERNAL_HRL_). +-define(FILE_INTERNAL_HRL_, 1). + +-include("file.hrl"). + +-define(CALL_FD(Fd, Method, Args), + apply(Fd#file_descriptor.module, Method, [Fd | Args])). + +-endif. diff --git a/lib/kernel/src/file_io_server.erl b/lib/kernel/src/file_io_server.erl index deb7b315b1..34d5497a4a 100644 --- a/lib/kernel/src/file_io_server.erl +++ b/lib/kernel/src/file_io_server.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2000-2015. All Rights Reserved. +%% Copyright Ericsson AB 2000-2017. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -28,7 +28,8 @@ -record(state, {handle,owner,mref,buf,read_mode,unic}). --define(PRIM_FILE, prim_file). +-include("file_int.hrl"). + -define(READ_SIZE_LIST, 128). -define(READ_SIZE_BINARY, (8*1024)). @@ -67,8 +68,9 @@ do_start(Spawn, Owner, FileName, ModeList) -> erlang:dt_restore_tag(Utag), %% process_flag(trap_exit, true), case parse_options(ModeList) of - {ReadMode, UnicodeMode, Opts} -> - case ?PRIM_FILE:open(FileName, Opts) of + {ReadMode, UnicodeMode, Opts0} -> + Opts = maybe_add_read_ahead(ReadMode, Opts0), + case raw_file_io:open(FileName, [raw | Opts]) of {error, Reason} = Error -> Self ! {Ref, Error}, exit(Reason); @@ -157,6 +159,24 @@ valid_enc({utf32,little}) -> valid_enc(_Other) -> {error,badarg}. +%% Add a small read_ahead buffer if the file is opened for reading +%% only in list mode and no read_ahead is already given. +maybe_add_read_ahead(binary, Opts) -> + Opts; +maybe_add_read_ahead(list, Opts) -> + P = fun(read_ahead) -> true; + ({read_ahead,_}) -> true; + (append) -> true; + (exclusive) -> true; + (write) -> true; + (_) -> false + end, + case lists:any(P, Opts) of + false -> + [{read_ahead, 4096}|Opts]; + true -> + Opts + end. 
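%% Editor's note on maybe_add_read_ahead/2 above, with an invented file name:
%% a plain list-mode read such as
%%
%%     {ok, Fd} = file:open("data.txt", [read]),
%%     {ok, Line} = file:read_line(Fd),
%%     ok = file:close(Fd),
%%
%% now behaves as if {read_ahead, 4096} had been passed, whereas binary mode,
%% write/append/exclusive modes, or an explicit read_ahead option leave the
%% option list untouched.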
server_loop(#state{mref = Mref} = State) -> receive @@ -205,7 +225,7 @@ io_reply(From, ReplyAs, Reply) -> file_request({advise,Offset,Length,Advise}, #state{handle=Handle}=State) -> - case ?PRIM_FILE:advise(Handle, Offset, Length, Advise) of + case ?CALL_FD(Handle, advise, [Offset, Length, Advise]) of {error,Reason}=Reply -> {stop,Reason,Reply,State}; Reply -> @@ -213,7 +233,7 @@ file_request({advise,Offset,Length,Advise}, end; file_request({allocate, Offset, Length}, #state{handle = Handle} = State) -> - Reply = ?PRIM_FILE:allocate(Handle, Offset, Length), + Reply = ?CALL_FD(Handle, allocate, [Offset, Length]), {reply, Reply, State}; file_request({pread,At,Sz}, State) when At =:= cur; @@ -256,7 +276,7 @@ file_request({pwrite,At,Data}, end; file_request(datasync, #state{handle=Handle}=State) -> - case ?PRIM_FILE:datasync(Handle) of + case ?CALL_FD(Handle, datasync, []) of {error,Reason}=Reply -> {stop,Reason,Reply,State}; Reply -> @@ -264,7 +284,7 @@ file_request(datasync, end; file_request(sync, #state{handle=Handle}=State) -> - case ?PRIM_FILE:sync(Handle) of + case ?CALL_FD(Handle, sync, []) of {error,Reason}=Reply -> {stop,Reason,Reply,State}; Reply -> @@ -272,7 +292,7 @@ file_request(sync, end; file_request(close, #state{handle=Handle}=State) -> - case ?PRIM_FILE:close(Handle) of + case ?CALL_FD(Handle, close, []) of {error,Reason}=Reply -> {stop,Reason,Reply,State#state{buf= <<>>}}; Reply -> @@ -288,7 +308,7 @@ file_request({position,At}, end; file_request(truncate, #state{handle=Handle}=State) -> - case ?PRIM_FILE:truncate(Handle) of + case ?CALL_FD(Handle, truncate, []) of {error,Reason}=Reply -> {stop,Reason,Reply,State#state{buf= <<>>}}; Reply -> @@ -398,7 +418,7 @@ io_request_loop([Request|Tail], %% put_chars(Chars, latin1, #state{handle=Handle, unic=latin1}=State) -> NewState = State#state{buf = <<>>}, - case ?PRIM_FILE:write(Handle, Chars) of + case ?CALL_FD(Handle, write, [Chars]) of {error,Reason}=Reply -> {stop,Reason,Reply,NewState}; Reply -> @@ -408,7 +428,7 @@ put_chars(Chars, InEncoding, #state{handle=Handle, unic=OutEncoding}=State) -> NewState = State#state{buf = <<>>}, case unicode:characters_to_binary(Chars,InEncoding,OutEncoding) of Bin when is_binary(Bin) -> - case ?PRIM_FILE:write(Handle, Bin) of + case ?CALL_FD(Handle, write, [Bin]) of {error,Reason}=Reply -> {stop,Reason,Reply,NewState}; Reply -> @@ -422,7 +442,7 @@ put_chars(Chars, InEncoding, #state{handle=Handle, unic=OutEncoding}=State) -> get_line(S, {<<>>, Cont}, OutEnc, #state{handle=Handle, read_mode=Mode, unic=InEnc}=State) -> - case ?PRIM_FILE:read(Handle, read_size(Mode)) of + case ?CALL_FD(Handle, read, [read_size(Mode)]) of {ok,Bin} -> get_line(S, convert_enc([Cont, Bin], InEnc, OutEnc), OutEnc, State); eof -> @@ -472,7 +492,7 @@ get_chars(N, OutEnc,#state{handle=Handle,buf=Buf,read_mode=ReadMode,unic=latin1} BufSize = byte_size(Buf), NeedSize = N-BufSize, Size = erlang:max(NeedSize, ?READ_SIZE_BINARY), - case ?PRIM_FILE:read(Handle, Size) of + case ?CALL_FD(Handle, read, [Size]) of {ok, B} -> if BufSize+byte_size(B) < N -> std_reply(cat(Buf, B, ReadMode,latin1,OutEnc), State); @@ -504,7 +524,7 @@ get_chars(N, OutEnc,#state{handle=Handle,buf=Buf,read_mode=ReadMode,unic=InEncod %% Need more, Try to read 4*needed in bytes... 
NeedSize = (N - BufCount) * 4, Size = erlang:max(NeedSize, ?READ_SIZE_BINARY), - case ?PRIM_FILE:read(Handle, Size) of + case ?CALL_FD(Handle, read, [Size]) of {ok, B} -> NewBuf = list_to_binary([Buf,B]), {NewCount,NewSplit} = count_and_find(NewBuf,N,InEncoding), @@ -544,7 +564,7 @@ get_chars(Mod, Func, XtraArg, OutEnc, #state{buf=Buf}=State) -> get_chars_empty(Mod, Func, XtraArg, S, latin1, #state{handle=Handle,read_mode=ReadMode, unic=latin1}=State) -> - case ?PRIM_FILE:read(Handle, read_size(ReadMode)) of + case ?CALL_FD(Handle, read, [read_size(ReadMode)]) of {ok,Bin} -> get_chars_apply(Mod, Func, XtraArg, S, latin1, State, Bin); eof -> @@ -554,7 +574,7 @@ get_chars_empty(Mod, Func, XtraArg, S, latin1, end; get_chars_empty(Mod, Func, XtraArg, S, OutEnc, #state{handle=Handle,read_mode=ReadMode}=State) -> - case ?PRIM_FILE:read(Handle, read_size(ReadMode)) of + case ?CALL_FD(Handle, read, [read_size(ReadMode)]) of {ok,Bin} -> get_chars_apply(Mod, Func, XtraArg, S, OutEnc, State, Bin); eof -> @@ -564,7 +584,7 @@ get_chars_empty(Mod, Func, XtraArg, S, OutEnc, end. get_chars_notempty(Mod, Func, XtraArg, S, OutEnc, #state{handle=Handle,read_mode=ReadMode,buf = B}=State) -> - case ?PRIM_FILE:read(Handle, read_size(ReadMode)) of + case ?CALL_FD(Handle, read, [read_size(ReadMode)]) of {ok,Bin} -> get_chars_apply(Mod, Func, XtraArg, S, OutEnc, State, list_to_binary([B,Bin])); eof -> @@ -918,13 +938,10 @@ cbv({utf32,little},_) -> %% Compensates ?PRIM_FILE:position/2 for the number of bytes %% we have buffered position(Handle, At, Buf) -> - ?PRIM_FILE:position( - Handle, - case At of - cur -> - {cur, -byte_size(Buf)}; - {cur, Offs} -> - {cur, Offs-byte_size(Buf)}; - _ -> - At - end). + SeekTo = + case At of + {cur, Offs} -> {cur, Offs-byte_size(Buf)}; + cur -> {cur, -byte_size(Buf)}; + _ -> At + end, + ?CALL_FD(Handle, position, [SeekTo]). diff --git a/lib/kernel/src/file_server.erl b/lib/kernel/src/file_server.erl index 6504174cbc..29eaa23375 100644 --- a/lib/kernel/src/file_server.erl +++ b/lib/kernel/src/file_server.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2000-2016. All Rights Reserved. +%% Copyright Ericsson AB 2000-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -63,7 +63,7 @@ stop() -> %%% Callback functions from gen_server %%%---------------------------------------------------------------------- --type state() :: port(). % Internal type +-type state() :: term(). % Internal type %%---------------------------------------------------------------------- %% Func: init/1 @@ -73,18 +73,12 @@ stop() -> %% {stop, Reason} %%---------------------------------------------------------------------- --spec init([]) -> {'ok', state()} | {'stop', term()}. +-spec init([]) -> {'ok', state()}. init([]) -> process_flag(trap_exit, true), - case ?PRIM_FILE:start() of - {ok, Handle} -> - ?FILE_IO_SERVER_TABLE = - ets:new(?FILE_IO_SERVER_TABLE, [named_table]), - {ok, Handle}; - {error, Reason} -> - {stop, Reason} - end. + ?FILE_IO_SERVER_TABLE = ets:new(?FILE_IO_SERVER_TABLE, [named_table]), + {ok, undefined}. %%---------------------------------------------------------------------- %% Func: handle_call/3 @@ -101,7 +95,7 @@ init([]) -> {'reply', 'eof' | 'ok' | {'error', term()} | {'ok', term()}, state()} | {'stop', 'normal', 'stopped', state()}. 
-handle_call({open, Name, ModeList}, {Pid, _Tag} = _From, Handle) +handle_call({open, Name, ModeList}, {Pid, _Tag} = _From, State) when is_list(ModeList) -> Child = ?FILE_IO_SERVER:start_link(Pid, Name, ModeList), case Child of @@ -110,78 +104,78 @@ handle_call({open, Name, ModeList}, {Pid, _Tag} = _From, Handle) _ -> ok end, - {reply, Child, Handle}; + {reply, Child, State}; -handle_call({open, _Name, _Mode}, _From, Handle) -> - {reply, {error, einval}, Handle}; +handle_call({open, _Name, _Mode}, _From, State) -> + {reply, {error, einval}, State}; -handle_call({read_file, Name}, _From, Handle) -> - {reply, ?PRIM_FILE:read_file(Name), Handle}; +handle_call({read_file, Name}, _From, State) -> + {reply, ?PRIM_FILE:read_file(Name), State}; -handle_call({write_file, Name, Bin}, _From, Handle) -> - {reply, ?PRIM_FILE:write_file(Name, Bin), Handle}; +handle_call({write_file, Name, Bin}, _From, State) -> + {reply, ?PRIM_FILE:write_file(Name, Bin), State}; -handle_call({set_cwd, Name}, _From, Handle) -> - {reply, ?PRIM_FILE:set_cwd(Handle, Name), Handle}; +handle_call({set_cwd, Name}, _From, State) -> + {reply, ?PRIM_FILE:set_cwd(Name), State}; -handle_call({delete, Name}, _From, Handle) -> - {reply, ?PRIM_FILE:delete(Handle, Name), Handle}; +handle_call({delete, Name}, _From, State) -> + {reply, ?PRIM_FILE:delete(Name), State}; -handle_call({rename, Fr, To}, _From, Handle) -> - {reply, ?PRIM_FILE:rename(Handle, Fr, To), Handle}; +handle_call({rename, Fr, To}, _From, State) -> + {reply, ?PRIM_FILE:rename(Fr, To), State}; -handle_call({make_dir, Name}, _From, Handle) -> - {reply, ?PRIM_FILE:make_dir(Handle, Name), Handle}; +handle_call({make_dir, Name}, _From, State) -> + {reply, ?PRIM_FILE:make_dir(Name), State}; -handle_call({del_dir, Name}, _From, Handle) -> - {reply, ?PRIM_FILE:del_dir(Handle, Name), Handle}; +handle_call({del_dir, Name}, _From, State) -> + {reply, ?PRIM_FILE:del_dir(Name), State}; -handle_call({list_dir, Name}, _From, Handle) -> - {reply, ?PRIM_FILE:list_dir(Handle, Name), Handle}; -handle_call({list_dir_all, Name}, _From, Handle) -> - {reply, ?PRIM_FILE:list_dir_all(Handle, Name), Handle}; +handle_call({list_dir, Name}, _From, State) -> + {reply, ?PRIM_FILE:list_dir(Name), State}; +handle_call({list_dir_all, Name}, _From, State) -> + {reply, ?PRIM_FILE:list_dir_all(Name), State}; -handle_call(get_cwd, _From, Handle) -> - {reply, ?PRIM_FILE:get_cwd(Handle), Handle}; -handle_call({get_cwd}, _From, Handle) -> - {reply, ?PRIM_FILE:get_cwd(Handle), Handle}; -handle_call({get_cwd, Name}, _From, Handle) -> - {reply, ?PRIM_FILE:get_cwd(Handle, Name), Handle}; +handle_call(get_cwd, _From, State) -> + {reply, ?PRIM_FILE:get_cwd(), State}; +handle_call({get_cwd}, _From, State) -> + {reply, ?PRIM_FILE:get_cwd(), State}; +handle_call({get_cwd, Name}, _From, State) -> + {reply, ?PRIM_FILE:get_cwd(Name), State}; -handle_call({read_file_info, Name}, _From, Handle) -> - {reply, ?PRIM_FILE:read_file_info(Handle, Name), Handle}; +handle_call({read_file_info, Name}, _From, State) -> + {reply, ?PRIM_FILE:read_file_info(Name), State}; -handle_call({read_file_info, Name, Opts}, _From, Handle) -> - {reply, ?PRIM_FILE:read_file_info(Handle, Name, Opts), Handle}; +handle_call({read_file_info, Name, Opts}, _From, State) -> + {reply, ?PRIM_FILE:read_file_info(Name, Opts), State}; -handle_call({altname, Name}, _From, Handle) -> - {reply, ?PRIM_FILE:altname(Handle, Name), Handle}; +handle_call({altname, Name}, _From, State) -> + {reply, ?PRIM_FILE:altname(Name), State}; -handle_call({write_file_info, 
Name, Info}, _From, Handle) -> - {reply, ?PRIM_FILE:write_file_info(Handle, Name, Info), Handle}; +handle_call({write_file_info, Name, Info}, _From, State) -> + {reply, ?PRIM_FILE:write_file_info(Name, Info), State}; -handle_call({write_file_info, Name, Info, Opts}, _From, Handle) -> - {reply, ?PRIM_FILE:write_file_info(Handle, Name, Info, Opts), Handle}; +handle_call({write_file_info, Name, Info, Opts}, _From, State) -> + {reply, ?PRIM_FILE:write_file_info(Name, Info, Opts), State}; -handle_call({read_link_info, Name}, _From, Handle) -> - {reply, ?PRIM_FILE:read_link_info(Handle, Name), Handle}; +handle_call({read_link_info, Name}, _From, State) -> + {reply, ?PRIM_FILE:read_link_info(Name), State}; -handle_call({read_link_info, Name, Opts}, _From, Handle) -> - {reply, ?PRIM_FILE:read_link_info(Handle, Name, Opts), Handle}; +handle_call({read_link_info, Name, Opts}, _From, State) -> + {reply, ?PRIM_FILE:read_link_info(Name, Opts), State}; -handle_call({read_link, Name}, _From, Handle) -> - {reply, ?PRIM_FILE:read_link(Handle, Name), Handle}; -handle_call({read_link_all, Name}, _From, Handle) -> - {reply, ?PRIM_FILE:read_link_all(Handle, Name), Handle}; +handle_call({read_link, Name}, _From, State) -> + {reply, ?PRIM_FILE:read_link(Name), State}; +handle_call({read_link_all, Name}, _From, State) -> + {reply, ?PRIM_FILE:read_link_all(Name), State}; -handle_call({make_link, Old, New}, _From, Handle) -> - {reply, ?PRIM_FILE:make_link(Handle, Old, New), Handle}; +handle_call({make_link, Old, New}, _From, State) -> + {reply, ?PRIM_FILE:make_link(Old, New), State}; -handle_call({make_symlink, Old, New}, _From, Handle) -> - {reply, ?PRIM_FILE:make_symlink(Handle, Old, New), Handle}; +handle_call({make_symlink, Old, New}, _From, State) -> + {reply, ?PRIM_FILE:make_symlink(Old, New), State}; handle_call({copy, SourceName, SourceOpts, DestName, DestOpts, Length}, - _From, Handle) -> + _From, State) -> Reply = case ?PRIM_FILE:open(SourceName, [read, binary | SourceOpts]) of {ok, Source} -> @@ -201,14 +195,14 @@ handle_call({copy, SourceName, SourceOpts, DestName, DestOpts, Length}, {error, _} = Error -> Error end, - {reply, Reply, Handle}; + {reply, Reply, State}; -handle_call(stop, _From, Handle) -> - {stop, normal, stopped, Handle}; +handle_call(stop, _From, State) -> + {stop, normal, stopped, State}; -handle_call(Request, From, Handle) -> - error_logger:error_msg("handle_call(~p, ~p, _)", [Request, From]), - {noreply, Handle}. +handle_call(Request, From, State) -> + error_logger:error_msg("handle_call(~tp, ~tp, _)", [Request, From]), + {noreply, State}. %%---------------------------------------------------------------------- %% Func: handle_cast/2 @@ -220,7 +214,7 @@ handle_call(Request, From, Handle) -> -spec handle_cast(term(), state()) -> {'noreply', state()}. handle_cast(Msg, State) -> - error_logger:error_msg("handle_cast(~p, _)", [Msg]), + error_logger:error_msg("handle_cast(~tp, _)", [Msg]), {noreply, State}. %%---------------------------------------------------------------------- @@ -231,19 +225,14 @@ handle_cast(Msg, State) -> %%---------------------------------------------------------------------- -spec handle_info(term(), state()) -> - {'noreply', state()} | {'stop', 'normal', state()}. + {'noreply', state()}. 
-handle_info({'EXIT', Pid, _Reason}, Handle) when is_pid(Pid) -> +handle_info({'EXIT', Pid, _Reason}, State) when is_pid(Pid) -> ets:delete(?FILE_IO_SERVER_TABLE, Pid), - {noreply, Handle}; - -handle_info({'EXIT', Handle, _Reason}, Handle) -> - error_logger:error_msg("Port controlling ~w terminated in ~w", - [?FILE_SERVER, ?MODULE]), - {stop, normal, Handle}; + {noreply, State}; handle_info(Info, State) -> - error_logger:error_msg("handle_Info(~p, _)", [Info]), + error_logger:error_msg("handle_Info(~tp, _)", [Info]), {noreply, State}. %%---------------------------------------------------------------------- @@ -254,8 +243,8 @@ handle_info(Info, State) -> -spec terminate(term(), state()) -> 'ok'. -terminate(_Reason, Handle) -> - ?PRIM_FILE:stop(Handle). +terminate(_Reason, _State) -> + ok. %%---------------------------------------------------------------------- %% Func: code_change/3 diff --git a/lib/kernel/src/gen_sctp.erl b/lib/kernel/src/gen_sctp.erl index a6aa0edd15..d893d44079 100644 --- a/lib/kernel/src/gen_sctp.erl +++ b/lib/kernel/src/gen_sctp.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2007-2016. All Rights Reserved. +%% Copyright Ericsson AB 2007-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -66,7 +66,12 @@ {sctp_set_peer_primary_addr, #sctp_setpeerprim{}} | {sctp_status, #sctp_status{}} | {sndbuf, non_neg_integer()} | - {tos, non_neg_integer()}. + {tos, non_neg_integer()} | + {tclass, non_neg_integer()} | + {ttl, non_neg_integer()} | + {recvtos, boolean()} | + {recvtclass, boolean()} | + {recvttl, boolean()}. -type option_name() :: active | buffer | @@ -97,7 +102,12 @@ sctp_set_peer_primary_addr | sctp_status | sndbuf | - tos. + tos | + tclass | + ttl | + recvtos | + recvtclass | + recvttl. -type sctp_socket() :: port(). -export_type([assoc_id/0, option/0, option_name/0, sctp_socket/0]). @@ -118,6 +128,8 @@ open() -> | inet:address_family() | {port,Port} | {type,SockType} + | {netns, file:filename_all()} + | {bind_to_device, binary()} | option(), IP :: inet:ip_address() | any | loopback, Port :: inet:port_number(), @@ -363,7 +375,7 @@ send(S, AssocChange, Stream, Data) -> Socket :: sctp_socket(), FromIP :: inet:ip_address(), FromPort :: inet:port_number(), - AncData :: [#sctp_sndrcvinfo{}], + AncData :: [#sctp_sndrcvinfo{} | inet:ancillary_data()], Data :: binary() | string() | #sctp_sndrcvinfo{} | #sctp_assoc_change{} | #sctp_paddr_change{} | #sctp_adaptation_event{}, @@ -380,7 +392,7 @@ recv(S) -> Timeout :: timeout(), FromIP :: inet:ip_address(), FromPort :: inet:port_number(), - AncData :: [#sctp_sndrcvinfo{}], + AncData :: [#sctp_sndrcvinfo{} | inet:ancillary_data()], Data :: binary() | string() | #sctp_sndrcvinfo{} | #sctp_assoc_change{} | #sctp_paddr_change{} | #sctp_adaptation_event{}, diff --git a/lib/kernel/src/gen_tcp.erl b/lib/kernel/src/gen_tcp.erl index ac61dbc792..7f7833ec23 100644 --- a/lib/kernel/src/gen_tcp.erl +++ b/lib/kernel/src/gen_tcp.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2016. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -62,7 +62,14 @@ {show_econnreset, boolean()} | {sndbuf, non_neg_integer()} | {tos, non_neg_integer()} | + {tclass, non_neg_integer()} | + {ttl, non_neg_integer()} | + {recvtos, boolean()} | + {recvtclass, boolean()} | + {recvttl, boolean()} | {ipv6_v6only, boolean()}. +-type pktoptions_value() :: + {pktoptions, inet:ancillary_data()}. -type option_name() :: active | buffer | @@ -81,6 +88,7 @@ nodelay | packet | packet_size | + pktoptions | priority | {raw, Protocol :: non_neg_integer(), @@ -94,6 +102,12 @@ show_econnreset | sndbuf | tos | + tclass | + ttl | + recvtos | + recvtclass | + recvttl | + pktoptions | ipv6_v6only. -type connect_option() :: {ip, inet:socket_address()} | @@ -102,6 +116,8 @@ inet:address_family() | {port, inet:port_number()} | {tcp_module, module()} | + {netns, file:filename_all()} | + {bind_to_device, binary()} | option(). -type listen_option() :: {ip, inet:socket_address()} | @@ -111,11 +127,13 @@ {port, inet:port_number()} | {backlog, B :: non_neg_integer()} | {tcp_module, module()} | + {netns, file:filename_all()} | + {bind_to_device, binary()} | option(). -type socket() :: port(). -export_type([option/0, option_name/0, connect_option/0, listen_option/0, - socket/0]). + socket/0, pktoptions_value/0]). %% %% Connect a socket diff --git a/lib/kernel/src/gen_udp.erl b/lib/kernel/src/gen_udp.erl index 3121544719..d6e8652e77 100644 --- a/lib/kernel/src/gen_udp.erl +++ b/lib/kernel/src/gen_udp.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2016. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -51,6 +51,11 @@ {reuseaddr, boolean()} | {sndbuf, non_neg_integer()} | {tos, non_neg_integer()} | + {tclass, non_neg_integer()} | + {ttl, non_neg_integer()} | + {recvtos, boolean()} | + {recvtclass, boolean()} | + {recvttl, boolean()} | {ipv6_v6only, boolean()}. -type option_name() :: active | @@ -76,6 +81,12 @@ reuseaddr | sndbuf | tos | + tclass | + ttl | + recvtos | + recvtclass | + recvttl | + pktoptions | ipv6_v6only. -type socket() :: port(). @@ -97,6 +108,8 @@ open(Port) -> | {ifaddr, inet:socket_address()} | inet:address_family() | {port, inet:port_number()} + | {netns, file:filename_all()} + | {bind_to_device, binary()} | option(), Socket :: socket(), Reason :: inet:posix(). @@ -145,11 +158,13 @@ send(S, Packet) when is_port(S) -> end. -spec recv(Socket, Length) -> - {ok, {Address, Port, Packet}} | {error, Reason} when + {ok, RecvData} | {error, Reason} when Socket :: socket(), Length :: non_neg_integer(), + RecvData :: {Address, Port, Packet} | {Address, Port, AncData, Packet}, Address :: inet:ip_address() | inet:returned_non_ip_address(), Port :: inet:port_number(), + AncData :: inet:ancillary_data(), Packet :: string() | binary(), Reason :: not_owner | inet:posix(). @@ -162,12 +177,14 @@ recv(S,Len) when is_port(S), is_integer(Len) -> end. -spec recv(Socket, Length, Timeout) -> - {ok, {Address, Port, Packet}} | {error, Reason} when + {ok, RecvData} | {error, Reason} when Socket :: socket(), Length :: non_neg_integer(), Timeout :: timeout(), + RecvData :: {Address, Port, Packet} | {Address, Port, AncData, Packet}, Address :: inet:ip_address() | inet:returned_non_ip_address(), Port :: inet:port_number(), + AncData :: inet:ancillary_data(), Packet :: string() | binary(), Reason :: not_owner | inet:posix(). 
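%% A small usage sketch for the ancillary-data socket options added to
%% gen_sctp, gen_tcp and gen_udp above (editor's illustration; the port
%% number is arbitrary and the exact contents of AncData depend on platform
%% support for the requested options):
%%
%%     {ok, S} = gen_udp:open(5555, [binary, {active, false},
%%                                   {recvtos, true}, {recvttl, true}]),
%%     {ok, {FromIP, FromPort, AncData, Packet}} = gen_udp:recv(S, 0, 5000),
%%     %% AncData is an inet:ancillary_data() list, e.g. [{tos,0},{ttl,64}]
%%     ok = gen_udp:close(S).
%%
%% Without any recv* option the old {FromIP, FromPort, Packet} tuple is still
%% returned, which is why the recv/2,3 specs now allow both shapes.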
diff --git a/lib/kernel/src/global.erl b/lib/kernel/src/global.erl index a9e92b28b8..a38522eb5c 100644 --- a/lib/kernel/src/global.erl +++ b/lib/kernel/src/global.erl @@ -262,7 +262,7 @@ check_dupname(Name, Pid) -> {ok, allow} -> true; _ -> - S = "global: ~w registered under several names: ~w\n", + S = "global: ~w registered under several names: ~tw\n", Names = [Name | [Name1 || {_Pid, Name1} <- PidNames]], error_logger:error_msg(S, [Pid, Names]), false @@ -659,7 +659,7 @@ handle_call(stop, _From, S) -> handle_call(Request, From, S) -> error_logger:warning_msg("The global_name_server " "received an unexpected message:\n" - "handle_call(~p, ~p, _)\n", + "handle_call(~tp, ~tp, _)\n", [Request, From]), {noreply, S}. @@ -828,7 +828,7 @@ handle_cast({async_del_lock, _ResourceId, _Pid}, S) -> handle_cast(Request, S) -> error_logger:warning_msg("The global_name_server " "received an unexpected message:\n" - "handle_cast(~p, _)\n", [Request]), + "handle_cast(~tp, _)\n", [Request]), {noreply, S}. %%======================================================================== @@ -955,7 +955,7 @@ handle_info({'DOWN', MonitorRef, process, _Pid, _Info}, S0) -> handle_info(Message, S) -> error_logger:warning_msg("The global_name_server " "received an unexpected message:\n" - "handle_info(~p, _)\n", [Message]), + "handle_info(~tp, _)\n", [Message]), {noreply, S}. @@ -1949,13 +1949,13 @@ exchange_names([{Name, Pid, Method} | Tail], Node, Ops, Res) -> exchange_names(Tail, Node, [Op | Ops], [Op | Res]); {badrpc, Badrpc} -> error_logger:info_msg("global: badrpc ~w received when " - "conflicting name ~w was found\n", + "conflicting name ~tw was found\n", [Badrpc, Name]), Op = {insert, {Name, Pid, Method}}, exchange_names(Tail, Node, [Op | Ops], Res); Else -> error_logger:info_msg("global: Resolve method ~w for " - "conflicting name ~w returned ~w\n", + "conflicting name ~tw returned ~tw\n", [Method, Name, Else]), Op = {delete, Name}, exchange_names(Tail, Node, [Op | Ops], [Op | Res]) @@ -1984,7 +1984,7 @@ minmax(P1,P2) -> Pid2 :: pid(). random_exit_name(Name, Pid, Pid2) -> {Min, Max} = minmax(Pid, Pid2), - error_logger:info_msg("global: Name conflict terminating ~w\n", + error_logger:info_msg("global: Name conflict terminating ~tw\n", [{Name, Max}]), exit(Max, kill), Min. @@ -2200,7 +2200,7 @@ unexpected_message({'EXIT', _Pid, _Reason}, _What) -> ok; unexpected_message(Message, What) -> error_logger:warning_msg("The global_name_server ~w process " - "received an unexpected message:\n~p\n", + "received an unexpected message:\n~tp\n", [What, Message]). %%% Utilities diff --git a/lib/kernel/src/group.erl b/lib/kernel/src/group.erl index bf785959ff..5625ae6eb7 100644 --- a/lib/kernel/src/group.erl +++ b/lib/kernel/src/group.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -114,7 +114,7 @@ server_loop(Drv, Shell, Buf0) -> {io_request,From,ReplyAs,Req} when is_pid(From) -> %% This io_request may cause a transition to a couple of %% selective receive loops elsewhere in this module. 
- Buf = io_request(Req, From, ReplyAs, Drv, Buf0), + Buf = io_request(Req, From, ReplyAs, Drv, Shell, Buf0), server_loop(Drv, Shell, Buf); {reply,{{From,ReplyAs},Reply}} -> io_reply(From, ReplyAs, Reply), @@ -135,7 +135,7 @@ server_loop(Drv, Shell, Buf0) -> exit(R); %% We want to throw away any term that we don't handle (standard %% practice in receive loops), but not any {Drv,_} tuples which are - %% handled in io_request/5. + %% handled in io_request/6. NotDrvTuple when (not is_tuple(NotDrvTuple)) orelse (tuple_size(NotDrvTuple) =/= 2) orelse (element(1, NotDrvTuple) =/= Drv) -> @@ -177,8 +177,8 @@ set_unicode_state(Drv,Bool) -> end. -io_request(Req, From, ReplyAs, Drv, Buf0) -> - case io_request(Req, Drv, {From,ReplyAs}, Buf0) of +io_request(Req, From, ReplyAs, Drv, Shell, Buf0) -> + case io_request(Req, Drv, Shell, {From,ReplyAs}, Buf0) of {ok,Reply,Buf} -> io_reply(From, ReplyAs, Reply), Buf; @@ -208,7 +208,7 @@ io_request(Req, From, ReplyAs, Drv, Buf0) -> %% %% These put requests have to be synchronous to the driver as otherwise %% there is no guarantee that the data has actually been printed. -io_request({put_chars,unicode,Chars}, Drv, From, Buf) -> +io_request({put_chars,unicode,Chars}, Drv, _Shell, From, Buf) -> case catch unicode:characters_to_binary(Chars,utf8) of Binary when is_binary(Binary) -> send_drv(Drv, {put_chars_sync, unicode, Binary, {From,ok}}), @@ -216,7 +216,7 @@ io_request({put_chars,unicode,Chars}, Drv, From, Buf) -> _ -> {error,{error,{put_chars, unicode,Chars}},Buf} end; -io_request({put_chars,unicode,M,F,As}, Drv, From, Buf) -> +io_request({put_chars,unicode,M,F,As}, Drv, _Shell, From, Buf) -> case catch apply(M, F, As) of Binary when is_binary(Binary) -> send_drv(Drv, {put_chars_sync, unicode, Binary, {From,ok}}), @@ -230,12 +230,12 @@ io_request({put_chars,unicode,M,F,As}, Drv, From, Buf) -> {error,{error,F},Buf} end end; -io_request({put_chars,latin1,Binary}, Drv, From, Buf) when is_binary(Binary) -> +io_request({put_chars,latin1,Binary}, Drv, _Shell, From, Buf) when is_binary(Binary) -> send_drv(Drv, {put_chars_sync, unicode, unicode:characters_to_binary(Binary,latin1), {From,ok}}), {noreply,Buf}; -io_request({put_chars,latin1,Chars}, Drv, From, Buf) -> +io_request({put_chars,latin1,Chars}, Drv, _Shell, From, Buf) -> case catch unicode:characters_to_binary(Chars,latin1) of Binary when is_binary(Binary) -> send_drv(Drv, {put_chars_sync, unicode, Binary, {From,ok}}), @@ -243,7 +243,7 @@ io_request({put_chars,latin1,Chars}, Drv, From, Buf) -> _ -> {error,{error,{put_chars,latin1,Chars}},Buf} end; -io_request({put_chars,latin1,M,F,As}, Drv, From, Buf) -> +io_request({put_chars,latin1,M,F,As}, Drv, _Shell, From, Buf) -> case catch apply(M, F, As) of Binary when is_binary(Binary) -> send_drv(Drv, {put_chars_sync, unicode, @@ -260,30 +260,30 @@ io_request({put_chars,latin1,M,F,As}, Drv, From, Buf) -> end end; -io_request({get_chars,Encoding,Prompt,N}, Drv, _From, Buf) -> - get_chars(Prompt, io_lib, collect_chars, N, Drv, Buf, Encoding); -io_request({get_line,Encoding,Prompt}, Drv, _From, Buf) -> - get_chars(Prompt, io_lib, collect_line, [], Drv, Buf, Encoding); -io_request({get_until,Encoding, Prompt,M,F,As}, Drv, _From, Buf) -> - get_chars(Prompt, io_lib, get_until, {M,F,As}, Drv, Buf, Encoding); -io_request({get_password,_Encoding},Drv,_From,Buf) -> - get_password_chars(Drv, Buf); -io_request({setopts,Opts}, Drv, _From, Buf) when is_list(Opts) -> +io_request({get_chars,Encoding,Prompt,N}, Drv, Shell, _From, Buf) -> + get_chars_n(Prompt, io_lib, collect_chars, N, 
Drv, Shell, Buf, Encoding); +io_request({get_line,Encoding,Prompt}, Drv, Shell, _From, Buf) -> + get_chars_line(Prompt, io_lib, collect_line, [], Drv, Shell, Buf, Encoding); +io_request({get_until,Encoding, Prompt,M,F,As}, Drv, Shell, _From, Buf) -> + get_chars_line(Prompt, io_lib, get_until, {M,F,As}, Drv, Shell, Buf, Encoding); +io_request({get_password,_Encoding},Drv,Shell,_From,Buf) -> + get_password_chars(Drv, Shell, Buf); +io_request({setopts,Opts}, Drv, _Shell, _From, Buf) when is_list(Opts) -> setopts(Opts, Drv, Buf); -io_request(getopts, Drv, _From, Buf) -> +io_request(getopts, Drv, _Shell, _From, Buf) -> getopts(Drv, Buf); -io_request({requests,Reqs}, Drv, From, Buf) -> - io_requests(Reqs, {ok,ok,Buf}, From, Drv); +io_request({requests,Reqs}, Drv, Shell, From, Buf) -> + io_requests(Reqs, {ok,ok,Buf}, From, Drv, Shell); %% New in R12 -io_request({get_geometry,columns},Drv,_From,Buf) -> +io_request({get_geometry,columns},Drv,_Shell,_From,Buf) -> case get_tty_geometry(Drv) of {W,_H} -> {ok,W,Buf}; _ -> {error,{error,enotsup},Buf} end; -io_request({get_geometry,rows},Drv,_From,Buf) -> +io_request({get_geometry,rows},Drv,_Shell,_From,Buf) -> case get_tty_geometry(Drv) of {_W,H} -> {ok,H,Buf}; @@ -292,40 +292,40 @@ io_request({get_geometry,rows},Drv,_From,Buf) -> end; %% BC with pre-R13 -io_request({put_chars,Chars}, Drv, From, Buf) -> - io_request({put_chars,latin1,Chars}, Drv, From, Buf); -io_request({put_chars,M,F,As}, Drv, From, Buf) -> - io_request({put_chars,latin1,M,F,As}, Drv, From, Buf); -io_request({get_chars,Prompt,N}, Drv, From, Buf) -> - io_request({get_chars,latin1,Prompt,N}, Drv, From, Buf); -io_request({get_line,Prompt}, Drv, From, Buf) -> - io_request({get_line,latin1,Prompt}, Drv, From, Buf); -io_request({get_until, Prompt,M,F,As}, Drv, From, Buf) -> - io_request({get_until,latin1, Prompt,M,F,As}, Drv, From, Buf); -io_request(get_password,Drv,From,Buf) -> - io_request({get_password,latin1},Drv,From,Buf); - - - -io_request(_, _Drv, _From, Buf) -> +io_request({put_chars,Chars}, Drv, Shell, From, Buf) -> + io_request({put_chars,latin1,Chars}, Drv, Shell, From, Buf); +io_request({put_chars,M,F,As}, Drv, Shell, From, Buf) -> + io_request({put_chars,latin1,M,F,As}, Drv, Shell, From, Buf); +io_request({get_chars,Prompt,N}, Drv, Shell, From, Buf) -> + io_request({get_chars,latin1,Prompt,N}, Drv, Shell, From, Buf); +io_request({get_line,Prompt}, Drv, Shell, From, Buf) -> + io_request({get_line,latin1,Prompt}, Drv, Shell, From, Buf); +io_request({get_until, Prompt,M,F,As}, Drv, Shell, From, Buf) -> + io_request({get_until,latin1, Prompt,M,F,As}, Drv, Shell, From, Buf); +io_request(get_password,Drv,Shell,From,Buf) -> + io_request({get_password,latin1},Drv,Shell,From,Buf); + + + +io_request(_, _Drv, _Shell, _From, Buf) -> {error,{error,request},Buf}. -%% Status = io_requests(RequestList, PrevStat, From, Drv) +%% Status = io_requests(RequestList, PrevStat, From, Drv, Shell) %% Process a list of output requests as long as %% the previous status is 'ok' or noreply. %% %% We use undefined as the From for all but the last request %% in order to discards acknowledgements from those requests. 
%% -io_requests([R|Rs], {noreply,Buf}, From, Drv) -> +io_requests([R|Rs], {noreply,Buf}, From, Drv, Shell) -> ReqFrom = if Rs =:= [] -> From; true -> undefined end, - io_requests(Rs, io_request(R, Drv, ReqFrom, Buf), From, Drv); -io_requests([R|Rs], {ok,ok,Buf}, From, Drv) -> + io_requests(Rs, io_request(R, Drv, Shell, ReqFrom, Buf), From, Drv, Shell); +io_requests([R|Rs], {ok,ok,Buf}, From, Drv, Shell) -> ReqFrom = if Rs =:= [] -> From; true -> undefined end, - io_requests(Rs, io_request(R, Drv, ReqFrom, Buf), From, Drv); -io_requests([_|_], Error, _From, _Drv) -> + io_requests(Rs, io_request(R, Drv, Shell, ReqFrom, Buf), From, Drv, Shell); +io_requests([_|_], Error, _From, _Drv, _Shell) -> Error; -io_requests([], Stat, _From, _) -> +io_requests([], Stat, _From, _, _Shell) -> Stat. %% io_reply(From, ReplyAs, Reply) @@ -333,7 +333,7 @@ io_requests([], Stat, _From, _) -> %% The ACK contains the return value. io_reply(undefined, _ReplyAs, _Reply) -> - %% Ignore these replies as they are generated from io_requests/4. + %% Ignore these replies as they are generated from io_requests/5. ok; io_reply(From, ReplyAs, Reply) -> From ! {io_reply,ReplyAs,Reply}, @@ -434,7 +434,7 @@ getopts(Drv,Buf) -> {ok,[Exp,Echo,Bin,Uni],Buf}. -%% get_chars(Prompt, Module, Function, XtraArgument, Drv, Buffer) +%% get_chars_*(Prompt, Module, Function, XtraArgument, Drv, Buffer) %% Gets characters from the input Drv until as the applied function %% returns {stop,Result,Rest}. Does not block output until input has been %% received. @@ -442,8 +442,8 @@ getopts(Drv,Buf) -> %% {Result,NewSaveBuffer} %% {error,What,NewSaveBuffer} -get_password_chars(Drv,Buf) -> - case get_password_line(Buf, Drv) of +get_password_chars(Drv,Shell,Buf) -> + case get_password_line(Buf, Drv, Shell) of {done, Line, Buf1} -> {ok, Line, Buf1}; interrupted -> @@ -452,36 +452,62 @@ get_password_chars(Drv,Buf) -> {exit, terminated} end. -get_chars(Prompt, M, F, Xa, Drv, Buf, Encoding) -> +get_chars_n(Prompt, M, F, Xa, Drv, Shell, Buf, Encoding) -> Pbs = prompt_bytes(Prompt, Encoding), - get_chars_loop(Pbs, M, F, Xa, Drv, Buf, start, Encoding). + case get(echo) of + true -> + get_chars_loop(Pbs, M, F, Xa, Drv, Shell, Buf, start, Encoding); + false -> + get_chars_n_loop(Pbs, M, F, Xa, Drv, Shell, Buf, start, Encoding) + end. + +get_chars_line(Prompt, M, F, Xa, Drv, Shell, Buf, Encoding) -> + Pbs = prompt_bytes(Prompt, Encoding), + get_chars_loop(Pbs, M, F, Xa, Drv, Shell, Buf, start, Encoding). -get_chars_loop(Pbs, M, F, Xa, Drv, Buf0, State, Encoding) -> - Result = case get(echo) of +get_chars_loop(Pbs, M, F, Xa, Drv, Shell, Buf0, State, Encoding) -> + Result = case get(echo) of true -> - get_line(Buf0, Pbs, Drv, Encoding); + get_line(Buf0, Pbs, Drv, Shell, Encoding); false -> % get_line_echo_off only deals with lists % and does not need encoding... - get_line_echo_off(Buf0, Pbs, Drv) + get_line_echo_off(Buf0, Pbs, Drv, Shell) end, case Result of - {done,Line,Buf1} -> - get_chars_apply(Pbs, M, F, Xa, Drv, Buf1, State, Line, Encoding); + {done,Line,Buf} -> + get_chars_apply(Pbs, M, F, Xa, Drv, Shell, Buf, State, Line, Encoding); interrupted -> {error,{error,interrupted},[]}; terminated -> {exit,terminated} end. 
-get_chars_apply(Pbs, M, F, Xa, Drv, Buf, State0, Line, Encoding) -> +get_chars_apply(Pbs, M, F, Xa, Drv, Shell, Buf, State0, Line, Encoding) -> case catch M:F(State0, cast(Line,get(read_mode), Encoding), Encoding, Xa) of - {stop,Result,Rest} -> - {ok,Result,append(Rest, Buf, Encoding)}; - {'EXIT',_} -> - {error,{error,err_func(M, F, Xa)},[]}; - State1 -> - get_chars_loop(Pbs, M, F, Xa, Drv, Buf, State1, Encoding) + {stop,Result,Rest} -> + {ok,Result,append(Rest, Buf, Encoding)}; + {'EXIT',_} -> + {error,{error,err_func(M, F, Xa)},[]}; + State1 -> + get_chars_loop(Pbs, M, F, Xa, Drv, Shell, Buf, State1, Encoding) + end. + +get_chars_n_loop(Pbs, M, F, Xa, Drv, Shell, Buf0, State, Encoding) -> + try M:F(State, cast(Buf0, get(read_mode), Encoding), Encoding, Xa) of + {stop,Result,Rest} -> + {ok, Result, Rest}; + State1 -> + case get_chars_echo_off(Pbs, Drv, Shell) of + interrupted -> + {error,{error,interrupted},[]}; + terminated -> + {exit,terminated}; + Buf -> + get_chars_n_loop(Pbs, M, F, Xa, Drv, Shell, Buf, State1, Encoding) + end + catch _:_ -> + {error,{error,err_func(M, F, Xa)},[]} end. %% Convert error code to make it look as before @@ -497,24 +523,24 @@ err_func(_, F, _) -> %% {done,LineChars,RestChars} %% interrupted -get_line(Chars, Pbs, Drv, Encoding) -> +get_line(Chars, Pbs, Drv, Shell, Encoding) -> {more_chars,Cont,Rs} = edlin:start(Pbs), send_drv_reqs(Drv, Rs), - get_line1(edlin:edit_line(Chars, Cont), Drv, new_stack(get(line_buffer)), + get_line1(edlin:edit_line(Chars, Cont), Drv, Shell, new_stack(get(line_buffer)), Encoding). -get_line1({done,Line,Rest,Rs}, Drv, Ls, _Encoding) -> +get_line1({done,Line,Rest,Rs}, Drv, _Shell, Ls, _Encoding) -> send_drv_reqs(Drv, Rs), save_line_buffer(Line, get_lines(Ls)), {done,Line,Rest}; -get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding) +get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Shell, Ls0, Encoding) when ((Mode =:= none) and (Char =:= $\^P)) or ((Mode =:= meta_left_sq_bracket) and (Char =:= $A)) -> send_drv_reqs(Drv, Rs), case up_stack(save_line(Ls0, edlin:current_line(Cont))) of {none,_Ls} -> send_drv(Drv, beep), - get_line1(edlin:edit_line(Cs, Cont), Drv, Ls0, Encoding); + get_line1(edlin:edit_line(Cs, Cont), Drv, Shell, Ls0, Encoding); {Lcs,Ls} -> send_drv_reqs(Drv, edlin:erase_line(Cont)), {more_chars,Ncont,Nrs} = edlin:start(edlin:prompt(Cont)), @@ -522,16 +548,17 @@ get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding) get_line1(edlin:edit_line1(lists:sublist(Lcs, 1, length(Lcs)-1), Ncont), Drv, + Shell, Ls, Encoding) end; -get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding) +get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Shell, Ls0, Encoding) when ((Mode =:= none) and (Char =:= $\^N)) or ((Mode =:= meta_left_sq_bracket) and (Char =:= $B)) -> send_drv_reqs(Drv, Rs), case down_stack(save_line(Ls0, edlin:current_line(Cont))) of {none,_Ls} -> send_drv(Drv, beep), - get_line1(edlin:edit_line(Cs, Cont), Drv, Ls0, Encoding); + get_line1(edlin:edit_line(Cs, Cont), Drv, Shell, Ls0, Encoding); {Lcs,Ls} -> send_drv_reqs(Drv, edlin:erase_line(Cont)), {more_chars,Ncont,Nrs} = edlin:start(edlin:prompt(Cont)), @@ -539,6 +566,7 @@ get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding) get_line1(edlin:edit_line1(lists:sublist(Lcs, 1, length(Lcs)-1), Ncont), Drv, + Shell, Ls, Encoding) end; %% ^R = backward search, ^S = forward search. 
@@ -551,7 +579,7 @@ get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls0, Encoding) %% new modes: search, search_quit, search_found. These are added to %% the regular ones (none, meta_left_sq_bracket) and handle special %% cases of history search. -get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls, Encoding) +get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Shell, Ls, Encoding) when ((Mode =:= none) and (Char =:= $\^R)) -> send_drv_reqs(Drv, Rs), %% drop current line, move to search mode. We store the current @@ -561,8 +589,8 @@ get_line1({undefined,{_A,Mode,Char},Cs,Cont,Rs}, Drv, Ls, Encoding) Pbs = prompt_bytes("(search)`': ", Encoding), {more_chars,Ncont,Nrs} = edlin:start(Pbs, search), send_drv_reqs(Drv, Nrs), - get_line1(edlin:edit_line1(Cs, Ncont), Drv, Ls, Encoding); -get_line1({expand, Before, Cs0, Cont,Rs}, Drv, Ls0, Encoding) -> + get_line1(edlin:edit_line1(Cs, Ncont), Drv, Shell, Ls, Encoding); +get_line1({expand, Before, Cs0, Cont,Rs}, Drv, Shell, Ls0, Encoding) -> send_drv_reqs(Drv, Rs), ExpandFun = get(expand_fun), {Found, Add, Matches} = ExpandFun(Before), @@ -577,37 +605,37 @@ get_line1({expand, Before, Cs0, Cont,Rs}, Drv, Ls0, Encoding) -> send_drv(Drv, {put_chars, unicode, unicode:characters_to_binary(MatchStr,unicode)}), [$\^L | Cs1] end, - get_line1(edlin:edit_line(Cs, Cont), Drv, Ls0, Encoding); -get_line1({undefined,_Char,Cs,Cont,Rs}, Drv, Ls, Encoding) -> + get_line1(edlin:edit_line(Cs, Cont), Drv, Shell, Ls0, Encoding); +get_line1({undefined,_Char,Cs,Cont,Rs}, Drv, Shell, Ls, Encoding) -> send_drv_reqs(Drv, Rs), send_drv(Drv, beep), - get_line1(edlin:edit_line(Cs, Cont), Drv, Ls, Encoding); + get_line1(edlin:edit_line(Cs, Cont), Drv, Shell, Ls, Encoding); %% The search item was found and accepted (new line entered on the exact %% result found) -get_line1({_What,Cont={line,_Prompt,_Chars,search_found},Rs}, Drv, Ls0, Encoding) -> +get_line1({_What,Cont={line,_Prompt,_Chars,search_found},Rs}, Drv, Shell, Ls0, Encoding) -> Line = edlin:current_line(Cont), %% this may create duplicate entries. Ls = save_line(new_stack(get_lines(Ls0)), Line), - get_line1({done, Line, "", Rs}, Drv, Ls, Encoding); + get_line1({done, Line, "", Rs}, Drv, Shell, Ls, Encoding); %% The search mode has been exited, but the user wants to remain in line %% editing mode wherever that was, but editing the search result. -get_line1({What,Cont={line,_Prompt,_Chars,search_quit},Rs}, Drv, Ls, Encoding) -> +get_line1({What,Cont={line,_Prompt,_Chars,search_quit},Rs}, Drv, Shell, Ls, Encoding) -> Line = edlin:current_chars(Cont), %% Load back the old prompt with the correct line number. case get(search_quit_prompt) of undefined -> % should not happen. Fallback. LsFallback = save_line(new_stack(get_lines(Ls)), Line), - get_line1({done, "\n", Line, Rs}, Drv, LsFallback, Encoding); + get_line1({done, "\n", Line, Rs}, Drv, Shell, LsFallback, Encoding); Prompt -> % redraw the line and keep going with the same stack position NCont = {line,Prompt,{lists:reverse(Line),[]},none}, send_drv_reqs(Drv, Rs), send_drv_reqs(Drv, edlin:erase_line(Cont)), send_drv_reqs(Drv, edlin:redraw_line(NCont)), - get_line1({What, NCont ,[]}, Drv, pad_stack(Ls), Encoding) + get_line1({What, NCont ,[]}, Drv, Shell, pad_stack(Ls), Encoding) end; %% Search mode is entered. get_line1({What,{line,Prompt,{RevCmd0,_Aft},search},Rs}, - Drv, Ls0, Encoding) -> + Drv, Shell, Ls0, Encoding) -> send_drv_reqs(Drv, Rs), %% Figure out search direction. 
^S and ^R are returned through edlin %% whenever we received a search while being already in search mode. @@ -629,61 +657,90 @@ get_line1({What,{line,Prompt,{RevCmd0,_Aft},search},Rs}, {Ls2, {RevCmd, "': "++Line}} end, Cont = {line,Prompt,NewStack,search}, - more_data(What, Cont, Drv, Ls, Encoding); -get_line1({What,Cont0,Rs}, Drv, Ls, Encoding) -> + more_data(What, Cont, Drv, Shell, Ls, Encoding); +get_line1({What,Cont0,Rs}, Drv, Shell, Ls, Encoding) -> send_drv_reqs(Drv, Rs), - more_data(What, Cont0, Drv, Ls, Encoding). + more_data(What, Cont0, Drv, Shell, Ls, Encoding). -more_data(What, Cont0, Drv, Ls, Encoding) -> +more_data(What, Cont0, Drv, Shell, Ls, Encoding) -> receive {Drv,{data,Cs}} -> - get_line1(edlin:edit_line(Cs, Cont0), Drv, Ls, Encoding); + get_line1(edlin:edit_line(Cs, Cont0), Drv, Shell, Ls, Encoding); {Drv,eof} -> - get_line1(edlin:edit_line(eof, Cont0), Drv, Ls, Encoding); + get_line1(edlin:edit_line(eof, Cont0), Drv, Shell, Ls, Encoding); {io_request,From,ReplyAs,Req} when is_pid(From) -> {more_chars,Cont,_More} = edlin:edit_line([], Cont0), send_drv_reqs(Drv, edlin:erase_line(Cont)), - io_request(Req, From, ReplyAs, Drv, []), %WRONG!!! + io_request(Req, From, ReplyAs, Drv, Shell, []), %WRONG!!! send_drv_reqs(Drv, edlin:redraw_line(Cont)), - get_line1({more_chars,Cont,[]}, Drv, Ls, Encoding); + get_line1({more_chars,Cont,[]}, Drv, Shell, Ls, Encoding); {reply,{{From,ReplyAs},Reply}} -> %% We take care of replies from puts here as well io_reply(From, ReplyAs, Reply), - more_data(What, Cont0, Drv, Ls, Encoding); + more_data(What, Cont0, Drv, Shell, Ls, Encoding); {'EXIT',Drv,interrupt} -> interrupted; {'EXIT',Drv,_} -> - terminated + terminated; + {'EXIT',Shell,R} -> + exit(R) after get_line_timeout(What)-> - get_line1(edlin:edit_line([], Cont0), Drv, Ls, Encoding) + get_line1(edlin:edit_line([], Cont0), Drv, Shell, Ls, Encoding) end. -get_line_echo_off(Chars, Pbs, Drv) -> +get_line_echo_off(Chars, Pbs, Drv, Shell) -> send_drv_reqs(Drv, [{put_chars, unicode,Pbs}]), - get_line_echo_off1(edit_line(Chars,[]), Drv). + get_line_echo_off1(edit_line(Chars,[]), Drv, Shell). -get_line_echo_off1({Chars,[]}, Drv) -> +get_line_echo_off1({Chars,[]}, Drv, Shell) -> receive {Drv,{data,Cs}} -> - get_line_echo_off1(edit_line(Cs, Chars), Drv); + get_line_echo_off1(edit_line(Cs, Chars), Drv, Shell); {Drv,eof} -> - get_line_echo_off1(edit_line(eof, Chars), Drv); + get_line_echo_off1(edit_line(eof, Chars), Drv, Shell); {io_request,From,ReplyAs,Req} when is_pid(From) -> - io_request(Req, From, ReplyAs, Drv, []), - get_line_echo_off1({Chars,[]}, Drv); + io_request(Req, From, ReplyAs, Drv, Shell, []), + get_line_echo_off1({Chars,[]}, Drv, Shell); {reply,{{From,ReplyAs},Reply}} when From =/= undefined -> %% We take care of replies from puts here as well io_reply(From, ReplyAs, Reply), - get_line_echo_off1({Chars,[]},Drv); + get_line_echo_off1({Chars,[]},Drv, Shell); {'EXIT',Drv,interrupt} -> interrupted; {'EXIT',Drv,_} -> - terminated + terminated; + {'EXIT',Shell,R} -> + exit(R) end; -get_line_echo_off1({Chars,Rest}, _Drv) -> +get_line_echo_off1({Chars,Rest}, _Drv, _Shell) -> {done,lists:reverse(Chars),case Rest of done -> []; _ -> Rest end}. +get_chars_echo_off(Pbs, Drv, Shell) -> + send_drv_reqs(Drv, [{put_chars, unicode,Pbs}]), + get_chars_echo_off1(Drv, Shell). 
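
The Shell argument threaded through the clauses above lets every blocking read terminate when the shell process exits, and the new get_chars_n/8 / get_chars_echo_off/3 pair serves {get_chars,...} requests without waiting for a complete line when echo is off. A minimal, hedged sketch of how a caller could exercise that path; it assumes the calling process' group leader is a group.erl server on an interactive tty and is not taken from the patch itself:

    %% Hedged sketch: read a single key press with echo disabled.  With echo
    %% off the request should be served by get_chars_n_loop/9 above, i.e. it
    %% can return as soon as one character arrives instead of after a full,
    %% line-edited line.
    read_one_key() ->
        ok = io:setopts([{echo, false}]),
        Key = io:get_chars("press any key: ", 1),
        ok = io:setopts([{echo, true}]),
        Key.
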
+ +get_chars_echo_off1(Drv, Shell) -> + receive + {Drv, {data, Cs}} -> + Cs; + {Drv, eof} -> + eof; + {io_request,From,ReplyAs,Req} when is_pid(From) -> + io_request(Req, From, ReplyAs, Drv, Shell, []), + get_chars_echo_off1(Drv, Shell); + {reply,{{From,ReplyAs},Reply}} when From =/= undefined -> + %% We take care of replies from puts here as well + io_reply(From, ReplyAs, Reply), + get_chars_echo_off1(Drv, Shell); + {'EXIT',Drv,interrupt} -> + interrupted; + {'EXIT',Drv,_} -> + terminated; + {'EXIT',Shell,R} -> + exit(R) + end. + %% We support line editing for the ICANON mode except the following %% line editing characters, which already has another meaning in %% echo-on mode (See Advanced Programming in the Unix Environment, 2nd ed, @@ -793,9 +850,9 @@ search_up_stack(Stack, Substr) -> case up_stack(Stack) of {none,NewStack} -> {none,NewStack}; {L, NewStack} -> - case string:str(L, Substr) of - 0 -> search_up_stack(NewStack, Substr); - _ -> {string:strip(L,right,$\n), NewStack} + case string:find(L, Substr) of + nomatch -> search_up_stack(NewStack, Substr); + _ -> {string:trim(L, trailing, "$\n"), NewStack} end end. @@ -803,39 +860,41 @@ search_down_stack(Stack, Substr) -> case down_stack(Stack) of {none,NewStack} -> {none,NewStack}; {L, NewStack} -> - case string:str(L, Substr) of - 0 -> search_down_stack(NewStack, Substr); - _ -> {string:strip(L,right,$\n), NewStack} + case string:find(L, Substr) of + nomatch -> search_down_stack(NewStack, Substr); + _ -> {string:trim(L, trailing, "$\n"), NewStack} end end. %% This is get_line without line editing (except for backspace) and %% without echo. -get_password_line(Chars, Drv) -> - get_password1(edit_password(Chars,[]),Drv). +get_password_line(Chars, Drv, Shell) -> + get_password1(edit_password(Chars,[]),Drv,Shell). -get_password1({Chars,[]}, Drv) -> +get_password1({Chars,[]}, Drv, Shell) -> receive {Drv,{data,Cs}} -> - get_password1(edit_password(Cs,Chars),Drv); + get_password1(edit_password(Cs,Chars),Drv,Shell); {io_request,From,ReplyAs,Req} when is_pid(From) -> %send_drv_reqs(Drv, [{delete_chars, -length(Pbs)}]), - io_request(Req, From, ReplyAs, Drv, []), %WRONG!!! + io_request(Req, From, ReplyAs, Drv, Shell, []), %WRONG!!! %% I guess the reason the above line is wrong is that Buf is %% set to []. But do we expect anything but plain output? - get_password1({Chars, []}, Drv); + get_password1({Chars, []}, Drv, Shell); {reply,{{From,ReplyAs},Reply}} -> %% We take care of replies from puts here as well io_reply(From, ReplyAs, Reply), - get_password1({Chars, []},Drv); + get_password1({Chars, []},Drv, Shell); {'EXIT',Drv,interrupt} -> interrupted; {'EXIT',Drv,_} -> - terminated + terminated; + {'EXIT',Shell,R} -> + exit(R) end; -get_password1({Chars,Rest},Drv) -> +get_password1({Chars,Rest},Drv,_Shell) -> send_drv_reqs(Drv,[{put_chars, unicode, "\n"}]), {done,lists:reverse(Chars),case Rest of done -> []; _ -> Rest end}. diff --git a/lib/kernel/src/group_history.erl b/lib/kernel/src/group_history.erl index 91f3663cc5..9745848992 100644 --- a/lib/kernel/src/group_history.erl +++ b/lib/kernel/src/group_history.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2017. All Rights Reserved. +%% Copyright Ericsson AB 2017-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -260,7 +260,7 @@ resize_log(Name, _OldSize, NewSize) -> ok -> show('$#erlang-history-resize-result', "ok~n", []); - {error, {new_size_too_small, _}} -> + {error, {new_size_too_small, _, _}} -> show('$#erlang-history-resize-result', "failed (new size is too small)~n", []), disable_history(); diff --git a/lib/kernel/src/hipe_unified_loader.erl b/lib/kernel/src/hipe_unified_loader.erl index f4c7c277ed..5704cc79c2 100644 --- a/lib/kernel/src/hipe_unified_loader.erl +++ b/lib/kernel/src/hipe_unified_loader.erl @@ -236,9 +236,10 @@ load_common(Mod, Bin, Beam, Architecture) -> lists:foreach(fun({FE, DestAddress}) -> hipe_bifs:set_native_address_in_fe(FE, DestAddress) end, erase(closures_to_patch)), - ok = hipe_bifs:commit_patch_load(LoaderState), set_beam_call_traps(FunDefs), - ok; + export_funs(FunDefs), + ok = hipe_bifs:commit_patch_load(LoaderState), + ok; BeamBinary when is_binary(BeamBinary) -> %% Find all closures in the code. [] = erase(closures_to_patch), %Clean up, assertion. @@ -274,6 +275,7 @@ needs_trampolines(Architecture) -> arm -> true; powerpc -> true; ppc64 -> true; + amd64 -> true; _ -> false end. @@ -451,7 +453,7 @@ make_beam_stub(Mod, LoaderState, MD5, Beam, FunDefs, ClosuresToPatch) -> %%======================================================================== %% Patching %% @spec patch(refs(), BaseAddress::integer(), ConstAndZone::term(), -%% FunDefs::term(), TrampolineMap::term()) -> 'ok'. +%% FunDefs::term(), TrampolineMap::term()) -> 'ok' %% @type refs()=[{RefType::integer(), Reflist::reflist()} | refs()] %% %% @type reflist()= [{Data::term(), Offsets::offests()}|reflist()] diff --git a/lib/kernel/src/inet.erl b/lib/kernel/src/inet.erl index 6aef5476f1..9f22eb6aaa 100644 --- a/lib/kernel/src/inet.erl +++ b/lib/kernel/src/inet.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -34,7 +34,8 @@ ip/1, stats/0, options/0, pushf/3, popf/1, close/1, gethostname/0, gethostname/1, parse_ipv4_address/1, parse_ipv6_address/1, parse_ipv4strict_address/1, - parse_ipv6strict_address/1, parse_address/1, parse_strict_address/1, ntoa/1]). + parse_ipv6strict_address/1, parse_address/1, parse_strict_address/1, + ntoa/1, ipv4_mapped_ipv6_address/1]). -export([connect_options/2, listen_options/2, udp_options/2, sctp_options/2]). -export([udp_module/1, tcp_module/1, tcp_module/2, sctp_module/1]). @@ -72,10 +73,10 @@ %% timer interface -export([start_timer/1, timeout/1, timeout/2, stop_timer/1]). --export_type([address_family/0, hostent/0, hostname/0, ip4_address/0, +-export_type([address_family/0, socket_protocol/0, hostent/0, hostname/0, ip4_address/0, ip6_address/0, ip_address/0, port_number/0, local_address/0, socket_address/0, returned_non_ip_address/0, - socket_setopt/0, socket_getopt/0, + socket_setopt/0, socket_getopt/0, ancillary_data/0, posix/0, socket/0, stat_option/0]). %% imports -import(lists, [append/1, duplicate/2, filter/2, foldl/3]). @@ -104,7 +105,20 @@ {local, binary()} | {unspec, <<>>} | {undefined, any()}. --type posix() :: exbadport | exbadseq | file:posix(). 
+-type posix() :: + 'eaddrinuse' | 'eaddrnotavail' | 'eafnosupport' | 'ealready' | + 'econnaborted' | 'econnrefused' | 'econnreset' | + 'edestaddrreq' | + 'ehostdown' | 'ehostunreach' | + 'einprogress' | 'eisconn' | + 'emsgsize' | + 'enetdown' | 'enetunreach' | + 'enopkg' | 'enoprotoopt' | 'enotconn' | 'enotty' | 'enotsock' | + 'eproto' | 'eprotonosupport' | 'eprototype' | + 'esocktnosupport' | + 'etimedout' | + 'ewouldblock' | + 'exbadport' | 'exbadseq' | file:posix(). -type socket() :: port(). -type socket_setopt() :: @@ -140,6 +154,15 @@ 'running' | 'multicast' | 'loopback']} | {'hwaddr', ether_address()}. +-type getifaddrs_ifopts() :: + [Ifopt :: {flags, Flags :: [up | broadcast | loopback | + pointtopoint | running | multicast]} | + {addr, Addr :: ip_address()} | + {netmask, Netmask :: ip_address()} | + {broadaddr, Broadaddr :: ip_address()} | + {dstaddr, Dstaddr :: ip_address()} | + {hwaddr, Hwaddr :: [byte()]}]. + -type address_family() :: 'inet' | 'inet6' | 'local'. -type socket_protocol() :: 'tcp' | 'udp' | 'sctp'. -type socket_type() :: 'stream' | 'dgram' | 'seqpacket'. @@ -149,9 +172,13 @@ 'recv_cnt' | 'recv_max' | 'recv_avg' | 'recv_oct' | 'recv_dvi' | 'send_cnt' | 'send_max' | 'send_avg' | 'send_oct' | 'send_pend'. +-type ancillary_data() :: + [ {'tos', byte()} | {'tclass', byte()} | {'ttl', byte()} ]. + %%% --------------------------------- --spec get_rc() -> [{Par :: any(), Val :: any()}]. +-spec get_rc() -> [{Par :: atom(), Val :: any()} | + {Par :: atom(), Val1 :: any(), Val2 :: any()}]. get_rc() -> inet_db:get_rc(). @@ -287,7 +314,7 @@ setopts(Socket, Opts) -> {'ok', OptionValues} | {'error', posix()} when Socket :: socket(), Options :: [socket_getopt()], - OptionValues :: [socket_setopt()]. + OptionValues :: [socket_setopt() | gen_tcp:pktoptions_value()]. getopts(Socket, Opts) -> case prim_inet:getopts(Socket, Opts) of @@ -303,32 +330,32 @@ getopts(Socket, Opts) -> Other end. --spec getifaddrs(Socket :: socket()) -> - {'ok', [string()]} | {'error', posix()}. - +-spec getifaddrs( + [Option :: {netns, Namespace :: file:filename_all()}] + | socket()) -> + {'ok', [{Ifname :: string(), + Ifopts :: getifaddrs_ifopts()}]} + | {'error', posix()}. +getifaddrs(Opts) when is_list(Opts) -> + withsocket(fun(S) -> prim_inet:getifaddrs(S) end, Opts); getifaddrs(Socket) -> prim_inet:getifaddrs(Socket). --spec getifaddrs() -> {ok, Iflist} | {error, posix()} when - Iflist :: [{Ifname,[Ifopt]}], - Ifname :: string(), - Ifopt :: {flags,[Flag]} | {addr,Addr} | {netmask,Netmask} - | {broadaddr,Broadaddr} | {dstaddr,Dstaddr} - | {hwaddr,Hwaddr}, - Flag :: up | broadcast | loopback | pointtopoint - | running | multicast, - Addr :: ip_address(), - Netmask :: ip_address(), - Broadaddr :: ip_address(), - Dstaddr :: ip_address(), - Hwaddr :: [byte()]. - +-spec getifaddrs() -> + {'ok', [{Ifname :: string(), + Ifopts :: getifaddrs_ifopts()}]} + | {'error', posix()}. getifaddrs() -> withsocket(fun(S) -> prim_inet:getifaddrs(S) end). --spec getiflist(Socket :: socket()) -> - {'ok', [string()]} | {'error', posix()}. +-spec getiflist( + [Option :: {netns, Namespace :: file:filename_all()}] + | socket()) -> + {'ok', [string()]} | {'error', posix()}. + +getiflist(Opts) when is_list(Opts) -> + withsocket(fun(S) -> prim_inet:getiflist(S) end, Opts); getiflist(Socket) -> prim_inet:getiflist(Socket). @@ -345,11 +372,19 @@ getiflist() -> ifget(Socket, Name, Opts) -> prim_inet:ifget(Socket, Name, Opts). 
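
getifaddrs/0,1 and getiflist/0,1 above now also accept an option list, which withsocket/2 passes to the temporary UDP probe socket, so the query can be run inside another network namespace. A hedged usage sketch; the namespace path and function name are examples, not from the patch:

    %% Names of all interfaces as seen from a given Linux network namespace.
    ifnames_in_ns(NsPath) ->
        {ok, IfAddrs} = inet:getifaddrs([{netns, NsPath}]),
        [Name || {Name, _Ifopts} <- IfAddrs].
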
--spec ifget(Name :: string() | atom(), Opts :: [if_getopt()]) -> +-spec ifget( + Name :: string() | atom(), + Opts :: [if_getopt() | + {netns, Namespace :: file:filename_all()}]) -> {'ok', [if_getopt_result()]} | {'error', posix()}. ifget(Name, Opts) -> - withsocket(fun(S) -> prim_inet:ifget(S, Name, Opts) end). + {NSOpts,IFOpts} = + lists:partition( + fun ({netns,_}) -> true; + (_) -> false + end, Opts), + withsocket(fun(S) -> prim_inet:ifget(S, Name, IFOpts) end, NSOpts). -spec ifset(Socket :: socket(), Name :: string() | atom(), @@ -359,11 +394,19 @@ ifget(Name, Opts) -> ifset(Socket, Name, Opts) -> prim_inet:ifset(Socket, Name, Opts). --spec ifset(Name :: string() | atom(), Opts :: [if_setopt()]) -> +-spec ifset( + Name :: string() | atom(), + Opts :: [if_setopt() | + {netns, Namespace :: file:filename_all()}]) -> 'ok' | {'error', posix()}. ifset(Name, Opts) -> - withsocket(fun(S) -> prim_inet:ifset(S, Name, Opts) end). + {NSOpts,IFOpts} = + lists:partition( + fun ({netns,_}) -> true; + (_) -> false + end, Opts), + withsocket(fun(S) -> prim_inet:ifset(S, Name, IFOpts) end, NSOpts). -spec getif() -> {'ok', [{ip_address(), ip_address() | 'undefined', ip_address()}]} | @@ -373,10 +416,14 @@ getif() -> withsocket(fun(S) -> getif(S) end). %% backwards compatible getif --spec getif(Socket :: socket()) -> +-spec getif( + [Option :: {netns, Namespace :: file:filename_all()}] + | socket()) -> {'ok', [{ip_address(), ip_address() | 'undefined', ip_address()}]} | {'error', posix()}. +getif(Opts) when is_list(Opts) -> + withsocket(fun(S) -> getif(S) end, Opts); getif(Socket) -> case prim_inet:getiflist(Socket) of {ok, IfList} -> @@ -397,7 +444,10 @@ getif(Socket) -> end. withsocket(Fun) -> - case inet_udp:open(0,[]) of + withsocket(Fun, []). +%% +withsocket(Fun, Opts) -> + case inet_udp:open(0, Opts) of {ok,Socket} -> Res = Fun(Socket), inet_udp:close(Socket), @@ -674,6 +724,14 @@ parse_address(Addr) -> parse_strict_address(Addr) -> inet_parse:strict_address(Addr). +-spec ipv4_mapped_ipv6_address(ip_address()) -> ip_address(). +ipv4_mapped_ipv6_address({D1,D2,D3,D4}) + when (D1 bor D2 bor D3 bor D4) < 256 -> + {0,0,0,0,0,16#ffff,(D1 bsl 8) bor D2,(D3 bsl 8) bor D4}; +ipv4_mapped_ipv6_address({D1,D2,D3,D4,D5,D6,D7,D8}) + when (D1 bor D2 bor D3 bor D4 bor D5 bor D6 bor D7 bor D8) < 65536 -> + {D7 bsr 8,D7 band 255,D8 bsr 8,D8 band 255}. 
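
The new inet:ipv4_mapped_ipv6_address/1 converts in both directions, exactly as the two clauses above spell out; a short shell sketch:

    1> inet:ipv4_mapped_ipv6_address({192,168,0,1}).
    {0,0,0,0,0,65535,49320,1}
    2> inet:ipv4_mapped_ipv6_address({0,0,0,0,0,16#ffff,16#c0a8,1}).
    {192,168,0,1}

Note that the 8-tuple clause only checks that each element fits in 16 bits, so the reverse direction keeps just the low 32 bits of any IPv6 tuple it is given.
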
+ %% Return a list of available options options() -> [ @@ -699,6 +757,7 @@ stats() -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% connect_options() -> [tos, tclass, priority, reuseaddr, keepalive, linger, sndbuf, recbuf, nodelay, + recvtos, recvtclass, ttl, recvttl, header, active, packet, packet_size, buffer, mode, deliver, line_delimiter, exit_on_close, high_watermark, low_watermark, high_msgq_watermark, low_msgq_watermark, send_timeout, send_timeout_close, delay_send, raw, @@ -767,6 +826,7 @@ con_add(Name, Val, #connect_opts{} = R, Opts, AllOpts) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% listen_options() -> [tos, tclass, priority, reuseaddr, keepalive, linger, sndbuf, recbuf, nodelay, + recvtos, recvtclass, ttl, recvttl, header, active, packet, buffer, mode, deliver, backlog, ipv6_v6only, exit_on_close, high_watermark, low_watermark, high_msgq_watermark, low_msgq_watermark, send_timeout, send_timeout_close, delay_send, @@ -847,7 +907,7 @@ tcp_module_1(Opts, Address) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% udp_options() -> [tos, tclass, priority, reuseaddr, sndbuf, recbuf, header, active, buffer, mode, - deliver, ipv6_v6only, + recvtos, recvtclass, ttl, recvttl, deliver, ipv6_v6only, broadcast, dontroute, multicast_if, multicast_ttl, multicast_loop, add_membership, drop_membership, read_packets,raw, high_msgq_watermark, low_msgq_watermark, bind_to_device]. @@ -917,8 +977,10 @@ udp_module(Opts) -> % (*) passing of open FDs ("fdopen") is not supported. sctp_options() -> [ % The following are generic inet options supported for SCTP sockets: - mode, active, buffer, tos, tclass, priority, dontroute, reuseaddr, linger, sndbuf, - recbuf, ipv6_v6only, high_msgq_watermark, low_msgq_watermark, + mode, active, buffer, tos, tclass, ttl, + priority, dontroute, reuseaddr, linger, + recvtos, recvtclass, recvttl, + sndbuf, recbuf, ipv6_v6only, high_msgq_watermark, low_msgq_watermark, bind_to_device, % Other options are SCTP-specific (though they may be similar to their @@ -1243,9 +1305,7 @@ gethostbyname_string(Name, Type) inet -> inet_parse:ipv4_address(Name); inet6 -> - %% XXX should we really translate IPv4 addresses here - %% even if we do not know if this host can do IPv6? - inet_parse:ipv6_address(Name) + inet_parse:ipv6strict_address(Name) end of {ok,IP} -> {ok,make_hostent(Name, [IP], [], Type)}; @@ -1451,11 +1511,14 @@ fdopen(Fd, Addr, Port, Opts, Protocol, Family, Type, Module) -> %% socket stat %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +-spec i() -> ok. i() -> i(tcp), i(udp), i(sctp). +-spec i(socket_protocol()) -> ok. i(Proto) -> i(Proto, [port, module, recv, sent, owner, local_address, foreign_address, state, type]). +-spec i(socket_protocol(), [atom()]) -> ok. i(tcp, Fs) -> ii(tcp_sockets(), Fs, tcp); i(udp, Fs) -> diff --git a/lib/kernel/src/inet6_tcp.erl b/lib/kernel/src/inet6_tcp.erl index a0d5d3df70..347b8b9a1b 100644 --- a/lib/kernel/src/inet6_tcp.erl +++ b/lib/kernel/src/inet6_tcp.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2016. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -167,7 +167,7 @@ listen(Port, Opts) -> %% Accept %% accept(L) -> - case prim_inet:accept(L) of + case prim_inet:accept(L, accept_family_opts()) of {ok, S} -> inet_db:register_socket(S, ?MODULE), {ok,S}; @@ -175,13 +175,15 @@ accept(L) -> end. accept(L, Timeout) -> - case prim_inet:accept(L, Timeout) of + case prim_inet:accept(L, Timeout, accept_family_opts()) of {ok, S} -> inet_db:register_socket(S, ?MODULE), {ok,S}; Error -> Error end. +accept_family_opts() -> [tclass, recvtclass]. + %% %% Create a port/socket from a file descriptor %% diff --git a/lib/kernel/src/inet_config.erl b/lib/kernel/src/inet_config.erl index 4bbc520449..9f76360b8b 100644 --- a/lib/kernel/src/inet_config.erl +++ b/lib/kernel/src/inet_config.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2016. All Rights Reserved. +%% Copyright Ericsson AB 1997-2017. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -369,7 +369,7 @@ win32_load1(Reg,Type,HFileKey) -> end. win32_split_line(Line,nt) -> inet_parse:split_line(Line); -win32_split_line(Line,windows) -> string:tokens(Line, ","). +win32_split_line(Line,windows) -> string:lexemes(Line, ","). win32_get_strings(Reg, Names) -> win32_get_strings(Reg, Names, []). diff --git a/lib/kernel/src/inet_dns.erl b/lib/kernel/src/inet_dns.erl index d5f982cc51..f1f58bc872 100644 --- a/lib/kernel/src/inet_dns.erl +++ b/lib/kernel/src/inet_dns.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2016. All Rights Reserved. +%% Copyright Ericsson AB 1997-2017. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -29,7 +29,7 @@ -export([decode/1, encode/1]). --import(lists, [reverse/1, reverse/2, nthtail/2]). +-import(lists, [reverse/1]). -include("inet_int.hrl"). -include("inet_dns.hrl"). @@ -473,7 +473,7 @@ decode_data(<<Order:16,Preference:16,Data0/binary>>, _, ?S_NAPTR, Buffer) -> {Data2,Services} = decode_string(Data1), {Data,Regexp} = decode_characters(Data2, utf8), Replacement = decode_domain(Data, Buffer), - {Order,Preference,string:to_lower(Flags),string:to_lower(Services), + {Order,Preference,string:lowercase(Flags),string:lowercase(Services), Regexp,Replacement}; %% ?S_OPT falls through to default decode_data(Data, _, ?S_TXT, _) -> diff --git a/lib/kernel/src/inet_hosts.erl b/lib/kernel/src/inet_hosts.erl index 0bdf00ac30..fc653bf0d3 100644 --- a/lib/kernel/src/inet_hosts.erl +++ b/lib/kernel/src/inet_hosts.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2016. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -72,9 +72,6 @@ gethostbyname(Name, Type, Byname, Byaddr) -> gethostbyaddr({A,B,C,D}=IP) when ?ip(A,B,C,D) -> gethostbyaddr(IP, inet); -%% ipv4 only ipv6 address -gethostbyaddr({0,0,0,0,0,16#ffff=F,G,H}) when ?ip6(0,0,0,0,0,F,G,H) -> - gethostbyaddr({G bsr 8, G band 255, H bsr 8, H band 255}); gethostbyaddr({A,B,C,D,E,F,G,H}=IP) when ?ip6(A,B,C,D,E,F,G,H) -> gethostbyaddr(IP, inet6); gethostbyaddr(Addr) when is_list(Addr) -> diff --git a/lib/kernel/src/inet_int.hrl b/lib/kernel/src/inet_int.hrl index bc5b67f7bf..f6525d7261 100644 --- a/lib/kernel/src/inet_int.hrl +++ b/lib/kernel/src/inet_int.hrl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -100,6 +100,8 @@ -define(TCP_REQ_RECV, 42). -define(TCP_REQ_UNRECV, 43). -define(TCP_REQ_SHUTDOWN, 44). +-define(TCP_REQ_SENDFILE, 45). + %% UDP and SCTP requests -define(PACKET_REQ_RECV, 60). %%-define(SCTP_REQ_LISTEN, 61). MERGED @@ -155,6 +157,12 @@ -define(INET_LOPT_LINE_DELIM, 40). -define(INET_OPT_TCLASS, 41). -define(INET_OPT_BIND_TO_DEVICE, 42). +-define(INET_OPT_RECVTOS, 43). +-define(INET_OPT_RECVTCLASS, 44). +-define(INET_OPT_PKTOPTIONS, 45). +-define(INET_OPT_TTL, 46). +-define(INET_OPT_RECVTTL, 47). +-define(TCP_OPT_NOPUSH, 48). % Specific SCTP options: separate range: -define(SCTP_OPT_RTOINFO, 100). -define(SCTP_OPT_ASSOCINFO, 101). @@ -319,6 +327,12 @@ [((X) bsr 24) band 16#ff, ((X) bsr 16) band 16#ff, ((X) bsr 8) band 16#ff, (X) band 16#ff]). +-define(int64(X), + [((X) bsr 56) band 16#ff, ((X) bsr 48) band 16#ff, + ((X) bsr 40) band 16#ff, ((X) bsr 32) band 16#ff, + ((X) bsr 24) band 16#ff, ((X) bsr 16) band 16#ff, + ((X) bsr 8) band 16#ff, (X) band 16#ff]). + -define(intAID(X), % For SCTP AssocID ?int32(X)). diff --git a/lib/kernel/src/inet_parse.erl b/lib/kernel/src/inet_parse.erl index 29804dc50b..e9685c6554 100644 --- a/lib/kernel/src/inet_parse.erl +++ b/lib/kernel/src/inet_parse.erl @@ -95,7 +95,7 @@ hosts(Fname,File) -> %% interface with a %if suffix. These kind of %% addresses maybe need to be gracefully handled %% throughout inet* and inet_drv. - case string:tokens(Address, "%") of + case string:lexemes(Address, "%") of [Addr,_] -> {ok,_} = address(Addr), skip; @@ -407,7 +407,7 @@ is_dom1([C | Cs]) when C >= $a, C =< $z -> is_dom_ldh(Cs); is_dom1([C | Cs]) when C >= $A, C =< $Z -> is_dom_ldh(Cs); is_dom1([C | Cs]) when C >= $0, C =< $9 -> case is_dom_ldh(Cs) of - true -> is_dom2(string:tokens([C | Cs],".")); + true -> is_dom2(string:lexemes([C | Cs],".")); false -> false end; is_dom1(_) -> false. diff --git a/lib/kernel/src/inet_res.erl b/lib/kernel/src/inet_res.erl index 90e49ddfdf..6454802b04 100644 --- a/lib/kernel/src/inet_res.erl +++ b/lib/kernel/src/inet_res.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2016. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -349,9 +349,6 @@ gethostbyaddr_tm({A,B,C,D} = IP, Timer) when ?ip(A,B,C,D) -> {ok, HEnt} -> {ok, HEnt}; _ -> res_gethostbyaddr(dn_in_addr_arpa(A,B,C,D), IP, Timer) end; -%% ipv4 only ipv6 address -gethostbyaddr_tm({0,0,0,0,0,16#ffff,G,H},Timer) when is_integer(G+H) -> - gethostbyaddr_tm({G div 256, G rem 256, H div 256, H rem 256},Timer); gethostbyaddr_tm({A,B,C,D,E,F,G,H} = IP, Timer) when ?ip6(A,B,C,D,E,F,G,H) -> inet_db:res_update_conf(), case inet_db:gethostbyaddr(IP) of @@ -431,28 +428,7 @@ gethostbyname(Name,Family,Timeout) -> gethostbyname_tm(Name,inet,Timer) -> getbyname_tm(Name,?S_A,Timer); gethostbyname_tm(Name,inet6,Timer) -> - case getbyname_tm(Name,?S_AAAA,Timer) of - {ok,HEnt} -> {ok,HEnt}; - {error,nxdomain} -> - case getbyname_tm(Name, ?S_A,Timer) of - {ok, HEnt} -> - %% rewrite to a ipv4 only ipv6 address - {ok, - HEnt#hostent { - h_addrtype = inet6, - h_length = 16, - h_addr_list = - lists:map( - fun({A,B,C,D}) -> - {0,0,0,0,0,16#ffff,A*256+B,C*256+D} - end, HEnt#hostent.h_addr_list) - }}; - Error -> - Error - end; - Error -> - Error - end; + getbyname_tm(Name,?S_AAAA,Timer); gethostbyname_tm(_Name, _Family, _Timer) -> {error, einval}. @@ -859,15 +835,17 @@ query_ns(S0, Id, Buffer, IP, Port, Timer, Retry, I, {ok,S} -> Timeout = inet:timeout( (Tm * (1 bsl I)) div Retry, Timer), - {S, case query_udp( S, Id, Buffer, IP, Port, Timeout, Verbose) of {ok,#dns_rec{header=H}} when H#dns_header.tc -> TcpTimeout = inet:timeout(Tm*5, Timer), - query_tcp( - TcpTimeout, Id, Buffer, IP, Port, Verbose); - Reply -> Reply - end}; + {S, query_tcp( + TcpTimeout, Id, Buffer, IP, Port, Verbose)}; + {error, econnrefused} = Err -> + ok = udp_close(S), + {#sock{}, Err}; + Reply -> {S, Reply} + end; Error -> {S0,Error} end diff --git a/lib/kernel/src/inet_tcp.erl b/lib/kernel/src/inet_tcp.erl index dac6b3119d..f1e3116856 100644 --- a/lib/kernel/src/inet_tcp.erl +++ b/lib/kernel/src/inet_tcp.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2016. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -164,7 +164,7 @@ listen(Port, Opts) -> %% Accept %% accept(L) -> - case prim_inet:accept(L) of + case prim_inet:accept(L, accept_family_opts()) of {ok, S} -> inet_db:register_socket(S, ?MODULE), {ok,S}; @@ -172,13 +172,15 @@ accept(L) -> end. accept(L, Timeout) -> - case prim_inet:accept(L, Timeout) of + case prim_inet:accept(L, Timeout, accept_family_opts()) of {ok, S} -> inet_db:register_socket(S, ?MODULE), {ok,S}; Error -> Error end. +accept_family_opts() -> [tos, ttl, recvtos, recvttl]. + %% %% Create a port/socket from a file descriptor %% diff --git a/lib/kernel/src/inet_tcp_dist.erl b/lib/kernel/src/inet_tcp_dist.erl index e3fdb1bb22..c37212b0f9 100644 --- a/lib/kernel/src/inet_tcp_dist.erl +++ b/lib/kernel/src/inet_tcp_dist.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -283,73 +283,22 @@ do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) -> ?trace("~p~n",[{inet_tcp_dist,self(),setup,Node}]), [Name, Address] = splitnode(Driver, Node, LongOrShortNames), AddressFamily = Driver:family(), - case inet:getaddr(Address, AddressFamily) of + ErlEpmd = net_kernel:epmd_module(), + {ARMod, ARFun} = get_address_resolver(ErlEpmd), + Timer = dist_util:start_timer(SetupTime), + case ARMod:ARFun(Name, Address, AddressFamily) of + {ok, Ip, TcpPort, Version} -> + ?trace("address_please(~p) -> version ~p~n", + [Node,Version]), + do_setup_connect(Driver, Kernel, Node, Address, AddressFamily, + Ip, TcpPort, Version, Type, MyNode, Timer); {ok, Ip} -> - Timer = dist_util:start_timer(SetupTime), - ErlEpmd = net_kernel:epmd_module(), case ErlEpmd:port_please(Name, Ip) of {port, TcpPort, Version} -> ?trace("port_please(~p) -> version ~p~n", [Node,Version]), - dist_util:reset_timer(Timer), - case - Driver:connect( - Ip, TcpPort, - connect_options([{active, false}, {packet, 2}])) - of - {ok, Socket} -> - HSData = #hs_data{ - kernel_pid = Kernel, - other_node = Node, - this_node = MyNode, - socket = Socket, - timer = Timer, - this_flags = 0, - other_version = Version, - f_send = fun Driver:send/2, - f_recv = fun Driver:recv/3, - f_setopts_pre_nodeup = - fun(S) -> - inet:setopts - (S, - [{active, false}, - {packet, 4}, - nodelay()]) - end, - f_setopts_post_nodeup = - fun(S) -> - inet:setopts - (S, - [{active, true}, - {deliver, port}, - {packet, 4}, - nodelay()]) - end, - - f_getll = fun inet:getll/1, - f_address = - fun(_,_) -> - #net_address{ - address = {Ip,TcpPort}, - host = Address, - protocol = tcp, - family = AddressFamily} - end, - mf_tick = fun(S) -> ?MODULE:tick(Driver, S) end, - mf_getstat = fun ?MODULE:getstat/1, - request_type = Type, - mf_setopts = fun ?MODULE:setopts/2, - mf_getopts = fun ?MODULE:getopts/2 - }, - dist_util:handshake_we_started(HSData); - _ -> - %% Other Node may have closed since - %% port_please ! - ?trace("other node (~p) " - "closed since port_please.~n", - [Node]), - ?shutdown(Node) - end; + do_setup_connect(Driver, Kernel, Node, Address, AddressFamily, + Ip, TcpPort, Version, Type, MyNode, Timer); _ -> ?trace("port_please (~p) " "failed.~n", [Node]), @@ -361,6 +310,71 @@ do_setup(Driver, Kernel, Node, Type, MyNode, LongOrShortNames, SetupTime) -> ?shutdown(Node) end. 
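
do_setup/7 now asks the configured epmd module (net_kernel:epmd_module()) for an address_please/3 resolver before falling back to inet:getaddr/2 plus port_please/2; the probing helper, get_address_resolver/1, follows below. A hedged sketch of an alternative epmd module using the hook. Only the new callback is shown, and the module name, port number, distribution protocol version and the "-epmd_module" flag are assumptions for illustration, not taken from the patch:

    %% my_epmd.erl -- hypothetical epmd replacement module, e.g. selected
    %% with "erl -epmd_module my_epmd".
    -module(my_epmd).
    -export([address_please/3]).

    %% Resolving address, port and version in one call lets inet_tcp_dist
    %% skip the separate port_please/2 lookup; returning just {ok, Ip}
    %% keeps the old code path.
    address_please(_Name, Host, AddressFamily) ->
        {ok, Ip} = inet:getaddr(Host, AddressFamily),
        {ok, Ip, 4370, 5}.
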
+%% +%% Actual setup of connection +%% +do_setup_connect(Driver, Kernel, Node, Address, AddressFamily, + Ip, TcpPort, Version, Type, MyNode, Timer) -> + dist_util:reset_timer(Timer), + case + Driver:connect( + Ip, TcpPort, + connect_options([{active, false}, {packet, 2}])) + of + {ok, Socket} -> + HSData = #hs_data{ + kernel_pid = Kernel, + other_node = Node, + this_node = MyNode, + socket = Socket, + timer = Timer, + this_flags = 0, + other_version = Version, + f_send = fun Driver:send/2, + f_recv = fun Driver:recv/3, + f_setopts_pre_nodeup = + fun(S) -> + inet:setopts + (S, + [{active, false}, + {packet, 4}, + nodelay()]) + end, + f_setopts_post_nodeup = + fun(S) -> + inet:setopts + (S, + [{active, true}, + {deliver, port}, + {packet, 4}, + nodelay()]) + end, + + f_getll = fun inet:getll/1, + f_address = + fun(_,_) -> + #net_address{ + address = {Ip,TcpPort}, + host = Address, + protocol = tcp, + family = AddressFamily} + end, + mf_tick = fun(S) -> ?MODULE:tick(Driver, S) end, + mf_getstat = fun ?MODULE:getstat/1, + request_type = Type, + mf_setopts = fun ?MODULE:setopts/2, + mf_getopts = fun ?MODULE:getopts/2 + }, + dist_util:handshake_we_started(HSData); + _ -> + %% Other Node may have closed since + %% discovery ! + ?trace("other node (~p) " + "closed since discovery (port_please).~n", + [Node]), + ?shutdown(Node) + end. + connect_options(Opts) -> case application:get_env(kernel, inet_dist_connect_options) of {ok,ConnectOpts} -> @@ -430,6 +444,16 @@ get_tcp_address(Driver, Socket) -> }. %% ------------------------------------------------------------ +%% Determine if EPMD module supports address resolving. Default +%% is to use inet:getaddr/2. +%% ------------------------------------------------------------ +get_address_resolver(EpmdModule) -> + case erlang:function_exported(EpmdModule, address_please, 3) of + true -> {EpmdModule, address_please}; + _ -> {erl_epmd, address_please} + end. + +%% ------------------------------------------------------------ %% Do only accept new connection attempts from nodes at our %% own LAN, if the check_ip environment parameter is true. %% ------------------------------------------------------------ diff --git a/lib/kernel/src/kernel.app.src b/lib/kernel/src/kernel.app.src index e150938487..fe073621c8 100644 --- a/lib/kernel/src/kernel.app.src +++ b/lib/kernel/src/kernel.app.src @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -57,8 +57,21 @@ inet_tcp_dist, kernel, kernel_config, + kernel_refc, local_tcp, local_udp, + logger, + logger_backend, + logger_config, + logger_disk_log_h, + logger_filters, + logger_formatter, + logger_h_common, + logger_handler_watcher, + logger_server, + logger_simple_h, + logger_std_h, + logger_sup, net, net_adm, net_kernel, @@ -88,6 +101,13 @@ inet_udp, inet_sctp, pg2, + raw_file_io, + raw_file_io_compressed, + raw_file_io_deflate, + raw_file_io_delayed, + raw_file_io_inflate, + raw_file_io_list, + raw_file_io_raw, seq_trace, standard_error, wrap_log_reader]}, @@ -107,7 +127,11 @@ heart, init, kernel_config, + kernel_refc, kernel_sup, + logger, + logger_handler_watcher, + logger_sup, net_kernel, net_sup, rex, @@ -118,8 +142,10 @@ inet_db, pg2]}, {applications, []}, - {env, [{error_logger, tty}]}, + {env, [{logger_level, notice}, + {logger_sasl_compatible, false} + ]}, {mod, {kernel, []}}, - {runtime_dependencies, ["erts-9.0", "stdlib-3.0", "sasl-3.0"]} + {runtime_dependencies, ["erts-10.1", "stdlib-3.5", "sasl-3.0"]} ] }. diff --git a/lib/kernel/src/kernel.appup.src b/lib/kernel/src/kernel.appup.src index 77085b2064..0c0435e051 100644 --- a/lib/kernel/src/kernel.appup.src +++ b/lib/kernel/src/kernel.appup.src @@ -18,7 +18,13 @@ %% %CopyrightEnd% {"%VSN%", %% Up from - max one major revision back - [{<<"5\\.[0-2](\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-19.* + [{<<"5\\.3(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.0 + {<<"5\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.1+ + {<<"6\\.0(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-21.0 + {<<"6\\.1(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-21.1 %% Down to - max one major revision back - [{<<"5\\.[0-2](\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-19.* + [{<<"5\\.3(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.0 + {<<"5\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.1+ + {<<"6\\.0(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-21.0 + {<<"6\\.1(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-21.1 }. diff --git a/lib/kernel/src/kernel.erl b/lib/kernel/src/kernel.erl index cba57088ec..c68d04e279 100644 --- a/lib/kernel/src/kernel.erl +++ b/lib/kernel/src/kernel.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -30,24 +30,13 @@ %%% Callback functions for the kernel application. %%%----------------------------------------------------------------- start(_, []) -> + %% Setup the logger and configure the kernel logger environment + ok = logger:internal_init_logger(), case supervisor:start_link({local, kernel_sup}, kernel, []) of {ok, Pid} -> - %% add signal handler - case whereis(erl_signal_server) of - %% in case of minimal mode - undefined -> ok; - _ -> - ok = gen_event:add_handler(erl_signal_server, erl_signal_handler, []) - end, - %% add error handler - Type = get_error_logger_type(), - case error_logger:swap_handler(Type) of - ok -> {ok, Pid, []}; - Error -> - %% Not necessary since the node will crash anyway: - exit(Pid, shutdown), - Error - end; + ok = erl_signal_handler:start(), + ok = logger:add_handlers(kernel), + {ok, Pid, []}; Error -> Error end. @@ -62,16 +51,6 @@ config_change(Changed, New, Removed) -> do_global_groups_change(Changed, New, Removed), ok. 
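
With kernel:start/2 now calling logger:internal_init_logger() and logger:add_handlers(kernel), and the application env above defaulting to {logger_level, notice}, handler setup moves from error_logger swapping to logger configuration. A hedged sys.config sketch using the {handler, Id, Module, Config} shape declared by config_handler() in logger.erl below; the parameter name "logger", the chosen levels and the handler options are illustrative assumptions, not shown in the env defaults above:

    %% sys.config (sketch): let kernel add a default logger_std_h handler
    %% and raise the primary level from the notice default to info.
    [{kernel,
      [{logger_level, info},
       {logger,
        [{handler, default, logger_std_h,
          #{level => info}}]}]}].
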
-get_error_logger_type() -> - case application:get_env(kernel, error_logger) of - {ok, tty} -> tty; - {ok, {file, File}} when is_list(File) -> {logfile, File}; - {ok, false} -> false; - {ok, silent} -> silent; - undefined -> tty; % default value - {ok, Bad} -> exit({bad_config, {kernel, {error_logger, Bad}}}) - end. - %%%----------------------------------------------------------------- %%% The process structure in kernel is as shown in the figure. %%% @@ -111,6 +90,13 @@ init([]) -> type => worker, modules => [kernel_config]}, + RefC = #{id => kernel_refc, + start => {kernel_refc, start_link, []}, + restart => permanent, + shutdown => 2000, + type => worker, + modules => [kernel_refc]}, + Code = #{id => code_server, start => {code, start_link, []}, restart => permanent, @@ -146,9 +132,18 @@ init([]) -> type => supervisor, modules => [?MODULE]}, + + LoggerSup = #{id => logger_sup, + start => {logger_sup, start_link, []}, + restart => permanent, + shutdown => infinity, + type => supervisor, + modules => [logger_sup]}, + case init:get_argument(mode) of {ok, [["minimal"]]} -> - {ok, {SupFlags, [Code, File, StdError, User, Config, SafeSup]}}; + {ok, {SupFlags, + [Code, File, StdError, User, LoggerSup, Config, RefC, SafeSup]}}; _ -> Rpc = #{id => rex, start => {rpc, start_link, []}, @@ -199,7 +194,7 @@ init([]) -> {ok, {SupFlags, [Code, Rpc, Global, InetDb | DistAC] ++ [NetSup, GlGroup, File, SigSrv, - StdError, User, Config, SafeSup] ++ Timer}} + StdError, User, Config, RefC, SafeSup, LoggerSup] ++ Timer}} end; init(safe) -> SupFlags = #{strategy => one_for_one, diff --git a/lib/kernel/src/kernel_config.erl b/lib/kernel/src/kernel_config.erl index 535083ef27..691a266c2d 100644 --- a/lib/kernel/src/kernel_config.erl +++ b/lib/kernel/src/kernel_config.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -30,11 +30,8 @@ %%%----------------------------------------------------------------- %%% This module implements a process that configures the kernel %%% application. -%%% Its purpose is that in the init phase add an error_logger -%%% and when it dies (when the kernel application dies) deleting the -%%% previously installed error_logger. -%%% Also, this process waits for other nodes at startup, if -%%% specified. +%%% Its purpose is that in the init phase waits for other nodes at startup, +%%% if specified. %%%----------------------------------------------------------------- start_link() -> gen_server:start_link(kernel_config, [], []). diff --git a/lib/kernel/src/kernel_refc.erl b/lib/kernel/src/kernel_refc.erl new file mode 100644 index 0000000000..8e04ff99d8 --- /dev/null +++ b/lib/kernel/src/kernel_refc.erl @@ -0,0 +1,139 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(kernel_refc). + +-behaviour(gen_server). + +%% External exports +-export([start_link/0, scheduler_wall_time/1]). +%% Internal exports +-export([init/1, handle_info/2, terminate/2]). +-export([handle_call/3, handle_cast/2, code_change/3]). + +%%%----------------------------------------------------------------- +%%% This module implements a process that handles reference counters for +%%% various erts or other kernel resources which needs reference counting. +%%% +%%% Should not be documented nor used directly by user applications. +%%%----------------------------------------------------------------- +start_link() -> + gen_server:start_link({local,kernel_refc}, kernel_refc, [], []). + +-spec scheduler_wall_time(boolean()) -> boolean(). +scheduler_wall_time(Bool) -> + gen_server:call(kernel_refc, {scheduler_wall_time, self(), Bool}, infinity). + +%%----------------------------------------------------------------- +%% Callback functions from gen_server +%%----------------------------------------------------------------- + +-spec init([]) -> {'ok', map()}. + +init([]) -> + resource(scheduler_wall_time, false), + {ok, #{scheduler_wall_time=>#{}}}. + +-spec handle_call(term(), term(), State) -> {'reply', term(), State}. +handle_call({What, Who, false}, _From, State) -> + {Reply, Cnt} = do_stop(What, maps:get(What, State), Who), + {reply, Reply, State#{What:=Cnt}}; +handle_call({What, Who, true}, _From, State) -> + {Reply, Cnt} = do_start(What, maps:get(What, State), Who), + {reply, Reply, State#{What:=Cnt}}; +handle_call(_, _From, State) -> + {reply, badarg, State}. + +-spec handle_cast(term(), State) -> {'noreply', State}. +handle_cast(_, State) -> + {noreply, State}. + +-spec handle_info(term(), State) -> {'noreply', State}. +handle_info({'DOWN', _Ref, process, Pid, _}, State) -> + Cleanup = fun(Resource, Cnts) -> + cleanup(Resource, Cnts, Pid) + end, + {noreply, maps:map(Cleanup, State)}; +handle_info(_, State) -> + {noreply, State}. + +-spec terminate(term(), term()) -> 'ok'. +terminate(_Reason, _State) -> + ok. + +-spec code_change(term(), State, term()) -> {'ok', State}. +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +%%----------------------------------------------------------------- +%% Internal functions +%%----------------------------------------------------------------- + +do_start(Resource, Cnt, Pid) -> + case maps:get(Pid, Cnt, undefined) of + undefined -> + Ref = erlang:monitor(process, Pid), + case any(Cnt) of + true -> + {true, Cnt#{Pid=>{1, Ref}}}; + false -> + resource(Resource, true), + {false, Cnt#{Pid=>{1, Ref}}} + end; + {N, Ref} -> + {true, Cnt#{Pid=>{N+1, Ref}}} + end. + +do_stop(Resource, Cnt0, Pid) -> + case maps:get(Pid, Cnt0, undefined) of + undefined -> + {any(Cnt0), Cnt0}; + {1, Ref} -> + erlang:demonitor(Ref, [flush]), + Cnt = maps:remove(Pid, Cnt0), + case any(Cnt) of + true -> + {true, Cnt}; + false -> + resource(Resource, false), + {true, Cnt} + end; + {N, Ref} -> + {true, Cnt0#{Pid=>{N-1, Ref}}} + end. + +cleanup(Resource, Cnt0, Pid) -> + case maps:is_key(Pid, Cnt0) of + true -> + Cnt = maps:remove(Pid, Cnt0), + case any(Cnt) of + true -> + Cnt; + false -> + resource(Resource, false), + Cnt + end; + false -> + Cnt0 + end. + +any(Cnt) -> maps:size(Cnt) > 0. + +resource(scheduler_wall_time, Enable) -> + _ = erts_internal:scheduler_wall_time(Enable). 
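
kernel_refc keeps one monitor-backed counter per calling process and only toggles the underlying resource when the total count moves between zero and non-zero. The module is explicitly internal ("should not be documented nor used directly"), so the following is only a sketch of the counting semantics as implemented above, assuming nothing else has the resource enabled, not a recommended API:

    %% Return values follow do_start/3 and do_stop/3 above: the very first
    %% enable reports that the resource was previously off (false),
    %% everything after that reports true.
    demo() ->
        false = kernel_refc:scheduler_wall_time(true),   % 0 -> 1: resource turned on
        true  = kernel_refc:scheduler_wall_time(true),   % same pid, count 1 -> 2
        true  = kernel_refc:scheduler_wall_time(false),  % count 2 -> 1, still on
        true  = kernel_refc:scheduler_wall_time(false),  % 1 -> 0: resource turned off
        ok.

If the process dies instead of releasing its references, the 'DOWN' clause in handle_info/2 performs the same cleanup.
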
diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl new file mode 100644 index 0000000000..6762998d4f --- /dev/null +++ b/lib/kernel/src/logger.erl @@ -0,0 +1,948 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger). + +%% Log interface +-export([emergency/1,emergency/2,emergency/3, + alert/1,alert/2,alert/3, + critical/1,critical/2,critical/3, + error/1,error/2,error/3, + warning/1,warning/2,warning/3, + notice/1,notice/2,notice/3, + info/1,info/2,info/3, + debug/1,debug/2,debug/3]). +-export([log/2,log/3,log/4]). + +%% Called by macro +-export([allow/2,macro_log/3,macro_log/4,macro_log/5,add_default_metadata/1]). + +%% Configuration +-export([add_handler/3, remove_handler/1, + add_primary_filter/2, add_handler_filter/3, + remove_primary_filter/1, remove_handler_filter/2, + set_module_level/2, + unset_module_level/1, unset_module_level/0, + set_application_level/2, unset_application_level/1, + get_module_level/0, get_module_level/1, + set_primary_config/1, set_primary_config/2, + set_handler_config/2, set_handler_config/3, + update_primary_config/1, + update_handler_config/2, update_handler_config/3, + update_formatter_config/2, update_formatter_config/3, + get_primary_config/0, get_handler_config/1, + get_handler_config/0, get_handler_ids/0, get_config/0, + add_handlers/1]). + +%% Private configuration +-export([internal_init_logger/0]). + +%% Misc +-export([compare_levels/2]). +-export([set_process_metadata/1, update_process_metadata/1, + unset_process_metadata/0, get_process_metadata/0]). + +%% Basic report formatting +-export([format_report/1, format_otp_report/1]). + +-export([internal_log/2,filter_stacktrace/2]). + +-include("logger_internal.hrl"). +-include("logger.hrl"). + +%%%----------------------------------------------------------------- +%%% Types +-type log_event() :: #{level:=level(), + msg:={io:format(),[term()]} | + {report,report()} | + {string,unicode:chardata()}, + meta:=metadata()}. +-type level() :: emergency | alert | critical | error | + warning | notice | info | debug. +-type report() :: map() | [{atom(),term()}]. +-type report_cb() :: fun((report()) -> {io:format(),[term()]}) | + fun((report(),report_cb_config()) -> unicode:chardata()). +-type report_cb_config() :: #{depth := pos_integer() | unlimited, + chars_limit := pos_integer() | unlimited, + single_line := boolean()}. +-type msg_fun() :: fun((term()) -> {io:format(),[term()]} | + report() | + unicode:chardata()). +-type metadata() :: #{pid => pid(), + gl => pid(), + time => timestamp(), + mfa => {module(),atom(),non_neg_integer()}, + file => file:filename(), + line => non_neg_integer(), + domain => [atom()], + report_cb => report_cb(), + atom() => term()}. +-type location() :: #{mfa := {module(),atom(),non_neg_integer()}, + file := file:filename(), + line := non_neg_integer()}. +-type handler_id() :: atom(). +-type filter_id() :: atom(). 
+-type filter() :: {fun((log_event(),filter_arg()) -> + filter_return()),filter_arg()}. +-type filter_arg() :: term(). +-type filter_return() :: stop | ignore | log_event(). +-type primary_config() :: #{level => level() | all | none, + filter_default => log | stop, + filters => [{filter_id(),filter()}]}. +-type handler_config() :: #{id => handler_id(), + config => term(), + level => level() | all | none, + module => module(), + filter_default => log | stop, + filters => [{filter_id(),filter()}], + formatter => {module(),formatter_config()}}. +-type timestamp() :: integer(). +-type formatter_config() :: #{atom() => term()}. + +-type config_handler() :: {handler, handler_id(), module(), handler_config()}. + +-type config_logger() :: [{handler,default,undefined} | + config_handler() | + {filters,log | stop,[{filter_id(),filter()}]} | + {module_level,level(),[module()]}]. + +-export_type([log_event/0, + level/0, + report/0, + report_cb/0, + report_cb_config/0, + msg_fun/0, + metadata/0, + primary_config/0, + handler_config/0, + handler_id/0, + filter_id/0, + filter/0, + filter_arg/0, + filter_return/0, + config_handler/0, + formatter_config/0]). + +%%%----------------------------------------------------------------- +%%% API +emergency(X) -> + log(emergency,X). +emergency(X,Y) -> + log(emergency,X,Y). +emergency(X,Y,Z) -> + log(emergency,X,Y,Z). + +alert(X) -> + log(alert,X). +alert(X,Y) -> + log(alert,X,Y). +alert(X,Y,Z) -> + log(alert,X,Y,Z). + +critical(X) -> + log(critical,X). +critical(X,Y) -> + log(critical,X,Y). +critical(X,Y,Z) -> + log(critical,X,Y,Z). + +error(X) -> + log(error,X). +error(X,Y) -> + log(error,X,Y). +error(X,Y,Z) -> + log(error,X,Y,Z). + +warning(X) -> + log(warning,X). +warning(X,Y) -> + log(warning,X,Y). +warning(X,Y,Z) -> + log(warning,X,Y,Z). + +notice(X) -> + log(notice,X). +notice(X,Y) -> + log(notice,X,Y). +notice(X,Y,Z) -> + log(notice,X,Y,Z). + +info(X) -> + log(info,X). +info(X,Y) -> + log(info,X,Y). +info(X,Y,Z) -> + log(info,X,Y,Z). + +debug(X) -> + log(debug,X). +debug(X,Y) -> + log(debug,X,Y). +debug(X,Y,Z) -> + log(debug,X,Y,Z). + +-spec log(Level,StringOrReport) -> ok when + Level :: level(), + StringOrReport :: unicode:chardata() | report(). +log(Level, StringOrReport) -> + do_log(Level,StringOrReport,#{}). + +-spec log(Level,StringOrReport,Metadata) -> ok when + Level :: level(), + StringOrReport :: unicode:chardata() | report(), + Metadata :: metadata(); + (Level,Format,Args) -> ok when + Level :: level(), + Format :: io:format(), + Args ::[term()]; + (Level,Fun,FunArgs) -> ok when + Level :: level(), + Fun :: msg_fun(), + FunArgs :: term(). +log(Level, StringOrReport, Metadata) + when is_map(Metadata), not is_function(StringOrReport) -> + do_log(Level,StringOrReport,Metadata); +log(Level, FunOrFormat, Args) -> + do_log(Level,{FunOrFormat,Args},#{}). + +-spec log(Level,Format, Args, Metadata) -> ok when + Level :: level(), + Format :: io:format(), + Args :: [term()], + Metadata :: metadata(); + (Level,Fun,FunArgs,Metadata) -> ok when + Level :: level(), + Fun :: msg_fun(), + FunArgs :: term(), + Metadata :: metadata(). +log(Level, FunOrFormat, Args, Metadata) -> + do_log(Level,{FunOrFormat,Args},Metadata). + +-spec allow(Level,Module) -> boolean() when + Level :: level(), + Module :: module(). +allow(Level,Module) when ?IS_LEVEL(Level), is_atom(Module) -> + logger_config:allow(?LOGGER_TABLE,Level,Module). 
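
A few usage sketches of the API above; the variable and domain names (Filename, Reason, Peer, N, Users, my_app) are made-up examples. A call takes a format string with arguments, a report (map or key-value list), or a fun that is evaluated only if the event passes the level check:

    logger:error("open failed for ~ts: ~p", [Filename, Reason]),
    logger:notice(#{event => connection_up, peer => Peer}),
    logger:info("~p clients connected", [N], #{domain => [my_app, stats]}),
    %% The fun is called only when the event is actually allowed
    logger:debug(fun(Us) -> {"users: ~p", [maps:keys(Us)]} end, Users)
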
+ + +-spec macro_log(Location,Level,StringOrReport) -> ok when + Location :: location(), + Level :: level(), + StringOrReport :: unicode:chardata() | report(). +macro_log(Location,Level,StringOrReport) -> + log_allowed(Location,Level,StringOrReport,#{}). + +-spec macro_log(Location,Level,StringOrReport,Meta) -> ok when + Location :: location(), + Level :: level(), + StringOrReport :: unicode:chardata() | report(), + Meta :: metadata(); + (Location,Level,Format,Args) -> ok when + Location :: location(), + Level :: level(), + Format :: io:format(), + Args ::[term()]; + (Location,Level,Fun,FunArgs) -> ok when + Location :: location(), + Level :: level(), + Fun :: msg_fun(), + FunArgs :: term(). +macro_log(Location,Level,StringOrReport,Meta) + when is_map(Meta), not is_function(StringOrReport) -> + log_allowed(Location,Level,StringOrReport,Meta); +macro_log(Location,Level,FunOrFormat,Args) -> + log_allowed(Location,Level,{FunOrFormat,Args},#{}). + +-spec macro_log(Location,Level,Format,Args,Meta) -> ok when + Location :: location(), + Level :: level(), + Format :: io:format(), + Args ::[term()], + Meta :: metadata(); + (Location,Level,Fun,FunArgs,Meta) -> ok when + Location :: location(), + Level :: level(), + Fun :: msg_fun(), + FunArgs :: term(), + Meta :: metadata(). +macro_log(Location,Level,FunOrFormat,Args,Meta) -> + log_allowed(Location,Level,{FunOrFormat,Args},Meta). + +-spec format_otp_report(Report) -> FormatArgs when + Report :: report(), + FormatArgs :: {io:format(),[term()]}. +format_otp_report(#{label:=_,report:=Report}) -> + format_report(Report); +format_otp_report(Report) -> + format_report(Report). + +-spec format_report(Report) -> FormatArgs when + Report :: report(), + FormatArgs :: {io:format(),[term()]}. +format_report(Report) when is_map(Report) -> + format_report(maps:to_list(Report)); +format_report(Report) when is_list(Report) -> + case lists:flatten(Report) of + [] -> + {"~tp",[[]]}; + FlatList -> + case string_p1(FlatList) of + true -> + {"~ts",[FlatList]}; + false -> + format_term_list(Report,[],[]) + end + end; +format_report(Report) -> + {"~tp",[Report]}. + +format_term_list([{Tag,Data}|T],Format,Args) -> + PorS = case string_p(Data) of + true -> "s"; + false -> "p" + end, + format_term_list(T,[" ~tp: ~t"++PorS|Format],[Data,Tag|Args]); +format_term_list([Data|T],Format,Args) -> + format_term_list(T,[" ~tp"|Format],[Data|Args]); +format_term_list([],Format,Args) -> + {lists:flatten(lists:join($\n,lists:reverse(Format))),lists:reverse(Args)}. + +string_p(List) when is_list(List) -> + string_p1(lists:flatten(List)); +string_p(_) -> + false. + +string_p1([]) -> + false; +string_p1(FlatList) -> + io_lib:printable_unicode_list(FlatList). + +internal_log(Level,Term) when is_atom(Level) -> + erlang:display_string("Logger - "++ atom_to_list(Level) ++ ": "), + erlang:display(Term). + +%%%----------------------------------------------------------------- +%%% Configuration +-spec add_primary_filter(FilterId,Filter) -> ok | {error,term()} when + FilterId :: filter_id(), + Filter :: filter(). +add_primary_filter(FilterId,Filter) -> + logger_server:add_filter(primary,{FilterId,Filter}). + +-spec add_handler_filter(HandlerId,FilterId,Filter) -> ok | {error,term()} when + HandlerId :: handler_id(), + FilterId :: filter_id(), + Filter :: filter(). +add_handler_filter(HandlerId,FilterId,Filter) -> + logger_server:add_filter(HandlerId,{FilterId,Filter}). + + +-spec remove_primary_filter(FilterId) -> ok | {error,term()} when + FilterId :: filter_id(). 
+remove_primary_filter(FilterId) -> + logger_server:remove_filter(primary,FilterId). + +-spec remove_handler_filter(HandlerId,FilterId) -> ok | {error,term()} when + HandlerId :: handler_id(), + FilterId :: filter_id(). +remove_handler_filter(HandlerId,FilterId) -> + logger_server:remove_filter(HandlerId,FilterId). + +-spec add_handler(HandlerId,Module,Config) -> ok | {error,term()} when + HandlerId :: handler_id(), + Module :: module(), + Config :: handler_config(). +add_handler(HandlerId,Module,Config) -> + logger_server:add_handler(HandlerId,Module,Config). + +-spec remove_handler(HandlerId) -> ok | {error,term()} when + HandlerId :: handler_id(). +remove_handler(HandlerId) -> + logger_server:remove_handler(HandlerId). + +-spec set_primary_config(level,Level) -> ok | {error,term()} when + Level :: level() | all | none; + (filter_default,FilterDefault) -> ok | {error,term()} when + FilterDefault :: log | stop; + (filters,Filters) -> ok | {error,term()} when + Filters :: [{filter_id(),filter()}]. +set_primary_config(Key,Value) -> + logger_server:set_config(primary,Key,Value). + +-spec set_primary_config(Config) -> ok | {error,term()} when + Config :: primary_config(). +set_primary_config(Config) -> + logger_server:set_config(primary,Config). + +-spec set_handler_config(HandlerId,level,Level) -> Return when + HandlerId :: handler_id(), + Level :: level() | all | none, + Return :: ok | {error,term()}; + (HandlerId,filter_default,FilterDefault) -> Return when + HandlerId :: handler_id(), + FilterDefault :: log | stop, + Return :: ok | {error,term()}; + (HandlerId,filters,Filters) -> Return when + HandlerId :: handler_id(), + Filters :: [{filter_id(),filter()}], + Return :: ok | {error,term()}; + (HandlerId,formatter,Formatter) -> Return when + HandlerId :: handler_id(), + Formatter :: {module(), formatter_config()}, + Return :: ok | {error,term()}; + (HandlerId,config,Config) -> Return when + HandlerId :: handler_id(), + Config :: term(), + Return :: ok | {error,term()}. +set_handler_config(HandlerId,Key,Value) -> + logger_server:set_config(HandlerId,Key,Value). + +-spec set_handler_config(HandlerId,Config) -> ok | {error,term()} when + HandlerId :: handler_id(), + Config :: handler_config(). +set_handler_config(HandlerId,Config) -> + logger_server:set_config(HandlerId,Config). + +-spec update_primary_config(Config) -> ok | {error,term()} when + Config :: primary_config(). +update_primary_config(Config) -> + logger_server:update_config(primary,Config). + +-spec update_handler_config(HandlerId,level,Level) -> Return when + HandlerId :: handler_id(), + Level :: level() | all | none, + Return :: ok | {error,term()}; + (HandlerId,filter_default,FilterDefault) -> Return when + HandlerId :: handler_id(), + FilterDefault :: log | stop, + Return :: ok | {error,term()}; + (HandlerId,filters,Filters) -> Return when + HandlerId :: handler_id(), + Filters :: [{filter_id(),filter()}], + Return :: ok | {error,term()}; + (HandlerId,formatter,Formatter) -> Return when + HandlerId :: handler_id(), + Formatter :: {module(), formatter_config()}, + Return :: ok | {error,term()}; + (HandlerId,config,Config) -> Return when + HandlerId :: handler_id(), + Config :: term(), + Return :: ok | {error,term()}. +update_handler_config(HandlerId,Key,Value) -> + logger_server:update_config(HandlerId,Key,Value). + +-spec update_handler_config(HandlerId,Config) -> ok | {error,term()} when + HandlerId :: handler_id(), + Config :: handler_config(). 
+update_handler_config(HandlerId,Config) -> + logger_server:update_config(HandlerId,Config). + +-spec get_primary_config() -> Config when + Config :: primary_config(). +get_primary_config() -> + {ok,Config} = logger_config:get(?LOGGER_TABLE,primary), + maps:remove(handlers,Config). + +-spec get_handler_config(HandlerId) -> {ok,Config} | {error,term()} when + HandlerId :: handler_id(), + Config :: handler_config(). +get_handler_config(HandlerId) -> + case logger_config:get(?LOGGER_TABLE,HandlerId) of + {ok,#{module:=Module}=Config} -> + {ok,try Module:filter_config(Config) + catch _:_ -> Config + end}; + Error -> + Error + end. + +-spec get_handler_config() -> [Config] when + Config :: handler_config(). +get_handler_config() -> + [begin + {ok,Config} = get_handler_config(HandlerId), + Config + end || HandlerId <- get_handler_ids()]. + +-spec get_handler_ids() -> [HandlerId] when + HandlerId :: handler_id(). +get_handler_ids() -> + {ok,#{handlers:=HandlerIds}} = logger_config:get(?LOGGER_TABLE,primary), + HandlerIds. + +-spec update_formatter_config(HandlerId,FormatterConfig) -> + ok | {error,term()} when + HandlerId :: handler_id(), + FormatterConfig :: formatter_config(). +update_formatter_config(HandlerId,FormatterConfig) -> + logger_server:update_formatter_config(HandlerId,FormatterConfig). + +-spec update_formatter_config(HandlerId,Key,Value) -> + ok | {error,term()} when + HandlerId :: handler_id(), + Key :: atom(), + Value :: term(). +update_formatter_config(HandlerId,Key,Value) -> + logger_server:update_formatter_config(HandlerId,#{Key=>Value}). + +-spec set_module_level(Modules,Level) -> ok | {error,term()} when + Modules :: [module()] | module(), + Level :: level() | all | none. +set_module_level(Module,Level) when is_atom(Module) -> + set_module_level([Module],Level); +set_module_level(Modules,Level) -> + logger_server:set_module_level(Modules,Level). + +-spec unset_module_level(Modules) -> ok when + Modules :: [module()] | module(). +unset_module_level(Module) when is_atom(Module) -> + unset_module_level([Module]); +unset_module_level(Modules) -> + logger_server:unset_module_level(Modules). + +-spec unset_module_level() -> ok. +unset_module_level() -> + logger_server:unset_module_level(). + +-spec set_application_level(Application,Level) -> ok | {error, not_loaded} when + Application :: atom(), + Level :: level() | all | none. +set_application_level(App,Level) -> + case application:get_key(App, modules) of + {ok, Modules} -> + set_module_level(Modules, Level); + undefined -> + {error, {not_loaded, App}} + end. + +-spec unset_application_level(Application) -> ok | {error, not_loaded} when + Application :: atom(). +unset_application_level(App) -> + case application:get_key(App, modules) of + {ok, Modules} -> + unset_module_level(Modules); + undefined -> + {error, {not_loaded, App}} + end. + +-spec get_module_level(Modules) -> [{Module,Level}] when + Modules :: [Module] | Module, + Module :: module(), + Level :: level() | all | none. +get_module_level(Module) when is_atom(Module) -> + get_module_level([Module]); +get_module_level(Modules) when is_list(Modules) -> + [{M,L} || {M,L} <- get_module_level(), + lists:member(M,Modules)]. + +-spec get_module_level() -> [{Module,Level}] when + Module :: module(), + Level :: level() | all | none. +get_module_level() -> + logger_config:get_module_level(?LOGGER_TABLE). 
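
The configuration functions above compose as in this sketch (my_noisy_module is a made-up module name, and the standard default handler is assumed to be running); get_config/0, defined a little further down, returns the merged view:

    ok = logger:set_primary_config(level, notice),
    ok = logger:set_module_level([my_noisy_module], debug),
    ok = logger:set_handler_config(default, level, error),
    #{primary := #{level := notice},
      handlers := _Handlers,
      module_levels := [{my_noisy_module, debug}]} = logger:get_config()
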
+ +%%%----------------------------------------------------------------- +%%% Misc +-spec compare_levels(Level1,Level2) -> eq | gt | lt when + Level1 :: level(), + Level2 :: level(). +compare_levels(Level,Level) when ?IS_LEVEL(Level) -> + eq; +compare_levels(Level1,Level2) when ?IS_LEVEL(Level1), ?IS_LEVEL(Level2) -> + Int1 = logger_config:level_to_int(Level1), + Int2 = logger_config:level_to_int(Level2), + if Int1 < Int2 -> gt; + true -> lt + end; +compare_levels(Level1,Level2) -> + erlang:error(badarg,[Level1,Level2]). + +-spec set_process_metadata(Meta) -> ok when + Meta :: metadata(). +set_process_metadata(Meta) when is_map(Meta) -> + _ = put(?LOGGER_META_KEY,Meta), + ok; +set_process_metadata(Meta) -> + erlang:error(badarg,[Meta]). + +-spec update_process_metadata(Meta) -> ok when + Meta :: metadata(). +update_process_metadata(Meta) when is_map(Meta) -> + case get_process_metadata() of + undefined -> + set_process_metadata(Meta); + Meta0 when is_map(Meta0) -> + set_process_metadata(maps:merge(Meta0,Meta)), + ok + end; +update_process_metadata(Meta) -> + erlang:error(badarg,[Meta]). + +-spec get_process_metadata() -> Meta | undefined when + Meta :: metadata(). +get_process_metadata() -> + get(?LOGGER_META_KEY). + +-spec unset_process_metadata() -> ok. +unset_process_metadata() -> + _ = erase(?LOGGER_META_KEY), + ok. + +-spec get_config() -> #{primary=>primary_config(), + handlers=>[handler_config()], + module_levels=>[{module(),level() | all | none}]}. +get_config() -> + #{primary=>get_primary_config(), + handlers=>get_handler_config(), + module_levels=>lists:keysort(1,get_module_level())}. + +-spec internal_init_logger() -> ok | {error,term()}. +%% This function is responsible for config of the logger +%% This is done before add_handlers because we want the +%% logger settings to take effect before the kernel supervisor +%% tree is started. +internal_init_logger() -> + try + Env = get_logger_env(kernel), + check_logger_config(kernel,Env), + ok = logger:set_primary_config(level, get_logger_level()), + ok = logger:set_primary_config(filter_default, + get_primary_filter_default(Env)), + + [case logger:add_primary_filter(Id, Filter) of + ok -> ok; + {error, Reason} -> throw(Reason) + end || {Id, Filter} <- get_primary_filters(Env)], + + [case logger:set_module_level(Modules, Level) of + ok -> ok; + {error, Reason} -> throw(Reason) + end || {module_level, Level, Modules} <- Env], + + case logger:set_handler_config(simple,filters, + get_default_handler_filters()) of + ok -> ok; + {error,{not_found,simple}} -> ok + end, + + init_kernel_handlers(Env) + catch throw:Reason -> + ?LOG_ERROR("Invalid logger config: ~p", [Reason]), + {error, {bad_config, {kernel, Reason}}} + end. + +-spec init_kernel_handlers(config_logger()) -> ok | {error,term()}. +%% Setup the kernel environment variables to be correct +%% The actual handlers are started by a call to add_handlers. +init_kernel_handlers(Env) -> + try + case get_logger_type(Env) of + {ok,silent} -> + ok = logger:remove_handler(simple); + {ok,false} -> + ok; + {ok,Type} -> + init_default_config(Type,Env) + end + catch throw:Reason -> + ?LOG_ERROR("Invalid default handler config: ~p", [Reason]), + {error, {bad_config, {kernel, Reason}}} + end. + +-spec add_handlers(Application) -> ok | {error,term()} when + Application :: atom(); + (HandlerConfig) -> ok | {error,term()} when + HandlerConfig :: [config_handler()]. +%% This function is responsible for resolving the handler config +%% and then starting the correct handlers. 
This is done after the +%% kernel supervisor tree has been started as it needs the logger_sup. +add_handlers(App) when is_atom(App) -> + add_handlers(App,get_logger_env(App)); +add_handlers(HandlerConfig) -> + add_handlers(application:get_application(),HandlerConfig). + +add_handlers(App,HandlerConfig) -> + try + check_logger_config(App,HandlerConfig), + DefaultAdded = + lists:foldl( + fun({handler, default = Id, Module, Config}, _) + when not is_map_key(filters, Config) -> + %% The default handler should have a couple of extra filters + %% set on it by default. + DefConfig = #{ filter_default => stop, + filters => get_default_handler_filters()}, + setup_handler(Id, Module, maps:merge(DefConfig,Config)), + true; + ({handler, Id, Module, Config}, Default) -> + setup_handler(Id, Module, Config), + Default orelse Id == default; + (_,Default) -> Default + end, false, HandlerConfig), + %% If a default handler was added we try to remove the simple_logger + %% If the simple logger exists it will replay its log events + %% to the handler(s) added in the fold above. + [case logger:remove_handler(simple) of + ok -> ok; + {error,{not_found,simple}} -> ok + end || DefaultAdded], + ok + catch throw:Reason0 -> + Reason = + case App of + undefined -> Reason0; + _ -> {App,Reason0} + end, + ?LOG_ERROR("Invalid logger handler config: ~p", [Reason]), + {error, {bad_config, {handler, Reason}}} + end. + +setup_handler(Id, Module, Config) -> + case logger:add_handler(Id, Module, Config) of + ok -> ok; + {error, Reason} -> throw(Reason) + end. + +check_logger_config(_,[]) -> + ok; +check_logger_config(App,[{handler,_,_,_}|Env]) -> + check_logger_config(App,Env); +check_logger_config(kernel,[{handler,default,undefined}|Env]) -> + check_logger_config(kernel,Env); +check_logger_config(kernel,[{filters,_,_}|Env]) -> + check_logger_config(kernel,Env); +check_logger_config(kernel,[{module_level,_,_}|Env]) -> + check_logger_config(kernel,Env); +check_logger_config(_,Bad) -> + throw(Bad). + +-spec get_logger_type(config_logger()) -> + {ok, standard_io | false | silent | + {file, file:name_all()} | + {file, file:name_all(), [file:mode()]}}. +get_logger_type(Env) -> + case application:get_env(kernel, error_logger) of + {ok, tty} -> + {ok, standard_io}; + {ok, {file, File}} when is_list(File) -> + {ok, {file, File}}; + {ok, false} -> + {ok, false}; + {ok, silent} -> + {ok, silent}; + undefined -> + case lists:member({handler,default,undefined}, Env) of + true -> + {ok, false}; + false -> + {ok, standard_io} % default value + end; + {ok, Bad} -> + throw({error_logger, Bad}) + end. + +get_logger_level() -> + case application:get_env(kernel,logger_level,info) of + Level when ?IS_LEVEL(Level); Level=:=all; Level=:=none -> + Level; + Level -> + throw({logger_level, Level}) + end. + +get_primary_filter_default(Env) -> + case lists:keyfind(filters,1,Env) of + {filters,Default,_} -> + Default; + false -> + log + end. + +get_primary_filters(Env) -> + case [F || F={filters,_,_} <- Env] of + [{filters,_,Filters}] -> + case lists:all(fun({_,_}) -> true; (_) -> false end,Filters) of + true -> Filters; + false -> throw({invalid_filters,Filters}) + end; + [] -> []; + _ -> throw({multiple_filters,Env}) + end. 
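
The environment walked by check_logger_config/2 and get_logger_env/1 above is the kernel application's logger parameter. A hedged sys.config sketch with made-up file name, module and filter id (filter funs in a config file must be literal fun M:F/A references):

    [{kernel,
      [{logger_level, info},
       {logger,
        [{handler, default, logger_std_h,
          #{config => #{type => {file, "log/erlang.log"}}}},
         {module_level, debug, [my_app_db]},
         {filters, log,
          [{no_progress, {fun logger_filters:progress/2, stop}}]}]}]}]
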
+ +%% This function looks at the kernel logger environment +%% and updates it so that the correct logger is configured +init_default_config(Type,Env) when Type==standard_io; + Type==standard_error; + element(1,Type)==file -> + DefaultFormatter = #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}, + DefaultConfig = DefaultFormatter#{config=>#{type=>Type}}, + NewLoggerEnv = + case lists:keyfind(default, 2, Env) of + {handler, default, logger_std_h, Config} -> + %% Only want to add the logger_std_h config + %% if not configured by user AND the default + %% handler is still the logger_std_h. + lists:keyreplace(default, 2, Env, + {handler, default, logger_std_h, + maps:merge(DefaultConfig,Config)}); + {handler, default, Module,Config} -> + %% Add default formatter. The point of this + %% is to get the expected formatter config + %% for the default handler, since this + %% differs from the default values that + %% logger_formatter itself adds. + lists:keyreplace(default, 2, Env, + {handler, default, Module, + maps:merge(DefaultFormatter,Config)}); + _ -> + %% Nothing has been configured, use default + [{handler, default, logger_std_h, DefaultConfig} | Env] + end, + application:set_env(kernel, logger, NewLoggerEnv, [{timeout,infinity}]). + +get_default_handler_filters() -> + case application:get_env(kernel, logger_sasl_compatible, false) of + true -> + ?DEFAULT_HANDLER_FILTERS([otp]); + false -> + ?DEFAULT_HANDLER_FILTERS([otp,sasl]) + end. + +get_logger_env(App) -> + application:get_env(App, logger, []). + +%%%----------------------------------------------------------------- +%%% Internal +do_log(Level,Msg,#{mfa:={Module,_,_}}=Meta) -> + case logger_config:allow(?LOGGER_TABLE,Level,Module) of + true -> + log_allowed(#{},Level,Msg,Meta); + false -> + ok + end; +do_log(Level,Msg,Meta) -> + case logger_config:allow(?LOGGER_TABLE,Level) of + true -> + log_allowed(#{},Level,Msg,Meta); + false -> + ok + end. + +-spec log_allowed(Location,Level,Msg,Meta) -> ok when + Location :: location() | #{}, + Level :: level(), + Msg :: {msg_fun(),term()} | + {io:format(),[term()]} | + report() | + unicode:chardata(), + Meta :: metadata(). +log_allowed(Location,Level,{Fun,FunArgs},Meta) when is_function(Fun,1) -> + try Fun(FunArgs) of + Msg={Format,Args} when is_list(Format), is_list(Args) -> + log_allowed(Location,Level,Msg,Meta); + Report when ?IS_REPORT(Report) -> + log_allowed(Location,Level,Report,Meta); + String when ?IS_STRING(String) -> + log_allowed(Location,Level,String,Meta); + Other -> + log_allowed(Location,Level, + {"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{Fun,FunArgs},Other]}, + Meta) + catch C:R -> + log_allowed(Location,Level, + {"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{Fun,FunArgs},{C,R}]}, + Meta) + end; +log_allowed(Location,Level,Msg,Meta0) when is_map(Meta0) -> + %% Metadata priorities are: + %% Location (added in API macros) - will be overwritten by process + %% metadata (set by set_process_metadata/1), which in turn will be + %% overwritten by the metadata given as argument in the log call + %% (function or macro). + Meta = add_default_metadata( + maps:merge(Location,maps:merge(proc_meta(),Meta0))), + case node(maps:get(gl,Meta)) of + Node when Node=/=node() -> + log_remote(Node,Level,Msg,Meta), + do_log_allowed(Level,Msg,Meta); + _ -> + do_log_allowed(Level,Msg,Meta) + end. 
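
The metadata layering described in the comment above (location, then process metadata, then per-call metadata) can be seen in a sketch like this, where request_id set on the process is overridden by the value given in the call (ReqId, OverrideId and Peer are placeholders):

    %% Location data (mfa, file, line) is added by the ?LOG_* macros.
    ok = logger:set_process_metadata(#{request_id => ReqId}),
    %% The per-call map wins on key conflicts:
    logger:info("request handled", #{request_id => OverrideId, peer => Peer})
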
+ +do_log_allowed(Level,{Format,Args}=Msg,Meta) + when ?IS_LEVEL(Level), + is_list(Format), + is_list(Args), + is_map(Meta) -> + logger_backend:log_allowed(#{level=>Level,msg=>Msg,meta=>Meta},tid()); +do_log_allowed(Level,Report,Meta) + when ?IS_LEVEL(Level), + ?IS_REPORT(Report), + is_map(Meta) -> + logger_backend:log_allowed(#{level=>Level,msg=>{report,Report},meta=>Meta}, + tid()); +do_log_allowed(Level,String,Meta) + when ?IS_LEVEL(Level), + ?IS_STRING(String), + is_map(Meta) -> + logger_backend:log_allowed(#{level=>Level,msg=>{string,String},meta=>Meta}, + tid()). +tid() -> + ets:whereis(?LOGGER_TABLE). + +log_remote(Node,Level,{Format,Args},Meta) -> + log_remote(Node,{log,Level,Format,Args,Meta}); +log_remote(Node,Level,Msg,Meta) -> + log_remote(Node,{log,Level,Msg,Meta}). + +log_remote(Node,Request) -> + {logger,Node} ! Request, + ok. + +add_default_metadata(Meta) -> + add_default_metadata([pid,gl,time],Meta). + +add_default_metadata([Key|Keys],Meta) -> + case maps:is_key(Key,Meta) of + true -> + add_default_metadata(Keys,Meta); + false -> + add_default_metadata(Keys,Meta#{Key=>default(Key)}) + end; +add_default_metadata([],Meta) -> + Meta. + +proc_meta() -> + case get_process_metadata() of + ProcMeta when is_map(ProcMeta) -> ProcMeta; + _ -> #{} + end. + +default(pid) -> self(); +default(gl) -> group_leader(); +default(time) -> erlang:system_time(microsecond). + +%% Remove everything upto and including this module from the stacktrace +filter_stacktrace(Module,[{Module,_,_,_}|_]) -> + []; +filter_stacktrace(Module,[H|T]) -> + [H|filter_stacktrace(Module,T)]; +filter_stacktrace(_,[]) -> + []. diff --git a/lib/kernel/src/logger_backend.erl b/lib/kernel/src/logger_backend.erl new file mode 100644 index 0000000000..432c671afd --- /dev/null +++ b/lib/kernel/src/logger_backend.erl @@ -0,0 +1,133 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_backend). + +-export([log_allowed/2]). + +-include("logger_internal.hrl"). + +-define(OWN_KEYS,[level,filters,filter_default,handlers]). + +%%%----------------------------------------------------------------- +%%% The default logger backend +log_allowed(Log, Tid) -> + {ok,Config} = logger_config:get(Tid,primary), + Filters = maps:get(filters,Config,[]), + case apply_filters(primary,Log,Filters,Config) of + stop -> + ok; + Log1 -> + Handlers = maps:get(handlers,Config,[]), + call_handlers(Log1,Handlers,Tid) + end, + ok. 
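
The filters applied by apply_filters/4 above are {Fun, Extra} pairs; a filter fun returns stop, ignore, or a (possibly rewritten) log event. A sketch of a custom primary filter that drops debug events from a hypothetical my_app trace domain:

    DropTrace =
        fun(#{level := debug, meta := #{domain := [my_app, trace | _]}}, _Extra) ->
                stop;
           (_LogEvent, _Extra) ->
                ignore
        end,
    ok = logger:add_primary_filter(drop_my_app_trace, {DropTrace, []})
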
+ +call_handlers(#{level:=Level}=Log,[Id|Handlers],Tid) -> + case logger_config:get(Tid,Id,Level) of + {ok,#{module:=Module}=Config} -> + Filters = maps:get(filters,Config,[]), + case apply_filters(Id,Log,Filters,Config) of + stop -> + ok; + Log1 -> + Config1 = maps:without(?OWN_KEYS,Config), + try Module:log(Log1,Config1) + catch C:R:S -> + case logger:remove_handler(Id) of + ok -> + logger:internal_log( + error,{removed_failing_handler,Id}), + ?LOG_INTERNAL( + debug, + [{logger,removed_failing_handler}, + {handler,{Id,Module}}, + {log_event,Log1}, + {config,Config1}, + {reason,{C,R,filter_stacktrace(S)}}]); + {error,{not_found,_}} -> + %% Probably already removed by other client + %% Don't report again + ok; + {error,Reason} -> + ?LOG_INTERNAL( + debug, + [{logger,remove_handler_failed}, + {reason,Reason}]) + end + end + end; + _ -> + ok + end, + call_handlers(Log,Handlers,Tid); +call_handlers(_Log,[],_Tid) -> + ok. + +apply_filters(Owner,Log,Filters,Config) -> + case do_apply_filters(Owner,Log,Filters,ignore) of + stop -> + stop; + ignore -> + case maps:get(filter_default,Config) of + log -> + Log; + stop -> + stop + end; + Log1 -> + Log1 + end. + +do_apply_filters(Owner,Log,[{_Id,{FilterFun,FilterArgs}}=Filter|Filters],State) -> + try FilterFun(Log,FilterArgs) of + stop -> + stop; + ignore -> + do_apply_filters(Owner,Log,Filters,State); + Log1=#{level:=Level,msg:=Msg,meta:=Meta} + when is_atom(Level), ?IS_MSG(Msg), is_map(Meta) -> + do_apply_filters(Owner,Log1,Filters,log); + Bad -> + handle_filter_failed(Filter,Owner,Log,{bad_return_value,Bad}) + catch C:R:S -> + handle_filter_failed(Filter,Owner,Log,{C,R,filter_stacktrace(S)}) + end; +do_apply_filters(_Owner,_Log,[],ignore) -> + ignore; +do_apply_filters(_Owner,Log,[],log) -> + Log. + +handle_filter_failed({Id,_}=Filter,Owner,Log,Reason) -> + case logger_server:remove_filter(Owner,Id) of + ok -> + logger:internal_log(error,{removed_failing_filter,Id}), + ?LOG_INTERNAL(debug, + [{logger,removed_failing_filter}, + {filter,Filter}, + {owner,Owner}, + {log_event,Log}, + {reason,Reason}]); + _ -> + ok + end, + ignore. + +filter_stacktrace(Stacktrace) -> + logger:filter_stacktrace(?MODULE,Stacktrace). diff --git a/lib/kernel/src/logger_config.erl b/lib/kernel/src/logger_config.erl new file mode 100644 index 0000000000..6bfe658552 --- /dev/null +++ b/lib/kernel/src/logger_config.erl @@ -0,0 +1,150 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_config). + +-export([new/1,delete/2, + exist/2, + allow/2,allow/3, + get/2, get/3, + create/3, set/3, + set_module_level/3,unset_module_level/2, + get_module_level/1,cache_module_level/2, + level_to_int/1]). + +-include("logger_internal.hrl"). + +new(Name) -> + _ = ets:new(Name,[set,protected,named_table,{write_concurrency,true}]), + ets:whereis(Name). + +delete(Tid,Id) -> + ets:delete(Tid,table_key(Id)). 
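
call_handlers/3 above only requires a handler module to export log/2 (plus the callbacks used when the handler is added or removed). A minimal hypothetical handler that prints every event through its configured formatter could look like this:

    -module(my_console_h).   %% made-up example module
    -export([adding_handler/1, removing_handler/1, log/2]).

    adding_handler(Config) -> {ok, Config}.
    removing_handler(_Config) -> ok.

    log(LogEvent, #{formatter := {FModule, FConfig}}) ->
        io:put_chars(user, FModule:format(LogEvent, FConfig)).

    %% e.g. logger:add_handler(my_console, my_console_h, #{level => info})
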
+ +allow(Tid,Level,Module) -> + LevelInt = level_to_int(Level), + case ets:lookup(Tid,Module) of + [{Module,{ModLevel,cached}}] when is_integer(ModLevel), + LevelInt =< ModLevel -> + true; + [{Module,ModLevel}] when is_integer(ModLevel), + LevelInt =< ModLevel -> + true; + [] -> + logger_server:cache_module_level(Module), + allow(Tid,Level); + _ -> + false + end. + +allow(Tid,Level) -> + GlobalLevelInt = ets:lookup_element(Tid,?PRIMARY_KEY,2), + level_to_int(Level) =< GlobalLevelInt. + +exist(Tid,What) -> + ets:member(Tid,table_key(What)). + +get(Tid,What) -> + case ets:lookup(Tid,table_key(What)) of + [{_,_,Config}] -> + {ok,Config}; + [] -> + {error,{not_found,What}} + end. + +get(Tid,What,Level) -> + MS = [{{table_key(What),'$1','$2'}, + [{'>=','$1',level_to_int(Level)}], + ['$2']}], + case ets:select(Tid,MS) of + [] -> error; + [Data] -> {ok,Data} + end. + +create(Tid,What,Config) -> + LevelInt = level_to_int(maps:get(level,Config)), + ets:insert(Tid,{table_key(What),LevelInt,Config}). + +set(Tid,What,Config) -> + LevelInt = level_to_int(maps:get(level,Config)), + %% Should do this only if the level has actually changed. Possibly + %% overwrite instead of delete? + case What of + primary -> + _ = ets:select_delete(Tid,[{{'_',{'$1',cached}}, + [{'=/=','$1',LevelInt}], + [true]}]), + ok; + _ -> + ok + end, + ets:update_element(Tid,table_key(What),[{2,LevelInt},{3,Config}]), + ok. + +set_module_level(Tid,Modules,Level) -> + LevelInt = level_to_int(Level), + [ets:insert(Tid,{Module,LevelInt}) || Module <- Modules], + ok. + +%% should possibly overwrite instead of delete? +unset_module_level(Tid,all) -> + MS = [{{'$1','$2'},[{is_atom,'$1'},{is_integer,'$2'}],[true]}], + _ = ets:select_delete(Tid,MS), + ok; +unset_module_level(Tid,Modules) -> + [ets:delete(Tid,Module) || Module <- Modules], + ok. + +get_module_level(Tid) -> + MS = [{{'$1','$2'},[{is_atom,'$1'},{is_integer,'$2'}],[{{'$1','$2'}}]}], + Modules = ets:select(Tid,MS), + lists:sort([{M,int_to_level(L)} || {M,L} <- Modules]). + +cache_module_level(Tid,Module) -> + GlobalLevelInt = ets:lookup_element(Tid,?PRIMARY_KEY,2), + ets:insert_new(Tid,{Module,{GlobalLevelInt,cached}}), + ok. + +level_to_int(none) -> ?LOG_NONE; +level_to_int(emergency) -> ?EMERGENCY; +level_to_int(alert) -> ?ALERT; +level_to_int(critical) -> ?CRITICAL; +level_to_int(error) -> ?ERROR; +level_to_int(warning) -> ?WARNING; +level_to_int(notice) -> ?NOTICE; +level_to_int(info) -> ?INFO; +level_to_int(debug) -> ?DEBUG; +level_to_int(all) -> ?LOG_ALL. + +int_to_level(?LOG_NONE) -> none; +int_to_level(?EMERGENCY) -> emergency; +int_to_level(?ALERT) -> alert; +int_to_level(?CRITICAL) -> critical; +int_to_level(?ERROR) -> error; +int_to_level(?WARNING) -> warning; +int_to_level(?NOTICE) -> notice; +int_to_level(?INFO) -> info; +int_to_level(?DEBUG) -> debug; +int_to_level(?LOG_ALL) -> all. + +%%%----------------------------------------------------------------- +%%% Internal + +table_key(primary) -> ?PRIMARY_KEY; +table_key(HandlerId) -> {?HANDLER_KEY,HandlerId}. diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl new file mode 100644 index 0000000000..2a81458ec8 --- /dev/null +++ b/lib/kernel/src/logger_disk_log_h.erl @@ -0,0 +1,740 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_disk_log_h). + +-behaviour(gen_server). + +-include("logger.hrl"). +-include("logger_internal.hrl"). +-include("logger_h_common.hrl"). + +%%% API +-export([start_link/3, info/1, filesync/1, reset/1]). + +%% gen_server callbacks +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +%% logger callbacks +-export([log/2, adding_handler/1, removing_handler/1, changing_config/3, + filter_config/1]). + +%% handler internal +-export([log_handler_info/4]). + +%%%=================================================================== +%%% API +%%%=================================================================== + +%%%----------------------------------------------------------------- +%%% Start a disk_log handler process and link to caller. +%%% This function is called by the kernel supervisor when this +%%% handler process gets added (as a result of calling add/3). +-spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when + Name :: atom(), + Config :: logger:handler_config(), + HandlerState :: map(), + Pid :: pid(), + Reason :: term(). + +start_link(Name, Config, HandlerState) -> + proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]). + +%%%----------------------------------------------------------------- +%%% +-spec filesync(Name) -> ok | {error,Reason} when + Name :: atom(), + Reason :: handler_busy | {badarg,term()}. + +filesync(Name) when is_atom(Name) -> + try + gen_server:call(?name_to_reg_name(?MODULE,Name), + disk_log_sync, ?DEFAULT_CALL_TIMEOUT) + catch + _:{timeout,_} -> {error,handler_busy} + end; +filesync(Name) -> + {error,{badarg,{filesync,[Name]}}}. + +%%%----------------------------------------------------------------- +%%% +-spec info(Name) -> Info | {error,Reason} when + Name :: atom(), + Info :: term(), + Reason :: handler_busy | {badarg,term()}. + +info(Name) when is_atom(Name) -> + try + gen_server:call(?name_to_reg_name(?MODULE,Name), + info, ?DEFAULT_CALL_TIMEOUT) + catch + _:{timeout,_} -> {error,handler_busy} + end; +info(Name) -> + {error,{badarg,{info,[Name]}}}. + +%%%----------------------------------------------------------------- +%%% +-spec reset(Name) -> ok | {error,Reason} when + Name :: atom(), + Reason :: handler_busy | {badarg,term()}. + +reset(Name) when is_atom(Name) -> + try + gen_server:call(?name_to_reg_name(?MODULE,Name), + reset, ?DEFAULT_CALL_TIMEOUT) + catch + _:{timeout,_} -> {error,handler_busy} + end; +reset(Name) -> + {error,{badarg,{reset,[Name]}}}. 
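
The three service functions above take the handler id given to logger:add_handler/3; with a handler named my_disk_log (example id) that is:

    ok = logger_disk_log_h:filesync(my_disk_log),
    Info = logger_disk_log_h:info(my_disk_log),   %% the handler's internal state map
    ok = logger_disk_log_h:reset(my_disk_log)
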
+ + +%%%=================================================================== +%%% logger callbacks +%%%=================================================================== + +%%%----------------------------------------------------------------- +%%% Handler being added +adding_handler(#{id:=Name}=Config) -> + case check_config(adding, Config) of + {ok, #{config:=HConfig}=Config1} -> + %% create initial handler state by merging defaults with config + HState = maps:merge(get_init_state(), HConfig), + case logger_h_common:overload_levels_ok(HState) of + true -> + start(Name, Config1, HState); + false -> + #{sync_mode_qlen := SMQL, + drop_mode_qlen := DMQL, + flush_qlen := FQL} = HState, + {error,{invalid_levels,{SMQL,DMQL,FQL}}} + end; + Error -> + Error + end. + +%%%----------------------------------------------------------------- +%%% Updating handler config +changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) -> + WriteOnce = maps:with([type,file,max_no_files,max_no_bytes],OldHConfig), + ReadOnly = maps:with([handler_pid,mode_tab],OldHConfig), + NewHConfig0 = maps:get(config, NewConfig, #{}), + Default = + case SetOrUpdate of + set -> + %% Do not reset write-once fields to defaults + maps:merge(get_default_config(),WriteOnce); + update -> + OldHConfig + end, + + %% Allow (accidentially) included read-only fields - just overwrite them + NewHConfig = maps:merge(maps:merge(Default,NewHConfig0),ReadOnly), + + %% But fail if write-once fields are changed + case maps:with([type,file,max_no_files,max_no_bytes],NewHConfig) of + WriteOnce -> + changing_config1(maps:get(handler_pid,OldHConfig), + OldConfig, + NewConfig#{config=>NewHConfig}); + Other -> + {Old,New} = logger_server:diff_maps(WriteOnce,Other), + {error,{illegal_config_change,#{config=>Old},#{config=>New}}} + end. + +changing_config1(HPid, OldConfig, NewConfig) -> + case check_config(changing, NewConfig) of + Result = {ok,NewConfig1} -> + try gen_server:call(HPid, {change_config,OldConfig,NewConfig1}, + ?DEFAULT_CALL_TIMEOUT) of + ok -> Result; + Error -> Error + catch + _:{timeout,_} -> {error,handler_busy} + end; + Error -> + Error + end. + +check_config(adding, #{id:=Name}=Config) -> + %% merge handler specific config data + HConfig1 = maps:get(config, Config, #{}), + HConfig2 = maps:merge(get_default_config(), HConfig1), + HConfig3 = merge_default_logopts(Name, HConfig2), + case check_h_config(maps:to_list(HConfig3)) of + ok -> + {ok,Config#{config=>HConfig3}}; + Error -> + Error + end; +check_config(changing, Config) -> + HConfig = maps:get(config, Config, #{}), + case check_h_config(maps:to_list(HConfig)) of + ok -> {ok,Config}; + Error -> Error + end. + +merge_default_logopts(Name, HConfig) -> + Type = maps:get(type, HConfig, wrap), + {DefaultNoFiles,DefaultNoBytes} = + case Type of + halt -> {undefined,infinity}; + _wrap -> {10,1048576} + end, + {ok,Dir} = file:get_cwd(), + Defaults = #{file => filename:join(Dir,Name), + max_no_files => DefaultNoFiles, + max_no_bytes => DefaultNoBytes, + type => Type}, + maps:merge(Defaults, HConfig). 
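
Putting adding_handler/1 and check_config/2 above together, a handler instance is typically created as below (file name and limits are examples); anything left out of the config map falls back to the defaults from merge_default_logopts/2 and get_default_config/0:

    ok = logger:add_handler(my_disk_log, logger_disk_log_h,
                            #{level => info,
                              config => #{file => "log/my_app.log",
                                          type => wrap,
                                          max_no_files => 5,
                                          max_no_bytes => 1048576}})
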
+ +check_h_config([{file,File}|Config]) when is_list(File) -> + check_h_config(Config); +check_h_config([{max_no_files,undefined}|Config]) -> + check_h_config(Config); +check_h_config([{max_no_files,N}|Config]) when is_integer(N), N>0 -> + check_h_config(Config); +check_h_config([{max_no_bytes,infinity}|Config]) -> + check_h_config(Config); +check_h_config([{max_no_bytes,N}|Config]) when is_integer(N), N>0 -> + check_h_config(Config); +check_h_config([{type,Type}|Config]) when Type==wrap; Type==halt -> + check_h_config(Config); +check_h_config([Other | Config]) -> + case logger_h_common:check_common_config(Other) of + valid -> + check_h_config(Config); + invalid -> + {error,{invalid_config,?MODULE,Other}} + end; +check_h_config([]) -> + ok. + +%%%----------------------------------------------------------------- +%%% Handler being removed +removing_handler(#{id:=Name}) -> + stop(Name). + +%%%----------------------------------------------------------------- +%%% Log a string or report +-spec log(LogEvent, Config) -> ok when + LogEvent :: logger:log_event(), + Config :: logger:handler_config(). + +log(LogEvent, Config = #{id := Name, + config := #{handler_pid := HPid, + mode_tab := ModeTab}}) -> + %% if the handler has crashed, we must drop this event + %% and hope the handler restarts so we can try again + true = is_process_alive(HPid), + Bin = logger_h_common:log_to_binary(LogEvent, Config), + logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin). + +%%%----------------------------------------------------------------- +%%% Remove internal fields from configuration +filter_config(#{config:=HConfig}=Config) -> + Config#{config=>maps:without([handler_pid,mode_tab],HConfig)}. + +%%%=================================================================== +%%% gen_server callbacks +%%%=================================================================== + +init([Name, + Config = #{config := HConfig = #{file:=File, + type:=Type, + max_no_bytes:=MNB, + max_no_files:=MNF}}, + State = #{dl_sync_int := DLSyncInt}]) -> + + RegName = ?name_to_reg_name(?MODULE,Name), + register(RegName, self()), + process_flag(trap_exit, true), + process_flag(message_queue_data, off_heap), + + ?init_test_hooks(), + ?start_observation(Name), + + LogOpts = #{file=>File, type=>Type, max_no_bytes=>MNB, max_no_files=>MNF}, + case open_disk_log(Name, File, Type, MNB, MNF) of + ok -> + try ets:new(Name, [public]) of + ModeTab -> + ?set_mode(ModeTab, async), + T0 = ?timestamp(), + State1 = + ?merge_with_stats(State#{ + id => Name, + mode_tab => ModeTab, + mode => async, + dl_sync => DLSyncInt, + log_opts => LogOpts, + last_qlen => 0, + last_log_ts => T0, + burst_win_ts => T0, + burst_msg_count => 0, + last_op => sync, + prev_log_result => ok, + prev_sync_result => ok, + prev_disk_log_info => undefined}), + Config1 = + Config#{config => HConfig#{handler_pid => self(), + mode_tab => ModeTab}}, + proc_lib:init_ack({ok,self(),Config1}), + gen_server:cast(self(), repeated_disk_log_sync), + case logger_h_common:unset_restart_flag(Name, ?MODULE) of + true -> + %% inform about restart + gen_server:cast(self(), {log_handler_info, + "Handler ~p restarted", + [Name]}); + false -> + %% initial start + ok + end, + gen_server:enter_loop(?MODULE, [], State1) + catch + _:Error -> + unregister(RegName), + logger_h_common:error_notify({open_disk_log,Name,Error}), + proc_lib:init_ack(Error) + end; + Error -> + unregister(RegName), + logger_h_common:error_notify({open_disk_log,Name,Error}), + proc_lib:init_ack(Error) + end. 
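
As enforced by changing_config/3 above, the disk_log parameters (file, type, max_no_files, max_no_bytes) are write-once, while the common overload-protection parameters may be changed on a running handler. A sketch against the example handler my_disk_log:

    %% Allowed: tune a common parameter at runtime
    ok = logger:update_handler_config(my_disk_log, config,
                                      #{sync_mode_qlen => 100}),
    %% Rejected: the log file cannot be changed after creation
    {error, {illegal_config_change, _, _}} =
        logger:update_handler_config(my_disk_log, config,
                                     #{file => "other.log"})
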
+ +%% This is the synchronous log event. +handle_call({log, Bin}, _From, State) -> + {Result,State1} = do_log(Bin, call, State), + %% Result == ok | dropped + {reply, Result, State1}; + +handle_call(disk_log_sync, _From, State = #{id := Name}) -> + State1 = #{prev_sync_result := Result} = disk_log_sync(Name, State), + {reply, Result, State1}; + +handle_call({change_config,_OldConfig,NewConfig}, _From, + State = #{filesync_repeat_interval := FSyncInt0}) -> + HConfig = maps:get(config, NewConfig, #{}), + State1 = #{sync_mode_qlen := SMQL, + drop_mode_qlen := DMQL, + flush_qlen := FQL} = maps:merge(State, HConfig), + case logger_h_common:overload_levels_ok(State1) of + true -> + _ = + case maps:get(filesync_repeat_interval, HConfig, undefined) of + undefined -> + ok; + no_repeat -> + _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, + State, + undefined)); + FSyncInt0 -> + ok; + _FSyncInt1 -> + _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, + State, + undefined)), + _ = gen_server:cast(self(), repeated_disk_log_sync) + end, + {reply, ok, State1}; + false -> + {reply, {error,{invalid_levels,{SMQL,DMQL,FQL}}}, State} + end; + +handle_call(info, _From, State) -> + {reply, State, State}; + +handle_call(reset, _From, State) -> + State1 = ?merge_with_stats(State), + {reply, ok, State1#{last_qlen => 0, + last_log_ts => ?timestamp(), + prev_log_result => ok, + prev_sync_result => ok, + prev_disk_log_info => undefined}}; + +handle_call(stop, _From, State) -> + {stop, {shutdown,stopped}, ok, State}. + + +%% This is the asynchronous log event. +handle_cast({log, Bin}, State) -> + {_,State1} = do_log(Bin, cast, State), + {noreply, State1}; + +handle_cast({log_handler_info, Format, Args}, State = #{id:=Name}) -> + log_handler_info(Name, Format, Args, State), + {noreply, State}; + +%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this +%% clause gets called repeatedly by the handler. In order to +%% guarantee that a filesync *always* happens after the last log +%% event, the repeat operation must be active! +handle_cast(repeated_disk_log_sync, + State = #{id := Name, + filesync_repeat_interval := FSyncInt, + last_op := LastOp}) -> + State1 = + if is_integer(FSyncInt) -> + %% only do filesync if something has been + %% written since last time we checked + NewState = if LastOp == sync -> + State; + true -> + disk_log_sync(Name, State) + end, + {ok,TRef} = + timer:apply_after(FSyncInt, gen_server,cast, + [self(),repeated_disk_log_sync]), + NewState#{rep_sync_tref => TRef, last_op => sync}; + true -> + State + end, + {noreply,State1}. + +%% The disk log owner must handle status messages from disk_log. 
+handle_info({disk_log, _Node, _Log, {wrap,_NoLostItems}}, State) -> + {noreply, State}; +handle_info({disk_log, _Node, Log, Info = {truncated,_NoLostItems}}, + State = #{id := Name, prev_disk_log_info := PrevInfo}) -> + error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}), + {noreply, State#{prev_disk_log_info => Info}}; +handle_info({disk_log, _Node, Log, Info = {blocked_log,_Items}}, + State = #{id := Name, prev_disk_log_info := PrevInfo}) -> + error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}), + {noreply, State#{prev_disk_log_info => Info}}; +handle_info({disk_log, _Node, Log, full}, + State = #{id := Name, prev_disk_log_info := PrevInfo}) -> + error_notify_new(full, PrevInfo, {disk_log,Name,Log,full}), + {noreply, State#{prev_disk_log_info => full}}; +handle_info({disk_log, _Node, Log, Info = {error_status,_Status}}, + State = #{id := Name, prev_disk_log_info := PrevInfo}) -> + error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}), + {noreply, State#{prev_disk_log_info => Info}}; + +handle_info({'EXIT',_Pid,_Why}, State = #{id := _Name}) -> + {noreply, State}; + +handle_info(_, State) -> + {noreply, State}. + +terminate(Reason, State = #{id := Name}) -> + _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State, + undefined)), + _ = close_disk_log(Name, normal), + ok = logger_h_common:stop_or_restart(Name, Reason, State), + unregister(?name_to_reg_name(?MODULE, Name)), + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +%%%----------------------------------------------------------------- +%%% Internal functions + +%%%----------------------------------------------------------------- +%%% +get_default_config() -> + #{sync_mode_qlen => ?SYNC_MODE_QLEN, + drop_mode_qlen => ?DROP_MODE_QLEN, + flush_qlen => ?FLUSH_QLEN, + burst_limit_enable => ?BURST_LIMIT_ENABLE, + burst_limit_max_count => ?BURST_LIMIT_MAX_COUNT, + burst_limit_window_time => ?BURST_LIMIT_WINDOW_TIME, + overload_kill_enable => ?OVERLOAD_KILL_ENABLE, + overload_kill_qlen => ?OVERLOAD_KILL_QLEN, + overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE, + overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER, + filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}. + +get_init_state() -> + #{dl_sync_int => ?CONTROLLER_SYNC_INTERVAL, + filesync_ok_qlen => ?FILESYNC_OK_QLEN}. + +%%%----------------------------------------------------------------- +%%% Add a disk_log handler to the logger. +%%% This starts a dedicated handler process which should always +%%% exist if the handler is registered with logger (and should not +%%% exist if the handler is not registered). +%%% +%%% Config is the logger:handler_config() map. Handler specific parameters +%%% should be provided with a sub map associated with a key named +%%% 'config', e.g: +%%% +%%% Config = #{config => #{sync_mode_qlen => 50} +%%% +%%% The 'config' sub map will also contain parameters for configuring +%%% the disk_log: +%%% +%%% Config = #{config => #{file => file:filename(), +%%% max_no_bytes => integer(), +%%% max_no_files => integer(), +%%% type => wrap | halt}}. +%%% +%%% If type == halt, then max_no_files is ignored. +%%% +%%% The disk_log handler process is linked to logger_sup, which is +%%% part of the kernel application's supervision tree. 
+start(Name, Config, HandlerState) -> + LoggerDLH = + #{id => Name, + start => {?MODULE, start_link, [Name,Config,HandlerState]}, + restart => temporary, + shutdown => 2000, + type => worker, + modules => [?MODULE]}, + case supervisor:start_child(logger_sup, LoggerDLH) of + {ok,Pid,Config1} -> + ok = logger_handler_watcher:register_handler(Name,Pid), + {ok,Config1}; + Error -> + Error + end. + +%%%----------------------------------------------------------------- +%%% Stop and remove the handler. +stop(Name) -> + case whereis(?name_to_reg_name(?MODULE,Name)) of + undefined -> + ok; + Pid -> + %% We don't want to do supervisor:terminate_child here + %% since we need to distinguish this explicit stop from a + %% system termination in order to avoid circular attempts + %% at removing the handler (implying deadlocks and + %% timeouts). + %% And we don't need to do supervisor:delete_child, since + %% the restart type is temporary, which means that the + %% child specification is automatically removed from the + %% supervisor when the process dies. + _ = gen_server:call(Pid, stop), + ok + end. + +%%%----------------------------------------------------------------- +%%% Logging and overload control. +-define(update_dl_sync(C, Interval), + if C == 0 -> Interval; + true -> C-1 end). + +%% check for overload between every event (and set Mode to async, +%% sync or drop accordingly), but never flush the whole mailbox +%% before LogWindowSize events have been handled +do_log(Bin, CallOrCast, State = #{id:=Name, mode := Mode0}) -> + T1 = ?timestamp(), + + %% check if the handler is getting overloaded, or if it's + %% recovering from overload (the check must be done for each + %% event to react quickly to large bursts of events and + %% to ensure that the handler can never end up in drop mode + %% with an empty mailbox, which would stop operation) + {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State), + + if (Mode1 == drop) andalso (Mode0 =/= drop) -> + log_handler_info(Name, "Handler ~p switched to drop mode", + [Name], State); + (Mode0 == drop) andalso ((Mode1 == async) orelse (Mode1 == sync)) -> + log_handler_info(Name, "Handler ~p switched to ~w mode", + [Name,Mode1], State); + true -> + ok + end, + + %% kill the handler if it can't keep up with the load + logger_h_common:kill_if_choked(Name, QLen, Mem, ?MODULE, State), + + if Mode1 == flush -> + flush(Name, QLen, T1, State1); + true -> + write(Name, Mode1, T1, Bin, CallOrCast, State1) + end. + +%% this function is called by do_log/3 after an overload check +%% has been performed, where QLen > FlushQLen +flush(Name, _QLen0, T1, State=#{last_log_ts := _T0, mode_tab := ModeTab}) -> + %% flush messages in the mailbox (a limited number in + %% order to not cause long delays) + NewFlushed = logger_h_common:flush_log_events(?FLUSH_MAX_N), + + %% write info in log about flushed messages + log_handler_info(Name, "Handler ~p flushed ~w log events", + [Name,NewFlushed], State), + + %% because of the receive loop when flushing messages, the + %% handler will be scheduled out often and the mailbox could + %% grow very large, so we'd better check the queue again here + {_,_QLen1} = process_info(self(), message_queue_len), + ?observe(Name,{max_qlen,_QLen1}), + + %% Add 1 for the current log event + ?observe(Name,{flushed,NewFlushed+1}), + + State1 = ?update_max_time(?diff_time(T1,_T0),State), + {dropped,?update_other(flushed,FLUSHED,NewFlushed, + State1#{mode => ?set_mode(ModeTab,async), + last_qlen => 0, + last_log_ts => T1})}. 
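
do_log/3 above moves the handler between async, sync, drop and flush behaviour based on its message-queue length. The thresholds come from the handler config and could be tuned like this (values are illustrative only and must keep sync =< drop =< flush):

    ok = logger:update_handler_config(my_disk_log, config,
                                      #{sync_mode_qlen => 20,
                                        drop_mode_qlen => 500,
                                        flush_qlen     => 2000})
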
+ +%% this function is called to write to disk_log +write(Name, Mode, T1, Bin, _CallOrCast, + State = #{mode_tab := ModeTab, + dl_sync := DLSync, + dl_sync_int := DLSyncInt, + last_qlen := LastQLen, + last_log_ts := T0}) -> + %% check if we need to limit the number of writes + %% during a burst of log events + {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State), + + %% only send a synhrounous event to the disk_log process + %% every DLSyncInt time, to give the handler time between + %% writes so it can keep up with incoming messages + {Status,LastQLen1,State1} = + if DoWrite, DLSync == 0 -> + ?observe(Name,{_CallOrCast,1}), + NewState = disk_log_write(Name, Bin, State), + {ok, element(2,process_info(self(),message_queue_len)), + NewState}; + DoWrite -> + ?observe(Name,{_CallOrCast,1}), + NewState = disk_log_write(Name, Bin, State), + {ok, LastQLen, NewState}; + not DoWrite -> + ?observe(Name,{flushed,1}), + {dropped, LastQLen, State} + end, + + %% Check if the time since the previous log event is long enough - + %% and the queue length small enough - to assume the mailbox has + %% been emptied, and if so, do filesync operation and reset mode to + %% async. Note that this is the best we can do to detect an idle + %% handler without setting a timer after each log call/cast. If the + %% time between two consecutive log events is fast and no new + %% event comes in after the last one, idle state won't be detected! + Time = ?diff_time(T1,T0), + {Mode1,BurstMsgCount1,State2} = + if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso + (Time > ?IDLE_DETECT_TIME_USEC) -> + {?change_mode(ModeTab,Mode,async), 0, disk_log_sync(Name,State1)}; + true -> + {Mode, BurstMsgCount,State1} + end, + + State3 = + ?update_calls_or_casts(_CallOrCast,1,State2), + State4 = + ?update_max_time(Time, + State3#{mode => Mode1, + last_qlen := LastQLen1, + last_log_ts => T1, + burst_win_ts => BurstWinT, + burst_msg_count => BurstMsgCount1, + dl_sync => ?update_dl_sync(DLSync,DLSyncInt)}), + {Status,State4}. + + +log_handler_info(Name, Format, Args, State) -> + Config = + case logger:get_handler_config(Name) of + {ok,Conf} -> Conf; + _ -> #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}} + end, + Meta = #{time=>erlang:system_time(microsecond)}, + Bin = logger_h_common:log_to_binary(#{level => notice, + msg => {Format,Args}, + meta => Meta}, Config), + _ = disk_log_write(Name, Bin, State), + ok. + + +open_disk_log(Name, File, Type, MaxNoBytes, MaxNoFiles) -> + case filelib:ensure_dir(File) of + ok -> + Size = + if Type == halt -> MaxNoBytes; + Type == wrap -> {MaxNoBytes,MaxNoFiles} + end, + Opts = [{name, Name}, + {file, File}, + {size, Size}, + {type, Type}, + {linkto, self()}, + {repair, false}, + {format, external}, + {notify, true}, + {quiet, true}, + {mode, read_write}], + case disk_log:open(Opts) of + {ok,Name} -> + ok; + Error = {error,_Reason} -> + Error + end; + Error -> + Error + end. + +close_disk_log(Name, _) -> + _ = ?disk_log_sync(Name), + _ = disk_log:lclose(Name), + ok. + +disk_log_write(Name, Bin, State) -> + case ?disk_log_blog(Name, Bin) of + ok -> + State#{prev_log_result => ok, last_op => write}; + LogError -> + _ = case maps:get(prev_log_result, State) of + LogError -> + %% don't report same error twice + ok; + _ -> + LogOpts = maps:get(log_opts, State), + logger_h_common:error_notify({Name,log, + LogOpts, + LogError}) + end, + State#{prev_log_result => LogError} + end. 
+ +disk_log_sync(Name, State) -> + case ?disk_log_sync(Name) of + ok -> + State#{prev_sync_result => ok, last_op => sync}; + SyncError -> + _ = case maps:get(prev_sync_result, State) of + SyncError -> + %% don't report same error twice + ok; + _ -> + LogOpts = maps:get(log_opts, State), + logger_h_common:error_notify({Name,filesync, + LogOpts, + SyncError}) + end, + State#{prev_sync_result => SyncError} + end. + +error_notify_new(Info,Info, _Term) -> + ok; +error_notify_new(_Info0,_Info1, Term) -> + logger_h_common:error_notify(Term). diff --git a/lib/kernel/src/logger_filters.erl b/lib/kernel/src/logger_filters.erl new file mode 100644 index 0000000000..0664c598e1 --- /dev/null +++ b/lib/kernel/src/logger_filters.erl @@ -0,0 +1,127 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_filters). + +-export([domain/2, + level/2, + progress/2, + remote_gl/2]). + +-include("logger_internal.hrl"). +-define(IS_ACTION(A), (A==log orelse A==stop)). + +-spec domain(LogEvent,Extra) -> logger:filter_return() when + LogEvent :: logger:log_event(), + Extra :: {Action,Compare,MatchDomain}, + Action :: log | stop, + Compare :: super | sub | equal | not_equal | undefined, + MatchDomain :: list(atom()). +domain(#{meta:=Meta}=LogEvent,{Action,Compare,MatchDomain}) + when ?IS_ACTION(Action) andalso + (Compare==super orelse + Compare==sub orelse + Compare==equal orelse + Compare==not_equal orelse + Compare==undefined) andalso + is_list(MatchDomain) -> + filter_domain(Compare,Meta,MatchDomain,on_match(Action,LogEvent)); +domain(LogEvent,Extra) -> + erlang:error(badarg,[LogEvent,Extra]). + +-spec level(LogEvent,Extra) -> logger:filter_return() when + LogEvent :: logger:log_event(), + Extra :: {Action,Operator,MatchLevel}, + Action :: log | stop, + Operator :: neq | eq | lt | gt | lteq | gteq, + MatchLevel :: logger:level(). +level(#{level:=L1}=LogEvent,{Action,Op,L2}) + when ?IS_ACTION(Action) andalso + (Op==neq orelse + Op==eq orelse + Op==lt orelse + Op==gt orelse + Op==lteq orelse + Op==gteq) andalso + ?IS_LEVEL(L2) -> + filter_level(Op,L1,L2,on_match(Action,LogEvent)); +level(LogEvent,Extra) -> + erlang:error(badarg,[LogEvent,Extra]). + +-spec progress(LogEvent,Extra) -> logger:filter_return() when + LogEvent :: logger:log_event(), + Extra :: log | stop. +progress(LogEvent,Action) when ?IS_ACTION(Action) -> + filter_progress(LogEvent,on_match(Action,LogEvent)); +progress(LogEvent,Action) -> + erlang:error(badarg,[LogEvent,Action]). + +-spec remote_gl(LogEvent,Extra) -> logger:filter_return() when + LogEvent :: logger:log_event(), + Extra :: log | stop. +remote_gl(LogEvent,Action) when ?IS_ACTION(Action) -> + filter_remote_gl(LogEvent,on_match(Action,LogEvent)); +remote_gl(LogEvent,Action) -> + erlang:error(badarg,[LogEvent,Action]). 
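
The four filters above are installed as {Fun, Extra} pairs via the logger filter API. Two sketches (stop_progress and my_app_only are made-up filter ids, my_app a made-up domain):

    %% Stop supervisor/application progress reports on the default handler
    ok = logger:add_handler_filter(default, stop_progress,
                                   {fun logger_filters:progress/2, stop}),
    %% Pass only events tagged with domain [my_app|_]; combine with
    %% filter_default => stop so that non-matching events are dropped
    ok = logger:add_handler_filter(default, my_app_only,
                                   {fun logger_filters:domain/2, {log, sub, [my_app]}})
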
+ +%%%----------------------------------------------------------------- +%%% Internal +filter_domain(super,#{domain:=Domain},MatchDomain,OnMatch) -> + is_prefix(Domain,MatchDomain,OnMatch); +filter_domain(sub,#{domain:=Domain},MatchDomain,OnMatch) -> + is_prefix(MatchDomain,Domain,OnMatch); +filter_domain(equal,#{domain:=Domain},Domain,OnMatch) -> + OnMatch; +filter_domain(not_equal,#{domain:=Domain},MatchDomain,OnMatch) + when Domain=/=MatchDomain -> + OnMatch; +filter_domain(Compare,Meta,_,OnMatch) -> + case maps:is_key(domain,Meta) of + false when Compare==undefined; Compare==not_equal -> OnMatch; + _ -> ignore + end. + +is_prefix(D1,D2,OnMatch) when is_list(D1), is_list(D2) -> + case lists:prefix(D1,D2) of + true -> OnMatch; + false -> ignore + end; +is_prefix(_,_,_) -> + ignore. + +filter_level(Op,L1,L2,OnMatch) -> + case logger:compare_levels(L1,L2) of + eq when Op==eq; Op==lteq; Op==gteq -> OnMatch; + lt when Op==lt; Op==lteq; Op==neq -> OnMatch; + gt when Op==gt; Op==gteq; Op==neq -> OnMatch; + _ -> ignore + end. + +filter_progress(#{msg:={report,#{label:={_,progress}}}},OnMatch) -> + OnMatch; +filter_progress(_,_) -> + ignore. + +filter_remote_gl(#{meta:=#{gl:=GL}},OnMatch) when node(GL)=/=node() -> + OnMatch; +filter_remote_gl(_,_) -> + ignore. + +on_match(log,LogEvent) -> LogEvent; +on_match(stop,_) -> stop. diff --git a/lib/kernel/src/logger_formatter.erl b/lib/kernel/src/logger_formatter.erl new file mode 100644 index 0000000000..ded89bac9f --- /dev/null +++ b/lib/kernel/src/logger_formatter.erl @@ -0,0 +1,514 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_formatter). + +-export([format/2]). +-export([check_config/1]). + +-include("logger_internal.hrl"). + +%%%----------------------------------------------------------------- +%%% Types +-type config() :: #{chars_limit => pos_integer() | unlimited, + depth => pos_integer() | unlimited, + legacy_header => boolean(), + max_size => pos_integer() | unlimited, + report_cb => logger:report_cb(), + single_line => boolean(), + template => template(), + time_designator => byte(), + time_offset => integer() | [byte()]}. +-type template() :: [metakey() | {metakey(),template(),template()} | string()]. +-type metakey() :: atom() | [atom()]. + +%%%----------------------------------------------------------------- +%%% API +-spec format(LogEvent,Config) -> unicode:chardata() when + LogEvent :: logger:log_event(), + Config :: config(). 
+format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0) + when is_map(Config0) -> + Config = add_default_config(Config0), + Meta1 = maybe_add_legacy_header(Level,Meta,Config), + Template = maps:get(template,Config), + {BT,AT0} = lists:splitwith(fun(msg) -> false; (_) -> true end, Template), + {DoMsg,AT} = + case AT0 of + [msg|Rest] -> {true,Rest}; + _ ->{false,AT0} + end, + B = do_format(Level,Meta1,BT,Config), + A = do_format(Level,Meta1,AT,Config), + MsgStr = + if DoMsg -> + Config1 = + case maps:get(chars_limit,Config) of + unlimited -> + Config; + Size0 -> + Size = + case Size0 - string:length([B,A]) of + S when S>=0 -> S; + _ -> 0 + end, + Config#{chars_limit=>Size} + end, + MsgStr0 = format_msg(Msg0,Meta1,Config1), + case maps:get(single_line,Config) of + true -> + %% Trim leading and trailing whitespaces, and replace + %% newlines with ", " + re:replace(string:trim(MsgStr0),",?\r?\n\s*",", ", + [{return,list},global,unicode]); + _false -> + MsgStr0 + end; + true -> + "" + end, + truncate([B,MsgStr,A],maps:get(max_size,Config)). + +do_format(Level,Data,[level|Format],Config) -> + [to_string(level,Level,Config)|do_format(Level,Data,Format,Config)]; +do_format(Level,Data,[{Key,IfExist,Else}|Format],Config) -> + String = + case value(Key,Data) of + {ok,Value} -> do_format(Level,Data#{Key=>Value},IfExist,Config); + error -> do_format(Level,Data,Else,Config) + end, + [String|do_format(Level,Data,Format,Config)]; +do_format(Level,Data,[Key|Format],Config) + when is_atom(Key) orelse + (is_list(Key) andalso is_atom(hd(Key))) -> + String = + case value(Key,Data) of + {ok,Value} -> to_string(Key,Value,Config); + error -> "" + end, + [String|do_format(Level,Data,Format,Config)]; +do_format(Level,Data,[Str|Format],Config) -> + [Str|do_format(Level,Data,Format,Config)]; +do_format(_Level,_Data,[],_Config) -> + []. + +value(Key,Meta) when is_map_key(Key,Meta) -> + {ok,maps:get(Key,Meta)}; +value([Key|Keys],Meta) when is_map_key(Key,Meta) -> + value(Keys,maps:get(Key,Meta)); +value([],Value) -> + {ok,Value}; +value(_,_) -> + error. + +to_string(time,Time,Config) -> + format_time(Time,Config); +to_string(mfa,MFA,Config) -> + format_mfa(MFA,Config); +to_string(_,Value,Config) -> + to_string(Value,Config). + +to_string(X,_) when is_atom(X) -> + atom_to_list(X); +to_string(X,_) when is_integer(X) -> + integer_to_list(X); +to_string(X,_) when is_pid(X) -> + pid_to_list(X); +to_string(X,_) when is_reference(X) -> + ref_to_list(X); +to_string(X,Config) when is_list(X) -> + case printable_list(lists:flatten(X)) of + true -> X; + _ -> io_lib:format(p(Config),[X]) + end; +to_string(X,Config) -> + io_lib:format(p(Config),[X]). + +printable_list([]) -> + false; +printable_list(X) -> + io_lib:printable_list(X). 
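do_format/4 above accepts plain metadata keys, nested key paths, and {Key,IfExist,Else} conditionals in the template. An illustrative template exercising all three forms; the nested key [my_meta,request_id] is an invented application-specific field:

%% time and level always, pid only when present in the metadata,
%% an optional nested request id, then the message itself.
Template = [time, " ", level, ": ",
            {pid, [pid, " "], []},
            {[my_meta,request_id], ["req=", [my_meta,request_id], " "], []},
            msg, "\n"],
ok = logger:update_handler_config(default,
                                  #{formatter => {logger_formatter,
                                                  #{template => Template,
                                                    single_line => true}}}).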
+ +format_msg({string,Chardata},Meta,Config) -> + format_msg({"~ts",[Chardata]},Meta,Config); +format_msg({report,_}=Msg,Meta,#{report_cb:=Fun}=Config) + when is_function(Fun,1); is_function(Fun,2) -> + format_msg(Msg,Meta#{report_cb=>Fun},maps:remove(report_cb,Config)); +format_msg({report,Report},#{report_cb:=Fun}=Meta,Config) when is_function(Fun,1) -> + try Fun(Report) of + {Format,Args} when is_list(Format), is_list(Args) -> + format_msg({Format,Args},maps:remove(report_cb,Meta),Config); + Other -> + P = p(Config), + format_msg({"REPORT_CB/1 ERROR: "++P++"; Returned: "++P, + [Report,Other]},Meta,Config) + catch C:R:S -> + P = p(Config), + format_msg({"REPORT_CB/1 CRASH: "++P++"; Reason: "++P, + [Report,{C,R,logger:filter_stacktrace(?MODULE,S)}]}, + Meta,Config) + end; +format_msg({report,Report},#{report_cb:=Fun}=Meta,Config) when is_function(Fun,2) -> + try Fun(Report,maps:with([depth,chars_limit,single_line],Config)) of + Chardata when ?IS_STRING(Chardata) -> + try chardata_to_list(Chardata) % already size limited by report_cb + catch _:_ -> + P = p(Config), + format_msg({"REPORT_CB/2 ERROR: "++P++"; Returned: "++P, + [Report,Chardata]},Meta,Config) + end; + Other -> + P = p(Config), + format_msg({"REPORT_CB/2 ERROR: "++P++"; Returned: "++P, + [Report,Other]},Meta,Config) + catch C:R:S -> + P = p(Config), + format_msg({"REPORT_CB/2 CRASH: "++P++"; Reason: "++P, + [Report,{C,R,logger:filter_stacktrace(?MODULE,S)}]}, + Meta,Config) + end; +format_msg({report,Report},Meta,Config) -> + format_msg({report,Report}, + Meta#{report_cb=>fun logger:format_report/1}, + Config); +format_msg(Msg,_Meta,#{depth:=Depth,chars_limit:=CharsLimit, + single_line:=Single}) -> + Opts = chars_limit_to_opts(CharsLimit), + format_msg(Msg, Depth, Opts, Single). + +chars_limit_to_opts(unlimited) -> []; +chars_limit_to_opts(CharsLimit) -> [{chars_limit,CharsLimit}]. + +format_msg({Format0,Args},Depth,Opts,Single) -> + try + Format1 = io_lib:scan_format(Format0, Args), + Format = reformat(Format1, Depth, Single), + io_lib:build_text(Format,Opts) + catch C:R:S -> + P = p(Single), + FormatError = "FORMAT ERROR: "++P++" - "++P, + case Format0 of + FormatError -> + %% already been here - avoid failing cyclically + erlang:raise(C,R,S); + _ -> + format_msg({FormatError,[Format0,Args]},Depth,Opts,Single) + end + end. + +reformat(Format,unlimited,false) -> + Format; +reformat([#{control_char:=C}=M|T], Depth, true) when C =:= $p -> + [limit_depth(M#{width => 0}, Depth)|reformat(T, Depth, true)]; +reformat([#{control_char:=C}=M|T], Depth, true) when C =:= $P -> + [M#{width => 0}|reformat(T, Depth, true)]; +reformat([#{control_char:=C}=M|T], Depth, Single) when C =:= $p; C =:= $w -> + [limit_depth(M, Depth)|reformat(T, Depth, Single)]; +reformat([H|T], Depth, Single) -> + [H|reformat(T, Depth, Single)]; +reformat([], _, _) -> + []. + +limit_depth(M0, unlimited) -> + M0; +limit_depth(#{control_char:=C0, args:=Args}=M0, Depth) -> + C = C0 - ($a - $A), %To uppercase. + M0#{control_char:=C,args:=Args++[Depth]}. + +chardata_to_list(Chardata) -> + case unicode:characters_to_list(Chardata,unicode) of + List when is_list(List) -> + List; + Error -> + throw(Error) + end. + +truncate(String,unlimited) -> + String; +truncate(String,Size) -> + Length = string:length(String), + if Length>Size -> + case lists:reverse(lists:flatten(String)) of + [$\n|_] -> + string:slice(String,0,Size-4)++"...\n"; + _ -> + string:slice(String,0,Size-3)++"..." + end; + true -> + String + end. 
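Reports are converted to {Format,Args} by a report_cb of arity 1 or 2 taken from the event metadata or the formatter config. A hypothetical module showing an arity-1 callback and how it could be attached to a single log call:

-module(my_report_example).                     %% hypothetical module
-export([log_query/2, report_cb/1]).

%% Arity-1 report_cb: must return {Format, Args} (see format_msg/3 above).
report_cb(#{what := What, elapsed_ms := T}) ->
    {"~p took ~b ms", [What, T]}.

log_query(What, ElapsedMs) ->
    logger:warning(#{what => What, elapsed_ms => ElapsedMs},
                   #{report_cb => fun ?MODULE:report_cb/1}).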
+ +%% SysTime is the system time in microseconds +format_time(SysTime,#{time_offset:=Offset,time_designator:=Des}) + when is_integer(SysTime) -> + calendar:system_time_to_rfc3339(SysTime,[{unit,microsecond}, + {offset,Offset}, + {time_designator,Des}]). + +%% SysTime is the system time in microseconds +timestamp_to_datetimemicro(SysTime,Config) when is_integer(SysTime) -> + Micro = SysTime rem 1000000, + Sec = SysTime div 1000000, + UniversalTime = erlang:posixtime_to_universaltime(Sec), + {{Date,Time},UtcStr} = + case offset_to_utc(maps:get(time_offset,Config)) of + true -> {UniversalTime,"UTC "}; + _ -> {erlang:universaltime_to_localtime(UniversalTime),""} + end, + {Date,Time,Micro,UtcStr}. + +format_mfa({M,F,A},_) when is_atom(M), is_atom(F), is_integer(A) -> + atom_to_list(M)++":"++atom_to_list(F)++"/"++integer_to_list(A); +format_mfa({M,F,A},Config) when is_atom(M), is_atom(F), is_list(A) -> + format_mfa({M,F,length(A)},Config); +format_mfa(MFA,Config) -> + to_string(MFA,Config). + +maybe_add_legacy_header(Level, + #{time:=Timestamp}=Meta, + #{legacy_header:=true}=Config) -> + #{title:=Title}=MyMeta = add_legacy_title(Level,Meta,Config), + {{Y,Mo,D},{H,Mi,S},Micro,UtcStr} = + timestamp_to_datetimemicro(Timestamp,Config), + Header = + io_lib:format("=~ts==== ~w-~s-~4w::~2..0w:~2..0w:~2..0w.~6..0w ~s===", + [Title,D,month(Mo),Y,H,Mi,S,Micro,UtcStr]), + Meta#{?MODULE=>MyMeta#{header=>Header}}; +maybe_add_legacy_header(_,Meta,_) -> + Meta. + +add_legacy_title(_Level,#{?MODULE:=#{title:=_}=MyMeta},_) -> + MyMeta; +add_legacy_title(Level,Meta,Config) -> + case maps:get(?MODULE,Meta,#{}) of + #{title:=_}=MyMeta -> + MyMeta; + MyMeta -> + TitleLevel = + case (Level=:=notice andalso maps:find(error_logger,Meta)) of + {ok,_} -> + maps:get(error_logger_notice_header,Config); + _ -> + Level + end, + Title = string:uppercase(atom_to_list(TitleLevel)) ++ " REPORT", + MyMeta#{title=>Title} + end. + +month(1) -> "Jan"; +month(2) -> "Feb"; +month(3) -> "Mar"; +month(4) -> "Apr"; +month(5) -> "May"; +month(6) -> "Jun"; +month(7) -> "Jul"; +month(8) -> "Aug"; +month(9) -> "Sep"; +month(10) -> "Oct"; +month(11) -> "Nov"; +month(12) -> "Dec". + +%% Ensure that all valid configuration parameters exist in the final +%% configuration map +add_default_config(Config0) -> + Default = + #{chars_limit=>unlimited, + error_logger_notice_header=>info, + legacy_header=>false, + single_line=>true, + time_designator=>$T}, + MaxSize = get_max_size(maps:get(max_size,Config0,undefined)), + Depth = get_depth(maps:get(depth,Config0,undefined)), + Offset = get_offset(maps:get(time_offset,Config0,undefined)), + add_default_template(maps:merge(Default,Config0#{max_size=>MaxSize, + depth=>Depth, + time_offset=>Offset})). + +add_default_template(#{template:=_}=Config) -> + Config; +add_default_template(Config) -> + Config#{template=>default_template(Config)}. + +default_template(#{legacy_header:=true}) -> + ?DEFAULT_FORMAT_TEMPLATE_HEADER; +default_template(#{single_line:=true}) -> + ?DEFAULT_FORMAT_TEMPLATE_SINGLE; +default_template(_) -> + ?DEFAULT_FORMAT_TEMPLATE. + +get_max_size(undefined) -> + unlimited; +get_max_size(S) -> + max(10,S). + +get_depth(undefined) -> + error_logger:get_format_depth(); +get_depth(S) -> + max(5,S). + +get_offset(undefined) -> + utc_to_offset(get_utc_config()); +get_offset(Offset) -> + Offset. + +utc_to_offset(true) -> + "Z"; +utc_to_offset(false) -> + "". 
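format_time/2 hands the microsecond timestamp to calendar:system_time_to_rfc3339/2 together with the configured offset and designator. A small illustration of what the time_offset and time_designator keys change (the output strings in the comments are indicative only):

T = erlang:system_time(microsecond),

%% time_offset => "Z", time_designator => $T (the default designator):
calendar:system_time_to_rfc3339(T, [{unit, microsecond},
                                    {offset, "Z"},
                                    {time_designator, $T}]),
%% -> "2018-05-14T13:15:26.123456Z"        (indicative)

%% time_offset => "" (local time), time_designator => $\s:
calendar:system_time_to_rfc3339(T, [{unit, microsecond},
                                    {offset, ""},
                                    {time_designator, $\s}]).
%% -> "2018-05-14 15:15:26.123456+02:00"   (indicative)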
+ +get_utc_config() -> + %% SASL utc_log overrides stdlib config - in order to have uniform + %% timestamps in log messages + case application:get_env(sasl, utc_log) of + {ok, Val} when is_boolean(Val) -> Val; + _ -> + case application:get_env(stdlib, utc_log) of + {ok, Val} when is_boolean(Val) -> Val; + _ -> false + end + end. + +offset_to_utc(Z) when Z=:=0; Z=:="z"; Z=:="Z" -> + true; +offset_to_utc([$+|Tz]) -> + case io_lib:fread("~d:~d", Tz) of + {ok, [0, 0], []} -> + true; + _ -> + false + end; +offset_to_utc(_) -> + false. + +-spec check_config(Config) -> ok | {error,term()} when + Config :: config(). +check_config(Config) when is_map(Config) -> + do_check_config(maps:to_list(Config)); +check_config(Config) -> + {error,{invalid_formatter_config,?MODULE,Config}}. + +do_check_config([{Type,L}|Config]) when Type == chars_limit; + Type == depth; + Type == max_size -> + case check_limit(L) of + ok -> do_check_config(Config); + error -> {error,{invalid_formatter_config,?MODULE,{Type,L}}} + end; +do_check_config([{single_line,SL}|Config]) when is_boolean(SL) -> + do_check_config(Config); +do_check_config([{legacy_header,LH}|Config]) when is_boolean(LH) -> + do_check_config(Config); +do_check_config([{error_logger_notice_header,ELNH}|Config]) when ELNH == info; + ELNH == notice -> + do_check_config(Config); +do_check_config([{report_cb,RCB}|Config]) when is_function(RCB,1); + is_function(RCB,2) -> + do_check_config(Config); +do_check_config([{template,T}|Config]) -> + case check_template(T) of + ok -> do_check_config(Config); + error -> {error,{invalid_formatter_template,?MODULE,T}} + end; +do_check_config([{time_offset,Offset}|Config]) -> + case check_offset(Offset) of + ok -> + do_check_config(Config); + error -> + {error,{invalid_formatter_config,?MODULE,{time_offset,Offset}}} + end; +do_check_config([{time_designator,Char}|Config]) when Char>=0, Char=<255 -> + case io_lib:printable_latin1_list([Char]) of + true -> + do_check_config(Config); + false -> + {error,{invalid_formatter_config,?MODULE,{time_designator,Char}}} + end; +do_check_config([C|_]) -> + {error,{invalid_formatter_config,?MODULE,C}}; +do_check_config([]) -> + ok. + +check_limit(L) when is_integer(L), L>0 -> + ok; +check_limit(unlimited) -> + ok; +check_limit(_) -> + error. + +check_template([Key|T]) when is_atom(Key) -> + check_template(T); +check_template([Key|T]) when is_list(Key), is_atom(hd(Key)) -> + case lists:all(fun(X) when is_atom(X) -> true; + (_) -> false + end, + Key) of + true -> + check_template(T); + false -> + error + end; +check_template([{Key,IfExist,Else}|T]) + when is_atom(Key) orelse + (is_list(Key) andalso is_atom(hd(Key))) -> + case check_template(IfExist) of + ok -> + case check_template(Else) of + ok -> + check_template(T); + error -> + error + end; + error -> + error + end; +check_template([Str|T]) when is_list(Str) -> + case io_lib:printable_unicode_list(Str) of + true -> check_template(T); + false -> error + end; +check_template([]) -> + ok; +check_template(_) -> + error. + +check_offset(I) when is_integer(I) -> + ok; +check_offset(Tz) when Tz=:=""; Tz=:="Z"; Tz=:="z" -> + ok; +check_offset([Sign|Tz]) when Sign=:=$+; Sign=:=$- -> + check_timezone(Tz); +check_offset(_) -> + error. + +check_timezone(Tz) -> + try io_lib:fread("~d:~d", Tz) of + {ok, [_, _], []} -> + ok; + _ -> + error + catch _:_ -> + error + end. + +p(#{single_line:=Single}) -> + p(Single); +p(true) -> + "~0tp"; +p(false) -> + "~tp". 
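check_config/1 validates one key at a time and reports the first offending pair. A few illustrative calls with the results the clauses above should produce:

ok = logger_formatter:check_config(#{single_line => true,
                                     max_size => 512,
                                     time_offset => "+02:00"}),

%% A non-boolean single_line falls through to the catch-all clause:
{error,{invalid_formatter_config,logger_formatter,{single_line,perhaps}}} =
    logger_formatter:check_config(#{single_line => perhaps}),

%% Bad templates get their own error tag:
{error,{invalid_formatter_template,logger_formatter,[{bad}]}} =
    logger_formatter:check_config(#{template => [{bad}]}).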
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl new file mode 100644 index 0000000000..94c640cb92 --- /dev/null +++ b/lib/kernel/src/logger_h_common.erl @@ -0,0 +1,339 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_h_common). + +-include("logger_h_common.hrl"). +-include("logger_internal.hrl"). + +-export([log_to_binary/2, + check_common_config/1, + call_cast_or_drop/4, + check_load/1, + limit_burst/1, + kill_if_choked/5, + flush_log_events/0, + flush_log_events/1, + handler_exit/2, + set_restart_flag/2, + unset_restart_flag/2, + cancel_timer/1, + stop_or_restart/3, + overload_levels_ok/1, + error_notify/1, + info_notify/1]). + +%%%----------------------------------------------------------------- +%%% Convert log data on any form to binary +-spec log_to_binary(LogEvent,Config) -> LogString when + LogEvent :: logger:log_event(), + Config :: logger:handler_config(), + LogString :: binary(). +log_to_binary(#{msg:={report,_},meta:=#{report_cb:=_}}=Log,Config) -> + do_log_to_binary(Log,Config); +log_to_binary(#{msg:={report,_},meta:=Meta}=Log,Config) -> + DefaultReportCb = fun logger:format_otp_report/1, + do_log_to_binary(Log#{meta=>Meta#{report_cb=>DefaultReportCb}},Config); +log_to_binary(Log,Config) -> + do_log_to_binary(Log,Config). + +do_log_to_binary(Log,Config) -> + {Formatter,FormatterConfig} = + maps:get(formatter,Config,{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}), + String = try_format(Log,Formatter,FormatterConfig), + try string_to_binary(String) + catch C2:R2:S2 -> + ?LOG_INTERNAL(debug,[{formatter_error,Formatter}, + {config,FormatterConfig}, + {log_event,Log}, + {bad_return_value,String}, + {catched,{C2,R2,S2}}]), + <<"FORMATTER ERROR: bad return value">> + end. + +try_format(Log,Formatter,FormatterConfig) -> + try Formatter:format(Log,FormatterConfig) + catch + C:R:S -> + ?LOG_INTERNAL(debug,[{formatter_crashed,Formatter}, + {config,FormatterConfig}, + {log_event,Log}, + {reason, + {C,R,logger:filter_stacktrace(?MODULE,S)}}]), + case {?DEFAULT_FORMATTER,#{}} of + {Formatter,FormatterConfig} -> + "DEFAULT FORMATTER CRASHED"; + {DefaultFormatter,DefaultConfig} -> + try_format(Log#{msg=>{"FORMATTER CRASH: ~tp", + [maps:get(msg,Log)]}}, + DefaultFormatter,DefaultConfig) + end + end. + +string_to_binary(String) -> + case unicode:characters_to_binary(String) of + Binary when is_binary(Binary) -> + Binary; + Error -> + throw(Error) + end. 
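try_format/3 calls Formatter:format(Log,FormatterConfig) and falls back to the default formatter on a crash, so any module exporting format/2 (and optionally check_config/1) can act as a formatter. A minimal hypothetical formatter that wraps logger_formatter:

-module(my_tag_formatter).                      %% hypothetical example
-export([format/2, check_config/1]).

%% Must return chardata; prefix the default output with a configurable tag.
format(LogEvent, Config) ->
    Tag = maps:get(tag, Config, "app"),
    ["[", Tag, "] ",
     logger_formatter:format(LogEvent, maps:remove(tag, Config))].

%% Accept our extra 'tag' key and delegate the rest to logger_formatter.
check_config(Config) ->
    logger_formatter:check_config(maps:remove(tag, Config)).

It could then be selected with, for example, logger:set_handler_config(default, formatter, {my_tag_formatter, #{tag => "web"}}).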
+ + +%%%----------------------------------------------------------------- +%%% Check that the configuration term is valid +check_common_config({mode_tab,_Tid}) -> + valid; +check_common_config({handler_pid,Pid}) when is_pid(Pid) -> + valid; + +check_common_config({sync_mode_qlen,N}) when is_integer(N) -> + valid; +check_common_config({drop_mode_qlen,N}) when is_integer(N) -> + valid; +check_common_config({flush_qlen,N}) when is_integer(N) -> + valid; + +check_common_config({burst_limit_enable,Bool}) when Bool == true; + Bool == false -> + valid; +check_common_config({burst_limit_max_count,N}) when is_integer(N) -> + valid; +check_common_config({burst_limit_window_time,N}) when is_integer(N) -> + valid; + +check_common_config({overload_kill_enable,Bool}) when Bool == true; + Bool == false -> + valid; +check_common_config({overload_kill_qlen,N}) when is_integer(N) -> + valid; +check_common_config({overload_kill_mem_size,N}) when is_integer(N) -> + valid; +check_common_config({overload_kill_restart_after,NorA}) when is_integer(NorA); + NorA == infinity -> + valid; + +check_common_config({filesync_repeat_interval,NorA}) when is_integer(NorA); + NorA == no_repeat -> + valid; +check_common_config(_) -> + invalid. + + +%%%----------------------------------------------------------------- +%%% Overload Protection +call_cast_or_drop(_Name, HandlerPid, ModeTab, Bin) -> + %% If the handler process is getting overloaded, the log event + %% will be synchronous instead of asynchronous (slows down the + %% logging tempo of a process doing lots of logging. If the + %% handler is choked, drop mode is set and no event will be sent. + try ?get_mode(ModeTab) of + async -> + gen_server:cast(HandlerPid, {log,Bin}); + sync -> + try gen_server:call(HandlerPid, {log,Bin}, ?DEFAULT_CALL_TIMEOUT) of + %% if return value from call == dropped, the + %% message has been flushed by handler and should + %% therefore not be counted as dropped in stats + ok -> ok; + dropped -> ok + catch + _:{timeout,_} -> + ?observe(_Name,{dropped,1}) + end; + drop -> + ?observe(_Name,{dropped,1}) + catch + %% if the ETS table doesn't exist (maybe because of a + %% handler restart), we can only drop the event + _:_ -> ?observe(_Name,{dropped,1}) + end, + ok. + +handler_exit(_Name, Reason) -> + exit(Reason). + +set_restart_flag(Name, Module) -> + Flag = list_to_atom(lists:concat([Module,"_",Name,"_restarting"])), + spawn(fun() -> + register(Flag, self()), + timer:sleep(infinity) + end), + ok. + +unset_restart_flag(Name, Module) -> + Flag = list_to_atom(lists:concat([Module,"_",Name,"_restarting"])), + case whereis(Flag) of + undefined -> + false; + Pid -> + exit(Pid, kill), + true + end. + +check_load(State = #{id:=_Name, mode_tab := ModeTab, mode := Mode, + sync_mode_qlen := SyncModeQLen, + drop_mode_qlen := DropModeQLen, + flush_qlen := FlushQLen}) -> + {_,Mem} = process_info(self(), memory), + ?observe(_Name,{max_mem,Mem}), + {_,QLen} = process_info(self(), message_queue_len), + ?observe(_Name,{max_qlen,QLen}), + %% When the handler process gets scheduled in, it's impossible + %% to predict the QLen. We could jump "up" arbitrarily from say + %% async to sync, async to drop, sync to flush, etc. However, when + %% the handler process manages the log events (without flushing), + %% one after the other, we will move "down" from drop to sync and + %% from sync to async. This way we don't risk getting stuck in + %% drop or sync mode with an empty mailbox. 
+    {Mode1,_NewDrops,_NewFlushes} =
+        if
+            QLen >= FlushQLen ->
+                {flush, 0,1};
+            QLen >= DropModeQLen ->
+                %% Note that drop mode will force log events to
+                %% be dropped on the client side (never get sent to
+                %% the handler).
+                IncDrops = if Mode == drop -> 0; true -> 1 end,
+                {?change_mode(ModeTab, Mode, drop), IncDrops,0};
+            QLen >= SyncModeQLen ->
+                {?change_mode(ModeTab, Mode, sync), 0,0};
+            true ->
+                {?change_mode(ModeTab, Mode, async), 0,0}
+        end,
+    State1 = ?update_other(drops,DROPS,_NewDrops,State),
+    {Mode1, QLen, Mem,
+     ?update_other(flushes,FLUSHES,_NewFlushes,
+                   State1#{last_qlen => QLen})}.
+
+limit_burst(#{burst_limit_enable := false}) ->
+     {true,0,0};
+limit_burst(#{burst_win_ts := BurstWinT0,
+              burst_msg_count := BurstMsgCount,
+              burst_limit_window_time := BurstLimitWinTime,
+              burst_limit_max_count := BurstLimitMaxCnt}) ->
+    if (BurstMsgCount >= BurstLimitMaxCnt) ->
+            %% the limit for allowed messages has been reached
+            BurstWinT1 = ?timestamp(),
+            case ?diff_time(BurstWinT1,BurstWinT0) of
+                BurstCheckTime when BurstCheckTime < (BurstLimitWinTime*1000) ->
+                    %% we're still within the burst time frame
+                    {false,BurstWinT0,BurstMsgCount};
+                _BurstCheckTime ->
+                    %% burst time frame passed, reset counters
+                    {true,BurstWinT1,0}
+            end;
+       true ->
+            %% the limit for allowed messages has not yet been reached
+            {true,BurstWinT0,BurstMsgCount+1}
+    end.
+
+kill_if_choked(Name, QLen, Mem, HandlerMod,
+               State = #{overload_kill_enable := KillIfOL,
+                         overload_kill_qlen := OLKillQLen,
+                         overload_kill_mem_size := OLKillMem}) ->
+    if KillIfOL andalso
+       ((QLen > OLKillQLen) orelse (Mem > OLKillMem)) ->
+            HandlerMod:log_handler_info(Name,
+                                        "Handler ~p overloaded and stopping",
+                                        [Name], State),
+            set_restart_flag(Name, HandlerMod),
+            handler_exit(Name, {shutdown,{overloaded,Name,QLen,Mem}});
+       true ->
+            ok
+    end.
+
+flush_log_events() ->
+    flush_log_events(-1).
+
+flush_log_events(Limit) ->
+    process_flag(priority, high),
+    Flushed = flush_log_events(0, Limit),
+    process_flag(priority, normal),
+    Flushed.
+
+flush_log_events(Limit, Limit) ->
+    Limit;
+flush_log_events(N, Limit) ->
+    %% flush log events but leave other events, such as
+    %% filesync, info and change_config, so that these
+    %% have a chance to be processed even under heavy load
+    receive
+        {'$gen_cast',{log,_}} ->
+            flush_log_events(N+1, Limit);
+        {'$gen_call',{Pid,MRef},{log,_}} ->
+            Pid ! {MRef, dropped},
+            flush_log_events(N+1, Limit)
+    after
+        0 -> N
+    end.
+
+cancel_timer(TRef) when is_atom(TRef) -> ok;
+cancel_timer(TRef) -> timer:cancel(TRef).
+
+
+stop_or_restart(Name, {shutdown,Reason={overloaded,_Name,_QLen,_Mem}},
+                #{overload_kill_restart_after := RestartAfter}) ->
+    %% If we're terminating because of an overload situation (see
+    %% logger_h_common:kill_if_choked/5), we need to remove the handler
+    %% and set a restart timer. A separate process must perform this
+    %% in order to avoid deadlock.
+ HandlerPid = self(), + ConfigResult = logger:get_handler_config(Name), + RemoveAndRestart = + fun() -> + MRef = erlang:monitor(process, HandlerPid), + receive + {'DOWN',MRef,_,_,_} -> + ok + after 30000 -> + error_notify(Reason), + exit(HandlerPid, kill) + end, + case ConfigResult of + {ok,#{module:=HMod}=HConfig0} when is_integer(RestartAfter) -> + _ = logger:remove_handler(Name), + HConfig = try HMod:filter_config(HConfig0) + catch _:_ -> HConfig0 + end, + _ = timer:apply_after(RestartAfter, logger, add_handler, + [Name,HMod,HConfig]); + {ok,_} -> + _ = logger:remove_handler(Name); + {error,CfgReason} when is_integer(RestartAfter) -> + error_notify({Name,restart_impossible,CfgReason}); + {error,_} -> + ok + end + end, + spawn(RemoveAndRestart), + ok; +stop_or_restart(_Name, _Reason, _State) -> + ok. + +overload_levels_ok(HandlerConfig) -> + SMQL = maps:get(sync_mode_qlen, HandlerConfig, ?SYNC_MODE_QLEN), + DMQL = maps:get(drop_mode_qlen, HandlerConfig, ?DROP_MODE_QLEN), + FQL = maps:get(flush_qlen, HandlerConfig, ?FLUSH_QLEN), + (DMQL > 1) andalso (SMQL =< DMQL) andalso (DMQL =< FQL). + +error_notify(Term) -> + ?internal_log(error, Term). + +info_notify(Term) -> + ?internal_log(info, Term). diff --git a/lib/kernel/src/logger_h_common.hrl b/lib/kernel/src/logger_h_common.hrl new file mode 100644 index 0000000000..e0a7b6e3ca --- /dev/null +++ b/lib/kernel/src/logger_h_common.hrl @@ -0,0 +1,263 @@ + +%%%----------------------------------------------------------------- +%%% Overload protection configuration + +%%! *** NOTE *** +%%! It's important that: +%%! SYNC_MODE_QLEN =< DROP_MODE_QLEN =< FLUSH_QLEN +%%! and that DROP_MODE_QLEN >= 2. +%%! Otherwise the handler could end up in drop mode with no new +%%! log requests to process. This would cause all future requests +%%! to be dropped (no switch to async mode would ever take place). + +%% This specifies the message_queue_len value where the log +%% requests switch from asynchronous casts to synchronous calls. +-define(SYNC_MODE_QLEN, 10). +%% Above this message_queue_len, log requests will be dropped, +%% i.e. no log requests get sent to the handler process. +-define(DROP_MODE_QLEN, 200). +%% Above this message_queue_len, the handler process will flush +%% its mailbox and only leave this number of messages in it. +-define(FLUSH_QLEN, 1000). + +%% Never flush more than this number of messages in one go, +%% or the handler will be unresponsive for seconds (keep this +%% number as large as possible or the mailbox could grow large). +-define(FLUSH_MAX_N, 5000). + +%% BURST_LIMIT_MAX_COUNT is the max number of log requests allowed +%% to be written within a BURST_LIMIT_WINDOW_TIME time frame. +-define(BURST_LIMIT_ENABLE, true). +-define(BURST_LIMIT_MAX_COUNT, 500). +-define(BURST_LIMIT_WINDOW_TIME, 1000). + +%% This enables/disables the feature to automatically get the +%% handler terminated if it gets too loaded (and can't keep up). +-define(OVERLOAD_KILL_ENABLE, false). +%% If the message_queue_len goes above this size even after +%% flushing has been performed, the handler is terminated. +-define(OVERLOAD_KILL_QLEN, 20000). +%% If the memory usage exceeds this level +-define(OVERLOAD_KILL_MEM_SIZE, 3000000). + +%% This is the default time that the handler will wait before +%% restarting and accepting new requests. The value 'infinity' +%% disables restarts. +-define(OVERLOAD_KILL_RESTART_AFTER, 5000). +%%-define(OVERLOAD_KILL_RESTART_AFTER, infinity). 
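These limits surface as the handler configuration keys validated by check_common_config/1 in logger_h_common.erl. A hedged sketch of relaxing them for a handler that has to absorb occasional bursts; the values are arbitrary, and setting the config sub-map may replace rather than merge handler-specific options depending on the OTP version:

ok = logger:set_handler_config(default, config,
        #{sync_mode_qlen => 100,                  % switch cast->call later
          drop_mode_qlen => 1000,                 % start dropping later
          flush_qlen => 5000,                     % flush threshold
          burst_limit_enable => true,
          burst_limit_max_count => 2000,          % events per window
          burst_limit_window_time => 1000,        % window in ms
          overload_kill_enable => true,
          overload_kill_qlen => 50000,
          overload_kill_mem_size => 50 * 1024 * 1024,
          overload_kill_restart_after => 10000}). % ms, or 'infinity'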
+ +%% The handler sends asynchronous write requests to the process +%% controlling the i/o device, but every once in this interval +%% will the write request be synchronous, so that the i/o device +%% process doesn't get overloaded. This gives the handler time +%% to keep up with its mailbox in overload situations, even if +%% the i/o is slow. +-define(CONTROLLER_SYNC_INTERVAL, 20). +%% The handler will not perform a file sync operation if the +%% mailbox size is greater than this number. This is to ensure +%% the handler process doesn't get overloaded while waiting for +%% an expensive file sync operation to finish. +-define(FILESYNC_OK_QLEN, 2). +%% Do a file/disk_log sync operation every integer() millisec +%% (if necessary) or set to 'no_repeat' to only do file sync when +%% the handler is idle. Note that file sync is not guaranteed to +%% happen automatically if this operation is disabled. +-define(FILESYNC_REPEAT_INTERVAL, 5000). +%%-define(FILESYNC_REPEAT_INTERVAL, no_repeat). + +%% This is the time after last message received that we think/hope +%% that the handler has an empty mailbox (no new log request has +%% come in). +-define(IDLE_DETECT_TIME_MSEC, 100). +-define(IDLE_DETECT_TIME_USEC, 100000). + +%% Default disk log option values +-define(DISK_LOG_TYPE, wrap). +-define(DISK_LOG_MAX_NO_FILES, 10). +-define(DISK_LOG_MAX_NO_BYTES, 1048576). + +%%%----------------------------------------------------------------- +%%% Utility macros + +-define(name_to_reg_name(MODULE,Name), + list_to_atom(lists:concat([MODULE,"_",Name]))). + +%%%----------------------------------------------------------------- +%%% Overload protection macros + +-define(timestamp(), erlang:monotonic_time(microsecond)). + +-define(get_mode(Tid), + case ets:lookup(Tid, mode) of + [{mode,M}] -> M; + _ -> async + end). + +-define(set_mode(Tid, M), + begin ets:insert(Tid, {mode,M}), M end). + +-define(change_mode(Tid, M0, M1), + if M0 == M1 -> + M0; + true -> + ets:insert(Tid, {mode,M1}), + M1 + end). + +-define(min(X1, X2), + if X2 == undefined -> X1; + X2 < X1 -> X2; + true -> X1 + end). + +-define(max(X1, X2), + if + X2 == undefined -> X1; + X2 > X1 -> X2; + true -> X1 + end). + +-define(diff_time(OS_T1, OS_T0), OS_T1-OS_T0). + +%%%----------------------------------------------------------------- +%%% The test hook macros make it possible to observe and manipulate +%%% internal handler functionality. When enabled, these macros will +%%% slow down execution and therefore should not be include in code +%%% to be officially released. + +%%-define(TEST_HOOKS, true). +-ifdef(TEST_HOOKS). + -define(TEST_HOOKS_TAB, logger_h_test_hooks). + + -define(init_test_hooks(), + _ = case ets:whereis(?TEST_HOOKS_TAB) of + undefined -> ets:new(?TEST_HOOKS_TAB, [named_table,public]); + _ -> ok + end, + ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}), + ets:insert(?TEST_HOOKS_TAB, {file_write,ok}), + ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}), + ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}), + ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})). + + -define(set_internal_log(MOD_FUNC), + ets:insert(?TEST_HOOKS_TAB, {internal_log,MOD_FUNC})). + + -define(set_result(OPERATION, RESULT), + ets:insert(?TEST_HOOKS_TAB, {OPERATION,RESULT})). 
+ + -define(set_defaults(), + ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}), + ets:insert(?TEST_HOOKS_TAB, {file_write,ok}), + ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}), + ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}), + ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})). + + -define(internal_log(TYPE, TERM), + try ets:lookup(?TEST_HOOKS_TAB, internal_log) of + [{_,{LMOD,LFUNC}}] -> apply(LMOD, LFUNC, [TYPE,TERM]); + _ -> logger:internal_log(TYPE, TERM) + catch _:_ -> logger:internal_log(TYPE, TERM) end). + + -define(file_write(DEVICE, DATA), + try ets:lookup(?TEST_HOOKS_TAB, file_write) of + [{_,ok}] -> file:write(DEVICE, DATA); + [{_,ERROR}] -> ERROR + catch _:_ -> file:write(DEVICE, DATA) end). + + -define(file_datasync(DEVICE), + try ets:lookup(?TEST_HOOKS_TAB, file_datasync) of + [{_,ok}] -> file:datasync(DEVICE); + [{_,ERROR}] -> ERROR + catch _:_ -> file:datasync(DEVICE) end). + + -define(disk_log_blog(LOG, DATA), + try ets:lookup(?TEST_HOOKS_TAB, disk_log_blog) of + [{_,ok}] -> disk_log:blog(LOG, DATA); + [{_,ERROR}] -> ERROR + catch _:_ -> disk_log:blog(LOG, DATA) end). + + -define(disk_log_sync(LOG), + try ets:lookup(?TEST_HOOKS_TAB, disk_log_sync) of + [{_,ok}] -> disk_log:sync(LOG); + [{_,ERROR}] -> ERROR + catch _:_ -> disk_log:sync(LOG) end). + + -define(DEFAULT_CALL_TIMEOUT, 5000). + +-else. % DEFAULTS! + -define(TEST_HOOKS_TAB, undefined). + -define(init_test_hooks(), ok). + -define(set_internal_log(_MOD_FUNC), ok). + -define(set_result(_OPERATION, _RESULT), ok). + -define(set_defaults(), ok). + -define(internal_log(TYPE, TERM), logger:internal_log(TYPE, TERM)). + -define(file_write(DEVICE, DATA), file:write(DEVICE, DATA)). + -define(file_datasync(DEVICE), file:datasync(DEVICE)). + -define(disk_log_blog(LOG, DATA), disk_log:blog(LOG, DATA)). + -define(disk_log_sync(LOG), disk_log:sync(LOG)). + -define(DEFAULT_CALL_TIMEOUT, 10000). +-endif. + +%%%----------------------------------------------------------------- +%%% These macros enable statistics counters in the state of the +%%% handler which is useful for analysing the overload protection +%%% behaviour. These counters should not be included in code to be +%%% officially released (as some counters will grow very large +%%% over time). + +%%-define(SAVE_STATS, true). +-ifdef(SAVE_STATS). + -define(merge_with_stats(STATE), + STATE#{flushes => 0, flushed => 0, drops => 0, + casts => 0, calls => 0, + max_qlen => 0, max_time => 0}). + + -define(update_max_qlen(QLEN, STATE), + begin #{max_qlen := QLEN0} = STATE, + STATE#{max_qlen => ?max(QLEN0,QLEN)} end). + + -define(update_calls_or_casts(CALL_OR_CAST, INC, STATE), + case CALL_OR_CAST of + cast -> + #{casts := CASTS0} = STATE, + STATE#{casts => CASTS0+INC}; + call -> + #{calls := CALLS0} = STATE, + STATE#{calls => CALLS0+INC} + end). + + -define(update_max_time(TIME, STATE), + begin #{max_time := TIME0} = STATE, + STATE#{max_time => ?max(TIME0,TIME)} end). + + -define(update_other(OTHER, VAR, INCVAL, STATE), + begin #{OTHER := VAR} = STATE, + STATE#{OTHER => VAR+INCVAL} end). + +-else. % DEFAULT! + -define(merge_with_stats(STATE), STATE). + -define(update_max_qlen(_QLEN, STATE), STATE). + -define(update_calls_or_casts(_CALL_OR_CAST, _INC, STATE), STATE). + -define(update_max_time(_TIME, STATE), STATE). + -define(update_other(_OTHER, _VAR, _INCVAL, STATE), STATE). +-endif. 
+ +%%%----------------------------------------------------------------- +%%% These macros enable callbacks that make it possible to analyse +%%% the overload protection behaviour from outside the handler +%%% process (including dropped requests on the client side). +%%% An external callback module (?OBSERVER_MOD) is required which +%%% is not part of the kernel application. For this reason, these +%%% callbacks should not be included in code to be officially released. + +%%-define(OBSERVER_MOD, logger_test). +-ifdef(OBSERVER_MOD). + -define(start_observation(NAME), ?OBSERVER:start_observation(NAME)). + -define(observe(NAME,EVENT), ?OBSERVER:observe(NAME,EVENT)). + +-else. % DEFAULT! + -define(start_observation(_NAME), ok). + -define(observe(_NAME,_EVENT), ok). +-endif. +%%! <--- diff --git a/lib/kernel/src/logger_handler_watcher.erl b/lib/kernel/src/logger_handler_watcher.erl new file mode 100644 index 0000000000..b75c74c643 --- /dev/null +++ b/lib/kernel/src/logger_handler_watcher.erl @@ -0,0 +1,113 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_handler_watcher). + +-behaviour(gen_server). + +%% API +-export([start_link/0]). +-export([register_handler/2]). + +%% gen_server callbacks +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]). + +-define(SERVER, ?MODULE). + +-record(state, {handlers}). + +%%%=================================================================== +%%% API +%%%=================================================================== + +-spec start_link() -> {ok, Pid :: pid()} | + {error, Error :: {already_started, pid()}} | + {error, Error :: term()} | + ignore. +start_link() -> + gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). + +-spec register_handler(Id::logger:handler_id(),Pid::pid()) -> ok. +register_handler(Id,Pid) -> + gen_server:call(?SERVER,{register,Id,Pid}). + +%%%=================================================================== +%%% gen_server callbacks +%%%=================================================================== + +-spec init(Args :: term()) -> {ok, State :: term()} | + {ok, State :: term(), Timeout :: timeout()} | + {ok, State :: term(), hibernate} | + {stop, Reason :: term()} | + ignore. +init([]) -> + process_flag(trap_exit, true), + {ok, #state{handlers=[]}}. + +-spec handle_call(Request :: term(), From :: {pid(), term()}, State :: term()) -> + {reply, Reply :: term(), NewState :: term()} | + {reply, Reply :: term(), NewState :: term(), Timeout :: timeout()} | + {reply, Reply :: term(), NewState :: term(), hibernate} | + {noreply, NewState :: term()} | + {noreply, NewState :: term(), Timeout :: timeout()} | + {noreply, NewState :: term(), hibernate} | + {stop, Reason :: term(), Reply :: term(), NewState :: term()} | + {stop, Reason :: term(), NewState :: term()}. 
+handle_call({register,Id,Pid}, _From, #state{handlers=Hs}=State) -> + Ref = erlang:monitor(process,Pid), + Hs1 = lists:keystore(Id,1,Hs,{Id,Ref}), + {reply, ok, State#state{handlers=Hs1}}. + +-spec handle_cast(Request :: term(), State :: term()) -> + {noreply, NewState :: term()} | + {noreply, NewState :: term(), Timeout :: timeout()} | + {noreply, NewState :: term(), hibernate} | + {stop, Reason :: term(), NewState :: term()}. +handle_cast(_Request, State) -> + {noreply, State}. + +-spec handle_info(Info :: timeout() | term(), State :: term()) -> + {noreply, NewState :: term()} | + {noreply, NewState :: term(), Timeout :: timeout()} | + {noreply, NewState :: term(), hibernate} | + {stop, Reason :: normal | term(), NewState :: term()}. +handle_info({'DOWN',Ref,process,_,shutdown}, #state{handlers=Hs}=State) -> + case lists:keytake(Ref,2,Hs) of + {value,{Id,Ref},Hs1} -> + %% Probably terminated by supervisor. Remove the handler to avoid + %% error printouts due to failing handler. + _ = case logger:get_handler_config(Id) of + {ok,_} -> + logger:remove_handler(Id); + _ -> + ok + end, + {noreply,State#state{handlers=Hs1}}; + false -> + {noreply, State} + end; +handle_info({'DOWN',Ref,process,_,_OtherReason}, #state{handlers=Hs}=State) -> + {noreply,State#state{handlers=lists:keydelete(Ref,2,Hs)}}; +handle_info(_Other,State) -> + {noreply,State}. + +-spec terminate(Reason :: normal | shutdown | {shutdown, term()} | term(), + State :: term()) -> any(). +terminate(_Reason, _State) -> + ok. diff --git a/lib/kernel/src/logger_internal.hrl b/lib/kernel/src/logger_internal.hrl new file mode 100644 index 0000000000..d96a4ac78b --- /dev/null +++ b/lib/kernel/src/logger_internal.hrl @@ -0,0 +1,101 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-include_lib("kernel/include/logger.hrl"). +-define(LOGGER_TABLE,logger). +-define(PRIMARY_KEY,'$primary_config$'). +-define(HANDLER_KEY,'$handler_config$'). +-define(LOGGER_META_KEY,'$logger_metadata$'). +-define(STANDARD_HANDLER, default). +-define(DEFAULT_HANDLER_FILTERS,?DEFAULT_HANDLER_FILTERS([otp])). +-define(DEFAULT_HANDLER_FILTERS(Domain), + [{remote_gl,{fun logger_filters:remote_gl/2,stop}}, + {domain,{fun logger_filters:domain/2,{log,super,Domain}}}, + {no_domain,{fun logger_filters:domain/2,{log,undefined,[]}}}]). +-define(DEFAULT_FORMATTER,logger_formatter). +-define(DEFAULT_FORMAT_CONFIG,#{legacy_header=>true, + single_line=>false}). +-define(DEFAULT_FORMAT_TEMPLATE_HEADER, + [[logger_formatter,header],"\n",msg,"\n"]). +-define(DEFAULT_FORMAT_TEMPLATE_SINGLE, + [time," ",level,": ",msg,"\n"]). +-define(DEFAULT_FORMAT_TEMPLATE, + [time," ",level,":\n",msg,"\n"]). + +-define(DEFAULT_LOGGER_CALL_TIMEOUT, infinity). 
+ +-define(LOG_INTERNAL(Level,Report), + case logger:allow(Level,?MODULE) of + true -> + %% Spawn this to avoid deadlocks + _ = spawn(logger,macro_log,[?LOCATION,Level,Report, + logger:add_default_metadata(#{})]), + ok; + false -> + ok + end). + +%%%----------------------------------------------------------------- +%%% Levels +%%% Using same as syslog +-define(LEVELS,[none, + emergency, + alert, + critical, + error, + warning, + notice, + info, + debug, + all]). +-define(LOG_NONE,-1). +-define(EMERGENCY,0). +-define(ALERT,1). +-define(CRITICAL,2). +-define(ERROR,3). +-define(WARNING,4). +-define(NOTICE,5). +-define(INFO,6). +-define(DEBUG,7). +-define(LOG_ALL,10). + +-define(IS_LEVEL(L), + (L=:=emergency orelse + L=:=alert orelse + L=:=critical orelse + L=:=error orelse + L=:=warning orelse + L=:=notice orelse + L=:=info orelse + L=:=debug)). + +-define(IS_MSG(Msg), + ((is_tuple(Msg) andalso tuple_size(Msg)==2) + andalso + (is_list(element(1,Msg)) andalso is_list(element(2,Msg))) + orelse + (element(1,Msg)==report andalso ?IS_REPORT(element(2,Msg))) + orelse + (element(1,Msg)==string andalso ?IS_STRING(element(2,Msg))))). + +-define(IS_REPORT(Report), + (is_map(Report) orelse (is_list(Report) andalso is_tuple(hd(Report))))). + +-define(IS_STRING(String), + (is_list(String) orelse is_binary(String))). diff --git a/lib/kernel/src/logger_server.erl b/lib/kernel/src/logger_server.erl new file mode 100644 index 0000000000..b7735dbcf7 --- /dev/null +++ b/lib/kernel/src/logger_server.erl @@ -0,0 +1,595 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_server). + +-behaviour(gen_server). + +%% API +-export([start_link/0, + add_handler/3, remove_handler/1, + add_filter/2, remove_filter/2, + set_module_level/2, unset_module_level/0, + unset_module_level/1, cache_module_level/1, + set_config/2, set_config/3, + update_config/2, update_config/3, + update_formatter_config/2]). + +%% Helper +-export([diff_maps/2]). + +%% gen_server callbacks +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2]). + +-include("logger_internal.hrl"). + +-define(SERVER, logger). +-define(LOGGER_SERVER_TAG, '$logger_cb_process'). + +-record(state, {tid, async_req, async_req_queue}). + +%%%=================================================================== +%%% API +%%%=================================================================== + +start_link() -> + gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). + +add_handler(Id,Module,Config0) -> + try {check_id(Id),check_mod(Module)} of + {ok,ok} -> + case sanity_check(Id,Config0) of + ok -> + Default = default_config(Id,Module), + Config = maps:merge(Default,Config0), + call({add_handler,Id,Module,Config}); + Error -> + Error + end + catch throw:Error -> + {error,Error} + end. + +remove_handler(HandlerId) -> + call({remove_handler,HandlerId}). 
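add_handler/3 merges the given config with defaults and, in the corresponding handle_call clause further down, refuses the module unless it exports log/2; adding_handler/1 and removing_handler/1 are called when present. A minimal hypothetical handler module that satisfies this contract:

-module(my_console_h).                          %% hypothetical handler
-export([adding_handler/1, removing_handler/1, log/2]).

%% Called before the handler is installed; may adjust its own config.
adding_handler(Config) ->
    {ok, Config#{prefix => maps:get(prefix, Config, ">>")}}.

%% Mandatory callback: runs in the client process for a simple handler.
log(LogEvent, #{formatter := {FMod, FConf}, prefix := Prefix}) ->
    io:put_chars([Prefix, " ", FMod:format(LogEvent, FConf)]).

%% Called when the handler is removed.
removing_handler(_Config) ->
    ok.

It could be installed with logger:add_handler(my_console, my_console_h, #{prefix => "APP"}); arbitrary extra keys are allowed in handler configs, as check_config(handler, ...) later in this file shows.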
+ +add_filter(Owner,Filter) -> + case sanity_check(Owner,filters,[Filter]) of + ok -> call({add_filter,Owner,Filter}); + Error -> Error + end. + +remove_filter(Owner,FilterId) -> + call({remove_filter,Owner,FilterId}). + +set_module_level(Modules,Level) when is_list(Modules) -> + case lists:all(fun(M) -> is_atom(M) end,Modules) of + true -> + case sanity_check(primary,level,Level) of + ok -> call({set_module_level,Modules,Level}); + Error -> Error + end; + false -> + {error,{not_a_list_of_modules,Modules}} + end; +set_module_level(Modules,_) -> + {error,{not_a_list_of_modules,Modules}}. + +unset_module_level() -> + call({unset_module_level,all}). + +unset_module_level(Modules) when is_list(Modules) -> + case lists:all(fun(M) -> is_atom(M) end,Modules) of + true -> + call({unset_module_level,Modules}); + false -> + {error,{not_a_list_of_modules,Modules}} + end; +unset_module_level(Modules) -> + {error,{not_a_list_of_modules,Modules}}. + +cache_module_level(Module) -> + gen_server:cast(?SERVER,{cache_module_level,Module}). + +set_config(Owner,Key,Value) -> + case sanity_check(Owner,Key,Value) of + ok -> + call({change_config,set,Owner,Key,Value}); + Error -> + Error + end. + +set_config(Owner,Config) -> + case sanity_check(Owner,Config) of + ok -> + call({change_config,set,Owner,Config}); + Error -> + Error + end. + +update_config(Owner,Key,Value) -> + case sanity_check(Owner,Key,Value) of + ok -> + call({change_config,update,Owner,Key,Value}); + Error -> + Error + end. + +update_config(Owner, Config) -> + case sanity_check(Owner,Config) of + ok -> + call({change_config,update,Owner,Config}); + Error -> + Error + end. + +update_formatter_config(HandlerId, FormatterConfig) + when is_map(FormatterConfig) -> + call({update_formatter_config,HandlerId,FormatterConfig}); +update_formatter_config(_HandlerId, FormatterConfig) -> + {error,{invalid_formatter_config,FormatterConfig}}. + + +%%%=================================================================== +%%% gen_server callbacks +%%%=================================================================== + +init([]) -> + process_flag(trap_exit, true), + put(?LOGGER_SERVER_TAG,true), + Tid = logger_config:new(?LOGGER_TABLE), + PrimaryConfig = maps:merge(default_config(primary), + #{handlers=>[simple]}), + logger_config:create(Tid,primary,PrimaryConfig), + SimpleConfig0 = maps:merge(default_config(simple,logger_simple_h), + #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS}), + %% If this fails, then the node should crash + {ok,SimpleConfig} = logger_simple_h:adding_handler(SimpleConfig0), + logger_config:create(Tid,simple,SimpleConfig), + {ok, #state{tid=Tid, async_req_queue = queue:new()}}. 
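These functions back the corresponding calls in the public logger API. A short sketch of the per-module level and formatter-config operations (the module name mymod is illustrative):

%% Let one noisy module log at debug while the primary level stays put:
ok = logger:set_module_level([mymod], debug),

%% ...and remove the override again:
ok = logger:unset_module_level([mymod]),

%% Merge new options into the default handler's formatter configuration
%% (served by update_formatter_config/2 above):
ok = logger:update_formatter_config(default, #{single_line => false,
                                               legacy_header => true}).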
+ +handle_call({add_handler,Id,Module,HConfig}, From, #state{tid=Tid}=State) -> + case logger_config:exist(Tid,Id) of + true -> + {reply,{error,{already_exist,Id}},State}; + false -> + call_h_async( + fun() -> + %% inform the handler + call_h(Module,adding_handler,[HConfig],{ok,HConfig}) + end, + fun({ok,HConfig1}) -> + %% We know that the call_h would have loaded the module + %% if it existed, so it is safe here to call function_exported + %% to find out if this is a valid handler + case erlang:function_exported(Module, log, 2) of + true -> + logger_config:create(Tid,Id,HConfig1), + {ok,Config} = logger_config:get(Tid,primary), + Handlers = maps:get(handlers,Config,[]), + logger_config:set(Tid,primary, + Config#{handlers=>[Id|Handlers]}); + false -> + {error,{invalid_handler, + {function_not_exported, + {Module,log,2}}}} + end; + ({error,HReason}) -> + {error,{handler_not_added,HReason}} + end,From,State) + end; +handle_call({remove_handler,HandlerId}, From, #state{tid=Tid}=State) -> + case logger_config:get(Tid,HandlerId) of + {ok,#{module:=Module}=HConfig} -> + {ok,Config} = logger_config:get(Tid,primary), + Handlers0 = maps:get(handlers,Config,[]), + Handlers = lists:delete(HandlerId,Handlers0), + call_h_async( + fun() -> + %% inform the handler + call_h(Module,removing_handler,[HConfig],ok) + end, + fun(_Res) -> + logger_config:set(Tid,primary,Config#{handlers=>Handlers}), + logger_config:delete(Tid,HandlerId), + ok + end,From,State); + _ -> + {reply,{error,{not_found,HandlerId}},State} + end; +handle_call({add_filter,Id,Filter}, _From,#state{tid=Tid}=State) -> + Reply = do_add_filter(Tid,Id,Filter), + {reply,Reply,State}; +handle_call({remove_filter,Id,FilterId}, _From, #state{tid=Tid}=State) -> + Reply = do_remove_filter(Tid,Id,FilterId), + {reply,Reply,State}; +handle_call({change_config,SetOrUpd,primary,Config0}, _From, + #state{tid=Tid}=State) -> + {ok,#{handlers:=Handlers}=OldConfig} = logger_config:get(Tid,primary), + Default = + case SetOrUpd of + set -> default_config(primary); + update -> OldConfig + end, + Config = maps:merge(Default,Config0), + Reply = logger_config:set(Tid,primary,Config#{handlers=>Handlers}), + {reply,Reply,State}; +handle_call({change_config,_SetOrUpd,primary,Key,Value}, _From, + #state{tid=Tid}=State) -> + {ok,OldConfig} = logger_config:get(Tid,primary), + Reply = logger_config:set(Tid,primary,OldConfig#{Key=>Value}), + {reply,Reply,State}; +handle_call({change_config,SetOrUpd,HandlerId,Config0}, From, + #state{tid=Tid}=State) -> + case logger_config:get(Tid,HandlerId) of + {ok,#{module:=Module}=OldConfig} -> + Default = + case SetOrUpd of + set -> default_config(HandlerId,Module); + update -> OldConfig + end, + Config = maps:merge(Default,Config0), + case check_config_change(OldConfig,Config) of + ok -> + call_h_async( + fun() -> + call_h(Module,changing_config, + [SetOrUpd,OldConfig,Config], + {ok,Config}) + end, + fun({ok,Config1}) -> + logger_config:set(Tid,HandlerId,Config1); + (Error) -> + Error + end,From,State); + Error -> + {reply,Error,State} + end; + _ -> + {reply,{error,{not_found,HandlerId}},State} + end; +handle_call({change_config,SetOrUpd,HandlerId,Key,Value}, From, + #state{tid=Tid}=State) -> + case logger_config:get(Tid,HandlerId) of + {ok,#{module:=Module}=OldConfig} -> + Config = OldConfig#{Key=>Value}, + case check_config_change(OldConfig,Config) of + ok -> + call_h_async( + fun() -> + call_h(Module,changing_config, + [SetOrUpd,OldConfig,Config], + {ok,Config}) + end, + fun({ok,Config1}) -> + 
logger_config:set(Tid,HandlerId,Config1); + (Error) -> + Error + end,From,State); + Error -> + {reply,Error,State} + end; + _ -> + {reply,{error,{not_found,HandlerId}},State} + end; +handle_call({update_formatter_config,HandlerId,NewFConfig},_From, + #state{tid=Tid}=State) -> + Reply = + case logger_config:get(Tid,HandlerId) of + {ok,#{formatter:={FMod,OldFConfig}}=Config} -> + try + FConfig = maps:merge(OldFConfig,NewFConfig), + check_formatter({FMod,FConfig}), + logger_config:set(Tid,HandlerId, + Config#{formatter=>{FMod,FConfig}}) + catch throw:Reason -> {error,Reason} + end; + _ -> + {error,{not_found,HandlerId}} + end, + {reply,Reply,State}; +handle_call({set_module_level,Modules,Level}, _From, #state{tid=Tid}=State) -> + Reply = logger_config:set_module_level(Tid,Modules,Level), + {reply,Reply,State}; +handle_call({unset_module_level,Modules}, _From, #state{tid=Tid}=State) -> + Reply = logger_config:unset_module_level(Tid,Modules), + {reply,Reply,State}. + +handle_cast({async_req_reply,_Ref,_Reply} = Reply,State) -> + call_h_reply(Reply,State); +handle_cast({cache_module_level,Module}, #state{tid=Tid}=State) -> + logger_config:cache_module_level(Tid,Module), + {noreply, State}. + +%% Interface for those who can't call the API - e.g. the emulator, or +%% places related to code loading. +%% +%% This can also be log events from remote nodes which are sent from +%% logger.erl when the group leader of the client process is on a +%% same node as the client process itself. +handle_info({log,Level,Format,Args,Meta}, State) -> + logger:log(Level,Format,Args,Meta), + {noreply, State}; +handle_info({log,Level,Report,Meta}, State) -> + logger:log(Level,Report,Meta), + {noreply, State}; +handle_info({Ref,_Reply},State) when is_reference(Ref) -> + %% Assuming this is a timed-out gen_server reply - ignoring + {noreply, State}; +handle_info({'DOWN',_Ref,_Proc,_Pid,_Reason} = Down,State) -> + call_h_reply(Down,State); +handle_info(Unexpected,State) when element(1,Unexpected) == 'EXIT' -> + %% The simple handler will send an 'EXIT' message when it is replaced + %% We may as well ignore all 'EXIT' messages that we get + ?LOG_INTERNAL(debug, + [{logger,got_unexpected_message}, + {process,?SERVER}, + {message,Unexpected}]), + {noreply,State}; +handle_info(Unexpected,State) -> + ?LOG_INTERNAL(info, + [{logger,got_unexpected_message}, + {process,?SERVER}, + {message,Unexpected}]), + {noreply,State}. + +terminate(_Reason, _State) -> + ok. + +%%%=================================================================== +%%% Internal functions +%%%=================================================================== +call(Request) -> + Action = element(1,Request), + case get(?LOGGER_SERVER_TAG) of + true when + Action == add_handler; Action == remove_handler; + Action == add_filter; Action == remove_filter; + Action == change_config -> + {error,{attempting_syncronous_call_to_self,Request}}; + _ -> + gen_server:call(?SERVER,Request,?DEFAULT_LOGGER_CALL_TIMEOUT) + end. + +do_add_filter(Tid,Id,{FId,_} = Filter) -> + case logger_config:get(Tid,Id) of + {ok,Config} -> + Filters = maps:get(filters,Config,[]), + case lists:keymember(FId,1,Filters) of + true -> + {error,{already_exist,FId}}; + false -> + logger_config:set(Tid,Id,Config#{filters=>[Filter|Filters]}) + end; + Error -> + Error + end. 
+ +do_remove_filter(Tid,Id,FilterId) -> + case logger_config:get(Tid,Id) of + {ok,Config} -> + Filters0 = maps:get(filters,Config,[]), + case lists:keytake(FilterId,1,Filters0) of + {value,_,Filters} -> + logger_config:set(Tid,Id,Config#{filters=>Filters}); + false -> + {error,{not_found,FilterId}} + end; + Error -> + Error + end. + +default_config(primary) -> + #{level=>notice, + filters=>[], + filter_default=>log}; +default_config(Id) -> + #{id=>Id, + level=>all, + filters=>[], + filter_default=>log, + formatter=>{?DEFAULT_FORMATTER,#{}}}. +default_config(Id,Module) -> + (default_config(Id))#{module=>Module}. + +sanity_check(Owner,Key,Value) -> + sanity_check_1(Owner,[{Key,Value}]). + +sanity_check(HandlerId,Config) when is_map(Config) -> + sanity_check_1(HandlerId,maps:to_list(Config)); +sanity_check(_,Config) -> + {error,{invalid_config,Config}}. + +sanity_check_1(Owner,Config) when is_list(Config) -> + try + Type = get_type(Owner), + check_config(Type,Config) + catch throw:Error -> {error,Error} + end. + +get_type(primary) -> + primary; +get_type(Id) -> + check_id(Id), + handler. + +check_config(Owner,[{level,Level}|Config]) -> + check_level(Level), + check_config(Owner,Config); +check_config(Owner,[{filters,Filters}|Config]) -> + check_filters(Filters), + check_config(Owner,Config); +check_config(Owner,[{filter_default,FD}|Config]) -> + check_filter_default(FD), + check_config(Owner,Config); +check_config(handler,[{formatter,Formatter}|Config]) -> + check_formatter(Formatter), + check_config(handler,Config); +check_config(primary,[C|_]) -> + throw({invalid_primary_config,C}); +check_config(handler,[{_,_}|Config]) -> + %% Arbitrary config elements are allowed for handlers + check_config(handler,Config); +check_config(_,[]) -> + ok. + +check_id(Id) when is_atom(Id) -> + ok; +check_id(Id) -> + throw({invalid_id,Id}). + +check_mod(Mod) when is_atom(Mod) -> + ok; +check_mod(Mod) -> + throw({invalid_module,Mod}). + +check_level(Level) -> + case lists:member(Level,?LEVELS) of + true -> + ok; + false -> + throw({invalid_level,Level}) + end. + +check_filters([{Id,{Fun,_Args}}|Filters]) when is_atom(Id), is_function(Fun,2) -> + check_filters(Filters); +check_filters([Filter|_]) -> + throw({invalid_filter,Filter}); +check_filters([]) -> + ok; +check_filters(Filters) -> + throw({invalid_filters,Filters}). + +check_filter_default(FD) when FD==stop; FD==log -> + ok; +check_filter_default(FD) -> + throw({invalid_filter_default,FD}). + +check_formatter({Mod,Config}) -> + check_mod(Mod), + try Mod:check_config(Config) of + ok -> ok; + {error,Error} -> throw(Error) + catch + C:R:S -> + case {C,R,S} of + {error,undef,[{Mod,check_config,[Config],_}|_]} -> + ok; + _ -> + throw({callback_crashed, + {C,R,logger:filter_stacktrace(?MODULE,S)}}) + end + end; +check_formatter(Formatter) -> + throw({invalid_formatter,Formatter}). + +%% When changing configuration for a handler, the id and module fields +%% can not be changed. +check_config_change(#{id:=Id,module:=Module},#{id:=Id,module:=Module}) -> + ok; +check_config_change(OldConfig,NewConfig) -> + {Old,New} = logger_server:diff_maps(maps:with([id,module],OldConfig), + maps:with([id,module],NewConfig)), + {error,{illegal_config_change,Old,New}}. + +call_h(Module, Function, Args, DefRet) -> + %% Not calling code:ensure_loaded + erlang:function_exported here, + %% since in some rare terminal cases, the code_server might not + %% exist and we'll get a deadlock in removing the handler. 
+ try apply(Module, Function, Args) + catch + C:R:S -> + case {C,R,S} of + {error,undef,[{Module,Function=changing_config,Args,_}|_]} + when length(Args)=:=3 -> + %% Backwards compatible call, if changing_config/3 + %% did not exist. + call_h(Module, Function, tl(Args), DefRet); + {error,undef,[{Module,Function,Args,_}|_]} -> + DefRet; + _ -> + ST = logger:filter_stacktrace(?MODULE,S), + ?LOG_INTERNAL(error, + [{logger,callback_crashed}, + {process,?SERVER}, + {reason,{C,R,ST}}]), + {error,{callback_crashed,{C,R,ST}}} + end + end. + +%% There are all sort of API functions that can cause deadlocks if called +%% from the handler callbacks. So we spawn a process that does the request +%% for the logger_server. There are still APIs that will cause problems, +%% namely logger:add_handler +call_h_async(AsyncFun,PostFun,From,#state{ async_req = undefined } = State) -> + Parent = self(), + {Pid, Ref} = spawn_monitor( + fun() -> + put(?LOGGER_SERVER_TAG,true), + receive Ref -> Ref end, + gen_server:cast(Parent, {async_req_reply, Ref, AsyncFun()}) + end), + Pid ! Ref, + {noreply,State#state{ async_req = {Ref,PostFun,From} }}; +call_h_async(AsyncFun,PostFun,From,#state{ async_req_queue = Q } = State) -> + {noreply,State#state{ async_req_queue = queue:in({AsyncFun,PostFun,From},Q) }}. + +call_h_reply({async_req_reply,Ref,Reply}, + #state{ async_req = {Ref,PostFun,From}, async_req_queue = Q} = State) -> + erlang:demonitor(Ref,[flush]), + _ = gen_server:reply(From, PostFun(Reply)), + {Value,NewQ} = queue:out(Q), + NewState = State#state{ async_req = undefined, + async_req_queue = NewQ }, + case Value of + {value,{AsyncFun,NPostFun,NFrom}} -> + call_h_async(AsyncFun,NPostFun,NFrom,NewState); + empty -> + {noreply,NewState} + end; +call_h_reply({'DOWN',Ref,_Proc,Pid,Reason}, #state{ async_req = {Ref,_PostFun,_From}} = State) -> + %% This clause should only be triggered if someone explicitly sends an exit signal + %% to the spawned process. It is only here to make sure that the logger_server does + %% not deadlock if that happens. + ?LOG_INTERNAL(error, + [{logger,process_exited}, + {process,Pid}, + {reason,Reason}]), + call_h_reply( + {async_req_reply,Ref,{error,{logger_process_exited,Pid,Reason}}}, + State); +call_h_reply(Unexpected,State) -> + ?LOG_INTERNAL(info, + [{logger,got_unexpected_message}, + {process,?SERVER}, + {message,Unexpected}]), + {noreply,State}. + +%% Return two maps containing only the fields that differ. +diff_maps(M1,M2) -> + diffs(lists:sort(maps:to_list(M1)),lists:sort(maps:to_list(M2)),#{},#{}). + +diffs([H|T1],[H|T2],D1,D2) -> + diffs(T1,T2,D1,D2); +diffs([{K,V1}|T1],[{K,V2}|T2],D1,D2) -> + diffs(T1,T2,D1#{K=>V1},D2#{K=>V2}); +diffs([],[],D1,D2) -> + {D1,D2}. diff --git a/lib/kernel/src/logger_simple_h.erl b/lib/kernel/src/logger_simple_h.erl new file mode 100644 index 0000000000..8b51dd8569 --- /dev/null +++ b/lib/kernel/src/logger_simple_h.erl @@ -0,0 +1,212 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_simple_h). + +-export([adding_handler/1, removing_handler/1, log/2]). + +%% This module implements a simple handler for logger. It is the +%% default used during system start. + +%%%----------------------------------------------------------------- +%%% Logger callback + +adding_handler(#{id:=simple}=Config) -> + Me = self(), + case whereis(?MODULE) of + undefined -> + {Pid,Ref} = spawn_opt(fun() -> init(Me) end, + [link,monitor,{message_queue_data,off_heap}]), + receive + {'DOWN',Ref,process,Pid,Reason} -> + {error,Reason}; + {Pid,started} -> + erlang:demonitor(Ref), + {ok,Config} + end; + _ -> + {error,{handler_process_name_already_exists,?MODULE}} + end. + +removing_handler(#{id:=simple}) -> + case whereis(?MODULE) of + undefined -> + ok; + Pid -> + Ref = erlang:monitor(process,Pid), + unlink(Pid), + Pid ! stop, + receive {'DOWN',Ref,process,Pid,_} -> + ok + end + end. + +log(#{meta:=#{error_logger:=#{tag:=info_report,type:=Type}}},_Config) + when Type=/=std_info -> + %% Skip info reports that are not 'std_info' (ref simple logger in + %% error_logger) + ok; +log(#{msg:=_,meta:=#{time:=_}}=Log,_Config) -> + _ = case whereis(?MODULE) of + undefined -> + %% Is the node on the way down? Real emergency? + %% Log directly from client just to get it out + do_log( + #{level=>error, + msg=>{report,{error,simple_handler_process_dead}}, + meta=>#{time=>erlang:system_time(microsecond)}}), + do_log(Log); + _ -> + ?MODULE ! {log,Log} + end, + ok; +log(_,_) -> + %% Unexpected log. + %% We don't want to crash the simple logger, so ignore this. + ok. + +%%%----------------------------------------------------------------- +%%% Process +init(Starter) -> + register(?MODULE,self()), + Starter ! {self(),started}, + loop(#{buffer_size=>10,dropped=>0,buffer=>[]}). + +loop(Buffer) -> + receive + stop -> + %% We replay the logger messages of there is + %% a default handler when the simple handler + %% is removed. + case logger:get_handler_config(default) of + {ok, _} -> + replay_buffer(Buffer); + _ -> + ok + end; + {log,#{msg:=_,meta:=#{time:=_}}=Log} -> + do_log(Log), + loop(update_buffer(Buffer,Log)); + _ -> + %% Unexpected message - flush it! + loop(Buffer) + end. + +update_buffer(#{buffer_size:=0,dropped:=D}=Buffer,_Log) -> + Buffer#{dropped=>D+1}; +update_buffer(#{buffer_size:=S,buffer:=B}=Buffer,Log) -> + Buffer#{buffer_size=>S-1,buffer=>[Log|B]}. + +replay_buffer(#{ dropped := D, buffer := Buffer }) -> + lists:foreach( + fun F(#{msg := {Tag, Msg}} = L) when Tag =:= string; Tag =:= report -> + F(L#{ msg := Msg }); + F(#{ level := Level, msg := Msg, meta := MD}) -> + logger:log(Level, Msg, MD) + end, lists:reverse(Buffer, drop_msg(D))). + +drop_msg(0) -> + []; +drop_msg(N) -> + [#{level=>info, + msg=>{"Simple handler buffer full, dropped ~w messages",[N]}, + meta=>#{time=>erlang:system_time(microsecond)}}]. + +%%%----------------------------------------------------------------- +%%% Internal + +%% Can't do io_lib:format + +do_log(#{msg:={report,Report}, + meta:=#{time:=T,error_logger:=#{type:=Type}}}) -> + display_date(T), + display_report(Type,Report); +do_log(#{msg:=Msg,meta:=#{time:=T}}) -> + display_date(T), + display(Msg). 
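
(Illustration only, not part of the patch.) The buffering in logger_simple_h above keeps just the first ten events; later events only bump the drop counter, and the count is reported when the buffer is replayed to the default handler. Conceptually, inside this module:

    %% update_buffer/2 is local to logger_simple_h; folding twelve events
    %% through it from the initial state used in init/1 keeps ten events
    %% and counts two drops. replay_buffer/1 then re-logs the kept events
    %% followed by one info event similar to:
    %%   "Simple handler buffer full, dropped 2 messages"
    Init = #{buffer_size => 10, dropped => 0, buffer => []},
    #{buffer_size := 0, dropped := 2, buffer := Kept} =
        lists:foldl(fun(N, Buf) ->
                            update_buffer(Buf, #{level => info,
                                                 msg   => {"event ~p", [N]},
                                                 meta  => #{time => N}})
                    end, Init, lists:seq(1, 12)),
    10 = length(Kept).
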
+ +display_date(Timestamp) when is_integer(Timestamp) -> + Micro = Timestamp rem 1000000, + Sec = Timestamp div 1000000, + {{Y,Mo,D},{H,Mi,S}} = erlang:universaltime_to_localtime( + erlang:posixtime_to_universaltime(Sec)), + erlang:display_string( + integer_to_list(Y) ++ "-" ++ + pad(Mo,2) ++ "-" ++ + pad(D,2) ++ " " ++ + pad(H,2) ++ ":" ++ + pad(Mi,2) ++ ":" ++ + pad(S,2) ++ "." ++ + pad(Micro,6) ++ " "). + +pad(Int,Size) when is_integer(Int) -> + pad(integer_to_list(Int),Size); +pad(Str,Size) when length(Str)==Size -> + Str; +pad(Str,Size) -> + pad([$0|Str],Size). + +display({string,Chardata}) -> + try unicode:characters_to_list(Chardata) of + String -> erlang:display_string(String), erlang:display_string("\n") + catch _:_ -> erlang:display(Chardata) + end; +display({report,Report}) when is_map(Report) -> + display_report(maps:to_list(Report)); +display({report,Report}) -> + display_report(Report); +display({F, A}) when is_list(F), is_list(A) -> + erlang:display_string(F ++ "\n"), + [begin + erlang:display_string("\t"), + erlang:display(Arg) + end || Arg <- A], + ok. + +display_report(Atom, A) when is_atom(Atom) -> + %% The widest atom seems to be 'supervisor_report' at 17. + ColumnWidth = 20, + AtomString = atom_to_list(Atom), + AtomLength = length(AtomString), + Padding = lists:duplicate(ColumnWidth - AtomLength, $\s), + erlang:display_string(AtomString ++ Padding), + display_report(A); +display_report(F, A) -> + erlang:display({F, A}). + +display_report([A, []]) -> + %% Special case for crash reports when process has no links + display_report(A); +display_report(A = [_|_]) -> + case lists:all(fun({Key,_Value}) -> is_atom(Key); (_) -> false end, A) of + true -> + erlang:display_string("\n"), + lists:foreach( + fun({Key, Value}) -> + erlang:display_string( + " " ++ + atom_to_list(Key) ++ + ": "), + erlang:display(Value) + end, A); + false -> + erlang:display(A) + end; +display_report(A) -> + erlang:display(A). diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl new file mode 100644 index 0000000000..42e0f5caf4 --- /dev/null +++ b/lib/kernel/src/logger_std_h.erl @@ -0,0 +1,843 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_std_h). + +-behaviour(gen_server). + +-include("logger.hrl"). +-include("logger_internal.hrl"). +-include("logger_h_common.hrl"). + +-include_lib("kernel/include/file.hrl"). + +%% API +-export([start_link/3, info/1, filesync/1, reset/1]). + +%% gen_server and proc_lib callbacks +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +%% logger callbacks +-export([log/2, adding_handler/1, removing_handler/1, changing_config/3, + filter_config/1]). + +%% handler internal +-export([log_handler_info/4]). 
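
(Reference sketch, not part of the patch.) The exports above make up the logger handler interface that logger_server invokes through call_h/4; only log/2 is mandatory. A minimal custom handler, with a made-up module name, would look roughly like this:

    -module(my_console_h).   %% hypothetical handler module
    -export([log/2]).

    %% Called for every log event that passes the filters. Config is the
    %% handler configuration map, including the {FModule,FConfig} formatter.
    log(LogEvent, #{formatter := {FModule, FConfig}}) ->
        io:put_chars(FModule:format(LogEvent, FConfig)).

It would be installed with something like logger:add_handler(my_console, my_console_h, #{level => info}).
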
+ +%%%=================================================================== +%%% API +%%%=================================================================== + +%%%----------------------------------------------------------------- +%%% Start a standard handler process and link to caller. +%%% This function is called by the kernel supervisor when this +%%% handler process gets added +-spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when + Name :: atom(), + Config :: logger:handler_config(), + HandlerState :: map(), + Pid :: pid(), + Reason :: term(). + +start_link(Name, Config, HandlerState) -> + proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]). + +%%%----------------------------------------------------------------- +%%% +-spec filesync(Name) -> ok | {error,Reason} when + Name :: atom(), + Reason :: handler_busy | {badarg,term()}. + +filesync(Name) when is_atom(Name) -> + try + gen_server:call(?name_to_reg_name(?MODULE,Name), + filesync, ?DEFAULT_CALL_TIMEOUT) + catch + _:{timeout,_} -> {error,handler_busy} + end; +filesync(Name) -> + {error,{badarg,{filesync,[Name]}}}. + +%%%----------------------------------------------------------------- +%%% +-spec info(Name) -> Info | {error,Reason} when + Name :: atom(), + Info :: term(), + Reason :: handler_busy | {badarg,term()}. + +info(Name) when is_atom(Name) -> + try + gen_server:call(?name_to_reg_name(?MODULE,Name), + info, ?DEFAULT_CALL_TIMEOUT) + catch + _:{timeout,_} -> {error,handler_busy} + end; +info(Name) -> + {error,{badarg,{info,[Name]}}}. + +%%%----------------------------------------------------------------- +%%% +-spec reset(Name) -> ok | {error,Reason} when + Name :: atom(), + Reason :: handler_busy | {badarg,term()}. + +reset(Name) when is_atom(Name) -> + try + gen_server:call(?name_to_reg_name(?MODULE,Name), + reset, ?DEFAULT_CALL_TIMEOUT) + catch + _:{timeout,_} -> {error,handler_busy} + end; +reset(Name) -> + {error,{badarg,{reset,[Name]}}}. + + +%%%=================================================================== +%%% logger callbacks +%%%=================================================================== + +%%%----------------------------------------------------------------- +%%% Handler being added +adding_handler(#{id:=Name}=Config) -> + case check_config(adding, Config) of + {ok, #{config:=HConfig}=Config1} -> + %% create initial handler state by merging defaults with config + HState = maps:merge(get_init_state(), HConfig), + case logger_h_common:overload_levels_ok(HState) of + true -> + start(Name, Config1, HState); + false -> + #{sync_mode_qlen := SMQL, + drop_mode_qlen := DMQL, + flush_qlen := FQL} = HState, + {error,{invalid_levels,{SMQL,DMQL,FQL}}} + end; + Error -> + Error + end. 
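
(Usage sketch, not part of the patch.) adding_handler/1 above merges the caller's 'config' sub-map with the handler defaults and rejects overload thresholds that are not ordered (invalid_levels). Assuming the OTP 21 logger API, with example values and a made-up handler id and file name:

    %% Roughly, sync_mode_qlen =< drop_mode_qlen =< flush_qlen must hold,
    %% otherwise add_handler returns {error,{invalid_levels,{...}}}.
    ok = logger:add_handler(my_file_h, logger_std_h,
                            #{level  => info,
                              config => #{type           => {file, "log/my_app.log"},
                                          sync_mode_qlen => 50,
                                          drop_mode_qlen => 500,
                                          flush_qlen     => 2000}}).
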
+ +%%%----------------------------------------------------------------- +%%% Updating handler config +changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) -> + WriteOnce = maps:with([type],OldHConfig), + ReadOnly = maps:with([handler_pid,mode_tab],OldHConfig), + NewHConfig0 = maps:get(config, NewConfig, #{}), + Default = + case SetOrUpdate of + set -> + %% Do not reset write-once fields to defaults + maps:merge(get_default_config(),WriteOnce); + update -> + OldHConfig + end, + + %% Allow (accidentially) included read-only fields - just overwrite them + NewHConfig = maps:merge(maps:merge(Default, NewHConfig0),ReadOnly), + + %% But fail if write-once fields are changed + case maps:with([type],NewHConfig) of + WriteOnce -> + changing_config1(maps:get(handler_pid,OldHConfig), + OldConfig, + NewConfig#{config=>NewHConfig}); + Other -> + {error,{illegal_config_change,#{config=>WriteOnce},#{config=>Other}}} + end. + +changing_config1(HPid, OldConfig, NewConfig) -> + case check_config(changing, NewConfig) of + Result = {ok,NewConfig1} -> + try gen_server:call(HPid, {change_config,OldConfig,NewConfig1}, + ?DEFAULT_CALL_TIMEOUT) of + ok -> Result; + HError -> HError + catch + _:{timeout,_} -> {error,handler_busy} + end; + Error -> + Error + end. + +check_config(adding, Config) -> + %% Merge in defaults on handler level + HConfig0 = maps:get(config, Config, #{}), + HConfig = maps:merge(get_default_config(),HConfig0), + case check_h_config(maps:to_list(HConfig)) of + ok -> + {ok,Config#{config=>HConfig}}; + Error -> + Error + end; +check_config(changing, Config) -> + HConfig = maps:get(config, Config, #{}), + case check_h_config(maps:to_list(HConfig)) of + ok -> {ok,Config}; + Error -> Error + end. + +check_h_config([{type,Type} | Config]) when Type == standard_io; + Type == standard_error -> + check_h_config(Config); +check_h_config([{type,{file,File}} | Config]) when is_list(File) -> + check_h_config(Config); +check_h_config([{type,{file,File,Modes}} | Config]) when is_list(File), + is_list(Modes) -> + check_h_config(Config); +check_h_config([Other | Config]) -> + case logger_h_common:check_common_config(Other) of + valid -> + check_h_config(Config); + invalid -> + {error,{invalid_config,?MODULE,Other}} + end; +check_h_config([]) -> + ok. + + +%%%----------------------------------------------------------------- +%%% Handler being removed +removing_handler(#{id:=Name}) -> + stop(Name). + +%%%----------------------------------------------------------------- +%%% Log a string or report +-spec log(LogEvent, Config) -> ok when + LogEvent :: logger:log_event(), + Config :: logger:handler_config(). + +log(LogEvent, Config = #{id := Name, + config := #{handler_pid := HPid, + mode_tab := ModeTab}}) -> + %% if the handler has crashed, we must drop this event + %% and hope the handler restarts so we can try again + true = is_process_alive(HPid), + Bin = logger_h_common:log_to_binary(LogEvent, Config), + logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin). + +%%%----------------------------------------------------------------- +%%% Remove internal fields from configuration +filter_config(#{config:=HConfig}=Config) -> + Config#{config=>maps:without([handler_pid,mode_tab],HConfig)}. 
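
(Usage sketch, not part of the patch.) changing_config/3 above gives 'update' merge semantics and 'set' reset-to-defaults semantics, while the write-once 'type' field and the read-only handler_pid/mode_tab fields are preserved either way. Continuing with the hypothetical my_file_h handler from the previous sketch:

    %% update: only the named keys change, the rest of 'config' is kept.
    ok = logger:update_handler_config(my_file_h,
                                      #{config => #{sync_mode_qlen => 100}}),
    %% set: keys left out fall back to their defaults ('type' is kept as-is).
    ok = logger:set_handler_config(my_file_h,
                                   #{config => #{drop_mode_qlen => 1000}}),
    %% Changing 'type' itself is rejected:
    {error, {illegal_config_change, _, _}} =
        logger:update_handler_config(my_file_h,
                                     #{config => #{type => standard_error}}).
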
+ +%%%=================================================================== +%%% gen_server callbacks +%%%=================================================================== + +init([Name, Config = #{config := HConfig}, + State0 = #{type := Type, file_ctrl_sync_int := FileCtrlSyncInt}]) -> + RegName = ?name_to_reg_name(?MODULE,Name), + register(RegName, self()), + process_flag(trap_exit, true), + process_flag(message_queue_data, off_heap), + + ?init_test_hooks(), + ?start_observation(Name), + + case do_init(Name, Type) of + {ok,InitState} -> + try ets:new(Name, [public]) of + ModeTab -> + ?set_mode(ModeTab, async), + State = maps:merge(State0, InitState), + T0 = ?timestamp(), + State1 = + ?merge_with_stats(State#{ + mode_tab => ModeTab, + mode => async, + file_ctrl_sync => FileCtrlSyncInt, + last_qlen => 0, + last_log_ts => T0, + last_op => sync, + burst_win_ts => T0, + burst_msg_count => 0}), + Config1 = + Config#{config => HConfig#{handler_pid => self(), + mode_tab => ModeTab}}, + proc_lib:init_ack({ok,self(),Config1}), + gen_server:cast(self(), repeated_filesync), + gen_server:enter_loop(?MODULE, [], State1) + catch + _:Error -> + unregister(RegName), + logger_h_common:error_notify({init_handler,Name,Error}), + proc_lib:init_ack(Error) + end; + Error -> + unregister(RegName), + logger_h_common:error_notify({init_handler,Name,Error}), + proc_lib:init_ack(Error) + end. + +do_init(Name, Type) -> + case open_log_file(Name, Type) of + {ok,FileCtrlPid} -> + case logger_h_common:unset_restart_flag(Name, ?MODULE) of + true -> + %% inform about restart + gen_server:cast(self(), {log_handler_info, + "Handler ~p restarted", + [Name]}); + false -> + %% initial start + ok + end, + {ok,#{id=>Name,type=>Type,file_ctrl_pid=>FileCtrlPid}}; + Error -> + Error + end. + +%% This is the synchronous log event. +handle_call({log, Bin}, _From, State) -> + {Result,State1} = do_log(Bin, call, State), + %% Result == ok | dropped + {reply,Result, State1}; + +handle_call(filesync, _From, State = #{type := Type, + file_ctrl_pid := FileCtrlPid}) -> + if is_atom(Type) -> + {reply, ok, State}; + true -> + {reply, file_ctrl_filesync_sync(FileCtrlPid), State#{last_op=>sync}} + end; + +handle_call({change_config,_OldConfig,NewConfig}, _From, + State = #{filesync_repeat_interval := FSyncInt0}) -> + HConfig = maps:get(config, NewConfig, #{}), + State1 = maps:merge(State, HConfig), + case logger_h_common:overload_levels_ok(State1) of + true -> + _ = + case maps:get(filesync_repeat_interval, HConfig, undefined) of + undefined -> + ok; + no_repeat -> + _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, + State, + undefined)); + FSyncInt0 -> + ok; + _FSyncInt1 -> + _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, + State, + undefined)), + gen_server:cast(self(), repeated_filesync) + end, + {reply, ok, State1}; + false -> + #{sync_mode_qlen := SMQL, + drop_mode_qlen := DMQL, + flush_qlen := FQL} = State1, + {reply, {error,{invalid_levels,{SMQL,DMQL,FQL}}}, State} + end; + +handle_call(info, _From, State) -> + {reply, State, State}; + +handle_call(reset, _From, State) -> + State1 = ?merge_with_stats(State), + {reply, ok, State1#{last_qlen => 0, + last_log_ts => ?timestamp()}}; + +handle_call(stop, _From, State) -> + {stop, {shutdown,stopped}, ok, State}. + +%% This is the asynchronous log event. 
+handle_cast({log, Bin}, State) -> + {_,State1} = do_log(Bin, cast, State), + {noreply, State1}; + +handle_cast({log_handler_info, Format, Args}, State = #{id:=Name}) -> + log_handler_info(Name, Format, Args, State), + {noreply, State}; + +%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this +%% clause gets called repeatedly by the handler. In order to +%% guarantee that a filesync *always* happens after the last log +%% event, the repeat operation must be active! +handle_cast(repeated_filesync, + State = #{type := Type, + file_ctrl_pid := FileCtrlPid, + filesync_repeat_interval := FSyncInt, + last_op := LastOp}) -> + State1 = + if not is_atom(Type), is_integer(FSyncInt) -> + %% only do filesync if something has been + %% written since last time we checked + if LastOp == sync -> + ok; + true -> + file_ctrl_filesync_async(FileCtrlPid) + end, + {ok,TRef} = + timer:apply_after(FSyncInt, gen_server,cast, + [self(),repeated_filesync]), + State#{rep_sync_tref => TRef, last_op => sync}; + true -> + State + end, + {noreply,State1}. + +handle_info({'EXIT',Pid,Why}, State = #{id := Name, type := FileInfo}) -> + case maps:get(file_ctrl_pid, State, undefined) of + Pid -> + %% file error, terminate handler + logger_h_common:handler_exit(Name, + {error,{write_failed,FileInfo,Why}}); + _Other -> + %% ignore EXIT + ok + end, + {noreply, State}; + +handle_info(_Info, State) -> + {noreply, State}. + +terminate(Reason, State = #{id:=Name, file_ctrl_pid:=FWPid, + type:=_FileInfo}) -> + _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State, + undefined)), + case is_process_alive(FWPid) of + true -> + unlink(FWPid), + _ = file_ctrl_stop(FWPid), + MRef = erlang:monitor(process, FWPid), + receive + {'DOWN',MRef,_,_,_} -> + ok + after + ?DEFAULT_CALL_TIMEOUT -> + exit(FWPid, kill) + end; + false -> + ok + end, + ok = logger_h_common:stop_or_restart(Name, Reason, State), + unregister(?name_to_reg_name(?MODULE, Name)), + ok. + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +%%%=================================================================== +%%% Internal functions +%%%=================================================================== + +%%%----------------------------------------------------------------- +%%% +get_default_config() -> + #{type => standard_io, + sync_mode_qlen => ?SYNC_MODE_QLEN, + drop_mode_qlen => ?DROP_MODE_QLEN, + flush_qlen => ?FLUSH_QLEN, + burst_limit_enable => ?BURST_LIMIT_ENABLE, + burst_limit_max_count => ?BURST_LIMIT_MAX_COUNT, + burst_limit_window_time => ?BURST_LIMIT_WINDOW_TIME, + overload_kill_enable => ?OVERLOAD_KILL_ENABLE, + overload_kill_qlen => ?OVERLOAD_KILL_QLEN, + overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE, + overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER, + filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}. + +get_init_state() -> + #{file_ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL, + filesync_ok_qlen => ?FILESYNC_OK_QLEN}. + +%%%----------------------------------------------------------------- +%%% Add a standard handler to the logger. +%%% This starts a dedicated handler process which should always +%%% exist if the handler is registered with logger (and should not +%%% exist if the handler is not registered). +%%% +%%% Handler specific config should be provided with a sub map associated +%%% with a key named 'config', e.g: +%%% +%%% Config = #{config => #{sync_mode_qlen => 50} +%%% +%%% The standard handler process is linked to logger_sup, which is +%%% part of the kernel application's supervision tree. 
+start(Name, Config, HandlerState) -> + LoggerStdH = + #{id => Name, + start => {?MODULE, start_link, [Name,Config,HandlerState]}, + restart => temporary, + shutdown => 2000, + type => worker, + modules => [?MODULE]}, + case supervisor:start_child(logger_sup, LoggerStdH) of + {ok,Pid,Config1} -> + ok = logger_handler_watcher:register_handler(Name,Pid), + {ok,Config1}; + Error -> + Error + end. + +%%%----------------------------------------------------------------- +%%% Stop and remove the handler. +stop(Name) -> + case whereis(?name_to_reg_name(?MODULE,Name)) of + undefined -> + ok; + Pid -> + %% We don't want to do supervisor:terminate_child here + %% since we need to distinguish this explicit stop from a + %% system termination in order to avoid circular attempts + %% at removing the handler (implying deadlocks and + %% timeouts). + %% And we don't need to do supervisor:delete_child, since + %% the restart type is temporary, which means that the + %% child specification is automatically removed from the + %% supervisor when the process dies. + _ = gen_server:call(Pid, stop), + ok + end. + +%%%----------------------------------------------------------------- +%%% Logging and overload control. +-define(update_file_ctrl_sync(C, Interval), + if C == 0 -> Interval; + true -> C-1 end). + +%% check for overload between every event (and set Mode to async, +%% sync or drop accordingly), but never flush the whole mailbox +%% before LogWindowSize events have been handled +do_log(Bin, CallOrCast, State = #{id:=Name, mode:=Mode0}) -> + T1 = ?timestamp(), + + %% check if the handler is getting overloaded, or if it's + %% recovering from overload (the check must be done for each + %% event to react quickly to large bursts of events and + %% to ensure that the handler can never end up in drop mode + %% with an empty mailbox, which would stop operation) + {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State), + + if (Mode1 == drop) andalso (Mode0 =/= drop) -> + log_handler_info(Name, "Handler ~p switched to drop mode", + [Name], State); + (Mode0 == drop) andalso ((Mode1 == async) orelse (Mode1 == sync)) -> + log_handler_info(Name, "Handler ~p switched to ~w mode", + [Name,Mode1], State); + true -> + ok + end, + + %% kill the handler if it can't keep up with the load + logger_h_common:kill_if_choked(Name, QLen, Mem, ?MODULE, State), + + if Mode1 == flush -> + flush(Name, QLen, T1, State1); + true -> + write(Name, Mode1, T1, Bin, CallOrCast, State1) + end. + +%% this clause is called by do_log/3 after an overload check +%% has been performed, where QLen > FlushQLen +flush(Name, _QLen0, T1, State=#{last_log_ts := _T0, mode_tab := ModeTab}) -> + %% flush messages in the mailbox (a limited number in + %% order to not cause long delays) + NewFlushed = logger_h_common:flush_log_events(?FLUSH_MAX_N), + + %% write info in log about flushed messages + log_handler_info(Name, "Handler ~p flushed ~w log events", + [Name,NewFlushed], State), + + %% because of the receive loop when flushing messages, the + %% handler will be scheduled out often and the mailbox could + %% grow very large, so we'd better check the queue again here + {_,_QLen1} = process_info(self(), message_queue_len), + ?observe(Name,{max_qlen,_QLen1}), + + %% Add 1 for the current log event + ?observe(Name,{flushed,NewFlushed+1}), + + State1 = ?update_max_time(?diff_time(T1,_T0),State), + {dropped,?update_other(flushed,FLUSHED,NewFlushed, + State1#{mode => ?set_mode(ModeTab,async), + last_qlen => 0, + last_log_ts => T1})}. 
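
(Simplified sketch, not part of the patch.) do_log/3 above re-evaluates the mode on every event from the current queue length; the real decision lives in logger_h_common:check_load/1, which is not part of this file. A stripped-down version of that decision, using the thresholds carried in the handler state:

    %% Illustration only; the real check also samples memory use, updates
    %% statistics and writes the mode to the ets mode table.
    mode_for_qlen(QLen, #{sync_mode_qlen := SMQL,
                          drop_mode_qlen := DMQL,
                          flush_qlen     := FQL}) ->
        if QLen >= FQL  -> flush;   %% flush the mailbox, dropping events
           QLen >= DMQL -> drop;    %% drop new events on the client side
           QLen >= SMQL -> sync;    %% force clients into synchronous calls
           true         -> async    %% normal asynchronous operation
        end.
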
+ +%% this clause is called to write to file +write(_Name, Mode, T1, Bin, _CallOrCast, + State = #{mode_tab := ModeTab, + file_ctrl_pid := FileCtrlPid, + file_ctrl_sync := FileCtrlSync, + last_qlen := LastQLen, + last_log_ts := T0, + file_ctrl_sync_int := FileCtrlSyncInt}) -> + %% check if we need to limit the number of writes + %% during a burst of log events + {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State), + + %% only send a synhrounous event to the file controller process + %% every FileCtrlSyncInt time, to give the handler time between + %% file writes so it can keep up with incoming messages + {Result,LastQLen1} = + if DoWrite, FileCtrlSync == 0 -> + ?observe(_Name,{_CallOrCast,1}), + file_write_sync(FileCtrlPid, Bin, false), + {ok,element(2, process_info(self(), message_queue_len))}; + DoWrite -> + ?observe(_Name,{_CallOrCast,1}), + file_write_async(FileCtrlPid, Bin), + {ok,LastQLen}; + not DoWrite -> + ?observe(_Name,{flushed,1}), + {dropped,LastQLen} + end, + + %% Check if the time since the previous log event is long enough - + %% and the queue length small enough - to assume the mailbox has + %% been emptied, and if so, do filesync operation and reset mode to + %% async. Note that this is the best we can do to detect an idle + %% handler without setting a timer after each log call/cast. If the + %% time between two consecutive log events is fast and no new + %% event comes in after the last one, idle state won't be detected! + Time = ?diff_time(T1,T0), + {Mode1,BurstMsgCount1} = + if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso + (Time > ?IDLE_DETECT_TIME_USEC) -> + %% do filesync if necessary + case maps:get(type, State) of + Std when is_atom(Std) -> + ok; + _File -> + file_ctrl_filesync_async(FileCtrlPid) + end, + {?change_mode(ModeTab, Mode, async),0}; + true -> + {Mode,BurstMsgCount} + end, + State1 = + ?update_calls_or_casts(_CallOrCast,1,State), + State2 = + ?update_max_time(Time, + State1#{mode => Mode1, + last_qlen := LastQLen1, + last_log_ts => T1, + last_op => write, + burst_win_ts => BurstWinT, + burst_msg_count => BurstMsgCount1, + file_ctrl_sync => + ?update_file_ctrl_sync(FileCtrlSync, + FileCtrlSyncInt)}), + {Result,State2}. + +open_log_file(HandlerName, FileInfo) -> + case file_ctrl_start(HandlerName, FileInfo) of + OK = {ok,_FileCtrlPid} -> OK; + Error -> Error + end. + +do_open_log_file({file,File}) -> + do_open_log_file({file,File,[raw,append,delayed_write]}); + +do_open_log_file({file,File,[]}) -> + do_open_log_file({file,File,[raw,append,delayed_write]}); + +do_open_log_file({file,File,Modes}) -> + try + case filelib:ensure_dir(File) of + ok -> + file:open(File, Modes); + Error -> + Error + end + catch + _:Reason -> {error,Reason} + end. + +close_log_file(Std) when Std == standard_io; Std == standard_error -> + ok; +close_log_file(Fd) -> + _ = file:datasync(Fd), + _ = file:close(Fd). + + +log_handler_info(Name, Format, Args, #{file_ctrl_pid := FileCtrlPid}) -> + Config = + case logger:get_handler_config(Name) of + {ok,Conf} -> Conf; + _ -> #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}} + end, + Meta = #{time=>erlang:system_time(microsecond)}, + Bin = logger_h_common:log_to_binary(#{level => notice, + msg => {Format,Args}, + meta => Meta}, Config), + _ = file_write_async(FileCtrlPid, Bin), + ok. 
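
(Usage sketch, not part of the patch.) do_open_log_file/1 above shows that a plain {file,File} type is opened with [raw,append,delayed_write], while an explicit {file,File,Modes} is passed to file:open/2 untouched (check_h_config only requires Modes to be a list). For example, truncating the log at handler start instead of appending, with a made-up handler id and path:

    ok = logger:add_handler(fresh_log, logger_std_h,
                            #{config => #{type => {file, "log/run.log",
                                                   [raw, write, delayed_write]}}}).
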
+ +%%%----------------------------------------------------------------- +%%% File control process + +file_ctrl_start(HandlerName, FileInfo) -> + Starter = self(), + FileCtrlPid = + spawn_link(fun() -> + file_ctrl_init(HandlerName, FileInfo, Starter) + end), + receive + {FileCtrlPid,ok} -> + {ok,FileCtrlPid}; + {FileCtrlPid,Error} -> + Error + after + ?DEFAULT_CALL_TIMEOUT -> + {error,file_ctrl_process_not_started} + end. + +file_ctrl_stop(Pid) -> + Pid ! stop. + +file_write_async(Pid, Bin) -> + Pid ! {log,Bin}, + ok. + +file_write_sync(Pid, Bin, FileSync) -> + case file_ctrl_call(Pid, {log,self(),Bin,FileSync}) of + {error,Reason} -> + {error,{write_failed,Bin,Reason}}; + Result -> + Result + end. + +file_ctrl_filesync_async(Pid) -> + Pid ! filesync, + ok. + +file_ctrl_filesync_sync(Pid) -> + file_ctrl_call(Pid, {filesync,self()}). + +file_ctrl_call(Pid, Msg) -> + MRef = monitor(process, Pid), + Pid ! {Msg,MRef}, + receive + {MRef,Result} -> + demonitor(MRef, [flush]), + Result; + {'DOWN',MRef,_Type,_Object,Reason} -> + {error,Reason} + after + ?DEFAULT_CALL_TIMEOUT -> + {error,{no_response,Pid}} + end. + +file_ctrl_init(HandlerName, FileInfo, Starter) when is_tuple(FileInfo) -> + process_flag(message_queue_data, off_heap), + FileName = element(2, FileInfo), + case do_open_log_file(FileInfo) of + {ok,Fd} -> + Starter ! {self(),ok}, + file_ctrl_loop(Fd, file, FileName, false, ok, ok, HandlerName); + {error,Reason} -> + Starter ! {self(),{error,{open_failed,FileName,Reason}}} + end; +file_ctrl_init(HandlerName, StdDev, Starter) -> + Starter ! {self(),ok}, + file_ctrl_loop(StdDev, standard_io, StdDev, false, ok, ok, HandlerName). + +file_ctrl_loop(Fd, Type, DevName, Synced, + PrevWriteResult, PrevSyncResult, HandlerName) -> + receive + %% asynchronous event + {log,Bin} -> + Result = if Type == file -> + write_to_dev(Fd, Bin, DevName, + PrevWriteResult, HandlerName); + true -> + io:put_chars(Fd, Bin) + end, + file_ctrl_loop(Fd, Type, DevName, false, + Result, PrevSyncResult, HandlerName); + + %% synchronous event + {{log,From,Bin,FileSync},MRef} -> + if Type == file -> + %% check that file hasn't been deleted + CheckFile = + fun() -> {ok,_} = file:read_file_info(DevName) end, + spawn_link(CheckFile), + WResult = write_to_dev(Fd, Bin, DevName, + PrevWriteResult, HandlerName), + {Synced1,SResult} = + if not FileSync -> + {false,PrevSyncResult}; + true -> + case sync_dev(Fd, DevName, + PrevSyncResult, HandlerName) of + ok -> {true,ok}; + Error -> {false,Error} + end + end, + From ! {MRef,ok}, + file_ctrl_loop(Fd, Type, DevName, Synced1, + WResult, SResult, HandlerName); + true -> + _ = io:put_chars(Fd, Bin), + From ! {MRef,ok}, + file_ctrl_loop(Fd, Type, DevName, false, + ok, PrevSyncResult, HandlerName) + end; + + filesync when not Synced -> + Result = sync_dev(Fd, DevName, PrevSyncResult, HandlerName), + file_ctrl_loop(Fd, Type, DevName, true, + PrevWriteResult, Result, HandlerName); + + filesync -> + file_ctrl_loop(Fd, Type, DevName, true, + PrevWriteResult, PrevSyncResult, HandlerName); + + {{filesync,From},MRef} -> + Result = if not Synced -> + sync_dev(Fd, DevName, PrevSyncResult, HandlerName); + true -> + ok + end, + From ! {MRef,ok}, + file_ctrl_loop(Fd, Type, DevName, true, + PrevWriteResult, Result, HandlerName); + + stop -> + _ = close_log_file(Fd), + stopped + end. 
+ +write_to_dev(Fd, Bin, FileName, PrevWriteResult, HandlerName) -> + case ?file_write(Fd, Bin) of + ok -> + ok; + PrevWriteResult -> + %% don't report same error twice + PrevWriteResult; + Error -> + logger_h_common:error_notify({HandlerName,write,FileName,Error}), + Error + end. + +sync_dev(Fd, DevName, PrevSyncResult, HandlerName) -> + case ?file_datasync(Fd) of + ok -> + ok; + PrevSyncResult -> + %% don't report same error twice + PrevSyncResult; + Error -> + logger_h_common:error_notify({HandlerName,filesync,DevName,Error}), + Error + end. diff --git a/lib/kernel/src/logger_sup.erl b/lib/kernel/src/logger_sup.erl new file mode 100644 index 0000000000..3d6f482e20 --- /dev/null +++ b/lib/kernel/src/logger_sup.erl @@ -0,0 +1,57 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_sup). + +-behaviour(supervisor). + +%% API +-export([start_link/0]). + +%% Supervisor callbacks +-export([init/1]). + +-define(SERVER, ?MODULE). + +%%%=================================================================== +%%% API functions +%%%=================================================================== + +start_link() -> + supervisor:start_link({local, ?SERVER}, ?MODULE, []). + +%%%=================================================================== +%%% Supervisor callbacks +%%%=================================================================== + +init([]) -> + + SupFlags = #{strategy => one_for_one, + intensity => 1, + period => 5}, + + Watcher = #{id => logger_handler_watcher, + start => {logger_handler_watcher, start_link, []}, + shutdown => brutal_kill}, + + {ok, {SupFlags, [Watcher]}}. + +%%%=================================================================== +%%% Internal functions +%%%=================================================================== diff --git a/lib/kernel/src/net_kernel.erl b/lib/kernel/src/net_kernel.erl index ddda396713..3cf11fd7b1 100644 --- a/lib/kernel/src/net_kernel.erl +++ b/lib/kernel/src/net_kernel.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -53,7 +53,7 @@ %% Documented API functions. --export([allow/1, +-export([allow/1, allowed/0, connect_node/1, monitor_nodes/1, monitor_nodes/2, @@ -70,8 +70,8 @@ protocol_childspecs/0, epmd_module/0]). --export([connect/1, disconnect/1, hidden_connect/1, passive_cnct/1]). --export([hidden_connect_node/1]). %% explicit connect +-export([disconnect/1, passive_cnct/1]). +-export([hidden_connect_node/1]). -export([set_net_ticktime/1, set_net_ticktime/2, get_net_ticktime/0]). 
-export([node_info/1, node_info/2, nodes_info/0, @@ -122,6 +122,7 @@ -record(connection, { node, %% remote node name + conn_id, %% Connection identity state, %% pending | up | up_pending owner, %% owner pid pending_owner, %% possible new owner @@ -170,6 +171,8 @@ kernel_apply(M,F,A) -> request({apply,M,F,A}). Nodes :: [node()]. allow(Nodes) -> request({allow, Nodes}). +allowed() -> request(allowed). + longnames() -> request(longnames). -spec stop() -> ok | {error, Reason} when @@ -221,8 +224,7 @@ get_net_ticktime() -> Error :: error | {error, term()}. monitor_nodes(Flag) -> case catch process_flag(monitor_nodes, Flag) of - true -> ok; - false -> ok; + N when is_integer(N) -> ok; _ -> mk_monitor_nodes_error(Flag, []) end. @@ -235,8 +237,7 @@ monitor_nodes(Flag) -> Error :: error | {error, term()}. monitor_nodes(Flag, Opts) -> case catch process_flag({monitor_nodes, Opts}, Flag) of - true -> ok; - false -> ok; + N when is_integer(N) -> ok; _ -> mk_monitor_nodes_error(Flag, Opts) end. @@ -247,14 +248,15 @@ ticktime_res(A) when is_atom(A) -> A. %% Called though BIF's -connect(Node) -> do_connect(Node, normal, false). %%% Long timeout if blocked (== barred), only affects nodes with %%% {dist_auto_connect, once} set. -passive_cnct(Node) -> do_connect(Node, normal, true). -disconnect(Node) -> request({disconnect, Node}). +passive_cnct(Node) -> + case request({passive_cnct, Node}) of + ignored -> false; + Other -> Other + end. -%% connect but not seen -hidden_connect(Node) -> do_connect(Node, hidden, false). +disconnect(Node) -> request({disconnect, Node}). %% Should this node publish itself on Node? publish_on_node(Node) when is_atom(Node) -> @@ -272,67 +274,24 @@ connect_node(Node) when is_atom(Node) -> hidden_connect_node(Node) when is_atom(Node) -> request({connect, hidden, Node}). -do_connect(Node, Type, WaitForBarred) -> %% Type = normal | hidden - case catch ets:lookup(sys_dist, Node) of - {'EXIT', _} -> - ?connect_failure(Node,{table_missing, sys_dist}), - false; - [#barred_connection{}] -> - case WaitForBarred of - false -> - false; - true -> - Pid = spawn(?MODULE,passive_connect_monitor,[self(),Node]), - receive - {Pid, true} -> - %%io:format("Net Kernel: barred connection (~p) " - %% "connected from other end.~n",[Node]), - true; - {Pid, false} -> - ?connect_failure(Node,{barred_connection, - ets:lookup(sys_dist, Node)}), - %%io:format("Net Kernel: barred connection (~p) " - %% "- failure.~n",[Node]), - false - end - end; - Else -> - case application:get_env(kernel, dist_auto_connect) of - {ok, never} -> - ?connect_failure(Node,{dist_auto_connect,never}), - false; - % This might happen due to connection close - % not beeing propagated to user space yet. - % Save the day by just not connecting... - {ok, once} when Else =/= [], - (hd(Else))#connection.state =:= up -> - ?connect_failure(Node,{barred_connection, - ets:lookup(sys_dist, Node)}), - false; - _ -> - request({connect, Type, Node}) - end - end. -passive_connect_monitor(Parent, Node) -> +passive_connect_monitor(From, Node) -> ok = monitor_nodes(true,[{node_type,all}]), - case lists:member(Node,nodes([connected])) of - true -> - ok = monitor_nodes(false,[{node_type,all}]), - Parent ! {self(),true}; - _ -> - Ref = make_ref(), - Tref = erlang:send_after(connecttime(),self(),Ref), - receive - Ref -> - ok = monitor_nodes(false,[{node_type,all}]), - Parent ! {self(), false}; - {nodeup,Node,_} -> - ok = monitor_nodes(false,[{node_type,all}]), - _ = erlang:cancel_timer(Tref), - Parent ! {self(),true} - end - end. 
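
(Usage sketch, not part of the patch.) Alongside the connection-identity rework, the diff adds net_kernel:allowed/0 as a read-back for the allow-list maintained by allow/1; it is served by the new handle_call(allowed, ...) clause further down. With made-up node names:

    %% Only the listed nodes may be connected to or accepted from now on:
    ok = net_kernel:allow(['a@host1', 'b@host2']),
    %% The new call returns the accumulated allow-list:
    {ok, Allowed} = net_kernel:allowed(),
    true = lists:member('a@host1', Allowed).
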
+ Reply = case lists:member(Node,nodes([connected])) of + true -> + true; + _ -> + receive + {nodeup,Node,_} -> + true + after connecttime() -> + false + end + end, + ok = monitor_nodes(false,[{node_type,all}]), + {Pid, Tag} = From, + erlang:send(Pid, {Tag, Reply}). + %% If the net_kernel isn't running we ignore all requests to the %% kernel, thus basically accepting them :-) @@ -393,41 +352,140 @@ init({Name, LongOrShortNames, TickT, CleanHalt}) -> {stop, Error} end. +do_auto_connect_1(Node, ConnId, From, State) -> + case ets:lookup(sys_dist, Node) of + [#barred_connection{}] -> + case ConnId of + passive_cnct -> + spawn(?MODULE,passive_connect_monitor,[From,Node]), + {noreply, State}; + _ -> + erts_internal:abort_connection(Node, ConnId), + {reply, false, State} + end; + + ConnLookup -> + do_auto_connect_2(Node, ConnId, From, State, ConnLookup) + end. + +do_auto_connect_2(Node, passive_cnct, From, State, ConnLookup) -> + try erts_internal:new_connection(Node) of + ConnId -> + do_auto_connect_2(Node, ConnId, From, State, ConnLookup) + catch + _:_ -> + error_logger:error_msg("~n** Cannot get connection id for node ~w~n", + [Node]), + {reply, false, State} + end; +do_auto_connect_2(Node, ConnId, From, State, ConnLookup) -> + case ConnLookup of + [#connection{conn_id=ConnId, state = up}] -> + {reply, true, State}; + [#connection{conn_id=ConnId, waiting=Waiting}=Conn] -> + case From of + noreply -> ok; + _ -> ets:insert(sys_dist, Conn#connection{waiting = [From|Waiting]}) + end, + {noreply, State}; + + _ -> + case application:get_env(kernel, dist_auto_connect) of + {ok, never} -> + ?connect_failure(Node,{dist_auto_connect,never}), + erts_internal:abort_connection(Node, ConnId), + {reply, false, State}; + + %% This might happen due to connection close + %% not beeing propagated to user space yet. + %% Save the day by just not connecting... + {ok, once} when ConnLookup =/= [], + (hd(ConnLookup))#connection.state =:= up -> + ?connect_failure(Node,{barred_connection, + ets:lookup(sys_dist, Node)}), + erts_internal:abort_connection(Node, ConnId), + {reply, false, State}; + _ -> + case setup(Node, ConnId, normal, From, State) of + {ok, SetupPid} -> + Owners = [{SetupPid, Node} | State#state.conn_owners], + {noreply,State#state{conn_owners=Owners}}; + _Error -> + ?connect_failure(Node, {setup_call, failed, _Error}), + erts_internal:abort_connection(Node, ConnId), + {reply, false, State} + end + end + end. + + +do_explicit_connect([#connection{conn_id = ConnId, state = up}], _, _, ConnId, _From, State) -> + {reply, true, State}; +do_explicit_connect([#connection{conn_id = ConnId}=Conn], _, _, ConnId, From, State) + when Conn#connection.state =:= pending; + Conn#connection.state =:= up_pending -> + Waiting = Conn#connection.waiting, + ets:insert(sys_dist, Conn#connection{waiting = [From|Waiting]}), + {noreply, State}; +do_explicit_connect([#barred_connection{}], Type, Node, ConnId, From , State) -> + %% Barred connection only affects auto_connect, ignore it. + do_explicit_connect([], Type, Node, ConnId, From , State); +do_explicit_connect(_ConnLookup, Type, Node, ConnId, From , State) -> + case setup(Node,ConnId,Type,From,State) of + {ok, SetupPid} -> + Owners = [{SetupPid, Node} | State#state.conn_owners], + {noreply,State#state{conn_owners=Owners}}; + _Error -> + ?connect_failure(Node, {setup_call, failed, _Error}), + {reply, false, State} + end. + %% ------------------------------------------------------------ %% handle_call. 
%% ------------------------------------------------------------ %% -%% Set up a connection to Node. -%% The response is delayed until the connection is up and -%% running. +%% Passive auto-connect to Node. +%% The response is delayed until the connection is up and running. +%% +handle_call({passive_cnct, Node}, From, State) when Node =:= node() -> + async_reply({reply, true, State}, From); +handle_call({passive_cnct, Node}, From, State) -> + verbose({passive_cnct, Node}, 1, State), + R = do_auto_connect_1(Node, passive_cnct, From, State), + return_call(R, From); + +%% +%% Explicit connect +%% The response is delayed until the connection is up and running. %% handle_call({connect, _, Node}, From, State) when Node =:= node() -> async_reply({reply, true, State}, From); handle_call({connect, Type, Node}, From, State) -> verbose({connect, Type, Node}, 1, State), - case ets:lookup(sys_dist, Node) of - [Conn] when Conn#connection.state =:= up -> - async_reply({reply, true, State}, From); - [Conn] when Conn#connection.state =:= pending -> - Waiting = Conn#connection.waiting, - ets:insert(sys_dist, Conn#connection{waiting = [From|Waiting]}), - {noreply, State}; - [Conn] when Conn#connection.state =:= up_pending -> - Waiting = Conn#connection.waiting, - ets:insert(sys_dist, Conn#connection{waiting = [From|Waiting]}), - {noreply, State}; - _ -> - case setup(Node,Type,From,State) of - {ok, SetupPid} -> - Owners = [{SetupPid, Node} | State#state.conn_owners], - {noreply,State#state{conn_owners=Owners}}; - _ -> - ?connect_failure(Node, {setup_call, failed}), - async_reply({reply, false, State}, From) - end - end; + ConnLookup = ets:lookup(sys_dist, Node), + R = try erts_internal:new_connection(Node) of + ConnId -> + R1 = do_explicit_connect(ConnLookup, Type, Node, ConnId, From, State), + case R1 of + {reply, true, _S} -> %% already connected + ok; + {noreply, _S} -> %% connection pending + ok; + {reply, false, _S} -> %% connection refused + erts_internal:abort_connection(Node, ConnId) + end, + R1 + + catch + _:_ -> + error_logger:error_msg("~n** Cannot get connection id for node ~w~n", + [Node]), + {reply, false, State} + end, + return_call(R, From); + %% %% Close the connection to Node. @@ -470,6 +528,9 @@ handle_call({allow, Nodes}, From, State) -> async_reply({reply,error,State}, From) end; +handle_call(allowed, From, #state{allowed = Allowed} = State) -> + async_reply({reply,{ok,Allowed},State}, From); + %% %% authentication, used by auth. Simply works as this: %% if the message comes through, the other node IS authorized. @@ -634,6 +695,25 @@ terminate(_Reason, State) -> %% ------------------------------------------------------------ %% +%% Asynchronous auto connect request +%% +handle_info({auto_connect,Node, DHandle}, State) -> + verbose({auto_connect, Node, DHandle}, 1, State), + ConnId = DHandle, + NewState = + case do_auto_connect_1(Node, ConnId, noreply, State) of + {noreply, S} -> %% Pending connection + S; + + {reply, true, S} -> %% Already connected + S; + + {reply, false, S} -> %% Connection refused + S + end, + {noreply, NewState}; + +%% %% accept a new connection. %% handle_info({accept,AcceptPid,Socket,Family,Proto}, State) -> @@ -713,14 +793,23 @@ handle_info({AcceptPid, {accept_pending,MyNode,Node,Address,Type}}, State) -> AcceptPid ! {self(), {accept_pending, already_pending}}, {noreply, State}; _ -> - ets:insert(sys_dist, #connection{node = Node, - state = pending, - owner = AcceptPid, - address = Address, - type = Type}), - AcceptPid ! 
{self(),{accept_pending,ok}}, - Owners = [{AcceptPid,Node} | State#state.conn_owners], - {noreply, State#state{conn_owners = Owners}} + try erts_internal:new_connection(Node) of + ConnId -> + ets:insert(sys_dist, #connection{node = Node, + conn_id = ConnId, + state = pending, + owner = AcceptPid, + address = Address, + type = Type}), + AcceptPid ! {self(),{accept_pending,ok}}, + Owners = [{AcceptPid,Node} | State#state.conn_owners], + {noreply, State#state{conn_owners = Owners}} + catch + _:_ -> + error_logger:error_msg("~n** Cannot get connection id for node ~w~n", + [Node]), + AcceptPid ! {self(),{accept_pending,nok_pending}} + end end; handle_info({SetupPid, {is_pending, Node}}, State) -> @@ -778,7 +867,7 @@ handle_info(transition_period_end, {noreply,State#state{tick = #tick{ticker = Tckr, time = T}}}; handle_info(X, State) -> - error_msg("Net kernel got ~w~n",[X]), + error_msg("Net kernel got ~tw~n",[X]), {noreply,State}. %% ----------------------------------------------------------- @@ -906,6 +995,7 @@ pending_nodedown(Conn, Node, Type, State) -> % Don't bar connections that have never been alive %mark_sys_dist_nodedown(Node), % - instead just delete the node: + erts_internal:abort_connection(Node, Conn#connection.conn_id), ets:delete(sys_dist, Node), reply_waiting(Node,Conn#connection.waiting, false), case Type of @@ -920,7 +1010,9 @@ up_pending_nodedown(Conn, Node, _Reason, _Type, State) -> AcceptPid = Conn#connection.pending_owner, Owners = State#state.conn_owners, Pend = lists:keydelete(AcceptPid, 1, State#state.pend_owners), + erts_internal:abort_connection(Node, Conn#connection.conn_id), Conn1 = Conn#connection { owner = AcceptPid, + conn_id = erts_internal:new_connection(Node), pending_owner = undefined, state = pending }, ets:insert(sys_dist, Conn1), @@ -928,15 +1020,16 @@ up_pending_nodedown(Conn, Node, _Reason, _Type, State) -> State#state{conn_owners = [{AcceptPid,Node}|Owners], pend_owners = Pend}. -up_nodedown(_Conn, Node, _Reason, Type, State) -> - mark_sys_dist_nodedown(Node), +up_nodedown(Conn, Node, _Reason, Type, State) -> + mark_sys_dist_nodedown(Conn, Node), case Type of normal -> ?nodedown(Node, State); _ -> ok end, State. -mark_sys_dist_nodedown(Node) -> +mark_sys_dist_nodedown(Conn, Node) -> + erts_internal:abort_connection(Node, Conn#connection.conn_id), case application:get_env(kernel, dist_auto_connect) of {ok, once} -> ets:insert(sys_dist, #barred_connection{node = Node}); @@ -1179,15 +1272,8 @@ spawn_func(_,{From,Tag},M,F,A,Gleader) -> %% Set up connection to a new node. %% ----------------------------------------------------------- -setup(Node,Type,From,State) -> - Allowed = State#state.allowed, - case lists:member(Node, Allowed) of - false when Allowed =/= [] -> - error_msg("** Connection attempt with " - "disallowed node ~w ** ~n", [Node]), - {error, bad_node}; - _ -> - case select_mod(Node, State#state.listen) of +setup(Node, ConnId, Type, From, State) -> + case setup_check(Node, State) of {ok, L} -> Mod = L#listen.module, LAddr = L#listen.address, @@ -1200,18 +1286,38 @@ setup(Node,Type,From,State) -> Addr = LAddr#net_address { address = undefined, host = undefined }, + Waiting = case From of + noreply -> []; + _ -> [From] + end, ets:insert(sys_dist, #connection{node = Node, + conn_id = ConnId, state = pending, owner = Pid, - waiting = [From], + waiting = Waiting, address = Addr, type = normal}), {ok, Pid}; Error -> Error - end end. 
+setup_check(Node, State) -> + Allowed = State#state.allowed, + case lists:member(Node, Allowed) of + false when Allowed =/= [] -> + error_msg("** Connection attempt with " + "disallowed node ~w ** ~n", [Node]), + {error, bad_node}; + _ -> + case select_mod(Node, State#state.listen) of + {ok, _L}=OK -> OK; + Error -> Error + end + end. + + + %% %% Find a module that is willing to handle connection setup to Node %% @@ -1652,6 +1758,11 @@ verbose(_, _, _) -> getnode(P) when is_pid(P) -> node(P); getnode(P) -> P. +return_call({noreply, _State}=R, _From) -> + R; +return_call(R, From) -> + async_reply(R, From). + async_reply({reply, Msg, State}, From) -> async_gen_server_reply(From, Msg), {noreply, State}. @@ -1659,16 +1770,16 @@ async_reply({reply, Msg, State}, From) -> async_gen_server_reply(From, Msg) -> {Pid, Tag} = From, M = {Tag, Msg}, - case catch erlang:send(Pid, M, [nosuspend, noconnect]) of + try erlang:send(Pid, M, [nosuspend, noconnect]) of ok -> ok; nosuspend -> _ = spawn(fun() -> catch erlang:send(Pid, M, [noconnect]) end), ok; noconnect -> - ok; % The gen module takes care of this case. - {'EXIT', _} -> - ok + ok % The gen module takes care of this case. + catch + _:_ -> ok end. call_owner(Owner, Msg) -> diff --git a/lib/kernel/src/os.erl b/lib/kernel/src/os.erl index 0250783632..29a26674ba 100644 --- a/lib/kernel/src/os.erl +++ b/lib/kernel/src/os.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -21,40 +21,40 @@ %% Provides a common operating system interface. --export([type/0, version/0, cmd/1, find_executable/1, find_executable/2]). +-export([type/0, version/0, cmd/1, cmd/2, find_executable/1, find_executable/2]). -include("file.hrl"). +-export_type([env_var_name/0, env_var_value/0, env_var_name_value/0]). + +-export([getenv/0, getenv/1, getenv/2, putenv/2, unsetenv/1]). + %%% BIFs --export([getenv/0, getenv/1, getenv/2, getpid/0, - perf_counter/0, perf_counter/1, - putenv/2, set_signal/2, system_time/0, system_time/1, - timestamp/0, unsetenv/1]). +-export([get_env_var/1, getpid/0, list_env_vars/0, perf_counter/0, + perf_counter/1, set_env_var/2, set_signal/2, system_time/0, + system_time/1, timestamp/0, unset_env_var/1]). --spec getenv() -> [string()]. +-type os_command() :: atom() | io_lib:chars(). +-type os_command_opts() :: #{ max_size => non_neg_integer() | infinity }. -getenv() -> erlang:nif_error(undef). +-export_type([os_command/0, os_command_opts/0]). --spec getenv(VarName) -> Value | false when - VarName :: string(), - Value :: string(). +-type env_var_name() :: nonempty_string(). -getenv(_) -> - erlang:nif_error(undef). +-type env_var_value() :: string(). --spec getenv(VarName, DefaultValue) -> Value when - VarName :: string(), - DefaultValue :: string(), - Value :: string(). +-type env_var_name_value() :: nonempty_string(). -getenv(VarName, DefaultValue) -> - case os:getenv(VarName) of - false -> - DefaultValue; - Value -> - Value - end. +-spec list_env_vars() -> [{env_var_name(), env_var_value()}]. +list_env_vars() -> + erlang:nif_error(undef). + +-spec get_env_var(VarName) -> Value | false when + VarName :: env_var_name(), + Value :: env_var_value(). +get_env_var(_VarName) -> + erlang:nif_error(undef). -spec getpid() -> Value when Value :: string(). 
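
(Usage sketch, not part of the patch.) The os module changes above replace the getenv/putenv/unsetenv BIFs with new BIFs (list_env_vars/0, get_env_var/1, set_env_var/2, unset_env_var/1); the old names survive as ordinary Erlang wrappers in the hunks that follow. With a made-up variable name:

    true  = os:set_env_var("MY_APP_DEBUG", "1"),   %% previously the putenv/2 BIF
    "1"   = os:get_env_var("MY_APP_DEBUG"),        %% previously the getenv/1 BIF
    true  = os:unset_env_var("MY_APP_DEBUG"),
    false = os:get_env_var("MY_APP_DEBUG").
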
@@ -74,11 +74,10 @@ perf_counter() -> perf_counter(Unit) -> erlang:convert_time_unit(os:perf_counter(), perf_counter, Unit). --spec putenv(VarName, Value) -> true when - VarName :: string(), - Value :: string(). - -putenv(_, _) -> +-spec set_env_var(VarName, Value) -> true when + VarName :: env_var_name(), + Value :: env_var_value(). +set_env_var(_, _) -> erlang:nif_error(undef). -spec system_time() -> integer(). @@ -98,10 +97,9 @@ system_time(_Unit) -> timestamp() -> erlang:nif_error(undef). --spec unsetenv(VarName) -> true when - VarName :: string(). - -unsetenv(_) -> +-spec unset_env_var(VarName) -> true when + VarName :: env_var_name(). +unset_env_var(_) -> erlang:nif_error(undef). -spec set_signal(Signal, Option) -> 'ok' when @@ -115,6 +113,39 @@ set_signal(_Signal, _Option) -> %%% End of BIFs +-spec getenv() -> [env_var_name_value()]. +getenv() -> + [lists:flatten([Key, $=, Value]) || {Key, Value} <- os:list_env_vars() ]. + +-spec getenv(VarName) -> Value | false when + VarName :: env_var_name(), + Value :: env_var_value(). +getenv(VarName) -> + os:get_env_var(VarName). + +-spec getenv(VarName, DefaultValue) -> Value when + VarName :: env_var_name(), + DefaultValue :: env_var_value(), + Value :: env_var_value(). +getenv(VarName, DefaultValue) -> + case os:getenv(VarName) of + false -> + DefaultValue; + Value -> + Value + end. + +-spec putenv(VarName, Value) -> true when + VarName :: env_var_name(), + Value :: env_var_value(). +putenv(VarName, Value) -> + os:set_env_var(VarName, Value). + +-spec unsetenv(VarName) -> true when + VarName :: env_var_name(). +unsetenv(VarName) -> + os:unset_env_var(VarName). + -spec type() -> {Osfamily, Osname} when Osfamily :: unix | win32, Osname :: atom(). @@ -178,7 +209,7 @@ verify_executable(Name0, [Ext|Rest], OrigExtensions) -> end; verify_executable(Name, [], OrigExtensions) when OrigExtensions =/= [""] -> %% Windows %% Will only happen on windows, hence case insensitivity - case can_be_full_name(string:to_lower(Name),OrigExtensions) of + case can_be_full_name(string:lowercase(Name),OrigExtensions) of true -> verify_executable(Name,[""],[""]); _ -> @@ -232,15 +263,20 @@ extensions() -> %% Executes the given command in the default shell for the operating system. -spec cmd(Command) -> string() when - Command :: atom() | io_lib:chars(). + Command :: os_command(). cmd(Cmd) -> - validate(Cmd), - {SpawnCmd, SpawnOpts, SpawnInput, Eot} = mk_cmd(os:type(), Cmd), + cmd(Cmd, #{ }). + +-spec cmd(Command, Options) -> string() when + Command :: os_command(), + Options :: os_command_opts(). +cmd(Cmd, Opts) -> + {SpawnCmd, SpawnOpts, SpawnInput, Eot} = mk_cmd(os:type(), validate(Cmd)), Port = open_port({spawn, SpawnCmd}, [binary, stderr_to_stdout, stream, in, hide | SpawnOpts]), MonRef = erlang:monitor(port, Port), true = port_command(Port, SpawnInput), - Bytes = get_data(Port, MonRef, Eot, []), + Bytes = get_data(Port, MonRef, Eot, [], 0, maps:get(max_size, Opts, infinity)), demonitor(MonRef, [flush]), String = unicode:characters_to_list(Bytes), if %% Convert to unicode list if possible otherwise return bytes @@ -255,8 +291,6 @@ mk_cmd({win32,Wtype}, Cmd) -> {Cspec,_} -> lists:concat([Cspec," /c",Cmd]) end, {Command, [], [], <<>>}; -mk_cmd(OsType,Cmd) when is_atom(Cmd) -> - mk_cmd(OsType, atom_to_list(Cmd)); mk_cmd(_,Cmd) -> %% Have to send command in like this in order to make sh commands like %% cd and ulimit available @@ -279,24 +313,41 @@ mk_cmd(_,Cmd) -> <<$\^D>>}. 
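
(Usage sketch, not part of the patch.) os:cmd/2 above accepts an options map; the only option handled in this diff is max_size, which get_data/6 and eot/4 use to stop collecting and truncate the captured output. With an example command and limit:

    %% At most 1024 bytes of output are collected; the port is closed as
    %% soon as the limit is reached.
    Head = os:cmd("dmesg", #{max_size => 1024}),
    true = length(Head) =< 1024.
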
validate(Atom) when is_atom(Atom) -> - ok; + validate(atom_to_list(Atom)); validate(List) when is_list(List) -> - validate1(List). + case validate1(List) of + false -> + List; + true -> + %% Had zeros at end; remove them... + string:trim(List, trailing, [0]) + end. -validate1([C|Rest]) when is_integer(C) -> +validate1([0|Rest]) -> + validate2(Rest); +validate1([C|Rest]) when is_integer(C), C > 0 -> validate1(Rest); validate1([List|Rest]) when is_list(List) -> - validate1(List), - validate1(Rest); + validate1(List) or validate1(Rest); validate1([]) -> - ok. + false. + +%% Ensure that the rest is zero only... +validate2([]) -> + true; +validate2([0|Rest]) -> + validate2(Rest); +validate2([List|Rest]) when is_list(List) -> + validate2(List), + validate2(Rest). -get_data(Port, MonRef, Eot, Sofar) -> +get_data(Port, MonRef, Eot, Sofar, Size, Max) -> receive {Port, {data, Bytes}} -> - case eot(Bytes, Eot) of + case eot(Bytes, Eot, Size, Max) of more -> - get_data(Port, MonRef, Eot, [Sofar,Bytes]); + get_data(Port, MonRef, Eot, [Sofar, Bytes], + Size + byte_size(Bytes), Max); Last -> catch port_close(Port), flush_until_down(Port, MonRef), @@ -307,13 +358,16 @@ get_data(Port, MonRef, Eot, Sofar) -> iolist_to_binary(Sofar) end. -eot(_Bs, <<>>) -> +eot(Bs, <<>>, Size, Max) when Size + byte_size(Bs) < Max -> more; -eot(Bs, Eot) -> +eot(Bs, <<>>, Size, Max) -> + binary:part(Bs, {0, Max - Size}); +eot(Bs, Eot, Size, Max) -> case binary:match(Bs, Eot) of - nomatch -> more; - {Pos, _} -> - binary:part(Bs,{0, Pos}) + {Pos, _} when Size + Pos < Max -> + binary:part(Bs,{0, Pos}); + _ -> + eot(Bs, <<>>, Size, Max) end. %% When port_close returns we know that all the diff --git a/lib/kernel/src/pg2.erl b/lib/kernel/src/pg2.erl index edf4aedde2..c4732f37ee 100644 --- a/lib/kernel/src/pg2.erl +++ b/lib/kernel/src/pg2.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2016. All Rights Reserved. +%% Copyright Ericsson AB 1997-2017. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -199,7 +199,7 @@ handle_call({delete, Name}, _From, S) -> {reply, ok, S}; handle_call(Request, From, S) -> error_logger:warning_msg("The pg2 server received an unexpected message:\n" - "handle_call(~p, ~p, _)\n", + "handle_call(~tp, ~tp, _)\n", [Request, From]), {noreply, S}. diff --git a/lib/kernel/src/raw_file_io.erl b/lib/kernel/src/raw_file_io.erl new file mode 100644 index 0000000000..e3c07c8f78 --- /dev/null +++ b/lib/kernel/src/raw_file_io.erl @@ -0,0 +1,75 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(raw_file_io). + +-export([open/2]). + +open(Filename, Modes) -> + %% Layers are applied in this order, and the listed modules will call this + %% function again as necessary. eg. 
a raw compressed delayed file in list + %% mode will walk through [_list -> _compressed -> _delayed -> _raw]. + ModuleOrder = [{raw_file_io_list, fun match_list/1}, + {raw_file_io_compressed, fun match_compressed/1}, + {raw_file_io_delayed, fun match_delayed/1}, + {raw_file_io_raw, fun match_raw/1}], + open_1(ModuleOrder, Filename, add_implicit_modes(Modes)). +open_1([], _Filename, _Modes) -> + error(badarg); +open_1([{Module, Match} | Rest], Filename, Modes) -> + case lists:any(Match, Modes) of + true -> + {Options, ChildModes} = + lists:partition(fun(Mode) -> Match(Mode) end, Modes), + Module:open_layer(Filename, ChildModes, Options); + false -> + open_1(Rest, Filename, Modes) + end. + +%% 'read' and 'list' mode are enabled unless disabled by another option, so +%% we'll explicitly add them to avoid duplicating this logic in child layers. +add_implicit_modes(Modes0) -> + Modes1 = add_unless_matched(Modes0, fun match_writable/1, read), + add_unless_matched(Modes1, fun match_binary/1, list). +add_unless_matched(Modes, Match, Default) -> + case lists:any(Match, Modes) of + false -> [Default | Modes]; + true -> Modes + end. + +match_list(list) -> true; +match_list(_Other) -> false. + +match_compressed(compressed) -> true; +match_compressed(_Other) -> false. + +match_delayed({delayed_write, _Size, _Timeout}) -> true; +match_delayed(delayed_write) -> true; +match_delayed(_Other) -> false. + +match_raw(raw) -> true; +match_raw(_Other) -> false. + +match_writable(write) -> true; +match_writable(append) -> true; +match_writable(exclusive) -> true; +match_writable(_Other) -> false. + +match_binary(binary) -> true; +match_binary(_Other) -> false. diff --git a/lib/kernel/src/raw_file_io_compressed.erl b/lib/kernel/src/raw_file_io_compressed.erl new file mode 100644 index 0000000000..d5ab042d25 --- /dev/null +++ b/lib/kernel/src/raw_file_io_compressed.erl @@ -0,0 +1,134 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(raw_file_io_compressed). + +-export([close/1, sync/1, datasync/1, truncate/1, advise/4, allocate/3, + position/2, write/2, pwrite/2, pwrite/3, + read_line/1, read/2, pread/2, pread/3]). + +%% OTP internal. +-export([ipread_s32bu_p32bu/3, sendfile/8]). + +-export([open_layer/3]). + +-include("file_int.hrl"). + +open_layer(Filename, Modes, Options) -> + IsAppend = lists:member(append, Modes), + IsDeflate = lists:member(write, Modes), + IsInflate = lists:member(read, Modes), + if + IsDeflate, IsInflate; IsAppend -> + {error, einval}; + IsDeflate, not IsInflate -> + start_server_module(raw_file_io_deflate, Filename, Modes, Options); + IsInflate -> + start_server_module(raw_file_io_inflate, Filename, Modes, Options) + end. + +start_server_module(Module, Filename, Modes, Options) -> + Secret = make_ref(), + case gen_statem:start(Module, {self(), Secret, Options}, []) of + {ok, Pid} -> open_next_layer(Pid, Secret, Filename, Modes); + Other -> Other + end. 
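
raw_file_io:open/2 above is an internal entry point; user code normally reaches the layer stack through file:open/2 with the raw mode. A rough sketch (not part of the patch) of which modes select which layers, assuming a gzip file named demo.txt.gz exists:

layers_demo() ->
    %% 'compressed' selects raw_file_io_compressed, which starts the inflate
    %% server since only 'read' is requested; the implicit 'list' mode adds
    %% the raw_file_io_list wrapper on top of that.
    {ok, Fd} = file:open("demo.txt.gz", [raw, read, compressed]),
    {ok, FirstLine} = file:read_line(Fd),    %% returned as a string (list mode)
    ok = file:close(Fd),
    FirstLine.
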
+ +open_next_layer(Pid, Secret, Filename, Modes) -> + case gen_statem:call(Pid, {'$open', Secret, Filename, Modes}, infinity) of + ok -> + PublicFd = #file_descriptor{ + module = raw_file_io_compressed, data = {self(), Pid} }, + {ok, PublicFd}; + Other -> Other + end. + +close(Fd) -> + wrap_call(Fd, [close]). + +sync(Fd) -> + wrap_call(Fd, [sync]). +datasync(Fd) -> + wrap_call(Fd, [datasync]). + +truncate(Fd) -> + wrap_call(Fd, [truncate]). + +advise(Fd, Offset, Length, Advise) -> + wrap_call(Fd, [advise, Offset, Length, Advise]). +allocate(Fd, Offset, Length) -> + wrap_call(Fd, [allocate, Offset, Length]). + +position(Fd, Mark) -> + wrap_call(Fd, [position, Mark]). + +write(Fd, IOData) -> + try + CompactedData = erlang:iolist_to_iovec(IOData), + wrap_call(Fd, [write, CompactedData]) + catch + error:badarg -> {error, badarg} + end. + +pwrite(Fd, Offset, IOData) -> + try + CompactedData = erlang:iolist_to_iovec(IOData), + wrap_call(Fd, [pwrite, Offset, CompactedData]) + catch + error:badarg -> {error, badarg} + end. +pwrite(Fd, LocBytes) -> + try + CompactedLocBytes = + [ {Offset, erlang:iolist_to_iovec(IOData)} || + {Offset, IOData} <- LocBytes ], + wrap_call(Fd, [pwrite, CompactedLocBytes]) + catch + error:badarg -> {error, badarg} + end. + +read_line(Fd) -> + wrap_call(Fd, [read_line]). +read(Fd, Size) -> + wrap_call(Fd, [read, Size]). +pread(Fd, Offset, Size) -> + wrap_call(Fd, [pread, Offset, Size]). +pread(Fd, LocNums) -> + wrap_call(Fd, [pread, LocNums]). + +ipread_s32bu_p32bu(Fd, Offset, MaxSize) -> + wrap_call(Fd, [ipread_s32bu_p32bu, Offset, MaxSize]). + +sendfile(_,_,_,_,_,_,_,_) -> + {error, enotsup}. + +wrap_call(Fd, Command) -> + {_Owner, Pid} = get_fd_data(Fd), + try gen_statem:call(Pid, Command, infinity) of + Result -> Result + catch + exit:{noproc, _StackTrace} -> {error, einval} + end. + +get_fd_data(#file_descriptor{ data = Data }) -> + {Owner, _ServerPid} = Data, + case self() of + Owner -> Data; + _ -> error(not_on_controlling_process) + end. diff --git a/lib/kernel/src/raw_file_io_deflate.erl b/lib/kernel/src/raw_file_io_deflate.erl new file mode 100644 index 0000000000..acfc546743 --- /dev/null +++ b/lib/kernel/src/raw_file_io_deflate.erl @@ -0,0 +1,159 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(raw_file_io_deflate). + +-behavior(gen_statem). + +-export([init/1, callback_mode/0, terminate/3]). +-export([opening/3, opened/3]). + +-include("file_int.hrl"). + +-define(GZIP_WBITS, 16 + 15). + +callback_mode() -> state_functions. + +init({Owner, Secret, [compressed]}) -> + Monitor = monitor(process, Owner), + Z = zlib:open(), + ok = zlib:deflateInit(Z, default, deflated, ?GZIP_WBITS, 8, default), + Data = + #{ owner => Owner, + monitor => Monitor, + secret => Secret, + position => 0, + zlib => Z }, + {ok, opening, Data}. 
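
init/1 above opens the zlib stream with window bits 16+15, i.e. gzip framing. A standalone sketch (not part of the patch) of the same deflate setup, showing that the output is ordinary gzip data:

gzip_roundtrip(Bin) when is_binary(Bin) ->
    Z = zlib:open(),
    %% Same parameters as the deflateInit call in init/1: default level,
    %% 'deflated' method, 16+15 window bits (gzip), memory level 8,
    %% default strategy.
    ok = zlib:deflateInit(Z, default, deflated, 16 + 15, 8, default),
    Compressed = iolist_to_binary([zlib:deflate(Z, Bin),
                                   zlib:deflate(Z, [], finish)]),
    ok = zlib:deflateEnd(Z),
    ok = zlib:close(Z),
    Bin = zlib:gunzip(Compressed),    %% readable as ordinary gzip data
    ok.
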
+ +opening({call, From}, {'$open', Secret, Filename, Modes}, #{ secret := Secret } = Data) -> + case raw_file_io:open(Filename, Modes) of + {ok, PrivateFd} -> + NewData = Data#{ handle => PrivateFd }, + {next_state, opened, NewData, [{reply, From, ok}]}; + Other -> + {stop_and_reply, normal, [{reply, From, Other}]} + end; +opening(_Event, _Contents, _Data) -> + {keep_state_and_data, [postpone]}. + +%% + +opened(info, {'DOWN', Monitor, process, _Owner, Reason}, #{ monitor := Monitor } = Data) -> + if + Reason =/= kill -> flush_deflate_state(Data); + Reason =:= kill -> ignored + end, + {stop, shutdown}; + +opened(info, _Message, _Data) -> + keep_state_and_data; + +opened({call, {Owner, _Tag} = From}, [close], #{ owner := Owner } = Data) -> + #{ handle := PrivateFd } = Data, + Response = + case flush_deflate_state(Data) of + ok -> ?CALL_FD(PrivateFd, close, []); + Other -> Other + end, + {stop_and_reply, normal, [{reply, From, Response}]}; + +opened({call, {Owner, _Tag} = From}, [position, Mark], #{ owner := Owner } = Data) -> + case position(Data, Mark) of + {ok, NewData, Result} -> + Response = {ok, Result}, + {keep_state, NewData, [{reply, From, Response}]}; + Other -> + {keep_state_and_data, [{reply, From, Other}]} + end; + +opened({call, {Owner, _Tag} = From}, [write, IOVec], #{ owner := Owner } = Data) -> + case write(Data, IOVec) of + {ok, NewData} -> {keep_state, NewData, [{reply, From, ok}]}; + Other -> {keep_state_and_data, [{reply, From, Other}]} + end; + +opened({call, {Owner, _Tag} = From}, [read, _Size], #{ owner := Owner }) -> + Response = {error, ebadf}, + {keep_state_and_data, [{reply, From, Response}]}; + +opened({call, {Owner, _Tag} = From}, [read_line], #{ owner := Owner }) -> + Response = {error, ebadf}, + {keep_state_and_data, [{reply, From, Response}]}; + +opened({call, {Owner, _Tag} = From}, _Command, #{ owner := Owner }) -> + Response = {error, enotsup}, + {keep_state_and_data, [{reply, From, Response}]}; + +opened({call, _From}, _Command, _Data) -> + %% The client functions filter this out, so we'll crash if the user does + %% anything stupid on purpose. + {shutdown, protocol_violation}; + +opened(_Event, _Request, _Data) -> + keep_state_and_data. + +write(Data, IOVec) -> + #{ handle := PrivateFd, position := Position, zlib := Z } = Data, + UncompressedSize = iolist_size(IOVec), + case ?CALL_FD(PrivateFd, write, [zlib:deflate(Z, IOVec)]) of + ok -> {ok, Data#{ position := (Position + UncompressedSize) }}; + Other -> Other + end. + +%% +%% We support "seeking" forward as long as it isn't relative to EOF. +%% +%% Seeking is a bit of a misnomer as it's really just compressing zeroes until +%% we reach the desired point, but it has always behaved like this. +%% + +position(Data, Mark) when is_atom(Mark) -> + position(Data, {Mark, 0}); +position(Data, Offset) when is_integer(Offset) -> + position(Data, {bof, Offset}); +position(Data, {bof, Offset}) when is_integer(Offset) -> + position_1(Data, Offset); +position(Data, {cur, Offset}) when is_integer(Offset) -> + #{ position := Position } = Data, + position_1(Data, Position + Offset); +position(_Data, {eof, Offset}) when is_integer(Offset) -> + {error, einval}; +position(_Data, _Any) -> + {error, badarg}. 
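
The write-side position/2 above only moves forward, padding with compressed zeroes, and rejects EOF-relative or backwards positioning. A small sketch (not part of the patch) of how that surfaces through file:position/2; the file name is a placeholder:

seek_demo() ->
    {ok, Fd} = file:open("padded.gz", [raw, write, compressed]),
    ok = file:write(Fd, <<"header">>),
    %% Forward "seek": position_1/2 compresses zeroes up to byte 4096.
    {ok, 4096} = file:position(Fd, {bof, 4096}),
    %% Backwards or EOF-relative positioning is refused.
    {error, einval} = file:position(Fd, {bof, 0}),
    {error, einval} = file:position(Fd, {eof, 0}),
    ok = file:close(Fd).
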
+ +position_1(#{ position := Desired } = Data, Desired) -> + {ok, Data, Desired}; +position_1(#{ position := Current } = Data, Desired) when Current < Desired -> + BytesToWrite = min(Desired - Current, 4 bsl 20), + case write(Data, <<0:(BytesToWrite)/unit:8>>) of + {ok, NewData} -> position_1(NewData, Desired); + Other -> Other + end; +position_1(#{ position := Current }, Desired) when Current > Desired -> + {error, einval}. + +flush_deflate_state(#{ handle := PrivateFd, zlib := Z }) -> + case ?CALL_FD(PrivateFd, write, [zlib:deflate(Z, [], finish)]) of + ok -> ok; + Other -> Other + end. + +terminate(_Reason, _State, _Data) -> + ok. diff --git a/lib/kernel/src/raw_file_io_delayed.erl b/lib/kernel/src/raw_file_io_delayed.erl new file mode 100644 index 0000000000..d2ad7550a1 --- /dev/null +++ b/lib/kernel/src/raw_file_io_delayed.erl @@ -0,0 +1,320 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(raw_file_io_delayed). + +-behavior(gen_statem). + +-export([close/1, sync/1, datasync/1, truncate/1, advise/4, allocate/3, + position/2, write/2, pwrite/2, pwrite/3, + read_line/1, read/2, pread/2, pread/3]). + +%% OTP internal. +-export([ipread_s32bu_p32bu/3, sendfile/8]). + +-export([open_layer/3]). + +-export([init/1, callback_mode/0, terminate/3]). +-export([opening/3, opened/3]). + +-include("file_int.hrl"). + +open_layer(Filename, Modes, Options) -> + Secret = make_ref(), + case gen_statem:start(?MODULE, {self(), Secret, Options}, []) of + {ok, Pid} -> + gen_statem:call(Pid, {'$open', Secret, Filename, Modes}, infinity); + Other -> + Other + end. + +callback_mode() -> state_functions. + +init({Owner, Secret, Options}) -> + Monitor = monitor(process, Owner), + Defaults = + #{ owner => Owner, + monitor => Monitor, + secret => Secret, + timer => none, + pid => self(), + buffer => prim_buffer:new(), + delay_size => 64 bsl 10, + delay_time => 2000 }, + Data = fill_delay_values(Defaults, Options), + {ok, opening, Data}. + +fill_delay_values(Data, []) -> + Data; +fill_delay_values(Data, [{delayed_write, Size, Time} | Options]) -> + fill_delay_values(Data#{ delay_size => Size, delay_time => Time }, Options); +fill_delay_values(Data, [_ | Options]) -> + fill_delay_values(Data, Options). + +opening({call, From}, {'$open', Secret, Filename, Modes}, #{ secret := Secret } = Data) -> + case raw_file_io:open(Filename, Modes) of + {ok, PrivateFd} -> + PublicData = maps:with([owner, buffer, delay_size, pid], Data), + PublicFd = #file_descriptor{ module = ?MODULE, data = PublicData }, + + NewData = Data#{ handle => PrivateFd }, + Response = {ok, PublicFd}, + {next_state, opened, NewData, [{reply, From, Response}]}; + Other -> + {stop_and_reply, normal, [{reply, From, Other}]} + end; +opening(_Event, _Contents, _Data) -> + {keep_state_and_data, [postpone]}. 
+ +%% + +opened(info, {'$timed_out', Secret}, #{ secret := Secret } = Data) -> + %% If the user writes something at this exact moment, the flush will fail + %% and the timer won't reset on the next write since the buffer won't be + %% empty (Unless we collided on a flush). We therefore reset the timeout to + %% ensure that data won't sit idle for extended periods of time. + case try_flush_write_buffer(Data) of + busy -> gen_statem:cast(self(), '$reset_timeout'); + ok -> ok + end, + {keep_state, Data#{ timer => none }, []}; + +opened(info, {'DOWN', Monitor, process, _Owner, Reason}, #{ monitor := Monitor } = Data) -> + if + Reason =/= kill -> try_flush_write_buffer(Data); + Reason =:= kill -> ignored + end, + {stop, shutdown}; + +opened(info, _Message, _Data) -> + keep_state_and_data; + +opened({call, {Owner, _Tag} = From}, [close], #{ owner := Owner } = Data) -> + case flush_write_buffer(Data) of + ok -> + #{ handle := PrivateFd } = Data, + Response = ?CALL_FD(PrivateFd, close, []), + {stop_and_reply, normal, [{reply, From, Response}]}; + Other -> + {stop_and_reply, normal, [{reply, From, Other}]} + end; + +opened({call, {Owner, _Tag} = From}, '$wait', #{ owner := Owner }) -> + %% Used in write/2 to synchronize writes on lock conflicts. + {keep_state_and_data, [{reply, From, ok}]}; + +opened({call, {Owner, _Tag} = From}, '$synchronous_flush', #{ owner := Owner } = Data) -> + cancel_flush_timeout(Data), + Response = flush_write_buffer(Data), + {keep_state_and_data, [{reply, From, Response}]}; + +opened({call, {Owner, _Tag} = From}, Command, #{ owner := Owner } = Data) -> + Response = + case flush_write_buffer(Data) of + ok -> dispatch_command(Data, Command); + Other -> Other + end, + {keep_state_and_data, [{reply, From, Response}]}; + +opened({call, _From}, _Command, _Data) -> + %% The client functions filter this out, so we'll crash if the user does + %% anything stupid on purpose. + {shutdown, protocol_violation}; + +opened(cast, '$reset_timeout', #{ delay_time := Timeout, secret := Secret } = Data) -> + cancel_flush_timeout(Data), + Timer = erlang:send_after(Timeout, self(), {'$timed_out', Secret}), + {keep_state, Data#{ timer => Timer }, []}; + +opened(cast, _Message, _Data) -> + {keep_state_and_data, []}. + +dispatch_command(Data, [Function | Args]) -> + #{ handle := Handle } = Data, + Module = Handle#file_descriptor.module, + apply(Module, Function, [Handle | Args]). + +cancel_flush_timeout(#{ timer := none }) -> + ok; +cancel_flush_timeout(#{ timer := Timer }) -> + _ = erlang:cancel_timer(Timer, [{async, true}]), + ok. + +try_flush_write_buffer(#{ buffer := Buffer, handle := PrivateFd }) -> + case prim_buffer:try_lock(Buffer) of + acquired -> + flush_write_buffer_1(Buffer, PrivateFd), + prim_buffer:unlock(Buffer), + ok; + busy -> + busy + end. + +%% This is only safe to use when there is no chance of conflict with the owner +%% process, or in other words, "during synchronous calls outside of the locked +%% section of write/2" +flush_write_buffer(#{ buffer := Buffer, handle := PrivateFd }) -> + acquired = prim_buffer:try_lock(Buffer), + Result = flush_write_buffer_1(Buffer, PrivateFd), + prim_buffer:unlock(Buffer), + Result. + +flush_write_buffer_1(Buffer, PrivateFd) -> + case prim_buffer:size(Buffer) of + Size when Size > 0 -> + ?CALL_FD(PrivateFd, write, [prim_buffer:read_iovec(Buffer, Size)]); + 0 -> + ok + end. + +terminate(_Reason, _State, _Data) -> + ok. 
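
The server side above flushes the shared prim_buffer on a timer or whenever a synchronous request arrives. A sketch (not part of the patch) of how this is reached from user code, using the documented delayed_write file mode with the default 64 KB / 2000 ms settings set in init/1; the file name is a placeholder:

delayed_demo() ->
    {ok, Fd} = file:open("log.bin", [raw, write, binary, delayed_write]),
    %% Small writes land in the prim_buffer owned by this server; nothing
    %% reaches the underlying fd until the buffer exceeds delay_size,
    %% delay_time elapses, or a synchronous request is made.
    ok = file:write(Fd, <<"small record">>),
    %% close/1 is synchronous, so it flushes the buffer before closing.
    ok = file:close(Fd).
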
+ +%% Client functions + +write(Fd, IOData) -> + try + enqueue_write(Fd, erlang:iolist_to_iovec(IOData)) + catch + error:badarg -> {error, badarg} + end. +enqueue_write(_Fd, []) -> + ok; +enqueue_write(Fd, IOVec) -> + %% get_fd_data will reject everyone except the process that opened the Fd, + %% so we can't race with anyone except the wrapper process. + #{ delay_size := DelaySize, + buffer := Buffer, + pid := Pid } = get_fd_data(Fd), + case prim_buffer:try_lock(Buffer) of + acquired -> + %% (The wrapper process will exit without flushing if we're killed + %% while holding the lock). + enqueue_write_locked(Pid, Buffer, DelaySize, IOVec); + busy -> + %% This can only happen while we're processing a timeout in the + %% wrapper process, so we perform a bogus call to get a completion + %% notification before trying again. + gen_statem:call(Pid, '$wait'), + enqueue_write(Fd, IOVec) + end. +enqueue_write_locked(Pid, Buffer, DelaySize, IOVec) -> + %% The synchronous operations (write, forced flush) are safe since we're + %% running on the only process that can fill the buffer; a timeout being + %% processed just before $synchronous_flush will cause the flush to nop, + %% and a timeout sneaking in just before a synchronous write won't do + %% anything since the buffer is guaranteed to be empty at that point. + BufSize = prim_buffer:size(Buffer), + case is_iovec_smaller_than(IOVec, DelaySize - BufSize) of + true when BufSize > 0 -> + prim_buffer:write(Buffer, IOVec), + prim_buffer:unlock(Buffer); + true -> + prim_buffer:write(Buffer, IOVec), + prim_buffer:unlock(Buffer), + gen_statem:cast(Pid, '$reset_timeout'); + false when BufSize > 0 -> + prim_buffer:write(Buffer, IOVec), + prim_buffer:unlock(Buffer), + gen_statem:call(Pid, '$synchronous_flush'); + false -> + prim_buffer:unlock(Buffer), + gen_statem:call(Pid, [write, IOVec]) + end. + +%% iolist_size/1 will always look through the entire list to get a precise +%% amount, which is pretty inefficient since we only need to know whether we've +%% hit the buffer threshold or not. +%% +%% We only handle the binary case since write/2 forcibly translates input to +%% erlang:iovec(). +is_iovec_smaller_than(IOVec, Max) -> + is_iovec_smaller_than_1(IOVec, Max, 0). +is_iovec_smaller_than_1(_IOVec, Max, Acc) when Acc >= Max -> + false; +is_iovec_smaller_than_1([], _Max, _Acc) -> + true; +is_iovec_smaller_than_1([Binary | Rest], Max, Acc) when is_binary(Binary) -> + is_iovec_smaller_than_1(Rest, Max, Acc + byte_size(Binary)). + +close(Fd) -> + wrap_call(Fd, [close]). + +sync(Fd) -> + wrap_call(Fd, [sync]). +datasync(Fd) -> + wrap_call(Fd, [datasync]). + +truncate(Fd) -> + wrap_call(Fd, [truncate]). + +advise(Fd, Offset, Length, Advise) -> + wrap_call(Fd, [advise, Offset, Length, Advise]). +allocate(Fd, Offset, Length) -> + wrap_call(Fd, [allocate, Offset, Length]). + +position(Fd, Mark) -> + wrap_call(Fd, [position, Mark]). + +pwrite(Fd, Offset, IOData) -> + try + CompactedData = erlang:iolist_to_iovec(IOData), + wrap_call(Fd, [pwrite, Offset, CompactedData]) + catch + error:badarg -> {error, badarg} + end. +pwrite(Fd, LocBytes) -> + try + CompactedLocBytes = + [ {Offset, erlang:iolist_to_iovec(IOData)} || + {Offset, IOData} <- LocBytes ], + wrap_call(Fd, [pwrite, CompactedLocBytes]) + catch + error:badarg -> {error, badarg} + end. + +read_line(Fd) -> + wrap_call(Fd, [read_line]). +read(Fd, Size) -> + wrap_call(Fd, [read, Size]). +pread(Fd, Offset, Size) -> + wrap_call(Fd, [pread, Offset, Size]). +pread(Fd, LocNums) -> + wrap_call(Fd, [pread, LocNums]). 
+ +ipread_s32bu_p32bu(Fd, Offset, MaxSize) -> + wrap_call(Fd, [ipread_s32bu_p32bu, Offset, MaxSize]). + +sendfile(_,_,_,_,_,_,_,_) -> + {error, enotsup}. + +wrap_call(Fd, Command) -> + #{ pid := Pid } = get_fd_data(Fd), + try gen_statem:call(Pid, Command, infinity) of + Result -> Result + catch + exit:{noproc, _StackTrace} -> {error, einval} + end. + +get_fd_data(#file_descriptor{ data = Data }) -> + #{ owner := Owner } = Data, + case self() of + Owner -> Data; + _ -> error(not_on_controlling_process) + end. diff --git a/lib/kernel/src/raw_file_io_inflate.erl b/lib/kernel/src/raw_file_io_inflate.erl new file mode 100644 index 0000000000..7e9780310c --- /dev/null +++ b/lib/kernel/src/raw_file_io_inflate.erl @@ -0,0 +1,261 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(raw_file_io_inflate). + +-behavior(gen_statem). + +-export([init/1, callback_mode/0, terminate/3]). +-export([opening/3, opened_gzip/3, opened_passthrough/3]). + +-include("file_int.hrl"). + +-define(INFLATE_CHUNK_SIZE, (1 bsl 10)). +-define(GZIP_WBITS, (16 + 15)). + +callback_mode() -> state_functions. + +init({Owner, Secret, [compressed]}) -> + Monitor = monitor(process, Owner), + %% We're using the undocumented inflateInit/3 to open the stream in + %% 'reset mode', which resets the inflate state at the end of every stream, + %% allowing us to read concatenated gzip files. + Z = zlib:open(), + ok = zlib:inflateInit(Z, ?GZIP_WBITS, reset), + Data = + #{ owner => Owner, + monitor => Monitor, + secret => Secret, + position => 0, + buffer => prim_buffer:new(), + zlib => Z }, + {ok, opening, Data}. + +%% The old driver fell back to plain reads if the file didn't start with the +%% magic gzip bytes. +choose_decompression_state(PrivateFd) -> + State = + case ?CALL_FD(PrivateFd, read, [2]) of + {ok, <<16#1F, 16#8B>>} -> opened_gzip; + _Other -> opened_passthrough + end, + {ok, 0} = ?CALL_FD(PrivateFd, position, [0]), + State. + +opening({call, From}, {'$open', Secret, Filename, Modes}, #{ secret := Secret } = Data) -> + case raw_file_io:open(Filename, Modes) of + {ok, PrivateFd} -> + NextState = choose_decompression_state(PrivateFd), + NewData = Data#{ handle => PrivateFd }, + {next_state, NextState, NewData, [{reply, From, ok}]}; + Other -> + {stop_and_reply, normal, [{reply, From, Other}]} + end; +opening(_Event, _Contents, _Data) -> + {keep_state_and_data, [postpone]}. + +internal_close(From, Data) -> + #{ handle := PrivateFd } = Data, + Response = ?CALL_FD(PrivateFd, close, []), + {stop_and_reply, normal, [{reply, From, Response}]}. 
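
choose_decompression_state/1 above falls back to plain reads when the first two bytes are not the gzip magic (1F 8B), mirroring the old driver. A sketch of the observable behaviour (not part of the patch; file name is a placeholder):

fallback_demo() ->
    ok = file:write_file("plain.txt", <<"not gzipped\n">>),
    %% No 1F 8B header, so the server enters opened_passthrough and the
    %% 'compressed' mode has no effect on reads.
    {ok, Fd} = file:open("plain.txt", [raw, read, binary, compressed]),
    {ok, <<"not gzipped\n">>} = file:read(Fd, 64),
    ok = file:close(Fd).
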
+ +opened_passthrough(info, {'DOWN', Monitor, process, _Owner, _Reason}, #{ monitor := Monitor }) -> + {stop, shutdown}; + +opened_passthrough(info, _Message, _Data) -> + keep_state_and_data; + +opened_passthrough({call, {Owner, _Tag} = From}, [close], #{ owner := Owner } = Data) -> + internal_close(From, Data); + +opened_passthrough({call, {Owner, _Tag} = From}, [Method | Args], #{ owner := Owner } = Data) -> + #{ handle := PrivateFd } = Data, + Response = ?CALL_FD(PrivateFd, Method, Args), + {keep_state_and_data, [{reply, From, Response}]}; + +opened_passthrough({call, _From}, _Command, _Data) -> + %% The client functions filter this out, so we'll crash if the user does + %% anything stupid on purpose. + {shutdown, protocol_violation}; + +opened_passthrough(_Event, _Request, _Data) -> + keep_state_and_data. + +%% + +opened_gzip(info, {'DOWN', Monitor, process, _Owner, _Reason}, #{ monitor := Monitor }) -> + {stop, shutdown}; + +opened_gzip(info, _Message, _Data) -> + keep_state_and_data; + +opened_gzip({call, {Owner, _Tag} = From}, [close], #{ owner := Owner } = Data) -> + internal_close(From, Data); + +opened_gzip({call, {Owner, _Tag} = From}, [position, Mark], #{ owner := Owner } = Data) -> + case position(Data, Mark) of + {ok, NewData, Result} -> + Response = {ok, Result}, + {keep_state, NewData, [{reply, From, Response}]}; + Other -> + {keep_state_and_data, [{reply, From, Other}]} + end; + +opened_gzip({call, {Owner, _Tag} = From}, [read, Size], #{ owner := Owner } = Data) -> + case read(Data, Size) of + {ok, NewData, Result} -> + Response = {ok, Result}, + {keep_state, NewData, [{reply, From, Response}]}; + Other -> + {keep_state_and_data, [{reply, From, Other}]} + end; + +opened_gzip({call, {Owner, _Tag} = From}, [read_line], #{ owner := Owner } = Data) -> + case read_line(Data) of + {ok, NewData, Result} -> + Response = {ok, Result}, + {keep_state, NewData, [{reply, From, Response}]}; + Other -> + {keep_state_and_data, [{reply, From, Other}]} + end; + +opened_gzip({call, {Owner, _Tag} = From}, [write, _IOData], #{ owner := Owner }) -> + Response = {error, ebadf}, + {keep_state_and_data, [{reply, From, Response}]}; + +opened_gzip({call, {Owner, _Tag} = From}, _Request, #{ owner := Owner }) -> + Response = {error, enotsup}, + {keep_state_and_data, [{reply, From, Response}]}; + +opened_gzip({call, _From}, _Request, _Data) -> + %% The client functions filter this out, so we'll crash if the user does + %% anything stupid on purpose. + {shutdown, protocol_violation}; + +opened_gzip(_Event, _Request, _Data) -> + keep_state_and_data. + +%% + +read(#{ buffer := Buffer } = Data, Size) -> + try read_1(Data, Buffer, prim_buffer:size(Buffer), Size) of + Result -> Result + catch + error:badarg -> {error, badarg}; + error:_ -> {error, eio} + end. +read_1(Data, Buffer, BufferSize, ReadSize) when BufferSize >= ReadSize -> + #{ position := Position } = Data, + Decompressed = prim_buffer:read(Buffer, ReadSize), + {ok, Data#{ position => (Position + ReadSize) }, Decompressed}; +read_1(Data, Buffer, BufferSize, ReadSize) when BufferSize < ReadSize -> + #{ handle := PrivateFd } = Data, + case ?CALL_FD(PrivateFd, read, [?INFLATE_CHUNK_SIZE]) of + {ok, Compressed} -> + #{ zlib := Z } = Data, + Uncompressed = erlang:iolist_to_iovec(zlib:inflate(Z, Compressed)), + prim_buffer:write(Buffer, Uncompressed), + read_1(Data, Buffer, prim_buffer:size(Buffer), ReadSize); + eof when BufferSize > 0 -> + read_1(Data, Buffer, BufferSize, BufferSize); + Other -> + Other + end. 
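
The inflate stream is opened in 'reset mode' (see init/1 further up) precisely so that concatenated gzip members read as one logical stream. A sketch of that property (not part of the patch; file name is a placeholder), assuming reset mode continues across member boundaries as the init/1 comment describes:

concat_demo() ->
    ok = file:write_file("concat.gz",
                         [zlib:gzip(<<"first ">>), zlib:gzip(<<"second">>)]),
    {ok, Fd} = file:open("concat.gz", [raw, read, binary, compressed]),
    %% Both members are decompressed; read_1/4 keeps pulling chunks until eof
    %% and then returns whatever is buffered.
    {ok, <<"first second">>} = file:read(Fd, 100),
    ok = file:close(Fd).
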
+ +read_line(#{ buffer := Buffer } = Data) -> + try read_line_1(Data, Buffer, prim_buffer:find_byte_index(Buffer, $\n)) of + {ok, NewData, Decompressed} -> {ok, NewData, Decompressed}; + Other -> Other + catch + error:badarg -> {error, badarg}; + error:_ -> {error, eio} + end. + +read_line_1(Data, Buffer, not_found) -> + #{ handle := PrivateFd, zlib := Z } = Data, + case ?CALL_FD(PrivateFd, read, [?INFLATE_CHUNK_SIZE]) of + {ok, Compressed} -> + Uncompressed = erlang:iolist_to_iovec(zlib:inflate(Z, Compressed)), + prim_buffer:write(Buffer, Uncompressed), + read_line_1(Data, Buffer, prim_buffer:find_byte_index(Buffer, $\n)); + eof -> + case prim_buffer:size(Buffer) of + Size when Size > 0 -> {ok, prim_buffer:read(Buffer, Size)}; + Size when Size =:= 0 -> eof + end; + Error -> + Error + end; +read_line_1(Data, Buffer, {ok, LFIndex}) -> + %% Translate CRLF into just LF, completely ignoring which encoding is used, + %% but treat the file position as including CR. + #{ position := Position } = Data, + NewData = Data#{ position => (Position + LFIndex + 1) }, + CRIndex = (LFIndex - 1), + TranslatedLine = + case prim_buffer:read(Buffer, LFIndex + 1) of + <<Line:CRIndex/binary, "\r\n">> -> <<Line/binary, "\n">>; + Line -> Line + end, + {ok, NewData, TranslatedLine}. + +%% +%% We support seeking in both directions as long as it isn't relative to EOF. +%% +%% Seeking backwards is extremely inefficient since we have to seek to the very +%% beginning and then decompress up to the desired point. +%% + +position(Data, Mark) when is_atom(Mark) -> + position(Data, {Mark, 0}); +position(Data, Offset) when is_integer(Offset) -> + position(Data, {bof, Offset}); +position(Data, {bof, Offset}) when is_integer(Offset) -> + position_1(Data, Offset); +position(Data, {cur, Offset}) when is_integer(Offset) -> + #{ position := Position } = Data, + position_1(Data, Position + Offset); +position(_Data, {eof, Offset}) when is_integer(Offset) -> + {error, einval}; +position(_Data, _Other) -> + {error, badarg}. + +position_1(_Data, Desired) when Desired < 0 -> + {error, einval}; +position_1(#{ position := Desired } = Data, Desired) -> + {ok, Data, Desired}; +position_1(#{ position := Current } = Data, Desired) when Current < Desired -> + case read(Data, min(Desired - Current, ?INFLATE_CHUNK_SIZE)) of + {ok, NewData, _Data} -> position_1(NewData, Desired); + eof -> {ok, Data, Current}; + Other -> Other + end; +position_1(#{ position := Current } = Data, Desired) when Current > Desired -> + #{ handle := PrivateFd, buffer := Buffer, zlib := Z } = Data, + case ?CALL_FD(PrivateFd, position, [bof]) of + {ok, 0} -> + ok = zlib:inflateReset(Z), + prim_buffer:wipe(Buffer), + position_1(Data#{ position => 0 }, Desired); + Other -> + Other + end. + +terminate(_Reason, _State, _Data) -> + ok. diff --git a/lib/kernel/src/raw_file_io_list.erl b/lib/kernel/src/raw_file_io_list.erl new file mode 100644 index 0000000000..2e16e63f0e --- /dev/null +++ b/lib/kernel/src/raw_file_io_list.erl @@ -0,0 +1,128 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(raw_file_io_list). + +-export([close/1, sync/1, datasync/1, truncate/1, advise/4, allocate/3, + position/2, write/2, pwrite/2, pwrite/3, + read_line/1, read/2, pread/2, pread/3]). + +%% OTP internal. +-export([ipread_s32bu_p32bu/3, sendfile/8]). + +-export([open_layer/3]). + +-include("file_int.hrl"). + +open_layer(Filename, Modes, [list]) -> + case raw_file_io:open(Filename, [binary | Modes]) of + {ok, PrivateFd} -> {ok, make_public_fd(PrivateFd, Modes)}; + Other -> Other + end. + +%% We can skip wrapping the file if it's write-only since only read operations +%% are affected by list mode. Since raw_file_io fills in all implicit options +%% for us, all we need to do is check whether 'read' is among them. +make_public_fd(PrivateFd, Modes) -> + case lists:member(read, Modes) of + true -> #file_descriptor{ module = ?MODULE, data = PrivateFd }; + false -> PrivateFd + end. + +close(Fd) -> + PrivateFd = Fd#file_descriptor.data, + ?CALL_FD(PrivateFd, close, []). + +sync(Fd) -> + PrivateFd = Fd#file_descriptor.data, + ?CALL_FD(PrivateFd, sync, []). +datasync(Fd) -> + PrivateFd = Fd#file_descriptor.data, + ?CALL_FD(PrivateFd, datasync, []). + +truncate(Fd) -> + PrivateFd = Fd#file_descriptor.data, + ?CALL_FD(PrivateFd, truncate, []). + +advise(Fd, Offset, Length, Advise) -> + PrivateFd = Fd#file_descriptor.data, + ?CALL_FD(PrivateFd, advise, [Offset, Length, Advise]). +allocate(Fd, Offset, Length) -> + PrivateFd = Fd#file_descriptor.data, + ?CALL_FD(PrivateFd, allocate, [Offset, Length]). + +position(Fd, Mark) -> + PrivateFd = Fd#file_descriptor.data, + ?CALL_FD(PrivateFd, position, [Mark]). + +write(Fd, IOData) -> + PrivateFd = Fd#file_descriptor.data, + ?CALL_FD(PrivateFd, write, [IOData]). + +pwrite(Fd, Offset, IOData) -> + PrivateFd = Fd#file_descriptor.data, + ?CALL_FD(PrivateFd, pwrite, [Offset, IOData]). +pwrite(Fd, LocBytes) -> + PrivateFd = Fd#file_descriptor.data, + ?CALL_FD(PrivateFd, pwrite, [LocBytes]). + +read_line(Fd) -> + PrivateFd = Fd#file_descriptor.data, + case ?CALL_FD(PrivateFd, read_line, []) of + {ok, Binary} -> {ok, binary_to_list(Binary)}; + Other -> Other + end. +read(Fd, Size) -> + PrivateFd = Fd#file_descriptor.data, + case ?CALL_FD(PrivateFd, read, [Size]) of + {ok, Binary} -> {ok, binary_to_list(Binary)}; + Other -> Other + end. +pread(Fd, Offset, Size) -> + PrivateFd = Fd#file_descriptor.data, + case ?CALL_FD(PrivateFd, pread, [Offset, Size]) of + {ok, Binary} -> {ok, binary_to_list(Binary)}; + Other -> Other + end. +pread(Fd, LocNums) -> + PrivateFd = Fd#file_descriptor.data, + case ?CALL_FD(PrivateFd, pread, [LocNums]) of + {ok, LocResults} -> + TranslatedResults = + [ case Result of + Result when is_binary(Result) -> binary_to_list(Result); + eof -> eof + end || Result <- LocResults ], + {ok, TranslatedResults}; + Other -> Other + end. + +ipread_s32bu_p32bu(Fd, Offset, MaxSize) -> + PrivateFd = Fd#file_descriptor.data, + case ?CALL_FD(PrivateFd, ipread_s32bu_p32bu, [Offset, MaxSize]) of + {ok, {Size, Pointer, Binary}} when is_binary(Binary) -> + {ok, {Size, Pointer, binary_to_list(Binary)}}; + Other -> + Other + end. + +sendfile(Fd, Dest, Offset, Bytes, ChunkSize, Headers, Trailers, Flags) -> + Args = [Dest, Offset, Bytes, ChunkSize, Headers, Trailers, Flags], + PrivateFd = Fd#file_descriptor.data, + ?CALL_FD(PrivateFd, sendfile, Args). 
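
raw_file_io_list above only wraps readable files and converts results from binaries to strings. A sketch of the visible effect (not part of the patch; file name is a placeholder):

list_mode_demo() ->
    ok = file:write_file("hello.txt", <<"hello\nworld\n">>),
    %% No 'binary' flag, so raw_file_io adds the implicit 'list' mode and
    %% this wrapper translates read results into strings.
    {ok, Fd} = file:open("hello.txt", [raw, read]),
    {ok, "hello\n"} = file:read_line(Fd),
    {ok, "world"} = file:read(Fd, 5),
    ok = file:close(Fd).
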
diff --git a/lib/kernel/src/raw_file_io_raw.erl b/lib/kernel/src/raw_file_io_raw.erl new file mode 100644 index 0000000000..9a9fe78eb1 --- /dev/null +++ b/lib/kernel/src/raw_file_io_raw.erl @@ -0,0 +1,25 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(raw_file_io_raw). + +-export([open_layer/3]). + +open_layer(Filename, Modes, [raw]) -> + prim_file:open(Filename, [raw | Modes]). diff --git a/lib/kernel/src/rpc.erl b/lib/kernel/src/rpc.erl index 0e0b7dffa3..d197de942f 100644 --- a/lib/kernel/src/rpc.erl +++ b/lib/kernel/src/rpc.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -418,10 +418,7 @@ abcast(Name, Mess) -> abcast([Node|Tail], Name, Mess) -> Dest = {Name,Node}, - case catch erlang:send(Dest, Mess, [noconnect]) of - noconnect -> spawn(erlang, send, [Dest,Mess]), ok; - _ -> ok - end, + try erlang:send(Dest, Mess) catch error:_ -> ok end, abcast(Tail, Name, Mess); abcast([], _,_) -> abcast. @@ -498,7 +495,7 @@ start_monitor(Node, Name) -> Module :: module(), Function :: atom(), Args :: [term()], - ResL :: [term()], + ResL :: [Res :: term() | {'badrpc', Reason :: term()}], BadNodes :: [node()]. multicall(M, F, A) -> @@ -509,14 +506,14 @@ multicall(M, F, A) -> Module :: module(), Function :: atom(), Args :: [term()], - ResL :: [term()], + ResL :: [Res :: term() | {'badrpc', Reason :: term()}], BadNodes :: [node()]; (Module, Function, Args, Timeout) -> {ResL, BadNodes} when Module :: module(), Function :: atom(), Args :: [term()], Timeout :: timeout(), - ResL :: [term()], + ResL :: [Res :: term() | {'badrpc', Reason :: term()}], BadNodes :: [node()]. multicall(Nodes, M, F, A) when is_list(Nodes) -> @@ -531,7 +528,7 @@ multicall(M, F, A, Timeout) -> Function :: atom(), Args :: [term()], Timeout :: timeout(), - ResL :: [term()], + ResL :: [Res :: term() | {'badrpc', Reason :: term()}], BadNodes :: [node()]. multicall(Nodes, M, F, A, infinity) diff --git a/lib/kernel/src/seq_trace.erl b/lib/kernel/src/seq_trace.erl index cc0c10909b..14fe21e9de 100644 --- a/lib/kernel/src/seq_trace.erl +++ b/lib/kernel/src/seq_trace.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2016. All Rights Reserved. +%% Copyright Ericsson AB 1998-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -41,7 +41,7 @@ -type flag() :: 'send' | 'receive' | 'print' | 'timestamp' | 'monotonic_timestamp' | 'strict_monotonic_timestamp'. -type component() :: 'label' | 'serial' | flag(). 
--type value() :: (Integer :: non_neg_integer()) +-type value() :: (Label :: term()) | {Previous :: non_neg_integer(), Current :: non_neg_integer()} | (Bool :: boolean()). @@ -59,10 +59,6 @@ set_token({Flags,Label,Serial,_From,Lastcnt}) -> F = decode_flags(Flags), set_token2([{label,Label},{serial,{Lastcnt, Serial}} | F]). -%% We limit the label type to always be a small integer because erl_interface -%% expects that, the BIF can however "unofficially" handle atoms as well, and -%% atoms can be used if only Erlang nodes are involved - -spec set_token(Component, Val) -> {Component, OldVal} when Component :: component(), Val :: value(), diff --git a/lib/kernel/src/user.erl b/lib/kernel/src/user.erl index a5cc7b0ec1..872e63ab53 100644 --- a/lib/kernel/src/user.erl +++ b/lib/kernel/src/user.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2017. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -398,7 +398,7 @@ get_line(Prompt, Port, Q, Acc, Enc) -> get_line_bytes(Prompt, Port, Q, Acc, Bytes, Enc); {Port, eof} -> put(eof, true), - {ok, eof, []}; + {ok, eof, queue:new()}; {io_request,From,ReplyAs,{get_geometry,_}=Req} when is_pid(From) -> do_io_request(Req, From, ReplyAs, Port, queue:new()), @@ -615,7 +615,7 @@ get_chars(Prompt, M, F, Xa, Port, Q, State, Enc) -> get_chars_bytes(State, M, F, Xa, Port, Q, Bytes, Enc); {Port, eof} -> put(eof, true), - {ok, eof, []}; + {ok, eof, queue:new()}; %%{io_request,From,ReplyAs,Request} when is_pid(From) -> %% get_chars_req(Prompt, M, F, Xa, Port, queue:new(), State, %% Request, From, ReplyAs); diff --git a/lib/kernel/src/user_drv.erl b/lib/kernel/src/user_drv.erl index b794d4f45e..9f914aa222 100644 --- a/lib/kernel/src/user_drv.erl +++ b/lib/kernel/src/user_drv.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -175,6 +175,18 @@ server_loop(Iport, Oport, Curr, User, Gr, {Resp, IOQ} = IOQueue) -> {Iport,eof} -> Curr ! {self(),eof}, server_loop(Iport, Oport, Curr, User, Gr, IOQueue); + + %% We always handle geometry and unicode requests + {Requester,tty_geometry} -> + Requester ! {self(),tty_geometry,get_tty_geometry(Iport)}, + server_loop(Iport, Oport, Curr, User, Gr, IOQueue); + {Requester,get_unicode_state} -> + Requester ! {self(),get_unicode_state,get_unicode_state(Iport)}, + server_loop(Iport, Oport, Curr, User, Gr, IOQueue); + {Requester,set_unicode_state, Bool} -> + Requester ! {self(),set_unicode_state,set_unicode_state(Iport,Bool)}, + server_loop(Iport, Oport, Curr, User, Gr, IOQueue); + Req when element(1,Req) =:= User orelse element(1,Req) =:= Curr, tuple_size(Req) =:= 2 orelse tuple_size(Req) =:= 3 -> %% We match {User|Curr,_}|{User|Curr,_,_} @@ -224,21 +236,16 @@ server_loop(Iport, Oport, Curr, User, Gr, {Resp, IOQ} = IOQueue) -> _ -> % not current, just remove it server_loop(Iport, Oport, Curr, User, gr_del_pid(Gr, Pid), IOQueue) end; + {Requester, {put_chars_sync, _, _, Reply}} -> + %% We need to ack the Req otherwise originating process will hang forever + %% Do discard the output to non visible shells (as was done previously) + Requester ! 
{reply, Reply}, + server_loop(Iport, Oport, Curr, User, Gr, IOQueue); _X -> - %% Ignore unknown messages. - server_loop(Iport, Oport, Curr, User, Gr, IOQueue) + %% Ignore unknown messages. + server_loop(Iport, Oport, Curr, User, Gr, IOQueue) end. -%% We always handle geometry and unicode requests -handle_req({Curr,tty_geometry},Iport,_Oport,IOQueue) -> - Curr ! {self(),tty_geometry,get_tty_geometry(Iport)}, - IOQueue; -handle_req({Curr,get_unicode_state},Iport,_Oport,IOQueue) -> - Curr ! {self(),get_unicode_state,get_unicode_state(Iport)}, - IOQueue; -handle_req({Curr,set_unicode_state, Bool},Iport,_Oport,IOQueue) -> - Curr ! {self(),set_unicode_state,set_unicode_state(Iport,Bool)}, - IOQueue; handle_req(next,Iport,Oport,{false,IOQ}=IOQueue) -> case queue:out(IOQ) of {empty,_} -> diff --git a/lib/kernel/test/Makefile b/lib/kernel/test/Makefile index b9942e899f..4a86265a4a 100644 --- a/lib/kernel/test/Makefile +++ b/lib/kernel/test/Makefile @@ -1,7 +1,7 @@ # # %CopyrightBegin% # -# Copyright Ericsson AB 1997-2016. All Rights Reserved. +# Copyright Ericsson AB 1997-2018. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -70,6 +70,15 @@ MODULES= \ interactive_shell_SUITE \ init_SUITE \ kernel_config_SUITE \ + logger_SUITE \ + logger_disk_log_h_SUITE \ + logger_env_var_SUITE \ + logger_filters_SUITE \ + logger_formatter_SUITE \ + logger_legacy_SUITE \ + logger_simple_h_SUITE \ + logger_std_h_SUITE \ + logger_test_lib \ os_SUITE \ pg2_SUITE \ seq_trace_SUITE \ @@ -80,7 +89,8 @@ MODULES= \ loose_node \ sendfile_SUITE \ standard_error_SUITE \ - multi_load_SUITE + multi_load_SUITE \ + zzz_SUITE APP_FILES = \ appinc.app \ @@ -101,7 +111,7 @@ TARGET_FILES= $(MODULES:%=$(EBIN)/%.$(EMULATOR)) INSTALL_PROGS= $(TARGET_FILES) EMAKEFILE=Emakefile -COVERFILE=kernel.cover +COVERFILE=kernel.cover logger.cover # ---------------------------------------------------- # Release directory specification @@ -148,8 +158,9 @@ release_tests_spec: make_emakefile $(INSTALL_DIR) "$(RELSYSDIR)" $(INSTALL_DATA) $(ERL_FILES) "$(RELSYSDIR)" $(INSTALL_DATA) $(APP_FILES) "$(RELSYSDIR)" - $(INSTALL_DATA) kernel.spec kernel_smoke.spec $(EMAKEFILE)\ - $(COVERFILE) "$(RELSYSDIR)" + $(INSTALL_DATA) \ + kernel.spec kernel_smoke.spec kernel_bench.spec logger.spec \ + $(EMAKEFILE) $(COVERFILE) "$(RELSYSDIR)" chmod -R u+w "$(RELSYSDIR)" @tar cf - *_SUITE_data | (cd "$(RELSYSDIR)"; tar xf -) diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl index 866043cfb4..5c35b82207 100644 --- a/lib/kernel/test/application_SUITE.erl +++ b/lib/kernel/test/application_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -37,7 +37,8 @@ -export([config_change/1, persistent_env/1, distr_changed_tc1/1, distr_changed_tc2/1, ensure_started/1, ensure_all_started/1, - shutdown_func/1, do_shutdown/1, shutdown_timeout/1, shutdown_deadlock/1]). + shutdown_func/1, do_shutdown/1, shutdown_timeout/1, shutdown_deadlock/1, + config_relative_paths/1]). -define(TESTCASE, testcase_name). -define(testcase, proplists:get_value(?TESTCASE, Config)). 
@@ -55,7 +56,7 @@ all() -> script_start, nodedown_start, permit_false_start_local, permit_false_start_dist, get_key, get_env, ensure_all_started, {group, distr_changed}, config_change, shutdown_func, shutdown_timeout, - shutdown_deadlock, + shutdown_deadlock, config_relative_paths, persistent_env]. groups() -> @@ -1568,7 +1569,8 @@ loop5606(Pid) -> %% Tests get_env/* functions. get_env(Conf) when is_list(Conf) -> - {ok, _} = application:get_env(kernel, error_logger), + ok = application:set_env(kernel, new_var, new_val), + {ok, new_val} = application:get_env(kernel, new_var), undefined = application:get_env(undefined_app, a), undefined = application:get_env(kernel, error_logger_xyz), default = application:get_env(kernel, error_logger_xyz, default), @@ -1602,8 +1604,7 @@ get_key(Conf) when is_list(Conf) -> {ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} = rpc:call(Cp1, application, get_key, [appinc, start_phases]), {ok, Env} = rpc:call(Cp1, application, get_key, [appinc ,env]), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), {ok, []} = rpc:call(Cp1, application, get_key, [appinc, modules]), {ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} = rpc:call(Cp1, application, get_key, [appinc, mod]), @@ -1624,8 +1625,7 @@ get_key(Conf) when is_list(Conf) -> {mod, {application_starter, [ch_sup, {appinc, 41, 43}] }}, {start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} = rpc:call(Cp1, application, get_all_key, [appinc]), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), {ok, "Test of new app file, including appnew"} = gen_server:call({global, {ch,41}}, {get_pid_key, description}), @@ -1642,8 +1642,7 @@ get_key(Conf) when is_list(Conf) -> {ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} = gen_server:call({global, {ch,41}}, {get_pid_key, start_phases}), {ok, Env} = gen_server:call({global, {ch,41}}, {get_pid_key, env}), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), {ok, []} = gen_server:call({global, {ch,41}}, {get_pid_key, modules}), {ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} = @@ -1670,8 +1669,7 @@ get_key(Conf) when is_list(Conf) -> {mod, {application_starter, [ch_sup, {appinc, 41, 43}] }}, {start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} = gen_server:call({global, {ch,41}}, get_pid_all_key), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), stop_node_nice(Cp1), ok. 
@@ -2078,6 +2076,42 @@ shutdown_deadlock(Config) when is_list(Config) -> %%----------------------------------------------------------------- +%% Relative paths in sys.config +%%----------------------------------------------------------------- +config_relative_paths(Config) -> + Dir = ?config(priv_dir,Config), + SubDir = filename:join(Dir,"subdir"), + Sys = filename:join(SubDir,"sys.config"), + ok = filelib:ensure_dir(Sys), + ok = file:write_file(Sys,"[\"../up.config\",\"current\"].\n"), + + Up = filename:join(Dir,"up.config"), + ok = file:write_file(Up,"[{app1,[{key1,value}]}].\n"), + + {ok,Cwd} = file:get_cwd(), + Current1 = filename:join(Cwd,"current.config"), + ok = file:write_file(Current1,"[{app1,[{key2,value1}]}].\n"), + + N1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), + {ok,Node1} = start_node(N1,filename:rootname(Sys)), + ok = rpc:call(Node1, application, load, [app1()]), + {ok, value} = rpc:call(Node1, application, get_env,[app1,key1]), + {ok, value1} = rpc:call(Node1, application, get_env,[app1,key2]), + + Current2 = filename:join(SubDir,"current.config"), + ok = file:write_file(Current2,"[{app1,[{key2,value2}]}].\n"), + + N2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_2"])), + {ok, Node2} = start_node(N2,filename:rootname(Sys)), + ok = rpc:call(Node2, application, load, [app1()]), + {ok, value} = rpc:call(Node2, application, get_env,[app1,key1]), + {ok, value2} = rpc:call(Node2, application, get_env,[app1,key2]), + + stop_node_nice([Node1,Node2]), + + ok. + +%%----------------------------------------------------------------- %% Utility functions %%----------------------------------------------------------------- app0() -> diff --git a/lib/kernel/test/code_SUITE.erl b/lib/kernel/test/code_SUITE.erl index 6f8e949aac..1314316c13 100644 --- a/lib/kernel/test/code_SUITE.erl +++ b/lib/kernel/test/code_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -35,6 +35,7 @@ purge_stacktrace/1, mult_lib_roots/1, bad_erl_libs/1, code_archive/1, code_archive2/1, on_load/1, on_load_binary/1, on_load_embedded/1, on_load_errors/1, on_load_update/1, + on_load_trace_on_load/1, on_load_purge/1, on_load_self_call/1, on_load_pending/1, on_load_deleted/1, big_boot_embedded/1, @@ -66,14 +67,16 @@ all() -> ext_mod_dep, clash, where_is_file, purge_stacktrace, mult_lib_roots, bad_erl_libs, code_archive, code_archive2, on_load, - on_load_binary, on_load_embedded, on_load_errors, on_load_update, + on_load_binary, on_load_embedded, on_load_errors, + {group, sequence}, on_load_purge, on_load_self_call, on_load_pending, on_load_deleted, module_status, big_boot_embedded, native_early_modules, get_mode, normalized_paths]. -groups() -> - []. +%% These need to run in order +groups() -> [{sequence, [sequence], [on_load_update, + on_load_trace_on_load]}]. init_per_group(_GroupName, Config) -> Config. 
@@ -928,37 +931,34 @@ purge_stacktrace(Config) when is_list(Config) -> code:purge(code_b_test), try code_b_test:call(fun(b) -> ok end, a) catch - error:function_clause -> + error:function_clause:Stacktrace -> code:load_file(code_b_test), - case erlang:get_stacktrace() of + case Stacktrace of [{?MODULE,_,[a],_}, {code_b_test,call,2,_}, {?MODULE,purge_stacktrace,1,_}|_] -> - false = code:purge(code_b_test), - [] = erlang:get_stacktrace() + false = code:purge(code_b_test) end end, try code_b_test:call(nofun, 2) catch - error:function_clause -> + error:function_clause:Stacktrace2 -> code:load_file(code_b_test), - case erlang:get_stacktrace() of + case Stacktrace2 of [{code_b_test,call,[nofun,2],_}, {?MODULE,purge_stacktrace,1,_}|_] -> - false = code:purge(code_b_test), - [] = erlang:get_stacktrace() + false = code:purge(code_b_test) end end, Args = [erlang,error,[badarg]], try code_b_test:call(erlang, error, [badarg,Args]) catch - error:badarg -> + error:badarg:Stacktrace3 -> code:load_file(code_b_test), - case erlang:get_stacktrace() of + case Stacktrace3 of [{code_b_test,call,Args,_}, {?MODULE,purge_stacktrace,1,_}|_] -> - false = code:purge(code_b_test), - [] = erlang:get_stacktrace() + false = code:purge(code_b_test) end end, ok. @@ -1493,7 +1493,7 @@ do_on_load_error(ReturnValue) -> {undef,[{on_load_error,main,[],_}|_]} = Exit end. -on_load_update(_Config) -> +on_load_update(Config) -> {Mod,Code1} = on_load_update_code(1), {module,Mod} = code:load_binary(Mod, "", Code1), 42 = Mod:a(), @@ -1503,7 +1503,7 @@ on_load_update(_Config) -> {Mod,Code2} = on_load_update_code(2), {error,on_load_failure} = code:load_binary(Mod, "", Code2), 42 = Mod:a(), - 100 = Mod:b(99), + 78 = Mod:b(77), {'EXIT',{undef,_}} = (catch Mod:never()), 4 = erlang:trace_pattern({Mod,'_','_'}, false), @@ -1514,6 +1514,9 @@ on_load_update(_Config) -> {'EXIT',{undef,_}} = (catch Mod:b(10)), {'EXIT',{undef,_}} = (catch Mod:never()), + code:purge(Mod), + code:delete(Mod), + code:purge(Mod), ok. on_load_update_code(Version) -> @@ -1545,6 +1548,31 @@ on_load_update_code_1(3, Mod) -> "f() -> ok.\n", "c() -> 100.\n"]). +%% Test -on_load while trace feature 'on_load' is enabled (OTP-14612) +on_load_trace_on_load(Config) -> + Papa = self(), + Tracer = spawn_link(fun F() -> receive M -> Papa ! M end, F() end), + {tracer,[]} = erlang:trace_info(self(),tracer), + erlang:trace(self(), true, [call, {tracer, Tracer}]), + erlang:trace_pattern(on_load, true, []), + on_load_update(Config), + erlang:trace_pattern(on_load, false, []), + erlang:trace(self(), false, [call]), + + Ms = flush(), + [{trace, Papa, call, {on_load_update_code, a, []}}, + {trace, Papa, call, {on_load_update_code, b, [99]}}, + {trace, Papa, call, {on_load_update_code, c, []}}] = Ms, + + exit(Tracer, normal), + ok. + +flush() -> + receive M -> [M | flush()] + after 100 -> [] + end. + + on_load_purge(_Config) -> Mod = ?FUNCTION_NAME, register(Mod, self()), diff --git a/lib/kernel/test/code_SUITE_data/upgrade_client.erl b/lib/kernel/test/code_SUITE_data/upgrade_client.erl index faa18e1410..1c3c2def53 100644 --- a/lib/kernel/test/code_SUITE_data/upgrade_client.erl +++ b/lib/kernel/test/code_SUITE_data/upgrade_client.erl @@ -341,6 +341,7 @@ check_tracing_loop(N, MsgList) -> stop_tracing(Tracer) -> + erlang:trace_pattern({error_handler,undefined_function,3}, false, [global]), erlang:trace(self(), false, [call]), Tracer ! 
die_please, receive diff --git a/lib/kernel/test/disk_log_SUITE.erl b/lib/kernel/test/disk_log_SUITE.erl index fe2fc778f2..9704c3b28c 100644 --- a/lib/kernel/test/disk_log_SUITE.erl +++ b/lib/kernel/test/disk_log_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -89,8 +89,6 @@ dist_terminate/1, dist_accessible/1, dist_deadlock/1, dist_open2/1, other_groups/1, - evil/1, - otp_6278/1, otp_10131/1]). -export([head_fun/1, hf/0, lserv/1, @@ -123,7 +121,7 @@ [halt_int, wrap_int, halt_ext, wrap_ext, read_mode, head, notif, new_idx_vsn, reopen, block, unblock, open, close, error, chunk, truncate, many_users, info, change_size, - change_attribute, distribution, evil, otp_6278, otp_10131]). + change_attribute, distribution, otp_6278, otp_10131]). %% These test cases should be skipped if the VxWorks card is %% configured without NFS cache. @@ -149,7 +147,7 @@ all() -> {group, open}, {group, close}, {group, error}, chunk, truncate, many_users, {group, info}, {group, change_size}, change_attribute, - {group, distribution}, evil, otp_6278, otp_10131]. + {group, distribution}, otp_6278, otp_10131]. groups() -> [{halt_int, [], [halt_int_inf, {group, halt_int_sz}]}, @@ -1752,7 +1750,7 @@ block_queue(Conf) when is_list(Conf) -> true = [{1,a},{2,b},{3,c},{4,d},{5,e},{6,f},{7,g},{8,h}] == Terms, del(File, 2), Q = qlen(), - true = (P0 == pps()), + check_pps(P0), ok. %% OTP-4880. Blocked processes did not get disk_log_stopped message. @@ -1784,7 +1782,7 @@ block_queue2(Conf) when is_list(Conf) -> {ok,<<>>} = file:read_file(File ++ ".1"), del(File, No), Q = qlen(), - true = (P0 == pps()), + check_pps(P0), ok. @@ -2121,7 +2119,7 @@ close_block(Conf) when is_list(Conf) -> 0 = sync_do(Pid2, users), sync_do(Pid2, terminate), {error, no_such_log} = disk_log:info(n), - true = (P0 == pps()), + check_pps(P0), %% Users terminate (no link...). Pid3 = spawn_link(?MODULE, lserv, [n]), @@ -2139,7 +2137,7 @@ close_block(Conf) when is_list(Conf) -> disk_log:close(n), disk_log:close(n), {error, no_such_log} = disk_log:info(n), - true = (P0 == pps()), + check_pps(P0), %% Blocking owner terminates. Pid5 = spawn_link(?MODULE, lserv, [n]), @@ -2156,7 +2154,7 @@ close_block(Conf) when is_list(Conf) -> 1 = users(n), ok = disk_log:close(n), {error, no_such_log} = disk_log:info(n), - true = (P0 == pps()), + check_pps(P0), %% Blocking user terminates. Pid6 = spawn_link(?MODULE, lserv, [n]), @@ -2176,7 +2174,7 @@ close_block(Conf) when is_list(Conf) -> 1 = users(n), ok = disk_log:close(n), {error, no_such_log} = disk_log:info(n), - true = (P0 == pps()), + check_pps(P0), %% Blocking owner terminates. Pid7 = spawn_link(?MODULE, lserv, [n]), @@ -2194,7 +2192,7 @@ close_block(Conf) when is_list(Conf) -> 1 = users(n), ok = disk_log:close(n), {error, no_such_log} = disk_log:info(n), - true = (P0 == pps()), + check_pps(P0), %% Two owners, the blocking one terminates. Pid8 = spawn_link(?MODULE, lserv, [n]), @@ -2209,7 +2207,7 @@ close_block(Conf) when is_list(Conf) -> 0 = sync_do(Pid9, users), sync_do(Pid9, terminate), {error, no_such_log} = disk_log:info(n), - true = (P0 == pps()), + check_pps(P0), %% Blocking user closes. 
Pid10 = spawn_link(?MODULE, lserv, [n]), @@ -2227,7 +2225,7 @@ close_block(Conf) when is_list(Conf) -> ok = disk_log:close(n), sync_do(Pid10, terminate), {error, no_such_log} = disk_log:info(n), - true = (P0 == pps()), + check_pps(P0), %% Blocking user unblocks and closes. Pid11 = spawn_link(?MODULE, lserv, [n]), @@ -2246,7 +2244,7 @@ close_block(Conf) when is_list(Conf) -> ok = disk_log:close(n), {error, no_such_log} = disk_log:info(n), sync_do(Pid11, terminate), - true = (P0 == pps()), + check_pps(P0), %% Blocking owner closes. Pid12 = spawn_link(?MODULE, lserv, [n]), @@ -2265,7 +2263,7 @@ close_block(Conf) when is_list(Conf) -> ok = disk_log:close(n), {error, no_such_log} = disk_log:info(n), sync_do(Pid12, terminate), - true = (P0 == pps()), + check_pps(P0), %% Blocking owner unblocks and closes. Pid13 = spawn_link(?MODULE, lserv, [n]), @@ -2285,7 +2283,7 @@ close_block(Conf) when is_list(Conf) -> ok = disk_log:close(n), {error, no_such_log} = disk_log:info(n), sync_do(Pid13, terminate), - true = (P0 == pps()), + check_pps(P0), del(File, No), % cleanup ok. @@ -2489,7 +2487,7 @@ error_repair(Conf) when is_list(Conf) -> P0 = pps(), {error, {file_error, _, _}} = disk_log:open([{name, n}, {file, File}, {type, wrap}, {size,{40,4}}]), - true = (P0 == pps()), + check_pps(P0), del(File, No), ok = file:del_dir(Dir), @@ -2508,7 +2506,7 @@ error_repair(Conf) when is_list(Conf) -> disk_log:open([{name, n}, {file, File}, {type, wrap}, {format, internal}, {size, {40,No}}]), ok = disk_log:close(n), - true = (P1 == pps()), + check_pps(P1), del(File, No), receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok after 1000 -> ct:fail(failed) end, @@ -2526,7 +2524,7 @@ error_repair(Conf) when is_list(Conf) -> disk_log:open([{name, n}, {file, File}, {type, wrap}, {format, internal}, {size, {4000,No}}]), ok = disk_log:close(n), - true = (P2 == pps()), + check_pps(P2), del(File, No), receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok after 1000 -> ct:fail(failed) end, @@ -2635,7 +2633,7 @@ error_log(Conf) when is_list(Conf) -> {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap}, {format, external},{size, {100, No}}]), {error, {file_error, _, _}} = disk_log:truncate(n), - true = (P0 == pps()), + check_pps(P0), del(File, No), %% OTP-4880. @@ -2643,7 +2641,7 @@ error_log(Conf) when is_list(Conf) -> {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt}, {format, external},{size, 100000}]), {error, {file_error, _, eisdir}} = disk_log:reopen(n, LDir), - true = (P0 == pps()), + check_pps(P0), file:delete(File), B = mk_bytes(60), @@ -3005,7 +3003,7 @@ error_index(Conf) when is_list(Conf) -> {error, {invalid_index_file, _}} = disk_log:open(Args), del(File, No), - true = (P0 == pps()), + check_pps(P0), true = (Q == qlen()), ok. @@ -4438,7 +4436,7 @@ dist_open2(Conf) when is_list(Conf) -> timer:sleep(500), file:delete(File), - true = (P0 == pps()), + check_pps(P0), %% This time the first process has a naughty head_func. This test %% does not add very much. Perhaps it should be removed. However, @@ -4484,7 +4482,7 @@ dist_open2(Conf) when is_list(Conf) -> timer:sleep(100), {error, no_such_log} = disk_log:close(Log), file:delete(File), - true = (P0 == pps()), + check_pps(P0), No = 2, Log2 = n2, @@ -4513,7 +4511,7 @@ dist_open2(Conf) when is_list(Conf) -> file:delete(File2), del(File, No), - true = (P0 == pps()), + check_pps(P0), R. @@ -4558,7 +4556,7 @@ dist_open2_1(Conf, Delay) -> {error, no_such_log} = disk_log:info(Log), file:delete(File), - true = (P0 == pps()), + check_pps(P0), ok. 
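The hunks above replace the bare assertion true = (P0 == pps()) with check_pps(P0), whose definition is added further down in this diff; it retries once after 500 ms and prints which ports or processes leaked before failing the case. The intended usage pattern, sketched with a hypothetical test case:

    leak_sensitive_case(Conf) when is_list(Conf) ->
        P0 = pps(),          % snapshot {Ports, Procs} before the real work
        %% ... open, crash and close disk_log instances here ...
        check_pps(P0),       % fail the case if any port or process leaked
        ok.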
@@ -4615,7 +4613,7 @@ dist_open2_2(Conf, Delay) -> {[{Node1,{repaired,_,_,_}}],[]}} -> ok end, - true = (P0 == pps()), + check_pps(P0), stop_node(Node1), file:delete(File), ok. @@ -4676,119 +4674,6 @@ other_groups(Conf) when is_list(Conf) -> ok. --define(MAX, ?MAX_FWRITE_CACHE). % as in disk_log_1.erl -%% Evil cases such as closed file descriptor port. -evil(Conf) when is_list(Conf) -> - Dir = ?privdir(Conf), - File = filename:join(Dir, "n.LOG"), - Log = n, - - %% Not a very thorough test. - - ok = setup_evil_filled_cache_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:log(Log, apa), - ok = disk_log:close(Log), - - ok = setup_evil_filled_cache_halt(Log, Dir), - {error, {file_error,_,einval}} = disk_log:truncate(Log, apa), - ok = stop_evil(Log), - - %% White box test. - file:delete(File), - Ports0 = erlang:ports(), - {ok, Log} = disk_log:open([{name,Log},{file,File},{type,halt}, - {size,?MAX+50},{format,external}]), - [Fd] = erlang:ports() -- Ports0, - {B,_} = x_mk_bytes(30), - ok = disk_log:blog(Log, <<0:(?MAX-1)/unit:8>>), - exit(Fd, kill), - {error, {file_error,_,einval}} = disk_log:blog_terms(Log, [B,B]), - ok= disk_log:close(Log), - file:delete(File), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:close(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:log(Log, apa), - ok = stop_evil(Log), - - ok = setup_evil_halt(Log, Dir), - {error, {file_error,_,einval}} = disk_log:log(Log, apa), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:reopen(Log, apa), - {error, {file_error,_,einval}} = disk_log:reopen(Log, apa), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:reopen(Log, apa), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:inc_wrap_file(Log), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:chunk(Log, start), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:truncate(Log), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:chunk_step(Log, start, 1), - ok = stop_evil(Log), - - io:format("messages: ~p~n", [erlang:process_info(self(), messages)]), - del(File, 2), - file:delete(File), - ok. - -setup_evil_wrap(Log, Dir) -> - setup_evil(Log, [{type,wrap},{size,{100,2}}], Dir). - -setup_evil_halt(Log, Dir) -> - setup_evil(Log, [{type,halt},{size,10000}], Dir). - -setup_evil(Log, Args, Dir) -> - File = filename:join(Dir, lists:concat([Log, ".LOG"])), - file:delete(File), - del(File, 2), - ok = disk_log:start(), - Ports0 = erlang:ports(), - {ok, Log} = disk_log:open([{name,Log},{file,File} | Args]), - [Fd] = erlang:ports() -- Ports0, - exit(Fd, kill), - ok = disk_log:log_terms(n, [<<0:10/unit:8>>]), - timer:sleep(2500), % TIMEOUT in disk_log_1.erl is 2000 - ok. - -stop_evil(Log) -> - {error, _} = disk_log:close(Log), - ok. - -setup_evil_filled_cache_wrap(Log, Dir) -> - setup_evil_filled_cache(Log, [{type,wrap},{size,{?MAX,2}}], Dir). - -setup_evil_filled_cache_halt(Log, Dir) -> - setup_evil_filled_cache(Log, [{type,halt},{size,infinity}], Dir). - -%% The cache is filled, and the file descriptor port gone. 
-setup_evil_filled_cache(Log, Args, Dir) -> - File = filename:join(Dir, lists:concat([Log, ".LOG"])), - file:delete(File), - del(File, 2), - ok = disk_log:start(), - Ports0 = erlang:ports(), - {ok, Log} = disk_log:open([{name,Log},{file,File} | Args]), - [Fd] = erlang:ports() -- Ports0, - ok = disk_log:log_terms(n, [<<0:?MAX/unit:8>>]), - exit(Fd, kill), - ok. - %% OTP-6278. open/1 creates no status or crash report. otp_6278(Conf) when is_list(Conf) -> Dir = ?privdir(Conf), @@ -4906,10 +4791,59 @@ log(Name, N) -> format_error(E) -> lists:flatten(disk_log:format_error(E)). +check_pps({Ports0,Procs0} = P0) -> + case pps() of + P0 -> + ok; + _ -> + timer:sleep(500), + case pps() of + P0 -> + ok; + {Ports1,Procs1} = P1 -> + case {Ports1 -- Ports0, Procs1 -- Procs0} of + {[], []} -> ok; + {PortsDiff,ProcsDiff} -> + io:format("failure, got ~p~n, expected ~p\n", [P1, P0]), + show("Old port", Ports0 -- Ports1), + show("New port", PortsDiff), + show("Old proc", Procs0 -- Procs1), + show("New proc", ProcsDiff), + ct:fail(failed) + end + end + end. + +show(_S, []) -> + ok; +show(S, [{Pid, Name, InitCall}|Pids]) when is_pid(Pid) -> + io:format("~s: ~w (~w), ~w: ~p~n", + [S, Pid, proc_reg_name(Name), InitCall, + erlang:process_info(Pid)]), + show(S, Pids); +show(S, [{Port, _}|Ports]) when is_port(Port)-> + io:format("~s: ~w: ~p~n", [S, Port, erlang:port_info(Port)]), + show(S, Ports). + pps() -> timer:sleep(100), - {erlang:ports(), lists:filter(fun(P) -> erlang:is_process_alive(P) end, - processes())}. + {port_list(), process_list()}. + +port_list() -> + [{P,safe_second_element(erlang:port_info(P, name))} || + P <- erlang:ports()]. + +process_list() -> + [{P,process_info(P, registered_name), + safe_second_element(process_info(P, initial_call))} || + P <- processes(), erlang:is_process_alive(P)]. + +proc_reg_name({registered_name, Name}) -> Name; +proc_reg_name([]) -> no_reg_name. + +safe_second_element({_,Info}) -> Info; +safe_second_element(Other) -> Other. + qlen() -> {_, {_, N}} = lists:keysearch(message_queue_len, 1, process_info(self())), diff --git a/lib/kernel/test/erl_distribution_SUITE.erl b/lib/kernel/test/erl_distribution_SUITE.erl index bbfaa9d147..5a8bbd56c4 100644 --- a/lib/kernel/test/erl_distribution_SUITE.erl +++ b/lib/kernel/test/erl_distribution_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -25,6 +25,7 @@ init_per_group/2,end_per_group/2]). -export([tick/1, tick_change/1, + connect_node/1, nodenames/1, hostnames/1, illegal_nodenames/1, hidden_node/1, setopts/1, @@ -70,6 +71,7 @@ suite() -> all() -> [tick, tick_change, nodenames, hostnames, illegal_nodenames, + connect_node, hidden_node, setopts, table_waste, net_setuptime, inet_dist_options_options, {group, monitor_nodes}]. @@ -87,6 +89,7 @@ init_per_suite(Config) -> Config. end_per_suite(_Config) -> + [slave:stop(N) || N <- nodes()], ok. init_per_group(_GroupName, Config) -> @@ -95,13 +98,23 @@ init_per_group(_GroupName, Config) -> end_per_group(_GroupName, Config) -> Config. - +init_per_testcase(TC, Config) when TC == hostnames; + TC == nodenames -> + file:make_dir("hostnames_nodedir"), + file:write_file("hostnames_nodedir/ignore_core_files",""), + Config; init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) -> Config. end_per_testcase(_Func, _Config) -> ok. 
+connect_node(Config) when is_list(Config) -> + Connected = nodes(connected), + true = net_kernel:connect_node(node()), + Connected = nodes(connected), + ok. + tick(Config) when is_list(Config) -> PaDir = filename:dirname(code:which(erl_distribution_SUITE)), @@ -240,7 +253,7 @@ illegal(Name) -> test_node(Name) -> test_node(Name, false). test_node(Name, Illigal) -> - ProgName = atom_to_list(lib:progname()), + ProgName = ct:get_progname(), Command = ProgName ++ " -noinput " ++ long_or_short() ++ Name ++ " -eval \"net_adm:ping('" ++ atom_to_list(node()) ++ "')\"" ++ case Illigal of @@ -251,7 +264,7 @@ test_node(Name, Illigal) -> end, net_kernel:monitor_nodes(true), BinCommand = unicode:characters_to_binary(Command, utf8), - Prt = open_port({spawn, BinCommand}, [stream]), + Prt = open_port({spawn, BinCommand}, [stream,{cd,"hostnames_nodedir"}]), Node = list_to_atom(Name), receive {nodeup, Node} -> @@ -459,9 +472,9 @@ run_remote_test([FuncStr, TestNodeStr | Args]) -> 1 end catch - C:E -> + C:E:S -> io:format("Node ~p got EXCEPTION ~p:~p\nat ~p\n", - [node(), C, E, erlang:get_stacktrace()]), + [node(), C, E, S]), 2 end, io:format("Node ~p doing halt(~p).\n",[node(), Status]), @@ -1140,17 +1153,16 @@ monitor_nodes_otp_6481_test(Config, TestType) when is_list(Config) -> TestMonNodeState = monitor_node_state(), %% io:format("~p~n", [TestMonNodeState]), TestMonNodeState = - MonNodeState + case TestType of + nodedown -> []; + nodeup -> [{self(), []}] + end + ++ lists:map(fun (_) -> {MN, []} end, Seq) ++ case TestType of nodedown -> [{self(), []}]; nodeup -> [] end - ++ lists:map(fun (_) -> {MN, []} end, Seq) - ++ case TestType of - nodedown -> []; - nodeup -> [{self(), []}] - end, - + ++ MonNodeState, {ok, Node} = start_node(Name, "", this), receive {nodeup, Node} -> ok end, diff --git a/lib/kernel/test/erl_distribution_wb_SUITE.erl b/lib/kernel/test/erl_distribution_wb_SUITE.erl index 03aaee56b7..8256444bdc 100644 --- a/lib/kernel/test/erl_distribution_wb_SUITE.erl +++ b/lib/kernel/test/erl_distribution_wb_SUITE.erl @@ -61,10 +61,13 @@ %% From R9 and forward extended references is compulsory %% From R10 and forward extended pids and ports are compulsory %% From R20 and forward UTF8 atoms are compulsory +%% From R21 and forward NEW_FUN_TAGS is compulsory (no more tuple fallback {fun, ...}) -define(COMPULSORY_DFLAGS, (?DFLAG_EXTENDED_REFERENCES bor ?DFLAG_EXTENDED_PIDS_PORTS bor - ?DFLAG_UTF8_ATOMS)). + ?DFLAG_UTF8_ATOMS bor + ?DFLAG_NEW_FUN_TAGS)). +-define(PASS_THROUGH, $p). -define(shutdown(X), exit(X)). -define(int16(X), [((X) bsr 8) band 16#ff, (X) band 16#ff]). 
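The comment block above records which distribution flags are compulsory from a given release, and COMPULSORY_DFLAGS simply ORs those bits together. A sketch of the kind of check a handshake implementation could perform against a peer's advertised flags (the function name is hypothetical, the macro is the one defined above):

    has_compulsory_dflags(PeerFlags) ->
        PeerFlags band ?COMPULSORY_DFLAGS =:= ?COMPULSORY_DFLAGS.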
@@ -674,15 +677,16 @@ build_rex_message(Cookie,OurName) -> %% Receive a distribution message recv_message(Socket) -> case gen_tcp:recv(Socket, 0) of + {ok,[]} -> + recv_message(Socket); %% a tick, ignore {ok,Data} -> B0 = list_to_binary(Data), - {_,B1} = erlang:split_binary(B0,1), - Header = binary_to_term(B1), - Siz = byte_size(term_to_binary(Header)), - {_,B2} = erlang:split_binary(B1,Siz), + <<?PASS_THROUGH, B1/binary>> = B0, + {Header,Siz} = binary_to_term(B1,[used]), + <<_:Siz/binary,B2/binary>> = B1, Message = case (catch binary_to_term(B2)) of {'EXIT', _} -> - could_not_digest_message; + {could_not_digest_message,B2}; Other -> Other end, diff --git a/lib/kernel/test/erl_prim_loader_SUITE.erl b/lib/kernel/test/erl_prim_loader_SUITE.erl index b6417210b9..16a127aa3e 100644 --- a/lib/kernel/test/erl_prim_loader_SUITE.erl +++ b/lib/kernel/test/erl_prim_loader_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -33,6 +33,7 @@ primary_archive/1, virtual_dir_in_archive/1, get_modules/1]). +-define(PRIM_FILE, prim_file). %%----------------------------------------------------------------- %% Test suite for erl_prim_loader. (Most code is run during system start/stop.) @@ -461,7 +462,7 @@ primary_archive(Config) when is_list(Config) -> %% Set primary archive ExpectedEbins = [Archive, DictDir ++ "/ebin", DummyDir ++ "/ebin"], io:format("ExpectedEbins: ~p\n", [ExpectedEbins]), - {ok, FileInfo} = prim_file:read_file_info(Archive), + {ok, FileInfo} = ?PRIM_FILE:read_file_info(Archive), {ok, Ebins} = rpc:call(Node, erl_prim_loader, set_primary_archive, [Archive, ArchiveBin, FileInfo, fun escript:parse_file/1]), diff --git a/lib/kernel/test/error_logger_SUITE.erl b/lib/kernel/test/error_logger_SUITE.erl index 2d26a7246c..eab72e58a7 100644 --- a/lib/kernel/test/error_logger_SUITE.erl +++ b/lib/kernel/test/error_logger_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -32,7 +32,8 @@ init_per_group/2,end_per_group/2, off_heap/1, error_report/1, info_report/1, error/1, info/1, - emulator/1, tty/1, logfile/1, add/1, delete/1]). + emulator/1, via_logger_process/1, other_node/1, + tty/1, logfile/1, add/1, delete/1, format_depth/1]). -export([generate_error/2]). @@ -46,16 +47,20 @@ suite() -> {timetrap,{minutes,1}}]. all() -> - [off_heap, error_report, info_report, error, info, emulator, tty, - logfile, add, delete]. + [off_heap, error_report, info_report, error, info, emulator, + via_logger_process, other_node, tty, logfile, add, delete, + format_depth]. groups() -> []. init_per_suite(Config) -> + logger:add_handler(error_logger,error_logger, + #{level=>info,filter_default=>log}), Config. end_per_suite(_Config) -> + logger:remove_handler(error_logger), ok. init_per_group(_GroupName, Config) -> @@ -226,6 +231,40 @@ generate_error(Error, Stack) -> erlang:raise(error, Error, Stack). 
%%----------------------------------------------------------------- + +via_logger_process(Config) -> + case os:type() of + {win32,_} -> + {skip,"Skip on windows - cant change file mode"}; + _ -> + error_logger:add_report_handler(?MODULE, self()), + Dir = filename:join(?config(priv_dir,Config),"dummydir"), + Msg = "File operation error: eacces. Target: " ++ + Dir ++ ". Function: list_dir. ", + ok = file:make_dir(Dir), + ok = file:change_mode(Dir,8#0222), + error = erl_prim_loader:list_dir(Dir), + ok = file:change_mode(Dir,8#0664), + _ = file:del_dir(Dir), + reported(error_report, std_error, Msg), + my_yes = error_logger:delete_report_handler(?MODULE), + ok + end. + +%%----------------------------------------------------------------- + +other_node(_Config) -> + error_logger:add_report_handler(?MODULE, self()), + {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]), + ok = rpc:call(Node,logger,add_handler,[error_logger,error_logger, + #{level=>info,filter_default=>log}]), + rpc:call(Node,error_logger,error_report,[hi_from_remote]), + reported(error_report,std_error,hi_from_remote), + test_server:stop_node(Node), + ok. + + +%%----------------------------------------------------------------- %% We don't enables or disables tty error logging here. We do not %% want to interact with the test run. %%----------------------------------------------------------------- @@ -271,6 +310,21 @@ delete(Config) when is_list(Config) -> ok. %%----------------------------------------------------------------- + +format_depth(_Config) -> + ok = application:set_env(kernel,error_logger_format_depth,30), + 30 = error_logger:get_format_depth(), + ok = application:set_env(kernel,error_logger_format_depth,3), + 10 = error_logger:get_format_depth(), + ok = application:set_env(kernel,error_logger_format_depth,11), + 11 = error_logger:get_format_depth(), + ok = application:set_env(kernel,error_logger_format_depth,unlimited), + unlimited = error_logger:get_format_depth(), + ok = application:unset_env(kernel,error_logger_format_depth), + unlimited = error_logger:get_format_depth(), + ok. + +%%----------------------------------------------------------------- %% Check that the report has been received. %%----------------------------------------------------------------- reported(Tag, Type, Report) -> @@ -279,7 +333,7 @@ reported(Tag, Type, Report) -> test_server:messages_get(), ok after 1000 -> - ct:fail(no_report_received) + ct:fail({no_report_received,test_server:messages_get()}) end. %%----------------------------------------------------------------- diff --git a/lib/kernel/test/error_logger_warn_SUITE.erl b/lib/kernel/test/error_logger_warn_SUITE.erl index a8087e11f9..8f1eb2ba0a 100644 --- a/lib/kernel/test/error_logger_warn_SUITE.erl +++ b/lib/kernel/test/error_logger_warn_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2003-2016. All Rights Reserved. +%% Copyright Ericsson AB 2003-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
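The format_depth/1 case added to error_logger_SUITE above pins down how the kernel parameter error_logger_format_depth is reported back. A short sketch of the observable behaviour the test relies on (values below 10 are apparently raised to 10, and an unset parameter reads back as unlimited; the value 20 here is arbitrary):

    ok = application:set_env(kernel, error_logger_format_depth, 20),
    20 = error_logger:get_format_depth(),
    ok = application:set_env(kernel, error_logger_format_depth, 3),
    10 = error_logger:get_format_depth(),
    ok = application:unset_env(kernel, error_logger_format_depth),
    unlimited = error_logger:get_format_depth().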
@@ -480,9 +480,12 @@ rb_utc() -> UtcLog=case application:get_env(sasl,utc_log) of {ok,true} -> true; - _AllOthers -> + {ok,false} -> application:set_env(sasl,utc_log,true), - false + false; + undefined -> + application:set_env(sasl,utc_log,true), + undefined end, application:start(sasl), rb:start([{report_dir, rd()}]), @@ -494,7 +497,12 @@ rb_utc() -> Sum=one_rb_findstr([],"UTC"), rb:stop(), application:stop(sasl), - application:set_env(sasl,utc_log,UtcLog), + case UtcLog of + undefined -> + application:unset_env(sasl,utc_log); + _ -> + application:set_env(sasl,utc_log,UtcLog) + end, stop_node(Node), ok. diff --git a/lib/kernel/test/file_SUITE.erl b/lib/kernel/test/file_SUITE.erl index 119e1f24bb..a51025cba6 100644 --- a/lib/kernel/test/file_SUITE.erl +++ b/lib/kernel/test/file_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -39,6 +39,8 @@ -define(FILE_FIN_PER_TESTCASE(Config), Config). -endif. +-define(PRIM_FILE, prim_file). + -module(?FILE_SUITE). -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, @@ -54,7 +56,8 @@ open1/1, old_modes/1, new_modes/1, path_open/1, open_errors/1]). -export([ file_info_basic_file/1, file_info_basic_directory/1, - file_info_bad/1, file_info_times/1, file_write_file_info/1]). + file_info_bad/1, file_info_times/1, file_write_file_info/1, + file_wfi_helpers/1]). -export([rename/1, access/1, truncate/1, datasync/1, sync/1, read_write/1, pread_write/1, append/1, exclusive/1]). -export([ e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]). @@ -97,6 +100,12 @@ -export([unicode_mode/1]). +-export([volume_relative_paths/1,unc_paths/1]). + +-export([tiny_writes/1, tiny_writes_delayed/1, + large_writes/1, large_writes_delayed/1, + tiny_reads/1, tiny_reads_ahead/1]). + %% Debug exports -export([create_file_slow/2, create_file/2, create_bin/2]). -export([verify_file/2, verify_bin/3]). @@ -107,6 +116,8 @@ -export([disc_free/1, memsize/0]). -include_lib("common_test/include/ct.hrl"). +-include_lib("common_test/include/ct_event.hrl"). + -include_lib("kernel/include/file.hrl"). -define(THROW_ERROR(RES), throw({fail, ?LINE, RES})). @@ -118,13 +129,13 @@ suite() -> all() -> [unicode, altname, read_write_file, {group, dirs}, - {group, files}, delete, rename, names, {group, errors}, - {group, compression}, {group, links}, copy, + {group, files}, delete, rename, names, volume_relative_paths, unc_paths, + {group, errors}, {group, compression}, {group, links}, copy, delayed_write, read_ahead, segment_read, segment_write, ipread, pid2name, interleaved_read_write, otp_5814, otp_10852, large_file, large_write, read_line_1, read_line_2, read_line_3, read_line_4, standard_io, old_io_protocol, - unicode_mode + unicode_mode, {group, bench} ]. groups() -> @@ -142,7 +153,8 @@ groups() -> {pos, [], [pos1, pos2, pos3]}, {file_info, [], [file_info_basic_file, file_info_basic_directory, - file_info_bad, file_info_times, file_write_file_info]}, + file_info_bad, file_info_times, file_write_file_info, + file_wfi_helpers]}, {consult, [], [consult1, path_consult]}, {eval, [], [eval1, path_eval]}, {script, [], [script1, path_script]}, @@ -154,11 +166,19 @@ groups() -> write_compressed, compress_errors, catenated_gzips, compress_async_crash]}, {links, [], - [make_link, read_link_info_for_non_link, symlinks]}]. 
+ [make_link, read_link_info_for_non_link, symlinks]}, + {bench, [], + [tiny_writes, tiny_writes_delayed, + large_writes, large_writes_delayed, + tiny_reads, tiny_reads_ahead]}]. init_per_group(_GroupName, Config) -> Config. +end_per_group(bench, Config) -> + ScratchDir = proplists:get_value(priv_dir, Config), + file:delete(filename:join(ScratchDir, "benchmark_scratch_file")), + Config; end_per_group(_GroupName, Config) -> Config. @@ -381,11 +401,11 @@ read_write_0(Str, {Func, ReadFun}, Options) -> io:format("~p:~p: ~p ERROR: ~ts vs~n ~w~n - ~p~n", [?MODULE, Line, Func, Str, ReadBytes, Options]), exit({error, ?LINE}); - error:What -> + error:What:Stacktrace -> io:format("~p:??: ~p ERROR: ~p from~n ~w~n ~p~n", [?MODULE, Func, What, Str, Options]), - io:format("\t~p~n", [erlang:get_stacktrace()]), + io:format("\t~p~n", [Stacktrace]), exit({error, ?LINE}) end. @@ -473,7 +493,7 @@ um_check_unicode(_Utf8Bin, {ok, _ListOrBin}, _, _UTF8_) -> um_filename(Bin, Dir, Options) when is_binary(Bin) -> um_filename(binary_to_list(Bin), Dir, Options); um_filename(Str = [_|_], Dir, Options) -> - Name = hd(string:tokens(Str, ":")), + Name = hd(string:lexemes(Str, ":")), Enc = atom_to_list(proplists:get_value(encoding, Options, latin1)), File = case lists:member(binary, Options) of true -> @@ -638,6 +658,10 @@ cur_dir_0(Config) when is_list(Config) -> {ok,NewDirFiles} = ?FILE_MODULE:list_dir("."), true = lists:member(UncommonName,NewDirFiles), + %% Ensure that we get the same result with a trailing slash; the + %% APIs used on Windows will choke on them if passed directly. + {ok,NewDirFiles} = ?FILE_MODULE:list_dir("./"), + %% Delete the directory and return to the old current directory %% and check that the created file isn't there (too!) expect({error, einval}, {error, eacces}, @@ -690,10 +714,15 @@ win_cur_dir_1(_Config) -> %% Get the drive letter from the current directory, %% and try to get current directory for that drive. - [Drive,$:|_] = BaseDir, - {ok,BaseDir} = ?FILE_MODULE:get_cwd([Drive,$:]), + [CurDrive,$:|_] = BaseDir, + {ok,BaseDir} = ?FILE_MODULE:get_cwd([CurDrive,$:]), io:format("BaseDir = ~s\n", [BaseDir]), + %% We should error out on non-existent drives. Any reasonable system will + %% have at least one. + CurDirs = [?FILE_MODULE:get_cwd([Drive,$:]) || Drive <- lists:seq($A, $Z)], + lists:member({error,eaccess}, CurDirs), + %% Unfortunately, there is no way to move away from the %% current drive as we can't use the "subst" command from %% a SSH connection. We can't test any more. @@ -831,7 +860,7 @@ no_untranslatable_names() -> end. start_node(Name, Args) -> - [_,Host] = string:tokens(atom_to_list(node()), "@"), + [_,Host] = string:lexemes(atom_to_list(node()), "@"), ct:log("Trying to start ~w@~s~n", [Name,Host]), case test_server:start_node(Name, peer, [{args,Args}]) of {error,Reason} -> @@ -1019,6 +1048,23 @@ close(Config) when is_list(Config) -> Val = ?FILE_MODULE:close(Fd1), io:format("Second close gave: ~p",[Val]), + %% All operations on a closed raw file should EINVAL, even if they're not + %% supported on the current platform. 
+ {ok,Fd2} = ?FILE_MODULE:open(Name, [read, write, raw]), + ok = ?FILE_MODULE:close(Fd2), + + {error, einval} = ?FILE_MODULE:advise(Fd2, 5, 5, normal), + {error, einval} = ?FILE_MODULE:allocate(Fd2, 5, 5), + {error, einval} = ?FILE_MODULE:close(Fd2), + {error, einval} = ?FILE_MODULE:datasync(Fd2), + {error, einval} = ?FILE_MODULE:position(Fd2, 5), + {error, einval} = ?FILE_MODULE:pread(Fd2, 5, 1), + {error, einval} = ?FILE_MODULE:pwrite(Fd2, 5, "einval please"), + {error, einval} = ?FILE_MODULE:read(Fd2, 1), + {error, einval} = ?FILE_MODULE:sync(Fd2), + {error, einval} = ?FILE_MODULE:truncate(Fd2), + {error, einval} = ?FILE_MODULE:write(Fd2, "einval please"), + [] = flush(), ok. @@ -1132,8 +1178,8 @@ pread_write_test(File, Data) -> end, I = Size + 17, ok = ?FILE_MODULE:pwrite(File, 0, Data), - Res = ?FILE_MODULE:pread(File, 0, I), - {ok, Data} = Res, + {ok, Data} = ?FILE_MODULE:pread(File, 0, I), + {ok, [Data]} = ?FILE_MODULE:pread(File, [{0, I}]), eof = ?FILE_MODULE:pread(File, I, 1), ok = ?FILE_MODULE:pwrite(File, [{0, Data}, {I, Data}]), {ok, [Data, eof, Data]} = @@ -1321,6 +1367,10 @@ file_info_basic_file(Config) when is_list(Config) -> io:put_chars(Fd1, "foo bar"), ok = ?FILE_MODULE:close(Fd1), + %% Don't crash the file server when passing incorrect arguments. + {error,badarg} = ?FILE_MODULE:read_file_info(Name, [{time, gurka}]), + {error,badarg} = ?FILE_MODULE:read_file_info([#{} | gaffel]), + %% Test that the file has the expected attributes. %% The times are tricky, so we will save them to a separate test case. {ok,FileInfo} = ?FILE_MODULE:read_file_info(Name), @@ -1564,6 +1614,39 @@ file_write_file_info(Config) when is_list(Config) -> [] = flush(), ok. +file_wfi_helpers(Config) when is_list(Config) -> + RootDir = get_good_directory(Config), + io:format("RootDir = ~p", [RootDir]), + + Name = filename:join(RootDir, + atom_to_list(?MODULE) ++ "_wfi_helpers"), + + ok = ?FILE_MODULE:write_file(Name, "hello again"), + NewTime = {{1997, 02, 15}, {13, 18, 20}}, + ok = ?FILE_MODULE:change_time(Name, NewTime, NewTime), + + {ok, #file_info{atime=NewActAtime, mtime=NewTime}} = + ?FILE_MODULE:read_file_info(Name), + + NewFilteredAtime = filter_atime(NewTime, Config), + NewFilteredAtime = filter_atime(NewActAtime, Config), + + %% Make the file unwritable + ok = ?FILE_MODULE:change_mode(Name, 8#400), + {error, eacces} = ?FILE_MODULE:write_file(Name, "hello again"), + + %% ... and writable again + ok = ?FILE_MODULE:change_mode(Name, 8#600), + ok = ?FILE_MODULE:write_file(Name, "hello again"), + + %% We have no idea which users will work, so all we can do is to check + %% that it returns enoent instead of crashing. + {error, enoent} = ?FILE_MODULE:change_group("bogus file name", 0), + {error, enoent} = ?FILE_MODULE:change_owner("bogus file name", 0), + + [] = flush(), + ok. + %% Returns a directory on a file system that has correct file times. get_good_directory(Config) -> @@ -2044,13 +2127,22 @@ names(Config) when is_list(Config) -> ok = ?FILE_MODULE:close(Fd2), {ok,Fd3} = ?FILE_MODULE:open(Name3,read), ok = ?FILE_MODULE:close(Fd3), + + %% Now try the same on raw files. 
+ {ok,Fd4} = ?FILE_MODULE:open(Name2, [read, raw]), + ok = ?FILE_MODULE:close(Fd4), + {ok,Fd4f} = ?FILE_MODULE:open(lists:flatten(Name2), [read, raw]), + ok = ?FILE_MODULE:close(Fd4f), + {ok,Fd5} = ?FILE_MODULE:open(Name3, [read, raw]), + ok = ?FILE_MODULE:close(Fd5), + case length(Name1) > 255 of true -> io:format("Path too long for an atom:\n\n~p\n", [Name1]); false -> Name4 = list_to_atom(Name1), - {ok,Fd4} = ?FILE_MODULE:open(Name4,read), - ok = ?FILE_MODULE:close(Fd4) + {ok,Fd6} = ?FILE_MODULE:open(Name4,read), + ok = ?FILE_MODULE:close(Fd6) end, %% Try some path names @@ -2074,6 +2166,46 @@ names(Config) when is_list(Config) -> [] = flush(), ok. +volume_relative_paths(Config) when is_list(Config) -> + case os:type() of + {win32, _} -> + {ok, [Drive, $: | _]} = file:get_cwd(), + %% Relative to current device root. + {ok, RootInfo} = file:read_file_info([Drive, $:, $/]), + {ok, RootInfo} = file:read_file_info("/"), + %% Relative to current device directory. + {ok, DirContents} = file:list_dir([Drive, $:]), + {ok, DirContents} = file:list_dir("."), + [] = flush(), + ok; + _ -> + {skip, "This test is Windows-specific."} + end. + +unc_paths(Config) when is_list(Config) -> + case os:type() of + {win32, _} -> + %% We assume administrative shares are set up and reachable, and we + %% settle for testing presence as some of the returned data is + %% different. + {ok, _} = file:read_file_info("C:\\Windows\\explorer.exe"), + {ok, _} = file:read_file_info("\\\\localhost\\c$\\Windows\\explorer.exe"), + + {ok, Cwd} = file:get_cwd(), + + try + ok = file:set_cwd("\\\\localhost\\c$\\Windows\\"), + {ok, _} = file:read_file_info("explorer.exe") + after + file:set_cwd(Cwd) + end, + + [] = flush(), + ok; + _ -> + {skip, "This test is Windows-specific."} + end. + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -2102,13 +2234,14 @@ e_delete(Config) when is_list(Config) -> case os:type() of {win32, _} -> %% Remove a character device. - {error, eacces} = ?FILE_MODULE:delete("nul"); + expect({error, eacces}, {error, einval}, + ?FILE_MODULE:delete("nul")); _ -> ?FILE_MODULE:write_file_info( Base, #file_info {mode=0}), {error, eacces} = ?FILE_MODULE:delete(Afile), ?FILE_MODULE:write_file_info( - Base, #file_info {mode=8#600}) + Base, #file_info {mode=8#700}) end, [] = flush(), @@ -2239,7 +2372,7 @@ e_make_dir(Config) when is_list(Config) -> ?FILE_MODULE:write_file_info(Base, #file_info {mode=0}), {error, eacces} = ?FILE_MODULE:make_dir(filename:join(Base, "xxxx")), ?FILE_MODULE:write_file_info( - Base, #file_info {mode=8#600}) + Base, #file_info {mode=8#700}) end, ok. @@ -2285,7 +2418,7 @@ e_del_dir(Config) when is_list(Config) -> ok = ?FILE_MODULE:make_dir(ADirectory), ?FILE_MODULE:write_file_info( Base, #file_info {mode=0}), {error, eacces} = ?FILE_MODULE:del_dir(ADirectory), - ?FILE_MODULE:write_file_info( Base, #file_info {mode=8#600}) + ?FILE_MODULE:write_file_info( Base, #file_info {mode=8#700}) end, [] = flush(), ok. 
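pread_write_test/2 above now also covers the list form of pread: besides pread(Fd, Loc, Size) there is pread(Fd, [{Loc, Size}, ...]), which performs several positioned reads in one call and returns the results in request order. A small sketch (the file name is hypothetical):

    {ok, Fd} = file:open("scratch.bin", [read, binary]),
    %% ranges that fall entirely beyond end-of-file come back as 'eof'
    {ok, [First, Second]} = file:pread(Fd, [{0, 16}, {32, 16}]),
    ok = file:close(Fd).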
@@ -2641,8 +2774,8 @@ altname(Config) when is_list(Config) -> {skipped, "Altname not supported on this platform"}; {ok, "LONGAL~1"} -> {ok, "A_FILE~1"} = ?FILE_MODULE:altname(Name), - {ok, "C:/"} = ?FILE_MODULE:altname("C:/"), - {ok, "C:\\"} = ?FILE_MODULE:altname("C:\\"), + {ok, "c:/"} = ?FILE_MODULE:altname("C:/"), + {ok, "c:/"} = ?FILE_MODULE:altname("C:\\"), {error,enoent} = ?FILE_MODULE:altname(NonexName), {ok, "short"} = ?FILE_MODULE:altname(ShortName), ok @@ -2923,20 +3056,22 @@ delayed_write(Config) when is_list(Config) -> %% %% Test caching and normal close of non-raw file {ok, Fd1} = - ?FILE_MODULE:open(File, [write, {delayed_write, Size+1, 2000}]), + ?FILE_MODULE:open(File, [write, {delayed_write, Size+1, 400}]), ok = ?FILE_MODULE:write(Fd1, Data1), - timer:sleep(1000), % Just in case the file system is slow + %% Wait for a reasonable amount of time to check whether the write was + %% practically instantaneous or actually delayed. + timer:sleep(100), {ok, Fd2} = ?FILE_MODULE:open(File, [read]), eof = ?FILE_MODULE:read(Fd2, 1), ok = ?FILE_MODULE:write(Fd1, Data1), % Data flush on size - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), {ok, Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 2*Size+1), ok = ?FILE_MODULE:write(Fd1, Data1), - timer:sleep(3000), % Wait until data flush on timeout + timer:sleep(500), % Wait until data flush on timeout {ok, Data1Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 3*Size+1), ok = ?FILE_MODULE:write(Fd1, Data1), ok = ?FILE_MODULE:close(Fd1), % Data flush on close - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), {ok, Data1Data1Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 4*Size+1), ok = ?FILE_MODULE:close(Fd2), %% @@ -2970,7 +3105,7 @@ delayed_write(Config) when is_list(Config) -> {'DOWN', Mref1, _, _, _} = Down1a -> ct:fail(Down1a) end, - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), % Just in case the file system is slow {ok, Fd3} = ?FILE_MODULE:open(File, [read]), eof = ?FILE_MODULE:read(Fd3, 1), Child1 ! {Parent, continue, normal}, @@ -2980,7 +3115,7 @@ delayed_write(Config) when is_list(Config) -> {'DOWN', Mref1, _, _, _} = Down1b -> ct:fail(Down1b) end, - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), % Just in case the file system is slow {ok, Data1} = ?FILE_MODULE:pread(Fd3, bof, Size+1), ok = ?FILE_MODULE:close(Fd3), %% @@ -2993,7 +3128,7 @@ delayed_write(Config) when is_list(Config) -> {'DOWN', Mref2, _, _, _} = Down2a -> ct:fail(Down2a) end, - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), % Just in case the file system is slow {ok, Fd4} = ?FILE_MODULE:open(File, [read]), eof = ?FILE_MODULE:read(Fd4, 1), Child2 ! {Parent, continue, kill}, @@ -3003,7 +3138,7 @@ delayed_write(Config) when is_list(Config) -> {'DOWN', Mref2, _, _, _} = Down2b -> ct:fail(Down2b) end, - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), % Just in case the file system is slow eof = ?FILE_MODULE:pread(Fd4, bof, 1), ok = ?FILE_MODULE:close(Fd4), %% @@ -3095,6 +3230,16 @@ read_ahead(Config) when is_list(Config) -> Data1Data2Data3 = Data1++Data2++Data3, {ok, Data1Data2Data3} = ?FILE_MODULE:read(Fd5, 3*Size+1), ok = ?FILE_MODULE:close(Fd5), + + %% Ensure that a read that draws from both the buffer and the file won't + %% return anything wonky. 
+ SplitData = << <<(I rem 256)>> || I <- lists:seq(1, 1024) >>, + file:write_file(File, SplitData), + {ok, Fd6} = ?FILE_MODULE:open(File, [raw, read, binary, {read_ahead, 256}]), + {ok, <<1>>} = file:read(Fd6, 1), + <<1, Shifted:512/binary, _Rest/binary>> = SplitData, + {ok, Shifted} = file:read(Fd6, 512), + %% [] = flush(), ok. @@ -3699,6 +3844,83 @@ do_large_write(Name) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% Benchmarks +%% +%% Note that we only measure the time it takes to run the isolated file +%% operations and that the actual test runtime can differ significantly, +%% especially on the write side as the files need to be truncated before +%% writing. + +large_writes(Config) when is_list(Config) -> + Modes = [raw, binary], + OpCount = 4096, + Data = <<0:(64 bsl 10)/unit:8>>, + run_write_benchmark(Config, Modes, OpCount, Data). + +large_writes_delayed(Config) when is_list(Config) -> + %% Each write is exactly as large as the delay buffer, causing the writes + %% to pass through each time, giving us a decent idea of how much overhead + %% delayed_write adds. + Modes = [raw, binary, {delayed_write, 64 bsl 10, 2000}], + OpCount = 4096, + Data = <<0:(64 bsl 10)/unit:8>>, + run_write_benchmark(Config, Modes, OpCount, Data). + +tiny_writes(Config) when is_list(Config) -> + Modes = [raw, binary], + OpCount = 512 bsl 10, + Data = <<0>>, + run_write_benchmark(Config, Modes, OpCount, Data). + +tiny_writes_delayed(Config) when is_list(Config) -> + Modes = [raw, binary, {delayed_write, 512 bsl 10, 2000}], + OpCount = 512 bsl 10, + Data = <<0>>, + run_write_benchmark(Config, Modes, OpCount, Data). + +%% The read benchmarks assume that "benchmark_scratch_file" has been filled by +%% the write benchmarks. + +tiny_reads(Config) when is_list(Config) -> + Modes = [raw, binary], + OpCount = 512 bsl 10, + run_read_benchmark(Config, Modes, OpCount, 1). + +tiny_reads_ahead(Config) when is_list(Config) -> + Modes = [raw, binary, {read_ahead, 512 bsl 10}], + OpCount = 512 bsl 10, + run_read_benchmark(Config, Modes, OpCount, 1). + +run_write_benchmark(Config, Modes, OpCount, Data) -> + run_benchmark(Config, [write | Modes], OpCount, fun file:write/2, Data). + +run_read_benchmark(Config, Modes, OpCount, OpSize) -> + run_benchmark(Config, [read | Modes], OpCount, fun file:read/2, OpSize). + +run_benchmark(Config, Modes, OpCount, Fun, Arg) -> + ScratchDir = proplists:get_value(priv_dir, Config), + Path = filename:join(ScratchDir, "benchmark_scratch_file"), + {ok, Fd} = file:open(Path, Modes), + submit_throughput_results(Fun, [Fd, Arg], OpCount). + +submit_throughput_results(Fun, Args, Times) -> + MSecs = measure_repeated_file_op(Fun, Args, Times, millisecond), + IOPS = trunc(Times * (1000 / MSecs)), + ct_event:notify(#event{ name = benchmark_data, data = [{value,IOPS}] }), + {comment, io_lib:format("~p IOPS, ~p ms", [IOPS, trunc(MSecs)])}. + +measure_repeated_file_op(Fun, Args, Times, Unit) -> + Start = os:perf_counter(Unit), + repeated_apply(Fun, Args, Times), + os:perf_counter(Unit) - Start. + +repeated_apply(_F, _Args, Times) when Times =< 0 -> + ok; +repeated_apply(F, Args, Times) -> + erlang:apply(F, Args), + repeated_apply(F, Args, Times - 1). + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% response_analysis(Module, Function, Arguments) -> @@ -3934,7 +4156,7 @@ read_line_create_files(TestData) -> read_line_remove_files(TestData) -> [ file:delete(File) || {_Function,File,_,_} <- TestData ]. -%% read_line with prim_file. 
+%% read_line with ?PRIM_FILE. read_line_1(Config) when is_list(Config) -> PrivDir = proplists:get_value(priv_dir, Config), All = read_line_testdata(PrivDir), @@ -4103,9 +4325,9 @@ read_line_create7(Filename) -> file:close(F). read_line_all(Filename) -> - {ok,F} = prim_file:open(Filename,[read,binary]), + {ok,F} = ?PRIM_FILE:open(Filename,[read,binary]), X=read_rl_lines(F), - prim_file:close(F), + ?PRIM_FILE:close(F), Bin = list_to_binary([B || {ok,B} <- X]), Bin = re:replace(list_to_binary([element(2,file:read_file(Filename))]), "\r\n","\n",[global,{return,binary}]), @@ -4138,7 +4360,7 @@ read_line_all4(Filename) -> {length(X),Bin}. read_rl_lines(F) -> - case prim_file:read_line(F) of + case ?PRIM_FILE:read_line(F) of eof -> []; {error,X} -> @@ -4158,9 +4380,9 @@ read_rl_lines2(F) -> end. read_line_all_alternating(Filename) -> - {ok,F} = prim_file:open(Filename,[read,binary]), + {ok,F} = ?PRIM_FILE:open(Filename,[read,binary]), X=read_rl_lines(F,true), - prim_file:close(F), + ?PRIM_FILE:close(F), Bin = list_to_binary([B || {ok,B} <- X]), Bin = re:replace(list_to_binary([element(2,file:read_file(Filename))]), "\r\n","\n",[global,{return,binary}]), @@ -4194,8 +4416,8 @@ read_line_all_alternating4(Filename) -> read_rl_lines(F,Alternate) -> case begin case Alternate of - true -> prim_file:read(F,1); - false -> prim_file:read_line(F) + true -> ?PRIM_FILE:read(F,1); + false -> ?PRIM_FILE:read_line(F) end end of eof -> diff --git a/lib/kernel/test/file_name_SUITE.erl b/lib/kernel/test/file_name_SUITE.erl index 899102c908..3afc647081 100644 --- a/lib/kernel/test/file_name_SUITE.erl +++ b/lib/kernel/test/file_name_SUITE.erl @@ -77,6 +77,7 @@ init_per_testcase/2, end_per_testcase/2]). -export([normal/1,icky/1,very_icky/1,normalize/1,home_dir/1]). +-define(PRIM_FILE, prim_file). init_per_testcase(_Func, Config) -> Config. 
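The timing changes in delayed_write/1 and the new benchmark group above revolve around the open options {delayed_write, Size, Delay} and {read_ahead, Size}. A hypothetical writer/reader pair showing the options the benchmarks exercise (file name and sizes are made up):

    %% Buffer up to 64 KiB or 2 s of small writes before touching the file.
    {ok, Out} = file:open("trace.log",
                          [raw, binary, append,
                           {delayed_write, 64 bsl 10, 2000}]),
    ok = file:write(Out, <<"one small line\n">>),
    ok = file:close(Out),                 % close flushes any buffered data

    %% Let the driver fetch 256 KiB at a time when doing many small reads.
    {ok, In} = file:open("trace.log",
                         [raw, binary, read, {read_ahead, 256 bsl 10}]),
    {ok, _Line} = file:read_line(In),
    ok = file:close(In).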
@@ -131,7 +132,7 @@ home_dir(Config) when is_list(Config) -> os:putenv("HOME",NewHome), {"HOME",Save}; _ -> - rm_rf(prim_file,NewHome), + rm_rf(?PRIM_FILE,NewHome), throw(unsupported_os) end, try @@ -145,7 +146,7 @@ home_dir(Config) when is_list(Config) -> _ -> os:putenv(SaveOldName,SaveOldValue) end, - rm_rf(prim_file,NewHome) + rm_rf(?PRIM_FILE,NewHome) end catch throw:need_unicode_mode -> @@ -190,7 +191,7 @@ normal(Config) when is_list(Config) -> try Priv = proplists:get_value(priv_dir, Config), file:set_cwd(Priv), - ok = check_normal(prim_file), + ok = check_normal(?PRIM_FILE), ok = check_normal(file), %% If all is good, delete dir again (avoid hanging dir on windows) rm_rf(file,"normal_dir"), @@ -210,7 +211,7 @@ icky(Config) when is_list(Config) -> try Priv = proplists:get_value(priv_dir, Config), file:set_cwd(Priv), - ok = check_icky(prim_file), + ok = check_icky(?PRIM_FILE), ok = check_icky(file), %% If all is good, delete dir again (avoid hanging dir on windows) rm_rf(file,"icky_dir"), @@ -229,7 +230,7 @@ very_icky(Config) when is_list(Config) -> try Priv = proplists:get_value(priv_dir, Config), file:set_cwd(Priv), - case check_very_icky(prim_file) of + case check_very_icky(?PRIM_FILE) of need_unicode_mode -> {skipped,"VM needs to be started in Unicode filename mode"}; ok -> @@ -292,17 +293,14 @@ check_normal(Mod) -> ok end, [ begin - {ok, FD} = Mod:open(Name,[read]), - {ok, Content} = Mod:read(FD,1024), - ok = file:close(FD) - end || {regular,Name,Content} <- NormalDir ], - [ begin {ok, FD} = Mod:open(Name,[read,binary]), BC = list_to_binary(Content), {ok, BC} = Mod:read(FD,1024), ok = file:close(FD) end || {regular,Name,Content} <- NormalDir ], + {error, badarg} = Mod:rename("fil1\0tmp_fil2","tmp_fil1"), Mod:rename("fil1","tmp_fil1"), + {error, badarg} = Mod:read_file("tmp_fil1\0.txt"), {ok, <<"fil1">>} = Mod:read_file("tmp_fil1"), {error,enoent} = Mod:read_file("fil1"), Mod:rename("tmp_fil1","fil1"), @@ -410,11 +408,6 @@ check_icky(Mod) -> ok end, [ begin - {ok, FD} = Mod:open(Name,[read]), - {ok, Content} = Mod:read(FD,1024), - ok = file:close(FD) - end || {regular,Name,Content} <- IckyDir ], - [ begin {ok, FD} = Mod:open(Name,[read,binary]), BC = list_to_binary([Content]), {ok, BC} = Mod:read(FD,1024), @@ -519,11 +512,6 @@ check_very_icky(Mod) -> ok end, [ begin - {ok, FD} = Mod:open(Name,[read]), - {ok, Content} = Mod:read(FD,1024), - ok = file:close(FD) - end || {regular,Name,Content} <- VeryIckyDir ], - [ begin {ok, FD} = Mod:open(Name,[read,binary]), BC = list_to_binary([Content]), {ok, BC} = Mod:read(FD,1024), diff --git a/lib/kernel/test/gen_sctp_SUITE.erl b/lib/kernel/test/gen_sctp_SUITE.erl index 620ab235a0..a0ae792ba9 100644 --- a/lib/kernel/test/gen_sctp_SUITE.erl +++ b/lib/kernel/test/gen_sctp_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2007-2016. All Rights Reserved. +%% Copyright Ericsson AB 2007-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -1038,8 +1038,7 @@ do_from_other_process(Fun) -> Result -> Parent ! {Ref,Result} catch - Class:Reason -> - Stacktrace = erlang:get_stacktrace(), + Class:Reason:Stacktrace -> Parent ! 
{Ref,Class,Reason,Stacktrace} end end), @@ -1617,8 +1616,7 @@ s_start(Socket, Timeout, Parent) -> try s_loop(Socket, Timeout, Parent, Handler, gb_trees:empty()) catch - Class:Reason -> - Stacktrace = erlang:get_stacktrace(), + Class:Reason:Stacktrace -> io:format(?MODULE_STRING":socket exception ~w:~w at~n" "~p.~n", [Class,Reason,Stacktrace]), erlang:raise(Class, Reason, Stacktrace) diff --git a/lib/kernel/test/gen_tcp_api_SUITE.erl b/lib/kernel/test/gen_tcp_api_SUITE.erl index 12d22519ce..1be016444f 100644 --- a/lib/kernel/test/gen_tcp_api_SUITE.erl +++ b/lib/kernel/test/gen_tcp_api_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2017. All Rights Reserved. +%% Copyright Ericsson AB 1998-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -605,9 +605,9 @@ ok({ok,V}) -> V; ok(NotOk) -> try throw(not_ok) catch - Thrown -> + throw:Thrown:Stacktrace -> erlang:raise( - error, {Thrown, NotOk}, tl(erlang:get_stacktrace())) + error, {Thrown, NotOk}, tl(Stacktrace)) end. get_localaddr() -> diff --git a/lib/kernel/test/gen_tcp_misc_SUITE.erl b/lib/kernel/test/gen_tcp_misc_SUITE.erl index 929f66d400..04c0c48e3a 100644 --- a/lib/kernel/test/gen_tcp_misc_SUITE.erl +++ b/lib/kernel/test/gen_tcp_misc_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2016. All Rights Reserved. +%% Copyright Ericsson AB 1998-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -41,6 +41,7 @@ busy_send/1, busy_disconnect_passive/1, busy_disconnect_active/1, fill_sendq/1, partial_recv_and_close/1, partial_recv_and_close_2/1,partial_recv_and_close_3/1,so_priority/1, + recvtos/1, recvttl/1, recvtosttl/1, recvtclass/1, %% Accept tests primitive_accept/1,multi_accept_close_listen/1,accept_timeout/1, accept_timeouts_in_order/1,accept_timeouts_in_order2/1, @@ -50,9 +51,9 @@ killing_acceptor/1,killing_multi_acceptors/1,killing_multi_acceptors2/1, several_accepts_in_one_go/1, accept_system_limit/1, active_once_closed/1, send_timeout/1, send_timeout_active/1, - otp_7731/1, zombie_sockets/1, otp_7816/1, otp_8102/1, - wrapping_oct/0, wrapping_oct/1, - otp_9389/1]). + otp_7731/1, zombie_sockets/1, otp_7816/1, otp_8102/1, + wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1, + otp_12242/1]). %% Internal exports. -export([sender/3, not_owner/1, passive_sockets_server/2, priority_server/1, @@ -84,7 +85,8 @@ all() -> busy_disconnect_passive, busy_disconnect_active, fill_sendq, partial_recv_and_close, partial_recv_and_close_2, partial_recv_and_close_3, - so_priority, primitive_accept, + so_priority, recvtos, recvttl, recvtosttl, + recvtclass, primitive_accept, multi_accept_close_listen, accept_timeout, accept_timeouts_in_order, accept_timeouts_in_order2, accept_timeouts_in_order3, accept_timeouts_in_order4, @@ -94,7 +96,8 @@ all() -> killing_multi_acceptors2, several_accepts_in_one_go, accept_system_limit, active_once_closed, send_timeout, send_timeout_active, otp_7731, wrapping_oct, - zombie_sockets, otp_7816, otp_8102, otp_9389]. + zombie_sockets, otp_7816, otp_8102, otp_9389, + otp_12242]. groups() -> []. 
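This diff repeatedly (purge_stacktrace/1 in code_SUITE, read_write_0/3 in file_SUITE, and the gen_sctp/gen_tcp_api helpers just above) replaces the deprecated erlang:get_stacktrace/0 with the catch clause syntax that binds the stacktrace directly. The idiom, sketched with a hypothetical call:

    try
        some_call_that_may_fail()
    catch
        Class:Reason:Stacktrace ->
            %% Stacktrace is only bound inside this clause; re-raise if needed.
            io:format("caught ~p:~p~n~p~n", [Class, Reason, Stacktrace]),
            erlang:raise(Class, Reason, Stacktrace)
    end.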
@@ -1573,52 +1576,56 @@ fill_sendq(Config) when is_list(Config) -> Master = self(), Server = spawn_link(fun () -> - {ok,L} = gen_tcp:listen - (0, [{active,false},binary, - {reuseaddr,true},{packet,0}]), + {ok,L} = gen_tcp:listen(0, [{active,false},binary, + {reuseaddr,true},{packet,0}]), {ok,Port} = inet:port(L), Master ! {self(),client, fill_sendq_client(Port, Master)}, fill_sendq_srv(L, Master) end), io:format("~p Server~n", [Server]), - receive {Server,client,Client} -> - io:format("~p Client~n", [Client]), - receive {Server,reader,Reader} -> - io:format("~p Reader~n", [Reader]), - fill_sendq_loop(Server, Client, Reader) + receive + {Server,client,Client} -> + io:format("~p Client~n", [Client]), + receive + {Server,reader,Reader} -> + io:format("~p Reader~n", [Reader]), + fill_sendq_loop(Server, Client, Reader) end end. fill_sendq_loop(Server, Client, Reader) -> %% Master %% - receive {Server,send} -> + receive + {Server,send} -> fill_sendq_loop(Server, Client, Reader) after 2000 -> %% Send queue full, sender blocked -> close client. io:format("Send timeout, closing Client...~n", []), Client ! {self(),close}, - receive {Server,[{error,closed}]} -> - io:format("Got server closed.~n"), - receive {Reader,[{error,closed}]} -> - io:format - ("Got reader closed.~n"), - ok - after 3000 -> - ct:fail({timeout,{closed,reader}}) - end; - {Reader,[{error,closed}]} -> - io:format("Got reader closed.~n"), - receive {Server,[{error,closed}]} -> - io:format("Got server closed~n"), - ok - after 3000 -> - ct:fail({timeout,{closed,server}}) - end - after 3000 -> - ct:fail({timeout,{closed,[server,reader]}}) - end + receive + {Server,[{error,closed}]} -> + io:format("Got server closed.~n"), + receive + {Reader,[{error,closed}]} -> + io:format("Got reader closed.~n"), + ok + after 3000 -> + ct:fail({timeout,{closed,reader}}) + end; + {Reader,[{error,closed}]} -> + io:format("Got reader closed.~n"), + receive + {Server,[{error,closed}]} -> + io:format("Got server closed~n"), + ok + after 3000 -> + ct:fail({timeout,{closed,server}}) + end + after 3000 -> + ct:fail({timeout,{closed,[server,reader]}}) + end end. fill_sendq_srv(L, Master) -> @@ -1911,6 +1918,232 @@ so_priority(Config) when is_list(Config) -> end end. + + +%% IP_RECVTOS and IP_RECVTCLASS for IP_PKTOPTIONS +%% does not seem to be implemented in Linux until kernel 3.1 +%% +%% It seems pktoptions does not return valid values +%% for IPv4 connect sockets. On the accept socket +%% we get valid values, but on the connect socket we get +%% the default values for TOS and TTL. +%% +%% Therefore the argument CheckConnect that enables +%% checking the returned values for the connect socket. +%% It is only used for recvtclass that is an IPv6 option +%% and there we get valid values from both socket ends. + +recvtos(_Config) -> + test_pktoptions( + inet, [{recvtos,tos,96}], + fun recvtos_ok/2, + false). + +recvtosttl(_Config) -> + test_pktoptions( + inet, [{recvtos,tos,96},{recvttl,ttl,33}], + fun (OSType, OSVer) -> + recvtos_ok(OSType, OSVer) andalso recvttl_ok(OSType, OSVer) + end, + false). + +recvttl(_Config) -> + test_pktoptions( + inet, [{recvttl,ttl,33}], + fun recvttl_ok/2, + false). + +recvtclass(_Config) -> + {ok,IFs} = inet:getifaddrs(), + case + [Name || + {Name,Opts} <- IFs, + lists:member({addr,{0,0,0,0,0,0,0,1}}, Opts)] + of + [_] -> + test_pktoptions( + inet6, [{recvtclass,tclass,224}], + fun recvtclass_ok/2, + true); + [] -> + {skip,{ipv6_not_supported,IFs}} + end. 
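The new recvtos/recvttl/recvtclass cases above exercise the socket options of the same names together with the pktoptions pseudo option; as the comments that follow spell out, whether anything useful is returned is highly platform dependent. A rough sketch of how an application might ask for the TOS value seen on an accepted connection (loopback address, TOS value and timeout are arbitrary):

    {ok, L} = gen_tcp:listen(0, [binary, {active, false}, {recvtos, true}]),
    {ok, Port} = inet:port(L),
    {ok, C} = gen_tcp:connect({127,0,0,1}, Port,
                              [binary, {active, false}, {tos, 96}]),
    {ok, S} = gen_tcp:accept(L, 5000),
    %% On supporting platforms PktOpts is [{pktoptions, [{tos,96}]}] or similar;
    %% elsewhere the returned option list may be empty.
    {ok, PktOpts} = inet:getopts(S, [pktoptions]),
    ok = gen_tcp:close(C),
    ok = gen_tcp:close(S),
    ok = gen_tcp:close(L).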
+ +%% These version numbers are the highest noted in daily tests +%% where the test fails for a plausible reason, so +%% skip on that platform. +%% +%% On newer versions it might be fixed, but we'll see about that +%% when machines with newer versions gets installed... +%% If the test still fails for a plausible reason these +%% version numbers simply should be increased. +%% Or maybe we should change to only test on known good +%% platforms - change {unix,_} to false? + +%% pktoptions is not supported for IPv4 +recvtos_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0}); +recvtos_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0}); +%% Using the option returns einval, so it is not implemented. +recvtos_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0}); +recvtos_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0}); +%% Does not return any value - not implemented for pktoptions +recvtos_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {3,1,0}); +%% +recvtos_ok({unix,_}, _) -> true; +recvtos_ok(_, _) -> false. + +%% pktoptions is not supported for IPv4 +recvttl_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0}); +recvttl_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0}); +%% Using the option returns einval, so it is not implemented. +recvttl_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0}); +recvttl_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0}); +%% +recvttl_ok({unix,linux}, _) -> true; +recvttl_ok({unix,_}, _) -> true; +recvttl_ok(_, _) -> false. + +%% pktoptions is not supported for IPv6 +recvtclass_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0}); +recvtclass_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0}); +recvtclass_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0}); +%% Using the option returns einval, so it is not implemented. +recvtclass_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0}); +%% Does not return any value - not implemented for pktoptions +recvtclass_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {3,1,0}); +%% +recvtclass_ok({unix,_}, _) -> true; +recvtclass_ok(_, _) -> false. + +semver_lt({X1,Y1,Z1}, {X2,Y2,Z2}) -> + if + X1 > X2 -> false; + X1 < X2 -> true; + Y1 > Y2 -> false; + Y1 < Y2 -> true; + Z1 > Z2 -> false; + Z1 < Z2 -> true; + true -> false + end; +semver_lt(_, {_,_,_}) -> false. + +test_pktoptions(Family, Spec, OSFilter, CheckConnect) -> + OSType = os:type(), + OSVer = os:version(), + case OSFilter(OSType, OSVer) of + true -> + io:format("Os: ~p, ~p~n", [OSType,OSVer]), + test_pktoptions(Family, Spec, CheckConnect, OSType, OSVer); + false -> + {skip,{not_supported_for_os_version,{OSType,OSVer}}} + end. 
+%% +test_pktoptions(Family, Spec, CheckConnect, OSType, OSVer) -> + Timeout = 5000, + RecvOpts = [RecvOpt || {RecvOpt,_,_} <- Spec], + TrueRecvOpts = [{RecvOpt,true} || {RecvOpt,_,_} <- Spec], + FalseRecvOpts = [{RecvOpt,false} || {RecvOpt,_,_} <- Spec], + Opts = [Opt || {_,Opt,_} <- Spec], + OptsVals = [{Opt,Val} || {_,Opt,Val} <- Spec], + Address = + case Family of + inet -> + {127,0,0,1}; + inet6 -> + {0,0,0,0,0,0,0,1} + end, + %% + %% Set RecvOpts on listen socket + {ok,L} = + gen_tcp:listen( + 0, + [Family,binary,{active,false},{send_timeout,Timeout} + |TrueRecvOpts]), + {ok,P} = inet:port(L), + {ok,TrueRecvOpts} = inet:getopts(L, RecvOpts), + {ok,OptsValsDefault} = inet:getopts(L, Opts), + %% + %% Set RecvOpts and Option values on connect socket + {ok,S2} = + gen_tcp:connect( + Address, P, + [Family,binary,{active,false},{send_timeout,Timeout} + |TrueRecvOpts ++ OptsVals], + Timeout), + {ok,TrueRecvOpts} = inet:getopts(S2, RecvOpts), + {ok,OptsVals} = inet:getopts(S2, Opts), + %% + %% Accept socket inherits the options from listen socket + {ok,S1} = gen_tcp:accept(L, Timeout), + {ok,TrueRecvOpts} = inet:getopts(S1, RecvOpts), + {ok,OptsValsDefault} = inet:getopts(S1, Opts), +%%% %% +%%% %% Handshake +%%% ok = gen_tcp:send(S1, <<"hello">>), +%%% {ok,<<"hello">>} = gen_tcp:recv(S2, 5, Timeout), +%%% ok = gen_tcp:send(S2, <<"hi">>), +%%% {ok,<<"hi">>} = gen_tcp:recv(S1, 2, Timeout), + %% + %% Verify returned remote options + {ok,[{pktoptions,OptsVals1}]} = inet:getopts(S1, [pktoptions]), + {ok,[{pktoptions,OptsVals2}]} = inet:getopts(S2, [pktoptions]), + (Result1 = sets_eq(OptsVals1, OptsVals)) + orelse io:format( + "Accept differs: ~p neq ~p~n", [OptsVals1,OptsVals]), + (Result2 = sets_eq(OptsVals2, OptsValsDefault)) + orelse io:format( + "Connect differs: ~p neq ~p~n", + [OptsVals2,OptsValsDefault]), + %% + ok = gen_tcp:close(S2), + ok = gen_tcp:close(S1), + %% + %% + %% Clear RecvOpts on listen socket and set Option values + ok = inet:setopts(L, FalseRecvOpts ++ OptsVals), + {ok,FalseRecvOpts} = inet:getopts(L, RecvOpts), + {ok,OptsVals} = inet:getopts(L, Opts), + %% + %% Set RecvOpts on connecting socket + %% + {ok,S4} = + gen_tcp:connect( + Address, P, + [Family,binary,{active,false},{send_timeout,Timeout} + |TrueRecvOpts], + Timeout), + {ok,TrueRecvOpts} = inet:getopts(S4, RecvOpts), + {ok,OptsValsDefault} = inet:getopts(S4, Opts), + %% + %% Accept socket inherits the options from listen socket + {ok,S3} = gen_tcp:accept(L, Timeout), + {ok,FalseRecvOpts} = inet:getopts(S3, RecvOpts), + {ok,OptsVals} = inet:getopts(S3, Opts), + %% + %% Verify returned remote options + {ok,[{pktoptions,[]}]} = inet:getopts(S3, [pktoptions]), + {ok,[{pktoptions,OptsVals4}]} = inet:getopts(S4, [pktoptions]), + (Result3 = sets_eq(OptsVals4, OptsVals)) + orelse io:format( + "Accept2 differs: ~p neq ~p~n", [OptsVals4,OptsVals]), + %% + ok = gen_tcp:close(S4), + ok = gen_tcp:close(S3), + ok = gen_tcp:close(L), + (Result1 and ((not CheckConnect) or (Result2 and Result3))) + orelse + exit({failed, + [{OptsVals1,OptsVals4,OptsVals}, + {OptsVals2,OptsValsDefault}], + {OSType,OSVer}}), +%% exit({{OSType,OSVer},success}), % In search for the truth + ok. + +sets_eq(L1, L2) -> + lists:sort(L1) == lists:sort(L2). 
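A condensed sketch of the round-trip that test_pktoptions/5 verifies, assuming a platform where TCP pktoptions is actually implemented; the socket options and the TOS value are illustrative:

pktoptions_demo() ->
    {ok, L} = gen_tcp:listen(0, [inet, binary, {active,false}, {recvtos,true}]),
    {ok, Port} = inet:port(L),
    %% Connect with an explicit TOS; the accept side should then report
    %% the peer's TOS among its received packet options.
    {ok, C} = gen_tcp:connect({127,0,0,1}, Port,
                              [inet, binary, {active,false}, {tos,96}]),
    {ok, A} = gen_tcp:accept(L),
    Result = inet:getopts(A, [pktoptions]),  %% e.g. {ok,[{pktoptions,[{tos,96}]}]}
    [ok = gen_tcp:close(S) || S <- [C, A, L]],
    Result.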
+ + + %% Accept test utilities (suites are below) millis() -> @@ -2202,7 +2435,7 @@ wait_until_accepting(Proc,0) -> exit({timeout_waiting_for_accepting,Proc}); wait_until_accepting(Proc,N) -> case process_info(Proc,current_function) of - {current_function,{prim_inet,accept0,2}} -> + {current_function,{prim_inet,accept0,3}} -> case process_info(Proc,status) of {status,waiting} -> ok; @@ -3014,3 +3247,182 @@ ok({ok,V}) -> V. get_hostname(Name) -> "@"++Host = lists:dropwhile(fun(C) -> C =/= $@ end, atom_to_list(Name)), Host. + +otp_13939(doc) -> + ["Check that writing to a remotely closed socket doesn't block forever " + "when exit_on_close is false."]; +otp_13939(suite) -> + []; +otp_13939(Config) when is_list(Config) -> + {Pid, Ref} = spawn_opt( + fun() -> + {ok, Listener} = gen_tcp:listen(0, [{exit_on_close, false}]), + {ok, Port} = inet:port(Listener), + + spawn_link( + fun() -> + {ok, Client} = gen_tcp:connect("localhost", Port, + [{active, false}]), + ok = gen_tcp:close(Client) + end), + + {ok, Accepted} = gen_tcp:accept(Listener), + + ok = gen_tcp:send(Accepted, <<0:(10*1024*1024*8)>>), + + %% The bug surfaces when there's a delay between the send + %% operations; inet:getstat is a red herring. + timer:sleep(100), + + {error, Code} = gen_tcp:send(Accepted, <<0:(10*1024*1024*8)>>), + ct:pal("gen_tcp:send returned ~p~n", [Code]) + end, [link, monitor]), + + receive + {'DOWN', Ref, process, Pid, normal} -> + ok + after 1000 -> + demonitor(Ref, [flush]), + exit(Pid, normal), + ct:fail("Server process blocked on send.") + end. + +otp_12242(Config) when is_list(Config) -> + case os:type() of + {win32,_} -> + %% Even if we set sndbuf and recbuf to small sizes + %% Windows either happily accepts to send GBytes of data + %% in no time, so the second send below that is supposed + %% to time out just succedes, or the first send that + %% is supposed to fill the inet_drv I/O queue and + %% start waiting for when more data can be sent + %% instead sends all data but suffers a send + %% failure that closes the socket + {skipped,backpressure_broken_on_win32}; + _ -> + %% Find the IPv4 address of an up and running interface + %% that is not loopback nor pointtopoint + {ok,IFList} = inet:getifaddrs(), + ct:pal("IFList ~p~n", [IFList]), + case + lists:flatten( + [lists:filtermap( + fun ({addr,Addr}) when tuple_size(Addr) =:= 4 -> + {true,Addr}; + (_) -> + false + end, Opts) + || {_,Opts} <- IFList, + case lists:keyfind(flags, 1, Opts) of + {_,Flags} -> + lists:member(up, Flags) + andalso + lists:member(running, Flags) + andalso + not lists:member(loopback, Flags) + andalso + not lists:member(pointtopoint, Flags); + false -> + false + end]) + of + [Addr|_] -> + otp_12242(Addr); + Other -> + {skipped,{no_external_address,Other}} + end + end; +%% +otp_12242(Addr) when tuple_size(Addr) =:= 4 -> + ct:timetrap(30000), + ct:pal("Using address ~p~n", [Addr]), + Bufsize = 16 * 1024, + Datasize = 128 * 1024 * 1024, % At least 1 s on GBit interface + Blob = binary:copy(<<$x>>, Datasize), + LOpts = + [{backlog,4},{reuseaddr,true},{ip,Addr}, + binary,{active,false}, + {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}], + COpts = + [binary,{active,false},{ip,Addr}, + {linger,{true,1}}, % 1 s + {send_timeout,500}, + {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}], + Dir = filename:dirname(code:which(?MODULE)), + {ok,ListenerNode} = + test_server:start_node( + ?UNIQ_NODE_NAME, slave, [{args,"-pa " ++ Dir}]), + Tester = self(), + Listener = + spawn( + ListenerNode, + fun () -> + {ok,L} = gen_tcp:listen(0, LOpts), + 
{ok,LPort} = inet:port(L), + Tester ! {self(),port,LPort}, + {ok,A} = gen_tcp:accept(L), + ok = gen_tcp:close(L), + receive + {Tester,stop} -> + ok = gen_tcp:close(A) + end + end), + ListenerMref = monitor(process, Listener), + LPort = receive {Listener,port,P} -> P end, + {ok,C} = gen_tcp:connect(Addr, LPort, COpts, infinity), + {ok,ReadCOpts} = inet:getopts(C, [recbuf,sndbuf,buffer]), + ct:pal("ReadCOpts ~p~n", [ReadCOpts]), + %% + %% Fill the buffers + ct:pal("Sending ~p bytes~n", [Datasize]), + ok = gen_tcp:send(C, Blob), + ct:pal("Sent ~p bytes~n", [Datasize]), + %% Spawn the Closer, + %% try to ensure that the close call is in progress + %% before the owner proceeds with sending + Owner = self(), + {_Closer,CloserMref} = + spawn_opt( + fun () -> + Owner ! {tref, erlang:start_timer(50, Owner, closing)}, + ct:pal("Calling gen_tcp:close(C)~n"), + try gen_tcp:close(C) of + Result -> + ct:pal("gen_tcp:close(C) -> ~p~n", [Result]), + ok = Result + catch + Class:Reason:Stacktrace -> + ct:pal( + "gen_tcp:close(C) >< ~p:~p~n ~p~n", + [Class,Reason,Stacktrace]), + erlang:raise(Class, Reason, Stacktrace) + end + end, [link,monitor]), + receive + {tref,Tref} -> + receive {timeout,Tref,_} -> ok end, + ct:pal("Sending ~p bytes again~n", [Datasize]), + %% Now should the close be in progress... + %% All buffers are full, remote end is not reading, + %% and the send timeout is 1 s so this will timeout: + {error,timeout} = gen_tcp:send(C, Blob), + ct:pal("Sending ~p bytes again timed out~n", [Datasize]), + ok = inet:setopts(C, [{send_timeout,10000}]), + %% There is a hidden timeout here. Port close is sampled + %% every 5 s by prim_inet:send_recv_reply. + %% Linger is 3 s so the Closer will finish this send: + ct:pal("Sending ~p bytes with 10 s timeout~n", [Datasize]), + {error,closed} = gen_tcp:send(C, Blob), + ct:pal("Sending ~p bytes with 10 s timeout was closed~n", + [Datasize]), + normal = wait(CloserMref), + ct:pal("The Closer has exited~n"), + Listener ! {Tester,stop}, + receive {'DOWN',ListenerMref,_,_,_} -> ok end, + ct:pal("The Listener has exited~n"), + test_server:stop_node(ListenerNode), + ok + end. + +wait(Mref) -> + receive {'DOWN',Mref,_,_,Reason} -> Reason end. diff --git a/lib/kernel/test/gen_udp_SUITE.erl b/lib/kernel/test/gen_udp_SUITE.erl index aa616d43d6..af9985de45 100644 --- a/lib/kernel/test/gen_udp_SUITE.erl +++ b/lib/kernel/test/gen_udp_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2017. All Rights Reserved. +%% Copyright Ericsson AB 1998-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -34,8 +34,9 @@ -export([init_per_testcase/2, end_per_testcase/2]). -export([send_to_closed/1, active_n/1, - buffer_size/1, binary_passive_recv/1, bad_address/1, + buffer_size/1, binary_passive_recv/1, max_buffer_size/1, bad_address/1, read_packets/1, open_fd/1, connect/1, implicit_inet6/1, + recvtos/1, recvtosttl/1, recvttl/1, recvtclass/1, local_basic/1, local_unbound/1, local_fdopen/1, local_fdopen_unbound/1, local_abstract/1]). @@ -44,9 +45,10 @@ suite() -> {timetrap,{minutes,1}}]. all() -> - [send_to_closed, buffer_size, binary_passive_recv, + [send_to_closed, buffer_size, binary_passive_recv, max_buffer_size, bad_address, read_packets, open_fd, connect, implicit_inet6, active_n, + recvtos, recvtosttl, recvttl, recvtclass, {group, local}]. groups() -> @@ -237,6 +239,14 @@ buffer_size_server_recv(Socket, IP, Port, Cnt) -> end. 
+%%------------------------------------------------------------- +%% OTP-15206: Keep buffer small for udp +%%------------------------------------------------------------- +max_buffer_size(Config) when is_list(Config) -> + {ok, Socket} = gen_udp:open(0, [binary]), + ok = inet:setopts(Socket,[{recbuf, 1 bsl 20}]), + {ok, [{buffer, 65536}]} = inet:getopts(Socket,[buffer]), + gen_udp:close(Socket). %%------------------------------------------------------------- %% OTP-3823 gen_udp:recv does not return address in binary mode @@ -288,58 +298,56 @@ bad_address(Config) when is_list(Config) -> %% %% Starts a slave node that on command sends a bunch of messages %% to our UDP port. The receiving process just receives and -%% ignores the incoming messages, but counts them. -%% A tracing process traces the receiving process for -%% 'receive' and scheduling events. From the trace, -%% message contents is verified; and, how many messages -%% are received per in/out scheduling, which should be -%% the same as the read_packets parameter. -%% -%% What happens on the SMP emulator remains to be seen... -%% +%% ignores the incoming messages. +%% A tracing process traces the receiving port for +%% 'send' and scheduling events. From the trace, +%% how many messages are received per in/out scheduling, +%% which should never be more than the read_packet parameter. %% OTP-6249 UDP option for number of packet reads. read_packets(Config) when is_list(Config) -> - case erlang:system_info(smp_support) of - false -> - read_packets_1(); - true -> - %% We would need some new sort of tracing to test this - %% option reliably in an SMP emulator. - {skip,"SMP emulator"} - end. - -read_packets_1() -> N1 = 5, - N2 = 7, + N2 = 1, + Msgs = 30000, {ok,R} = gen_udp:open(0, [{read_packets,N1}]), {ok,RP} = inet:port(R), {ok,Node} = start_node(gen_udp_SUITE_read_packets), Die = make_ref(), - Loop = erlang:spawn_link(fun () -> infinite_loop(Die) end), %% - Msgs1 = [erlang:integer_to_list(M) || M <- lists:seq(1, N1*3)], - [V1|_] = read_packets_test(R, RP, Msgs1, Node), + {V1, Trace1} = read_packets_test(R, RP, Msgs, Node), {ok,[{read_packets,N1}]} = inet:getopts(R, [read_packets]), %% ok = inet:setopts(R, [{read_packets,N2}]), - Msgs2 = [erlang:integer_to_list(M) || M <- lists:seq(1, N2*3)], - [V2|_] = read_packets_test(R, RP, Msgs2, Node), + {V2, Trace2} = read_packets_test(R, RP, Msgs, Node), {ok,[{read_packets,N2}]} = inet:getopts(R, [read_packets]), %% stop_node(Node), - Mref = erlang:monitor(process, Loop), - Loop ! Die, - receive - {'DOWN',Mref,_,_, normal} -> - case {V1,V2} of - {N1,N2} -> - ok; - _ when V1 =/= N1, V2 =/= N2 -> - ok - end + ct:log("N1=~p, V1=~p vs N2=~p, V2=~p",[N1,V1,N2,V2]), + + dump_terms(Config, "trace1.terms", Trace2), + dump_terms(Config, "trace2.terms", Trace2), + + %% Because of the inherit racy-ness of the feature it is + %% hard to test that it behaves correctly. + %% Right now (OTP 21) a port task takes 5% of the + %% allotted port task reductions to execute, so + %% the max number of executions a port is allowed to + %% do before being re-scheduled is N * 20 + + if + V1 > (N1 * 20) -> + ct:fail("Got ~p msgs, max was ~p", [V1, N1]); + V2 > (N2 * 20) -> + ct:fail("Got ~p msgs, max was ~p", [V2, N2]); + true -> + ok end. +dump_terms(Config, Name, Terms) -> + FName = filename:join(proplists:get_value(priv_dir, Config),Name), + file:write_file(FName, term_to_binary(Terms)), + ct:log("Logged terms to ~s",[FName]). + infinite_loop(Die) -> receive Die -> @@ -350,7 +358,6 @@ infinite_loop(Die) -> end. 
read_packets_test(R, RP, Msgs, Node) -> - Len = length(Msgs), Receiver = self(), Tracer = spawn_link( @@ -375,24 +382,24 @@ read_packets_test(R, RP, Msgs, Node) -> [link,{priority,high}]), receive {Sender,{port,SP}} -> - erlang:trace(self(), true, - [running,'receive',{tracer,Tracer}]), + erlang:trace(R, true, + [running_ports,'send',{tracer,Tracer}]), erlang:yield(), Sender ! {Receiver,go}, - read_packets_recv(Len), - erlang:trace(self(), false, [all]), + read_packets_recv(Msgs), + erlang:trace(R, false, [all]), Tracer ! {Receiver,get_trace}, receive {Tracer,{trace,Trace}} -> - read_packets_verify(R, SP, Msgs, Trace) + {read_packets_verify(R, SP, Trace), Trace} end end. -read_packets_send(S, RP, [Msg|Msgs]) -> - ok = gen_udp:send(S, localhost, RP, Msg), - read_packets_send(S, RP, Msgs); -read_packets_send(_S, _RP, []) -> - ok. +read_packets_send(_S, _RP, 0) -> + ok; +read_packets_send(S, RP, Msgs) -> + ok = gen_udp:send(S, localhost, RP, "UDP FLOOOOOOD"), + read_packets_send(S, RP, Msgs - 1). read_packets_recv(0) -> ok; @@ -404,23 +411,24 @@ read_packets_recv(N) -> timeout end. -read_packets_verify(R, SP, Msg, Trace) -> - lists:reverse( - lists:sort(read_packets_verify(R, SP, Msg, Trace, 0))). - -read_packets_verify(R, SP, Msgs, [{trace,Self,OutIn,_}|Trace], M) - when Self =:= self(), OutIn =:= out; - Self =:= self(), OutIn =:= in -> - push(M, read_packets_verify(R, SP, Msgs, Trace, 0)); -read_packets_verify(R, SP, [Msg|Msgs], - [{trace,Self,'receive',{udp,R,{127,0,0,1},SP,Msg}} - |Trace], M) +read_packets_verify(R, SP, Trace) -> + [Max | _] = Pkts = lists:reverse(lists:sort(read_packets_verify(R, SP, Trace, 0))), + ct:pal("~p",[lists:sublist(Pkts,10)]), + Max. + +read_packets_verify(R, SP, [{trace,R,OutIn,_}|Trace], M) + when OutIn =:= out; OutIn =:= in -> + push(M, read_packets_verify(R, SP, Trace, 0)); +read_packets_verify(R, SP, [{trace, R,'receive',timeout}|Trace], M) -> + push(M, read_packets_verify(R, SP, Trace, 0)); +read_packets_verify(R, SP, + [{trace,R,'send',{udp,R,{127,0,0,1},SP,_Msg}, Self} | Trace], M) when Self =:= self() -> - read_packets_verify(R, SP, Msgs, Trace, M+1); -read_packets_verify(_R, _SP, [], [], M) -> + read_packets_verify(R, SP, Trace, M+1); +read_packets_verify(_R, _SP, [], M) -> push(M, []); -read_packets_verify(_R, _SP, Msgs, Trace, M) -> - ct:fail({read_packets_verify,mismatch,Msgs,Trace,M}). +read_packets_verify(_R, _SP, Trace, M) -> + ct:fail({read_packets_verify,mismatch,Trace,M}). push(0, Vs) -> Vs; @@ -566,6 +574,168 @@ active_n(Config) when is_list(Config) -> ok. + +recvtos(_Config) -> + test_recv_opts( + inet, [{recvtos,tos,96}], + fun recvtos_ok/2). + +recvtosttl(_Config) -> + test_recv_opts( + inet, [{recvtos,tos,96},{recvttl,ttl,33}], + fun (OSType, OSVer) -> + recvtos_ok(OSType, OSVer) andalso recvttl_ok(OSType, OSVer) + end). + +recvttl(_Config) -> + test_recv_opts( + inet, [{recvttl,ttl,33}], + fun recvttl_ok/2). + +recvtclass(_Config) -> + {ok,IFs} = inet:getifaddrs(), + case + [Name || + {Name,Opts} <- IFs, + lists:member({addr,{0,0,0,0,0,0,0,1}}, Opts)] + of + [_] -> + test_recv_opts( + inet6, [{recvtclass,tclass,224}], + fun recvtclass_ok/2); + [] -> + {skip,ipv6_not_supported,IFs} + end. + +%% These version numbers are just above the highest noted in daily tests +%% where the test fails for a plausible reason, that is the lowest +%% where we can expect that the test mighe succeed, so +%% skip on platforms lower than this. 
+%% +%% On newer versions it might be fixed, but we'll see about that +%% when machines with newer versions gets installed... +%% If the test still fails for a plausible reason these +%% version numbers simply should be increased. +%% Or maybe we should change to only test on known good platforms? + +%% Using the option returns einval, so it is not implemented. +recvtos_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0}); +%% Using the option returns einval, so it is not implemented. +recvtos_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0}); +%% Using the option returns einval, so it is not implemented. +recvtos_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0}); +%% +recvtos_ok({unix,_}, _) -> true; +recvtos_ok(_, _) -> false. + +recvttl_ok({unix,_}, _) -> true; +recvttl_ok(_, _) -> false. + +%% Using the option returns einval, so it is not implemented. +recvtclass_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {9,9,0}); +recvtclass_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {2,6,11}); +%% +recvtclass_ok({unix,_}, _) -> true; +recvtclass_ok(_, _) -> false. + +semver_lt({X1,Y1,Z1}, {X2,Y2,Z2}) -> + if + X1 > X2 -> false; + X1 < X2 -> true; + Y1 > Y2 -> false; + Y1 < Y2 -> true; + Z1 > Z2 -> false; + Z1 < Z2 -> true; + true -> false + end; +semver_lt(_, {_,_,_}) -> false. + +test_recv_opts(Family, Spec, OSFilter) -> + OSType = os:type(), + OSVer = os:version(), + case OSFilter(OSType, OSVer) of + true -> + io:format("Os: ~p, ~p~n", [OSType,OSVer]), + test_recv_opts(Family, Spec, OSType, OSVer); + false -> + {skip,{not_supported_for_os_version,{OSType,OSVer}}} + end. +%% +test_recv_opts(Family, Spec, _OSType, _OSVer) -> + Timeout = 5000, + RecvOpts = [RecvOpt || {RecvOpt,_,_} <- Spec], + TrueRecvOpts = [{RecvOpt,true} || {RecvOpt,_,_} <- Spec], + FalseRecvOpts = [{RecvOpt,false} || {RecvOpt,_,_} <- Spec], + Opts = [Opt || {_,Opt,_} <- Spec], + OptsVals = [{Opt,Val} || {_,Opt,Val} <- Spec], + TrueRecvOpts_OptsVals = TrueRecvOpts ++ OptsVals, + Addr = + case Family of + inet -> + {127,0,0,1}; + inet6 -> + {0,0,0,0,0,0,0,1} + end, + %% + {ok,S1} = + gen_udp:open(0, [Family,binary,{active,false}|TrueRecvOpts]), + {ok,P1} = inet:port(S1), + {ok,TrueRecvOpts} = inet:getopts(S1, RecvOpts), + ok = inet:setopts(S1, FalseRecvOpts), + {ok,FalseRecvOpts} = inet:getopts(S1, RecvOpts), + ok = inet:setopts(S1, TrueRecvOpts_OptsVals), + {ok,TrueRecvOpts_OptsVals} = inet:getopts(S1, RecvOpts ++ Opts), + %% + {ok,S2} = + gen_udp:open(0, [Family,binary,{active,true}|FalseRecvOpts]), + {ok,P2} = inet:port(S2), + {ok,FalseRecvOpts_OptsVals2} = inet:getopts(S2, RecvOpts ++ Opts), + OptsVals2 = FalseRecvOpts_OptsVals2 -- FalseRecvOpts, + %% + ok = gen_udp:send(S2, Addr, P1, <<"abcde">>), + ok = gen_udp:send(S1, Addr, P2, <<"fghij">>), + {ok,{_,P2,OptsVals3,<<"abcde">>}} = gen_udp:recv(S1, 0, Timeout), + verify_sets_eq(OptsVals3, OptsVals2), + receive + {udp,S2,_,P1,<<"fghij">>} -> + ok; + Other1 -> + exit({unexpected,Other1}) + after Timeout -> + exit(timeout) + end, + %% + ok = inet:setopts(S1, FalseRecvOpts), + {ok,FalseRecvOpts} = inet:getopts(S1, RecvOpts), + ok = inet:setopts(S2, TrueRecvOpts), + {ok,TrueRecvOpts} = inet:getopts(S2, RecvOpts), + %% + ok = gen_udp:send(S2, Addr, P1, <<"klmno">>), + ok = gen_udp:send(S1, Addr, P2, <<"pqrst">>), + {ok,{_,P2,<<"klmno">>}} = gen_udp:recv(S1, 0, Timeout), + receive + {udp,S2,_,P1,OptsVals4,<<"pqrst">>} -> + verify_sets_eq(OptsVals4, OptsVals); + Other2 -> + exit({unexpected,Other2}) + after Timeout -> + exit(timeout) + end, + ok = 
gen_udp:close(S1), + ok = gen_udp:close(S2), +%% exit({{OSType,OSVer},success}), % In search for the truth + ok. + +verify_sets_eq(L1, L2) -> + L = lists:sort(L1), + case lists:sort(L2) of + L -> + ok; + _ -> + exit({sets_neq,L1,L2}) + end. + + local_basic(_Config) -> SFile = local_filename(server), SAddr = {local,bin_filename(SFile)}, @@ -757,9 +927,9 @@ ok({ok,V}) -> V; ok(NotOk) -> try throw(not_ok) catch - Thrown -> + throw:Thrown:Stacktrace -> erlang:raise( - error, {Thrown, NotOk}, tl(erlang:get_stacktrace())) + error, {Thrown, NotOk}, tl(Stacktrace)) end. diff --git a/lib/kernel/test/global_SUITE.erl b/lib/kernel/test/global_SUITE.erl index 0a7f73c344..8eab36e308 100644 --- a/lib/kernel/test/global_SUITE.erl +++ b/lib/kernel/test/global_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2016. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -1383,7 +1383,7 @@ ring(Config) when is_list(Config) -> rpc_cast(Cp8, ?MODULE, single_node, [Time, Cp7, Config]), %% sleep to make the partitioned net ready - ct:sleep(Time - msec()), + sleep(Time - msec()), pong = net_adm:ping(Cp0), pong = net_adm:ping(Cp1), @@ -1466,7 +1466,7 @@ simple_ring(Config) when is_list(Config) -> rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]), %% sleep to make the partitioned net ready - ct:sleep(Time - msec()), + sleep(Time - msec()), pong = net_adm:ping(Cp0), pong = net_adm:ping(Cp1), @@ -1542,7 +1542,7 @@ line(Config) when is_list(Config) -> rpc_cast(Cp8, ?MODULE, single_node, [Time, Cp7, Config]), %% Sleep to make the partitioned net ready - ct:sleep(Time - msec()), + sleep(Time - msec()), pong = net_adm:ping(Cp0), pong = net_adm:ping(Cp1), @@ -1626,7 +1626,7 @@ simple_line(Config) when is_list(Config) -> rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]), %% sleep to make the partitioned net ready - ct:sleep(Time - msec()), + sleep(Time - msec()), pong = net_adm:ping(Cp0), pong = net_adm:ping(Cp1), @@ -3470,8 +3470,8 @@ start_procs(Parent, N1, N2, N3, Config) -> Pid6 = rpc:call(N3, ?MODULE, start_proc3, [test4]), assert_pid(Pid6), yes = global:register_name(test1, Pid3), - yes = global:register_name(test2, Pid4, {global, notify_all_name}), - yes = global:register_name(test3, Pid5, {global, random_notify_name}), + yes = global:register_name(test2, Pid4, fun global:notify_all_name/3), + yes = global:register_name(test3, Pid5, fun global:random_notify_name/3), Resolve = fun(Name, Pid1, Pid2) -> Parent ! {resolve_called, Name, node()}, {Min, Max} = minmax(Pid1, Pid2), @@ -3546,7 +3546,7 @@ start_proc_basic(Name) -> end. init_proc_basic(Parent, Name) -> - X = global:register_name(Name, self(), {?MODULE, fix_basic_name}), + X = global:register_name(Name, self(), fun ?MODULE:fix_basic_name/3), Parent ! {self(),X}, loop(). @@ -3555,7 +3555,7 @@ single_node(Time, Node, Config) -> lists:foreach(fun(N) -> _ = erlang:disconnect_node(N) end, nodes()), ?UNTIL(get_known(node()) =:= [node()]), spawn(?MODULE, init_2, []), - ct:sleep(Time - msec()), + sleep(Time - msec()), net_adm:ping(Node). init_2() -> @@ -3791,15 +3791,6 @@ stop() -> test_server:stop_node(Node) end, nodes()). -dbg_logs(Name) -> dbg_logs(Name, ?NODES). - -dbg_logs(Name, Nodes) -> - lists:foreach(fun(N) -> - F = lists:concat([Name, ".log.", N, ".txt"]), - ok = sys:log_to_file({global_name_server, N}, F) - end, Nodes). 
- - %% Tests that locally loaded nodes do not loose contact with other nodes. global_lost_nodes(Config) when is_list(Config) -> Timeout = 60, @@ -4018,13 +4009,6 @@ collect_nodes(N, Max) -> [Node | collect_nodes(N+1, Max)] end. -only_element(_E, []) -> - true; -only_element(E, [E|R]) -> - only_element(E, R); -only_element(_E, _) -> - false. - exit_p(Pid) -> Ref = erlang:monitor(process, Pid), Pid ! die, @@ -4047,6 +4031,11 @@ wait_for_exit_fast(Pid) -> ok end. +sleep(Time) when Time > 0 -> + ct:sleep(Time); +sleep(_Time) -> + ok. + check_everywhere(Nodes, Name, Config) -> ?UNTIL(begin case rpc:multicall(Nodes, global, whereis_name, [Name]) of @@ -4171,10 +4160,10 @@ rpc_cast(Node, Module, Function, Args, File) -> %% The emulator now ensures that the node has been removed from %% nodes(). -rpc_disconnect_node(Node, DisconnectedNode, _Config) -> - True = rpc:call(Node, erlang, disconnect_node, [DisconnectedNode]), - False = lists:member(DisconnectedNode, rpc:call(Node, erlang, nodes, [])), - {true, false} = {True, False}. +rpc_disconnect_node(Node, DisconnectedNode, Config) -> + true = rpc:call(Node, erlang, disconnect_node, [DisconnectedNode]), + ?UNTIL + (not lists:member(DisconnectedNode, rpc:call(Node, erlang, nodes, []))). %%% %%% Utility diff --git a/lib/kernel/test/heart_SUITE.erl b/lib/kernel/test/heart_SUITE.erl index 45032faf6d..f5ca6d0e1d 100644 --- a/lib/kernel/test/heart_SUITE.erl +++ b/lib/kernel/test/heart_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -168,7 +168,7 @@ reboot(Config) when is_list(Config) -> {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), rpc:call(Node, init, reboot, []), receive @@ -203,7 +203,7 @@ node_start_immediately_after_crash_test(Config) when is_list(Config) -> [{"ERL_CRASH_DUMP_SECONDS", "0"}]), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), Mod = exhaust_atoms, @@ -254,7 +254,7 @@ node_start_soon_after_crash_test(Config) when is_list(Config) -> [{"ERL_CRASH_DUMP_SECONDS", "10"}]), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), Mod = exhaust_atoms, @@ -309,7 +309,7 @@ set_cmd(Config) when is_list(Config) -> clear_cmd(Config) when is_list(Config) -> {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), rpc:call(Node, init, reboot, []), receive @@ -346,9 +346,16 @@ clear_cmd(Config) when is_list(Config) -> get_cmd(Config) when is_list(Config) -> {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME), - Cmd = "test", - ok = rpc:call(Node, heart, set_cmd, [Cmd]), - {ok, Cmd} = rpc:call(Node, heart, get_cmd, []), + + ShortCmd = "test", + ok = rpc:call(Node, heart, set_cmd, [ShortCmd]), + {ok, ShortCmd} = rpc:call(Node, heart, get_cmd, []), + + %% This would hang prior to OTP-15024 being fixed. 
+ LongCmd = [$a || _ <- lists:seq(1, 160)], + ok = rpc:call(Node, heart, set_cmd, [LongCmd]), + {ok, LongCmd} = rpc:call(Node, heart, get_cmd, []), + stop_node(Node), ok. diff --git a/lib/kernel/test/inet_SUITE.erl b/lib/kernel/test/inet_SUITE.erl index 3b502be8b8..f436eafad3 100644 --- a/lib/kernel/test/inet_SUITE.erl +++ b/lib/kernel/test/inet_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -40,7 +40,8 @@ lookup_bad_search_option/1, getif/1, getif_ifr_name_overflow/1,getservbyname_overflow/1, getifaddrs/1, - parse_strict_address/1, simple_netns/1, simple_netns_open/1, + parse_strict_address/1, ipv4_mapped_ipv6_address/1, + simple_netns/1, simple_netns_open/1, simple_bind_to_device/1, simple_bind_to_device_open/1]). -export([get_hosts/1, get_ipv6_hosts/1, parse_hosts/1, parse_address/1, @@ -667,6 +668,26 @@ parse_strict_address(Config) when is_list(Config) -> {ok, {3089,3106,23603,50240,0,0,119,136}} = inet:parse_strict_address("c11:0c22:5c33:c440::077:0088"). +ipv4_mapped_ipv6_address(Config) when is_list(Config) -> + {D1,D2,D3,D4} = IPv4Address = + {rand:uniform(256) - 1, + rand:uniform(256) - 1, + rand:uniform(256) - 1, + rand:uniform(256) - 1}, + E7 = (D1 bsl 8) bor D2, + E8 = (D3 bsl 8) bor D4, + io:format("IPv4Address: ~p.~n", [IPv4Address]), + {0,0,0,0,0,65535,E7,E8} = inet:ipv4_mapped_ipv6_address(IPv4Address), + IPv6Address = + {rand:uniform(65536) - 1, + rand:uniform(65536) - 1, + rand:uniform(65536) - 1, + rand:uniform(65536) - 1, + rand:uniform(65536) - 1, + rand:uniform(65536) - 1, E7, E8}, + IPv4Address = inet:ipv4_mapped_ipv6_address(IPv6Address), + ok. + t_gethostnative(Config) when is_list(Config) -> %% this will result in 26 bytes sent which causes problem in Windows %% if the port-program has not assured stdin to be read in BINARY mode @@ -1039,28 +1060,26 @@ getservbyname_overflow(Config) when is_list(Config) -> getifaddrs(Config) when is_list (Config) -> {ok,IfAddrs} = inet:getifaddrs(), io:format("IfAddrs = ~p.~n", [IfAddrs]), - case - {os:type(), - [If || - {If,Opts} <- IfAddrs, - lists:keymember(hwaddr, 1, Opts)]} of - {{unix,sunos},[]} -> ok; - {OT,[]} -> - ct:fail({should_have_hwaddr,OT}); - _ -> ok + case [If || {If,Opts} <- IfAddrs, lists:keymember(hwaddr, 1, Opts)] of + [] -> + case os:type() of + {unix,sunos} -> ok; + OT -> + ct:fail({should_have_hwaddr,OT}) + end; + [_|_] -> ok end, - Addrs = - [element(1, A) || A <- ifaddrs(IfAddrs)], + Addrs = ifaddrs(IfAddrs), io:format("Addrs = ~p.~n", [Addrs]), [check_addr(Addr) || Addr <- Addrs], ok. -check_addr({addr,Addr}) +check_addr(Addr) when tuple_size(Addr) =:= 8, element(1, Addr) band 16#FFC0 =:= 16#FE80 -> io:format("Addr: ~p link local; SKIPPED!~n", [Addr]), ok; -check_addr({addr,Addr}) -> +check_addr(Addr) -> io:format("Addr: ~p.~n", [Addr]), Ping = "ping", Pong = "pong", @@ -1076,80 +1095,86 @@ check_addr({addr,Addr}) -> ok = gen_tcp:close(S2), ok = gen_tcp:close(L). --record(ifopts, {name,flags,addrs=[],hwaddr}). 
- -ifaddrs([]) -> []; -ifaddrs([{If,Opts}|IOs]) -> - #ifopts{flags=F} = Ifopts = check_ifopts(Opts, #ifopts{name=If}), - case F of - {flags,Flags} -> - case lists:member(up, Flags) of - true -> - Ifopts#ifopts.addrs; - false -> - [] - end ++ ifaddrs(IOs); - undefined -> - ifaddrs(IOs) +ifaddrs(IfOpts) -> + IfMap = collect_ifopts(IfOpts), + ChkFun = + fun Self({{_,Flags} = Key, Opts}, ok) -> + Broadcast = lists:member(broadcast, Flags), + P2P = lists:member(pointtopoint, Flags), + case Opts of + [{addr,_},{netmask,_},{broadaddr,_}|Os] + when Broadcast -> + Self({Key, Os}, ok); + [{addr,_},{netmask,_},{dstaddr,_}|Os] + when P2P -> + Self({Key, Os}, ok); + [{addr,_},{netmask,_}|Os] -> + Self({Key, Os}, ok); + [{hwaddr,_}|Os] -> + Self({Key, Os}, ok); + [] -> + ok + end + end, + fold_ifopts(ChkFun, ok, IfMap), + AddrsFun = + fun ({{_,Flags}, Opts}, Acc) -> + case + lists:member(running, Flags) + andalso (not lists:member(pointtopoint, Flags)) + of + true -> + lists:reverse( + [Addr || {addr,Addr} <- Opts], + Acc); + false -> + Acc + end + end, + fold_ifopts(AddrsFun, [], IfMap). + +collect_ifopts(IfOpts) -> + collect_ifopts(IfOpts, #{}). +%% +collect_ifopts(IfOpts, IfMap) -> + case IfOpts of + [{If,[{flags,Flags}|Opts]}|IfOs] -> + Key = {If,Flags}, + case maps:is_key(Key, IfMap) of + true -> + ct:fail({unexpected_ifopts,IfOpts,IfMap}); + false -> + collect_ifopts(IfOs, IfMap, Opts, Key, []) + end; + [] -> + IfMap; + _ -> + ct:fail({unexpected_ifopts,IfOpts,IfMap}) + end. +%% +collect_ifopts(IfOpts, IfMap, Opts, Key, R) -> + case Opts of + [{flags,_}|_] -> + {If,_} = Key, + collect_ifopts( + [{If,Opts}|IfOpts], maps:put(Key, lists:reverse(R), IfMap)); + [OptVal|Os] -> + collect_ifopts(IfOpts, IfMap, Os, Key, [OptVal|R]); + [] -> + collect_ifopts(IfOpts, maps:put(Key, lists:reverse(R), IfMap)) end. -check_ifopts([], #ifopts{flags=F,addrs=Raddrs}=Ifopts) -> - Addrs = lists:reverse(Raddrs), - R = Ifopts#ifopts{addrs=Addrs}, - io:format("~p.~n", [R]), - %% See how we did... 
- {flags,Flags} = F, - case lists:member(broadcast, Flags) of - true -> - [case A of - {{addr,_},{netmask,_},{broadaddr,_}} -> - A; - {{addr,T},{netmask,_}} when tuple_size(T) =:= 8 -> - A - end || A <- Addrs]; - false -> - case lists:member(pointtopoint, Flags) of - true -> - [case A of - {{addr,_},{netmask,_},{dstaddr,_}} -> - A - end || A <- Addrs]; - false -> - [case A of - {{addr,_},{netmask,_}} -> - A - end || A <- Addrs] - end - end, - R; -check_ifopts([{flags,_}=F|Opts], #ifopts{flags=undefined}=Ifopts) -> - check_ifopts(Opts, Ifopts#ifopts{flags=F}); -check_ifopts([{flags,_}=F|Opts], #ifopts{flags=Flags}=Ifopts) -> - case F of - Flags -> - check_ifopts(Opts, Ifopts); - _ -> - ct:fail({multiple_flags,F,Ifopts}) - end; -check_ifopts( - [{addr,_}=A,{netmask,_}=N,{dstaddr,_}=D|Opts], - #ifopts{addrs=Addrs}=Ifopts) -> - check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N,D}|Addrs]}); -check_ifopts( - [{addr,_}=A,{netmask,_}=N,{broadaddr,_}=B|Opts], - #ifopts{addrs=Addrs}=Ifopts) -> - check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N,B}|Addrs]}); -check_ifopts( - [{addr,_}=A,{netmask,_}=N|Opts], - #ifopts{addrs=Addrs}=Ifopts) -> - check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N}|Addrs]}); -check_ifopts([{addr,_}=A|Opts], #ifopts{addrs=Addrs}=Ifopts) -> - check_ifopts(Opts, Ifopts#ifopts{addrs=[{A}|Addrs]}); -check_ifopts([{hwaddr,Hwaddr}=H|Opts], #ifopts{hwaddr=undefined}=Ifopts) - when is_list(Hwaddr) -> - check_ifopts(Opts, Ifopts#ifopts{hwaddr=H}); -check_ifopts([{hwaddr,_}=H|_], #ifopts{}=Ifopts) -> - ct:fail({multiple_hwaddrs,H,Ifopts}). +fold_ifopts(Fun, Acc, IfMap) -> + fold_ifopts(Fun, Acc, IfMap, maps:keys(IfMap)). +%% +fold_ifopts(Fun, Acc, IfMap, Keys) -> + case Keys of + [Key|Ks] -> + Opts = maps:get(Key, IfMap), + fold_ifopts(Fun, Fun({Key,Opts}, Acc), IfMap, Ks); + [] -> + Acc + end. %% Works just like lists:member/2, except that any {127,_,_,_} tuple %% matches any other {127,_,_,_}. We do this to handle Linux systems diff --git a/lib/kernel/test/inet_res_SUITE.erl b/lib/kernel/test/inet_res_SUITE.erl index 6691ad9c06..df6e48abae 100644 --- a/lib/kernel/test/inet_res_SUITE.erl +++ b/lib/kernel/test/inet_res_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2009-2016. All Rights Reserved. +%% Copyright Ericsson AB 2009-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -217,10 +217,10 @@ proxy_start(TC, {NS,P}) -> spawn_link( fun () -> try proxy_start(TC, NS, P, Parent, Tag) - catch C:X -> + catch C:X:Stacktrace -> io:format( "~w: ~w:~p ~p~n", - [self(),C,X,erlang:get_stacktrace()]) + [self(),C,X,Stacktrace]) end end), receive {started,Tag,Port} -> diff --git a/lib/kernel/test/inet_sockopt_SUITE.erl b/lib/kernel/test/inet_sockopt_SUITE.erl index ada9c2689c..27ff74e309 100644 --- a/lib/kernel/test/inet_sockopt_SUITE.erl +++ b/lib/kernel/test/inet_sockopt_SUITE.erl @@ -110,9 +110,14 @@ simple(Config) when is_list(Config) -> {S1,S2} = create_socketpair(Opt, Opt), {ok,Opt} = inet:getopts(S1,OptTags), {ok,Opt} = inet:getopts(S2,OptTags), - COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- Opt], + NoPushOpt = case os:type() of + {unix, Osname} when Osname =:= linux; Osname =:= freebsd -> {nopush, true}; + {_,_} -> {nopush, false} + end, + COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- [NoPushOpt|Opt]], + COptTags = [X || {X,_} <- COpt], inet:setopts(S1,COpt), - {ok,COpt} = inet:getopts(S1,OptTags), + {ok,COpt} = inet:getopts(S1,COptTags), {ok,Opt} = inet:getopts(S2,OptTags), gen_tcp:close(S1), gen_tcp:close(S2), diff --git a/lib/kernel/test/init_SUITE.erl b/lib/kernel/test/init_SUITE.erl index 2b59eb2bfe..6a006cdc01 100644 --- a/lib/kernel/test/init_SUITE.erl +++ b/lib/kernel/test/init_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -299,7 +299,7 @@ many_restarts() -> many_restarts(Config) when is_list(Config) -> {ok, Node} = loose_node:start(init_test, "", ?DEFAULT_TIMEOUT_SEC), - loop_restart(50,Node,rpc:call(Node,erlang,whereis,[error_logger])), + loop_restart(50,Node,rpc:call(Node,erlang,whereis,[logger])), loose_node:stop(Node), ok. @@ -316,13 +316,13 @@ loop_restart(N,Node,EHPid) -> ct:fail(not_stopping) end, ok = wait_for(30, Node, EHPid), - loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[error_logger])). + loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[logger])). wait_for(0,Node,_) -> loose_node:stop(Node), error; wait_for(N,Node,EHPid) -> - case rpc:call(Node, erlang, whereis, [error_logger]) of + case rpc:call(Node, erlang, whereis, [logger]) of Pid when is_pid(Pid), Pid =/= EHPid -> %% erlang:display(ok), ok; @@ -365,7 +365,9 @@ restart(Config) when is_list(Config) -> %% Ok, the node is up, now the real test test begins. erlang:monitor_node(Node, true), SysProcs0 = rpc:call(Node, ?MODULE, find_system_processes, []), - [InitPid, PurgerPid, LitCollectorPid, DirtyCodePid] = SysProcs0, + io:format("SysProcs0=~p~n", [SysProcs0]), + [InitPid, PurgerPid, LitCollectorPid, + DirtySigNPid, DirtySigHPid, DirtySigMPid] = SysProcs0, InitPid = rpc:call(Node, erlang, whereis, [init]), PurgerPid = rpc:call(Node, erlang, whereis, [erts_code_purger]), Procs = rpc:call(Node, erlang, processes, []), @@ -381,7 +383,9 @@ restart(Config) when is_list(Config) -> ok = wait_restart(30, Node), SysProcs1 = rpc:call(Node, ?MODULE, find_system_processes, []), - [InitPid1, PurgerPid1, LitCollectorPid1, DirtyCodePid1] = SysProcs1, + io:format("SysProcs1=~p~n", [SysProcs1]), + [InitPid1, PurgerPid1, LitCollectorPid1, + DirtySigNPid1, DirtySigHPid1, DirtySigMPid1] = SysProcs1, %% Still the same init process! 
InitPid1 = rpc:call(Node, erlang, whereis, [init]), @@ -394,20 +398,18 @@ restart(Config) when is_list(Config) -> PurgerP = pid_to_list(PurgerPid1), %% and same literal area collector process! - case LitCollectorPid of - undefined -> undefined = LitCollectorPid1; - _ -> - LitCollectorP = pid_to_list(LitCollectorPid), - LitCollectorP = pid_to_list(LitCollectorPid1) - end, - - %% and same dirty process code checker process! - case DirtyCodePid of - undefined -> undefined = DirtyCodePid1; - _ -> - DirtyCodeP = pid_to_list(DirtyCodePid), - DirtyCodeP = pid_to_list(DirtyCodePid1) - end, + LitCollectorP = pid_to_list(LitCollectorPid), + LitCollectorP = pid_to_list(LitCollectorPid1), + + %% and same normal dirty signal handler process! + DirtySigNP = pid_to_list(DirtySigNPid), + DirtySigNP = pid_to_list(DirtySigNPid1), + %% and same high dirty signal handler process! + DirtySigHP = pid_to_list(DirtySigHPid), + DirtySigHP = pid_to_list(DirtySigHPid1), + %% and same max dirty signal handler process! + DirtySigMP = pid_to_list(DirtySigMPid), + DirtySigMP = pid_to_list(DirtySigMPid1), NewProcs0 = rpc:call(Node, erlang, processes, []), NewProcs = NewProcs0 -- SysProcs1, @@ -433,7 +435,9 @@ restart(Config) when is_list(Config) -> -record(sys_procs, {init, code_purger, literal_collector, - dirty_proc_checker}). + dirty_sig_handler_normal, + dirty_sig_handler_high, + dirty_sig_handler_max}). find_system_processes() -> find_system_procs(processes(), #sys_procs{}). @@ -442,21 +446,32 @@ find_system_procs([], SysProcs) -> [SysProcs#sys_procs.init, SysProcs#sys_procs.code_purger, SysProcs#sys_procs.literal_collector, - SysProcs#sys_procs.dirty_proc_checker]; + SysProcs#sys_procs.dirty_sig_handler_normal, + SysProcs#sys_procs.dirty_sig_handler_high, + SysProcs#sys_procs.dirty_sig_handler_max]; find_system_procs([P|Ps], SysProcs) -> - case process_info(P, initial_call) of - {initial_call,{otp_ring0,start,2}} -> + case process_info(P, [initial_call, priority]) of + [{initial_call,{otp_ring0,start,2}},_] -> undefined = SysProcs#sys_procs.init, find_system_procs(Ps, SysProcs#sys_procs{init = P}); - {initial_call,{erts_code_purger,start,0}} -> + [{initial_call,{erts_code_purger,start,0}},_] -> undefined = SysProcs#sys_procs.code_purger, find_system_procs(Ps, SysProcs#sys_procs{code_purger = P}); - {initial_call,{erts_literal_area_collector,start,0}} -> + [{initial_call,{erts_literal_area_collector,start,0}},_] -> undefined = SysProcs#sys_procs.literal_collector, find_system_procs(Ps, SysProcs#sys_procs{literal_collector = P}); - {initial_call,{erts_dirty_process_code_checker,start,0}} -> - undefined = SysProcs#sys_procs.dirty_proc_checker, - find_system_procs(Ps, SysProcs#sys_procs{dirty_proc_checker = P}); + [{initial_call,{erts_dirty_process_signal_handler,start,0}}, + {priority,normal}] -> + undefined = SysProcs#sys_procs.dirty_sig_handler_normal, + find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_normal = P}); + [{initial_call,{erts_dirty_process_signal_handler,start,0}}, + {priority,high}] -> + undefined = SysProcs#sys_procs.dirty_sig_handler_high, + find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_high = P}); + [{initial_call,{erts_dirty_process_signal_handler,start,0}}, + {priority,max}] -> + undefined = SysProcs#sys_procs.dirty_sig_handler_max, + find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_max = P}); _ -> find_system_procs(Ps, SysProcs) end. 
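The reworked lookup in init_SUITE tells the three dirty-signal-handler processes apart purely by priority, since they share the same initial call. A minimal sketch of that classification (the function name is ours, not from the patch):

classify_sys_proc(P) ->
    case process_info(P, [initial_call, priority]) of
        [{initial_call, {erts_dirty_process_signal_handler, start, 0}},
         {priority, Prio}] ->
            {dirty_sig_handler, Prio};          %% Prio is normal | high | max
        [{initial_call, {erts_code_purger, start, 0}}, _] ->
            code_purger;
        [{initial_call, {otp_ring0, start, 2}}, _] ->
            init;
        _ ->
            other
    end.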
diff --git a/lib/kernel/test/kernel_SUITE.erl b/lib/kernel/test/kernel_SUITE.erl index da56359294..3e5ed855b5 100644 --- a/lib/kernel/test/kernel_SUITE.erl +++ b/lib/kernel/test/kernel_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2016. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -30,14 +30,14 @@ -export([init_per_testcase/2, end_per_testcase/2]). %% Test cases must be exported. --export([app_test/1, appup_test/1]). +-export([app_test/1, appup_test/1, refc/1]). suite() -> [{ct_hooks,[ts_install_cth]}, {timetrap,{minutes,2}}]. all() -> - [app_test, appup_test]. + [app_test, appup_test, refc]. groups() -> []. @@ -163,3 +163,68 @@ check_appup([Vsn|Vsns],Instrs,Expected) -> end; check_appup([],_,_) -> ok. + +%%% Check that refc module handles the counters as expected +refc(_Config) -> + Enable = fun(Enable) -> erlang:system_flag(scheduler_wall_time, Enable) end, + IsOn = fun() -> undefined /= erlang:statistics(scheduler_wall_time) end, + Tester = self(), + Loop = fun Loop() -> + receive + die -> normal; + {apply, Bool} -> + Res = Enable(Bool), + Tester ! {self(), Res}, + Loop() + end + end, + + %% Counter should be 0 + false = Enable(false), + + false = Enable(true), + true = Enable(true), + true = Enable(false), + true = Enable(false), + + %% Counter should be 0 + false = IsOn(), + + P1 = spawn_link(Loop), + P1 ! {apply, true}, + receive {P1, R1} -> false = R1 end, + + %% P1 has turned it on counter should be one + true = IsOn(), + true = Enable(true), + true = Enable(false), + true = IsOn(), + + P1 ! {apply, false}, + receive {P1, R2} -> true = R2 end, + false = IsOn(), + + P1 ! {apply, true}, + receive {P1, R3} -> false = R3 end, + true = IsOn(), + true = Enable(false), + + + P1 ! die, + timer:sleep(100), + false = IsOn(), + false = Enable(false), + + P2 = spawn_link(Loop), + P2 ! {apply, true}, + receive {P2, R4} -> false = R4 end, + true = IsOn(), + P2 ! {apply, true}, + receive {P2, R5} -> true = R5 end, + true = IsOn(), + + P2 ! die, + timer:sleep(100), + false = IsOn(), + + ok. diff --git a/lib/kernel/test/kernel_bench.spec b/lib/kernel/test/kernel_bench.spec new file mode 100644 index 0000000000..4de133f21b --- /dev/null +++ b/lib/kernel/test/kernel_bench.spec @@ -0,0 +1,2 @@ +{groups,"../kernel_test",zlib_SUITE,[bench]}. +{groups,"../kernel_test",file_SUITE,[bench]}. diff --git a/lib/kernel/test/kernel_config_SUITE.erl b/lib/kernel/test/kernel_config_SUITE.erl index 9a4578917d..9207025a2c 100644 --- a/lib/kernel/test/kernel_config_SUITE.erl +++ b/lib/kernel/test/kernel_config_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -76,7 +76,7 @@ sync(Conf) when is_list(Conf) -> %% Reset wall_clock {T1,_} = erlang:statistics(wall_clock), io:format("~p~n", [{t1, T1}]), - Command = lists:concat([lib:progname(), + Command = lists:append([ct:get_progname(), " -detached -sname cp1 ", "-config ", Config, " -env ERL_CRASH_DUMP erl_crash_dump.cp1"]), diff --git a/lib/kernel/test/logger.cover b/lib/kernel/test/logger.cover new file mode 100644 index 0000000000..960bc0abff --- /dev/null +++ b/lib/kernel/test/logger.cover @@ -0,0 +1,14 @@ +%% -*- erlang -*- +{incl_mods,[error_logger, + logger, + logger_backend, + logger_config, + logger_disk_log_h, + logger_h_common, + logger_filters, + logger_formatter, + logger_server, + logger_simple_h, + logger_std_h, + logger_sup]}. + diff --git a/lib/kernel/test/logger.spec b/lib/kernel/test/logger.spec new file mode 100644 index 0000000000..1ab90b3e93 --- /dev/null +++ b/lib/kernel/test/logger.spec @@ -0,0 +1,11 @@ +%% -*-erlang-*- +{suites,"../kernel_test", [error_logger_SUITE, + error_logger_warn_SUITE, + logger_SUITE, + logger_disk_log_h_SUITE, + logger_env_var_SUITE, + logger_filters_SUITE, + logger_formatter_SUITE, + logger_legacy_SUITE, + logger_simple_h_SUITE, + logger_std_h_SUITE]}. diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl new file mode 100644 index 0000000000..d831d0d108 --- /dev/null +++ b/lib/kernel/test/logger_SUITE.erl @@ -0,0 +1,1330 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}). +-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]). + +-define(MY_LOC(N),#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY}, + file=>?FILE, line=>?LINE-N}). + +-define(TRY(X), my_try(fun() -> X end)). + + +suite() -> + [{timetrap,{seconds,30}}, + {ct_hooks,[logger_test_lib]}]. + +init_per_suite(Config) -> + case logger:get_handler_config(?STANDARD_HANDLER) of + {ok,StdH} -> + ok = logger:remove_handler(?STANDARD_HANDLER), + [{default_handler,StdH}|Config]; + _ -> + Config + end. + +end_per_suite(Config) -> + case ?config(default_handler,Config) of + #{module:=HMod} = HConfig -> + ok = logger:add_handler(?STANDARD_HANDLER,HMod,HConfig); + _ -> + ok + end. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + PC = logger:get_primary_config(), + [{logger_config,PC}|Config]. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. 
+ +all() -> + [start_stop, + add_remove_handler, + multiple_handlers, + add_remove_filter, + change_config, + set_formatter, + log_no_levels, + log_all_levels_api, + macros, + set_level, + set_module_level, + set_application_level, + cache_module_level, + format_report, + filter_failed, + handler_failed, + config_sanity_check, + log_failed, + emulator, + via_logger_process, + other_node, + compare_levels, + process_metadata, + app_config, + kernel_config]. + +start_stop(_Config) -> + S = whereis(logger), + true = is_pid(S), + ok. + +add_remove_handler(_Config) -> + register(callback_receiver,self()), + Hs0 = logger:get_handler_config(), + {error,{not_found,h1}} = logger:get_handler_config(h1), + ok = logger:add_handler(h1,?MODULE,#{}), + [add] = test_server:messages_get(), + Hs = logger:get_handler_config(), + Hs0 = lists:filter(fun(#{id:=h1}) -> false; (_) -> true end, Hs), + {ok,#{module:=?MODULE,level:=all,filters:=[],filter_default:=log}} = %defaults + logger:get_handler_config(h1), + ok = logger:set_handler_config(h1,filter_default,stop), + [changing_config] = test_server:messages_get(), + ?LOG_NOTICE("hello",[]), + ok = check_no_log(), + ok = logger:set_handler_config(h1,filter_default,log), + [changing_config] = test_server:messages_get(), + {ok,#{filter_default:=log}} = logger:get_handler_config(h1), + ?LOG_NOTICE("hello",[]), + ok = check_logged(notice,"hello",[],?MY_LOC(1)), + ok = logger:remove_handler(h1), + [remove] = test_server:messages_get(), + Hs0 = logger:get_handler_config(), + {error,{not_found,h1}} = logger:get_handler_config(h1), + {error,{not_found,h1}} = logger:remove_handler(h1), + logger:notice("hello",[]), + ok = check_no_log(), + ok. + +add_remove_handler(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +multiple_handlers(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}), + ok = logger:add_handler(h2,?MODULE,#{level=>error,filter_default=>log}), + ?LOG_ERROR("hello",[]), + ok = check_logged(error,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + ?LOG_NOTICE("hello",[]), + ok = check_logged(notice,"hello",[],?MY_LOC(1)), + ok = check_no_log(), + ok. + +multiple_handlers(cleanup,_Config) -> + logger:remove_handler(h1), + logger:remove_handler(h2), + ok. 
+ +add_remove_filter(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}), + LF = {fun(Log,_) -> Log#{level=>error} end, []}, + ok = logger:add_primary_filter(lf,LF), + {error,{already_exist,lf}} = logger:add_primary_filter(lf,LF), + {error,{already_exist,lf}} = logger:add_primary_filter(lf,{fun(Log,_) -> + Log + end, []}), + ?LOG_NOTICE("hello",[]), + ok = check_logged(error,"hello",[],?MY_LOC(1)), + ok = check_no_log(), + + ok = logger:add_handler(h2,?MODULE,#{level=>notice,filter_default=>log}), + HF = {fun(#{level:=error}=Log,_) -> + Log#{level=>mylevel}; + (_,_) -> + ignore + end, + []}, + ok = logger:add_handler_filter(h1,hf,HF), + {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,HF), + {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,{fun(Log,_) -> + Log + end, []}), + ?LOG_NOTICE("hello",[]), + ok = check_logged(mylevel,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + + ok = logger:remove_primary_filter(lf), + {error,{not_found,lf}} = logger:remove_primary_filter(lf), + + ?LOG_NOTICE("hello",[]), + ok = check_logged(notice,"hello",[],?MY_LOC(1)), + ok = check_logged(notice,"hello",[],?MY_LOC(2)), + + ?LOG_ERROR("hello",[]), + ok = check_logged(mylevel,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + + ok = logger:remove_handler_filter(h1,hf), + {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf), + ?LOG_NOTICE("hello",[]), + ok = check_logged(notice,"hello",[],?MY_LOC(1)), + ok = check_logged(notice,"hello",[],?MY_LOC(2)), + + ?LOG_ERROR("hello",[]), + ok = check_logged(error,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + ok. + +add_remove_filter(cleanup,_Config) -> + logger:remove_primary_filter(lf), + logger:remove_handler(h1), + logger:remove_handler(h2), + ok. 
+ +change_config(_Config) -> + %% Overwrite handler config - check that defaults are added + {error,{not_found,h1}} = logger:set_handler_config(h1,#{}), + ok = logger:add_handler(h1,?MODULE,#{level=>notice,custom=>custom}), + {ok,#{module:=?MODULE,level:=notice,filter_default:=log,custom:=custom}} = + logger:get_handler_config(h1), + register(callback_receiver,self()), + ok = logger:set_handler_config(h1,#{filter_default=>stop}), + [changing_config] = test_server:messages_get(), + {ok,#{module:=?MODULE,level:=all,filter_default:=stop}=C2} = + logger:get_handler_config(h1), + false = maps:is_key(custom,C2), + {error,fail} = logger:set_handler_config(h1,#{conf_call=>fun() -> {error,fail} end}), + {error,{attempting_syncronous_call_to_self,_}} = + logger:set_handler_config( + h1,#{conf_call=>fun() -> logger:set_handler_config(?MODULE,#{}) end}), + ok = + logger:set_handler_config( + h1,#{conf_call=>fun() -> logger:set_module_level(?MODULE,debug) end}), + {ok,C2} = logger:get_handler_config(h1), + + %% Change handler config: Single key + {error,fail} = logger:set_handler_config(h1,conf_call,fun() -> {error,fail} end), + ok = logger:set_handler_config(h1,custom,custom), + [changing_config] = test_server:messages_get(), + {ok,#{custom:=custom}=C3} = logger:get_handler_config(h1), + C2 = maps:remove(custom,C3), + + %% Change handler config: Map + ok = logger:update_handler_config(h1,#{custom=>new_custom}), + [changing_config] = test_server:messages_get(), + {ok,C4} = logger:get_handler_config(h1), + C4 = C3#{custom:=new_custom}, + + %% Change handler config: Id and module can not be changed + {error,{illegal_config_change,Old,New}} = + logger:set_handler_config(h1,id,newid), + %% Check that only the faulty field is included in return + [{id,h1}] = maps:to_list(Old), + [{id,newid}] = maps:to_list(New), + %% Check that both fields are included when both are changed + {error,{illegal_config_change, + #{id:=h1,module:=?MODULE}, + #{id:=newid,module:=newmodule}}} = + logger:set_handler_config(h1,#{id=>newid,module=>newmodule}), + + %% Change primary config: Single key + PConfig0 = logger:get_primary_config(), + ok = logger:set_primary_config(level,warning), + PConfig1 = logger:get_primary_config(), + PConfig1 = PConfig0#{level:=warning}, + + %% Change primary config: Map + ok = logger:update_primary_config(#{level=>error}), + PConfig2 = logger:get_primary_config(), + PConfig2 = PConfig1#{level:=error}, + + %% Overwrite primary config - check that defaults are added + ok = logger:set_primary_config(#{filter_default=>stop}), + #{level:=notice,filters:=[],filter_default:=stop}=PC1 = + logger:get_primary_config(), + 3 = maps:size(PC1), + %% Check that internal 'handlers' field has not been changed + MS = [{{{?HANDLER_KEY,'$1'},'_','_'},[],['$1']}], + HIds1 = lists:sort(ets:select(?LOGGER_TABLE,MS)), % dirty, internal data + HIds2 = lists:sort(logger:get_handler_ids()), + HIds1 = HIds2, + + %% Cleanup + ok = logger:set_primary_config(PConfig0), + [] = test_server:messages_get(), + + ok. + +change_config(cleanup,Config) -> + logger:remove_handler(h1), + PC = ?config(logger_config,Config), + logger:set_primary_config(PC), + ok. 
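change_config/1 above relies on the difference between replacing and merging handler configuration. A hedged sketch using the standard handler module (the handler id is illustrative); the defaults it falls back to match the ones the test asserts:

config_demo() ->
    ok = logger:add_handler(demo_h, logger_std_h, #{level => notice}),
    %% update_handler_config/2 merges into the existing map ...
    ok = logger:update_handler_config(demo_h, #{level => error}),
    {ok, #{level := error}} = logger:get_handler_config(demo_h),
    %% ... while set_handler_config/2 overwrites it and re-adds defaults.
    ok = logger:set_handler_config(demo_h, #{}),
    {ok, #{level := all, filter_default := log}} = logger:get_handler_config(demo_h),
    logger:remove_handler(demo_h).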
+ +set_formatter(_Config) -> + {error,{not_found,h1}}=logger:set_handler_config(h1,formatter,{?MODULE,[]}), + ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}), + ok = logger:set_handler_config(h1,formatter,{?MODULE,[]}), + logger:notice("hello",[]), + receive + {_Log,#{formatter:={?MODULE,[]}}} -> + ok + after 500 -> + ct:fail({timeout,no_log,process_info(self(),messages)}) + end, + ok. + +set_formatter(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +log_no_levels(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}), + logger:notice(M1=?map_rep), + ok = check_logged(notice,M1,#{}), + + Levels = [emergency,alert,critical,error,warning,notice,info,debug], + ok = logger:set_primary_config(level,none), + [logger:Level(#{Level=>rep}) || Level <- Levels], + ok = check_no_log(), + + ok = logger:set_primary_config(level,all), + M2 = ?map_rep, + ?LOG_NOTICE(M2), + ok = check_logged(notice,M2,#{}), + + ok = logger:set_module_level(?MODULE,none), + ?LOG_EMERGENCY(?map_rep), + ?LOG_ALERT(?map_rep), + ?LOG_CRITICAL(?map_rep), + ?LOG_ERROR(?map_rep), + ?LOG_WARNING(?map_rep), + ?LOG_NOTICE(?map_rep), + ?LOG_INFO(?map_rep), + ?LOG_DEBUG(?map_rep), + ok = check_no_log(), + + ok = logger:unset_module_level(?MODULE), + logger:notice(M3=?map_rep), + ok = check_logged(notice,M3,#{}), + + ok = logger:set_handler_config(h1,level,none), + [logger:Level(#{Level=>rep}) || Level <- Levels], + ok = check_no_log(), + + ok. +log_no_levels(cleanup,_Config) -> + logger:remove_handler(h1), + logger:set_primary_config(level,notice), + logger:unset_module_level(?MODULE), + ok. + +log_all_levels_api(_Config) -> + ok = logger:set_primary_config(level,all), + ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}), + test_api(emergency), + test_api(alert), + test_api(critical), + test_api(error), + test_api(warning), + test_api(notice), + test_api(info), + test_api(debug), + test_log_function(emergency), + ok. + +log_all_levels_api(cleanup,_Config) -> + logger:remove_handler(h1), + logger:set_primary_config(level,notice), + ok. + +macros(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}), + test_macros(emergency), + test_log_macro(alert), + ok. + +macros(cleanup,_Config) -> + logger:remove_handler(h1), + logger:unset_module_level(?MODULE), + ok. + +set_level(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}), + logger:debug(?map_rep), + ok = check_no_log(), + logger:notice(M1=?map_rep), + ok = check_logged(notice,M1,#{}), + ok = logger:set_primary_config(level,debug), + logger:debug(M2=?map_rep), + ok = check_logged(debug,M2,#{}), + ok. + +set_level(cleanup,_Config) -> + logger:remove_handler(h1), + logger:set_primary_config(level,notice), + ok. 
+ +set_module_level(_Config) -> + [] = logger:get_module_level([?MODULE,other]), + [] = logger:get_module_level(?MODULE), + [] = logger:get_module_level(), + + ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}), + {error,{invalid_level,bad}} = logger:set_module_level(?MODULE,bad), + {error,{not_a_list_of_modules,{bad}}} = + logger:set_module_level({bad},warning), + {error,{not_a_list_of_modules,[{bad}]}} = + logger:set_module_level([{bad}],warning), + ok = logger:set_module_level(?MODULE,warning), + [{?MODULE,warning}] = logger:get_module_level([?MODULE,other]), + [{?MODULE,warning}] = logger:get_module_level(?MODULE), + [{?MODULE,warning}] = logger:get_module_level(), + logger:notice(?map_rep,?MY_LOC(0)), + ok = check_no_log(), + logger:warning(M1=?map_rep,?MY_LOC(0)), + ok = check_logged(warning,M1,?MY_LOC(1)), + ok = logger:set_module_level(?MODULE,notice), + [{?MODULE,notice}] = logger:get_module_level([?MODULE,other]), + [{?MODULE,notice}] = logger:get_module_level(?MODULE), + [{?MODULE,notice}] = logger:get_module_level(), + logger:notice(M2=?map_rep,?MY_LOC(0)), + ok = check_logged(notice,M2,?MY_LOC(1)), + + {error,{not_a_list_of_modules,{bad}}} = logger:unset_module_level({bad}), + {error,{not_a_list_of_modules,[{bad}]}} = logger:unset_module_level([{bad}]), + ok = logger:unset_module_level(?MODULE), + [] = logger:get_module_level([?MODULE,other]), + [] = logger:get_module_level(?MODULE), + [] = logger:get_module_level(), + + ok = logger:set_module_level([m1,m2,m3],notice), + [{m1,notice},{m2,notice},{m3,notice}] = logger:get_module_level(), + ok = logger:unset_module_level(m2), + [{m1,notice},{m3,notice}] = logger:get_module_level(), + ok = logger:unset_module_level(), + [] = logger:get_module_level(), + + ok. + +set_module_level(cleanup,_Config) -> + logger:remove_handler(h1), + logger:unset_module_level(?MODULE), + ok. + +set_application_level(_Config) -> + + {error,{not_loaded,mnesia}} = logger:set_application_level(mnesia, warning), + {error,{not_loaded,mnesia}} = logger:unset_application_level(mnesia), + + case application:load(mnesia) of + ok -> + {ok, Modules} = application:get_key(mnesia, modules), + [] = logger:get_module_level(Modules), + + {error,{invalid_level,warn}} = + logger:set_application_level(mnesia, warn), + + ok = logger:set_application_level(mnesia, debug), + DebugModules = lists:sort([{M,debug} || M <- Modules]), + DebugModules = lists:sort(logger:get_module_level(Modules)), + + ok = logger:set_application_level(mnesia, warning), + + WarnModules = lists:sort([{M,warning} || M <- Modules]), + WarnModules = lists:sort(logger:get_module_level(Modules)), + + ok = logger:unset_application_level(mnesia), + [] = logger:get_module_level(Modules); + {error,{"no such file or directory","mnesia.app"}} -> + {skip, "Cannot load mnesia, does not exist"} + end. + +set_application_level(cleanup,_Config) -> + _ = logger:unset_application_level(mnesia), + _ = application:unload(mnesia), + ok. + +cache_module_level(_Config) -> + ok = logger:unset_module_level(?MODULE), + [] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config? + ?LOG_NOTICE(?map_rep), + %% Caching is done asynchronously, so wait a bit for the update + timer:sleep(100), + [_] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config? + ok = logger:unset_module_level(?MODULE), + [] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config? + ok. + +cache_module_level(cleanup,_Config) -> + logger:unset_module_level(?MODULE), + ok. 
+ +format_report(_Config) -> + {"~ts",["string"]} = logger:format_report("string"), + {"~tp",[term]} = logger:format_report(term), + {"~tp",[[]]} = logger:format_report([]), + {" ~tp: ~tp",[key,value]} = logger:format_report([{key,value}]), + KeyVals = [{key1,value1},{key2,"value2"},{key3,[]}], + KeyValRes = + {" ~tp: ~tp\n ~tp: ~ts\n ~tp: ~tp", + [key1,value1,key2,"value2",key3,[]]} = + logger:format_report(KeyVals), + KeyValRes = logger:format_report(maps:from_list(KeyVals)), + KeyValRes = logger:format_otp_report(#{label=>{?MODULE,test},report=>KeyVals}), + {" ~tp: ~tp\n ~tp: ~tp", + [label,{?MODULE,test},report,KeyVals]} = + logger:format_report(#{label=>{?MODULE,test},report=>KeyVals}), + + {" ~tp: ~tp\n ~tp",[key1,value1,term]} = + logger:format_report([{key1,value1},term]), + + {" ~tp: ~tp\n ~tp",[key1,value1,[]]} = + logger:format_report([{key1,value1},[]]), + + {"~tp",[[]]} = logger:format_report([[],[],[]]), + + ok. + +filter_failed(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}), + + %% Logger filters + {error,{invalid_filter,_}} = + logger:add_primary_filter(lf,{fun(_) -> ok end,args}), + ok = logger:add_primary_filter(lf, + {fun(_,_) -> + erlang:error({badmatch,b}) + end, + args}), + #{filters:=[_]} = logger:get_primary_config(), + ok = logger:notice(M1=?map_rep), + ok = check_logged(notice,M1,#{}), + {error,{not_found,lf}} = logger:remove_primary_filter(lf), + + ok = logger:add_primary_filter(lf,{fun(_,_) -> faulty_return end,args}), + #{filters:=[_]} = logger:get_primary_config(), + ok = logger:notice(M2=?map_rep), + ok = check_logged(notice,M2,#{}), + {error,{not_found,lf}} = logger:remove_primary_filter(lf), + + %% Handler filters + {error,{not_found,h0}} = + logger:add_handler_filter(h0,hf,{fun(_,_) -> ignore end,args}), + {error,{not_found,h0}} = logger:remove_handler_filter(h0,hf), + {error,{invalid_filter,_}} = + logger:add_handler_filter(h1,hf,{fun(_) -> ok end,args}), + ok = logger:add_handler_filter(h1,hf, + {fun(_,_) -> + erlang:error({badmatch,b}) + end, + args}), + {ok,#{filters:=[_]}} = logger:get_handler_config(h1), + ok = logger:notice(M3=?map_rep), + ok = check_logged(notice,M3,#{}), + {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf), + + ok = logger:add_handler_filter(h1,hf,{fun(_,_) -> faulty_return end,args}), + {ok,#{filters:=[_]}} = logger:get_handler_config(h1), + ok = logger:notice(M4=?map_rep), + ok = check_logged(notice,M4,#{}), + {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf), + + ok. + +filter_failed(cleanup,_Config) -> + logger:remove_handler(h1), + ok. 
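+
+%% Note on filter_failed/1 above: a filter that crashes or returns something
+%% other than a log event, 'stop' or 'ignore' is removed automatically by
+%% logger, which is why the subsequent remove_primary_filter/1 and
+%% remove_handler_filter/2 calls return {error,{not_found,...}} while the
+%% event itself is still logged.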
+ +handler_failed(_Config) -> + register(callback_receiver,self()), + {error,{invalid_id,1}} = logger:add_handler(1,?MODULE,#{}), + {error,{invalid_module,"nomodule"}} = logger:add_handler(h1,"nomodule",#{}), + {error,{invalid_config,bad}} = logger:add_handler(h1,?MODULE,bad), + {error,{invalid_filters,false}} = + logger:add_handler(h1,?MODULE,#{filters=>false}), + {error,{invalid_filter_default,true}} = + logger:add_handler(h1,?MODULE,#{filter_default=>true}), + {error,{invalid_formatter,[]}} = + logger:add_handler(h1,?MODULE,#{formatter=>[]}), + {error,{invalid_handler,_}} = logger:add_handler(h1,nomodule,#{filter_default=>log}), + logger:notice(?map_rep), + check_no_log(), + H1 = logger:get_handler_config(), + false = lists:search(fun(#{id:=h1}) -> true; (_) -> false end,H1), + {error,{not_found,h1}} = logger:remove_handler(h1), + + ok = logger:add_handler(h2,?MODULE, + #{filter_default => log, + log_call => fun() -> + erlang:error({badmatch,b}) + end}), + {error,{already_exist,h2}} = logger:add_handler(h2,othermodule,#{}), + [add] = test_server:messages_get(), + + logger:notice(?map_rep), + [remove] = test_server:messages_get(), + H2 = logger:get_handler_config(), + false = lists:search(fun(#{id:=h2}) -> true; (_) -> false end,H2), + {error,{not_found,h2}} = logger:remove_handler(h2), + + CallAddHandler = fun() -> logger:add_handler(h2,?MODULE,#{}) end, + CrashHandler = fun() -> erlang:error({badmatch,b}) end, + KillHandler = fun() -> exit(self(), die) end, + + {error,{handler_not_added,{attempting_syncronous_call_to_self,_}}} = + logger:add_handler(h1,?MODULE,#{add_call=>CallAddHandler}), + {error,{handler_not_added,{callback_crashed,_}}} = + logger:add_handler(h1,?MODULE,#{add_call=>CrashHandler}), + {error,{handler_not_added,{logger_process_exited,_,die}}} = + logger:add_handler(h1,?MODULE,#{add_call=>KillHandler}), + + check_no_log(), + ok = logger:add_handler(h1,?MODULE,#{}), + {error,{attempting_syncronous_call_to_self,_}} = + logger:set_handler_config(h1,#{conf_call=>CallAddHandler}), + {error,{callback_crashed,_}} = + logger:set_handler_config(h1,#{conf_call=>CrashHandler}), + {error,{logger_process_exited,_,die}} = + logger:set_handler_config(h1,#{conf_call=>KillHandler}), + + {error,{attempting_syncronous_call_to_self,_}} = + logger:set_handler_config(h1,conf_call,CallAddHandler), + {error,{callback_crashed,_}} = + logger:set_handler_config(h1,conf_call,CrashHandler), + {error,{logger_process_exited,_,die}} = + logger:set_handler_config(h1,conf_call,KillHandler), + + ok = logger:remove_handler(h1), + [add,remove] = test_server:messages_get(), + + check_no_log(), + ok = logger:add_handler(h1,?MODULE,#{rem_call=>CallAddHandler}), + ok = logger:remove_handler(h1), + ok = logger:add_handler(h1,?MODULE,#{rem_call=>CrashHandler}), + ok = logger:remove_handler(h1), + ok = logger:add_handler(h1,?MODULE,#{rem_call=>KillHandler}), + ok = logger:remove_handler(h1), + [add,add,add] = test_server:messages_get(), + + ok. + +handler_failed(cleanup,_Config) -> + logger:remove_handler(h1), + logger:remove_handler(h2), + ok. 
+ +config_sanity_check(_Config) -> + %% Primary config + {error,{invalid_config,bad}} = logger:set_primary_config(bad), + {error,{invalid_filter_default,bad}} = + logger:set_primary_config(filter_default,bad), + {error,{invalid_level,bad}} = logger:set_primary_config(level,bad), + {error,{invalid_filters,bad}} = logger:set_primary_config(filters,bad), + {error,{invalid_filter,bad}} = logger:set_primary_config(filters,[bad]), + {error,{invalid_filter,{_,_}}} = + logger:set_primary_config(filters,[{id,bad}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_primary_config(filters,[{id,{bad,args}}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_primary_config(filters,[{id,{fun() -> ok end,args}}]), + {error,{invalid_primary_config,{bad,bad}}} = + logger:set_primary_config(bad,bad), + + %% Handler config + {error,{not_found,h1}} = logger:set_handler_config(h1,a,b), + ok = logger:add_handler(h1,?MODULE,#{}), + {error,{invalid_filter_default,bad}} = + logger:set_handler_config(h1,filter_default,bad), + {error,{invalid_level,bad}} = logger:set_handler_config(h1,level,bad), + {error,{invalid_filters,bad}} = logger:set_handler_config(h1,filters,bad), + {error,{invalid_filter,bad}} = logger:set_handler_config(h1,filters,[bad]), + {error,{invalid_filter,{_,_}}} = + logger:set_handler_config(h1,filters,[{id,bad}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_handler_config(h1,filters,[{id,{bad,args}}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_handler_config(h1,filters,[{id,{fun() -> ok end,args}}]), + {error,{invalid_formatter,bad}} = + logger:set_handler_config(h1,formatter,bad), + {error,{invalid_module,{bad}}} = + logger:set_handler_config(h1,formatter,{{bad},cfg}), + {error,{invalid_formatter_config,logger_formatter,bad}} = + logger:set_handler_config(h1,formatter,{logger_formatter,bad}), + {error,{invalid_formatter_config,logger_formatter,{bad,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter,#{bad=>bad}}), + {error,{invalid_formatter_template,logger_formatter,bad}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{template=>bad}}), + {error,{invalid_formatter_template,logger_formatter,[1]}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{template=>[1]}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{template=>[]}}), + {error,{invalid_formatter_config,logger_formatter,{single_line,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{single_line=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{single_line=>true}}), + {error,{invalid_formatter_config,logger_formatter,{legacy_header,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{legacy_header=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{legacy_header=>true}}), + {error,{invalid_formatter_config,logger_formatter,{report_cb,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{report_cb=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{report_cb=>fun(R) -> + {"~p",[R]} + end}}), + {error,{invalid_formatter_config,logger_formatter,{chars_limit,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{chars_limit=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{chars_limit=>unlimited}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{chars_limit=>4}}), + {error,{invalid_formatter_config,logger_formatter,{depth,bad}}} = + 
logger:set_handler_config(h1,formatter,{logger_formatter, + #{depth=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{depth=>unlimited}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{depth=>4}}), + {error,{invalid_formatter_config,logger_formatter,{max_size,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{max_size=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{max_size=>unlimited}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{max_size=>4}}), + ok = logger:set_handler_config(h1,formatter,{module,config}), + {error,{callback_crashed,{error,{badmatch,3},[{?MODULE,check_config,1,_}]}}} = + logger:set_handler_config(h1,formatter,{?MODULE,crash}), + ok = logger:set_handler_config(h1,custom,custom), + + %% Old utc parameter is no longer allowed (replaced by time_offset) + {error,{invalid_formatter_config,logger_formatter,{utc,true}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{utc=>true}}), + {error,{invalid_formatter_config,logger_formatter,{time_offset,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>0}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>""}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"Z"}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"z"}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"-0:0"}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"+10:13"}}), + + {error,{invalid_formatter_config,logger_formatter,{time_offset,"+0"}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"+0"}}), + + {error,{invalid_formatter_config,logger_formatter,{time_designator,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>bad}}), + {error,{invalid_formatter_config,logger_formatter,{time_designator,"s"}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>"s"}}), + {error,{invalid_formatter_config,logger_formatter,{time_designator,0}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>0}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>$\s}}), + ok. + +config_sanity_check(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +log_failed(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}), + {error,function_clause} = ?TRY(logger:log(bad,?map_rep)), + {error,function_clause} = ?TRY(logger:log(notice,?map_rep,bad)), + {error,function_clause} = ?TRY(logger:log(notice,fun() -> ?map_rep end,bad)), + {error,function_clause} = ?TRY(logger:log(notice,fun() -> ?map_rep end,bad,#{})), + {error,function_clause} = ?TRY(logger:log(notice,bad,bad,bad)), + {error,function_clause} = ?TRY(logger:log(notice,bad,bad,#{})), + check_no_log(), + ok = logger:log(notice,M1=?str,#{}), + check_logged(notice,M1,#{}), + ok = logger:log(notice,M2=?map_rep,#{}), + check_logged(notice,M2,#{}), + ok = logger:log(notice,M3=?keyval_rep,#{}), + check_logged(notice,M3,#{}), + + %% Should we check report input more thoroughly? 
+
+    ok = logger:log(notice,M4=?keyval_rep++[other,stuff,in,list],#{}),
+    check_logged(notice,M4,#{}),
+
+    %% This might break a handler since it is assumed to be a format
+    %% string and args, so it depends on how the handler protects itself
+    %% against something like io_lib:format("ok","ok") - see the
+    %% defensive sketch after compare/1 below.
+    ok = logger:log(notice,"ok","ok",#{}),
+    check_logged(notice,"ok","ok",#{}),
+
+    ok.
+
+log_failed(cleanup,_Config) ->
+    logger:remove_handler(h1),
+    ok.
+
+emulator(_Config) ->
+    ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log,
+                                         tc_proc=>self()}),
+    Msg = "Error in process ~p on node ~p with exit value:~n~p~n",
+    Error = {badmatch,4},
+    Stack = [{module, function, 2, []}],
+    Pid = spawn(?MODULE, generate_error, [Error, Stack]),
+    check_logged(error, Msg, [Pid, node(), {Error, Stack}],
+                 #{gl=>group_leader(),
+                   error_logger=>#{tag=>error,emulator=>true}}),
+    ok.
+
+emulator(cleanup, _Config) ->
+    logger:remove_handler(h1),
+    ok.
+
+generate_error(Error, Stack) ->
+    erlang:raise(error, Error, Stack).
+
+via_logger_process(Config) ->
+    ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log,
+                                         tc_proc=>self()}),
+
+    %% Explicitly send a message to the logger process
+    %% This is used by code_server, erl_prim_loader, init, prim_file, ...
+    Msg = ?str,
+    logger ! {log,error,Msg,[],#{}},
+    check_logged(error, Msg, [], #{}),
+
+    case os:type() of
+        {win32,_} ->
+            %% Skip this part on Windows - can't change file mode
+            ok;
+        _ ->
+            %% This should trigger the same thing from erl_prim_loader
+            Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+            ok = file:make_dir(Dir),
+            ok = file:change_mode(Dir,8#0222),
+            error = erl_prim_loader:list_dir(Dir),
+            check_logged(error,
+                         #{report=>"File operation error: eacces. Target: " ++
+                               Dir ++". Function: list_dir. "},
+                         #{pid=>self(),
+                           gl=>group_leader(),
+                           error_logger=>#{tag=>error_report,
+                                           type=>std_error}}),
+            ok
+    end.
+
+via_logger_process(cleanup, Config) ->
+    Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+    _ = file:change_mode(Dir,8#0664),
+    _ = file:del_dir(Dir),
+    logger:remove_handler(h1),
+    ok.
+
+other_node(_Config) ->
+    ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log,
+                                         tc_proc=>self()}),
+    {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+    rpc:call(Node,logger,error,[Msg=?str,#{}]),
+    check_logged(error,Msg,#{}),
+    ok.
+
+other_node(cleanup,_Config) ->
+    Nodes = nodes(),
+    [test_server:stop_node(Node) || Node <- Nodes],
+    logger:remove_handler(h1),
+    ok.
+
+compare_levels(_Config) ->
+    Levels = [emergency,alert,critical,error,warning,notice,info,debug],
+    ok = compare(Levels),
+    {error,badarg} = ?TRY(logger:compare_levels(bad,bad)),
+    {error,badarg} = ?TRY(logger:compare_levels({bad},notice)),
+    {error,badarg} = ?TRY(logger:compare_levels(notice,"bad")),
+    ok.
+
+compare([L|Rest]) ->
+    eq = logger:compare_levels(L,L),
+    [gt = logger:compare_levels(L,L1) || L1 <- Rest],
+    [lt = logger:compare_levels(L1,L) || L1 <- Rest],
+    compare(Rest);
+compare([]) ->
+    ok.
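+
+%% Illustration only (not called by the suite; the helper name and fallback
+%% text are made up): one way a handler can protect itself against a message
+%% like the {"ok","ok"} pair logged in log_failed/1 above, where the "args"
+%% are not a valid argument list for the format string:
+%%
+%%   safe_format(Format,Args) ->
+%%       try io_lib:format(Format,Args)
+%%       catch _:_ -> io_lib:format("FORMAT ERROR: ~tp - ~tp",[Format,Args])
+%%       end.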
+
+process_metadata(_Config) ->
+    undefined = logger:get_process_metadata(),
+    {error,badarg} = ?TRY(logger:set_process_metadata(bad)),
+    ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+    Time = erlang:system_time(microsecond),
+    ProcMeta = #{time=>Time,line=>0,custom=>proc},
+    ok = logger:set_process_metadata(ProcMeta),
+    S1 = ?str,
+    ?LOG_NOTICE(S1,#{custom=>macro}),
+    check_logged(notice,S1,#{time=>Time,line=>0,custom=>macro}),
+
+    Time2 = erlang:system_time(microsecond),
+    S2 = ?str,
+    ?LOG_NOTICE(S2,#{time=>Time2,line=>1,custom=>macro}),
+    check_logged(notice,S2,#{time=>Time2,line=>1,custom=>macro}),
+
+    logger:notice(S3=?str,#{custom=>func}),
+    check_logged(notice,S3,#{time=>Time,line=>0,custom=>func}),
+
+    ProcMeta = logger:get_process_metadata(),
+    ok = logger:update_process_metadata(#{custom=>changed,custom2=>added}),
+    Expected = ProcMeta#{custom:=changed,custom2=>added},
+    Expected = logger:get_process_metadata(),
+    ok = logger:unset_process_metadata(),
+    undefined = logger:get_process_metadata(),
+
+    ok = logger:update_process_metadata(#{custom=>added_again}),
+    {error,badarg} = ?TRY(logger:update_process_metadata(bad)),
+    #{custom:=added_again} = logger:get_process_metadata(),
+
+    ok.
+
+process_metadata(cleanup,_Config) ->
+    logger:remove_handler(h1),
+    ok.
+
+app_config(Config) ->
+    %% Start a node with default configuration
+    {ok,_,Node} = logger_test_lib:setup(Config,[]),
+
+    App1Name = app1,
+    App1 = {application, App1Name,
+            [{description, "Test of app with logger config"},
+             {applications, [kernel]}]},
+    ok = rpc:call(Node,application,load,[App1]),
+    ok = rpc:call(Node,application,set_env,
+                  [App1Name,logger,[{handler,default,logger_std_h,#{}}]]),
+
+    %% Try to add its own default handler
+    {error,{bad_config,{handler,{app1,{already_exist,default}}}}} =
+        rpc:call(Node,logger,add_handlers,[App1Name]),
+
+    %% Add a different handler
+    ok = rpc:call(Node,application,set_env,[App1Name,logger,
+                                            [{handler,myh,logger_std_h,#{}}]]),
+    ok = rpc:call(Node,logger,add_handlers,[App1Name]),
+
+    {ok,#{filters:=DF}} = rpc:call(Node,logger,get_handler_config,[default]),
+    {ok,#{filters:=[]}} = rpc:call(Node,logger,get_handler_config,[myh]),
+
+    true = test_server:stop_node(Node),
+
+    %% Start a node with no default handler, then add its own default handler
+    {ok,#{handlers:=[#{id:=simple}]},Node} =
+        logger_test_lib:setup(Config,[{logger,[{handler,default,undefined}]}]),
+
+    ok = rpc:call(Node,application,load,[App1]),
+    ok = rpc:call(Node,application,set_env,
+                  [App1Name,logger,[{handler,default,logger_std_h,#{}}]]),
+    ok = rpc:call(Node,logger,add_handlers,[App1Name]),
+
+    #{handlers:=[#{id:=default,filters:=DF}]} =
+        rpc:call(Node,logger,get_config,[]),
+
+    true = test_server:stop_node(Node),
+
+    %% Start a silent node, then add its own default handler
+    {ok,#{handlers:=[]},Node} =
+        logger_test_lib:setup(Config,[{error_logger,silent}]),
+
+    {error,{bad_config,{handler,[{some,bad,config}]}}} =
+        rpc:call(Node,logger,add_handlers,[[{some,bad,config}]]),
+    ok = rpc:call(Node,logger,add_handlers,
+                  [[{handler,default,logger_std_h,#{}}]]),
+
+    #{handlers:=[#{id:=default,filters:=DF}]} =
+        rpc:call(Node,logger,get_config,[]),
+
+    ok.
+
+%% This test case is mainly for code coverage. Note that
+%% logger_env_var_SUITE tests a lot of the same, and checks the
+%% functionality more thoroughly, but since it all happens at node
+%% start, it is not possible to see code coverage in that test.
+kernel_config(Config) -> + %% Start a node with simple handler only, then simulate kernel + %% start by calling internally exported + %% internal_init_logger(). This is to test all variants of kernel + %% config, including bad config, and see the code coverage. + {ok,#{handlers:=[#{id:=simple,filters:=DF}]}=LC,Node} = + logger_test_lib:setup(Config,[{error_logger,false}]), + + %% Same once more, to get coverage + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + LC = rpc:call(Node,logger,get_config,[]), + + %% This shall mean the same as above, but using 'logger' parameter + %% instead of 'error_logger' + ok = rpc:call(Node,application,unset_env,[kernel,error_logger]), + ok = rpc:call(Node,application,set_env, + [kernel,logger,[{handler,default,undefined}]]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + LC = rpc:call(Node,logger,get_config,[]), + + %% Silent + ok = rpc:call(Node,application,unset_env,[kernel,logger]), + ok = rpc:call(Node,application,set_env,[kernel,error_logger,silent]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=log,filters:=[]}, + handlers:=[], + module_levels:=[]} = rpc:call(Node,logger,get_config,[]), + + %% Default + ok = rpc:call(Node,application,unset_env,[kernel,error_logger]), + ok = rpc:call(Node,application,unset_env,[kernel,logger]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=log,filters:=[]}, + handlers:=[#{id:=default,filters:=DF,config:=#{type:=standard_io}}], + module_levels:=[]} = rpc:call(Node,logger,get_config,[]), + + %% error_logger=tty (same as default) + ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again + ok = rpc:call(Node,application,set_env,[kernel,error_logger,tty]), + ok = rpc:call(Node,application,unset_env,[kernel,logger]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=log,filters:=[]}, + handlers:=[#{id:=default,filters:=DF,config:=#{type:=standard_io}}], + module_levels:=[]} = rpc:call(Node,logger,get_config,[]), + + %% error_logger={file,File} + ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again + F = filename:join(?config(priv_dir,Config), + atom_to_list(?FUNCTION_NAME)++".log"), + ok = rpc:call(Node,application,set_env,[kernel,error_logger,{file,F}]), + ok = rpc:call(Node,application,unset_env,[kernel,logger]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=log,filters:=[]}, + handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F}}}], + module_levels:=[]} = rpc:call(Node,logger,get_config,[]), + + %% Same, but using 'logger' parameter instead of 'error_logger' + ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again + ok = rpc:call(Node,application,unset_env,[kernel,error_logger]), + ok = rpc:call(Node,application,set_env,[kernel,logger, + [{handler,default,logger_std_h, + #{config=>#{type=>{file,F}}}}]]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=log,filters:=[]}, + handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F}}}], + module_levels:=[]} = 
rpc:call(Node,logger,get_config,[]), + + %% Same, but with type={file,File,Modes} + ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again + ok = rpc:call(Node,application,unset_env,[kernel,error_logger]), + M = [raw,write,delayed_write], + ok = rpc:call(Node,application,set_env,[kernel,logger, + [{handler,default,logger_std_h, + #{config=>#{type=>{file,F,M}}}}]]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=log,filters:=[]}, + handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F,M}}}], + module_levels:=[]} = rpc:call(Node,logger,get_config,[]), + + %% Same, but with disk_log handler + ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again + ok = rpc:call(Node,application,unset_env,[kernel,error_logger]), + M = [raw,write,delayed_write], + ok = rpc:call(Node,application,set_env,[kernel,logger, + [{handler,default,logger_disk_log_h, + #{config=>#{file=>F}}}]]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=log,filters:=[]}, + handlers:=[#{id:=default,filters:=DF,config:=#{file:=F}}], + module_levels:=[]} = rpc:call(Node,logger,get_config,[]), + + %% Set primary filters and module level. No default handler. + ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again + ok = rpc:call(Node,application,unset_env,[kernel,error_logger]), + ok = rpc:call(Node,application,set_env, + [kernel,logger,[{handler,default,undefined}, + {filters,stop,[{f1,{fun(_,_) -> log end,ok}}]}, + {module_level,debug,[?MODULE]}]]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=stop,filters:=[_]}, + handlers:=[], + module_levels:=[{?MODULE,debug}]} = rpc:call(Node,logger,get_config,[]), + + %% Bad config + ok = rpc:call(Node,application,unset_env,[kernel,logger]), + + ok = rpc:call(Node,application,set_env,[kernel,error_logger,bad]), + {error,{bad_config,{kernel,{error_logger,bad}}}} = + rpc:call(Node,logger,internal_init_logger,[]), + + ok = rpc:call(Node,application,unset_env,[kernel,error_logger]), + ok = rpc:call(Node,application,set_env,[kernel,logger_level,bad]), + {error,{bad_config,{kernel,{logger_level,bad}}}} = + rpc:call(Node,logger,internal_init_logger,[]), + + ok = rpc:call(Node,application,unset_env,[kernel,logger_level]), + ok = rpc:call(Node,application,set_env, + [kernel,logger,[{filters,stop,[bad]}]]), + {error,{bad_config,{kernel,{invalid_filters,[bad]}}}} = + rpc:call(Node,logger,internal_init_logger,[]), + + ok = rpc:call(Node,application,set_env, + [kernel,logger,[{filters,stop,[bad]}]]), + {error,{bad_config,{kernel,{invalid_filters,[bad]}}}} = + rpc:call(Node,logger,internal_init_logger,[]), + + ok = rpc:call(Node,application,set_env, + [kernel,logger,[{filters,stop,[{f1,bad}]}]]), + {error,{bad_config,{kernel,{invalid_filter,{f1,bad}}}}} = + rpc:call(Node,logger,internal_init_logger,[]), + + ok = rpc:call(Node,application,set_env, + [kernel,logger,MF=[{filters,stop,[]},{filters,log,[]}]]), + {error,{bad_config,{kernel,{multiple_filters,MF}}}} = + rpc:call(Node,logger,internal_init_logger,[]), + + ok = rpc:call(Node,application,set_env, + [kernel,logger,[{module_level,bad,[?MODULE]}]]), + {error,{bad_config,{kernel,{invalid_level,bad}}}} = + rpc:call(Node,logger,internal_init_logger,[]), + + ok. 
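+
+%% For reference (the names below are hypothetical): the kernel 'logger'
+%% parameter forms exercised in kernel_config/1 above correspond to
+%% sys.config entries on the form
+%%
+%%   [{kernel,
+%%     [{logger_level, info},
+%%      {logger,
+%%       [{handler, default, logger_std_h, #{config => #{type => {file,"my.log"}}}},
+%%        {module_level, debug, [my_module]}]}]}].
+%%
+%% The {filters,FilterDefault,Filters} entries used above contain funs and
+%% are therefore set via application:set_env/3 rather than a config file.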
+ +%%%----------------------------------------------------------------- +%%% Internal +check_logged(Level,Format,Args,Meta) -> + do_check_logged(Level,{Format,Args},Meta). + +check_logged(Level,Msg,Meta) when ?IS_REPORT(Msg) -> + do_check_logged(Level,{report,Msg},Meta); +check_logged(Level,Msg,Meta) when ?IS_STRING(Msg) -> + do_check_logged(Level,{string,Msg},Meta). + +do_check_logged(Level,Msg0,Meta0) -> + receive + {#{level:=Level,msg:=Msg,meta:=Meta},_} -> + check_msg(Msg0,Msg), + check_maps(Meta0,Meta,meta) + after 500 -> + ct:fail({timeout,no_log,process_info(self(),messages)}) + end. + +check_no_log() -> + receive + X -> ct:fail({got_unexpected_log,X}) + after 500 -> + ok + end. + +check_msg(Msg,Msg) -> + ok; +check_msg({report,Expected},{report,Got}) when is_map(Expected), is_map(Got) -> + check_maps(Expected,Got,msg); +check_msg(Expected,Got) -> + ct:fail({unexpected,msg,Expected,Got}). + +check_maps(Expected,Got,What) -> + case maps:merge(Got,Expected) of + Got -> + ok; + _ -> + ct:fail({unexpected,What,Expected,Got}) + end. + +%% Handler +adding_handler(#{add_call:=Fun}) -> + Fun(); +adding_handler(Config) -> + maybe_send(add), + {ok,Config}. + +removing_handler(#{rem_call:=Fun}) -> + Fun(); +removing_handler(_Config) -> + maybe_send(remove), + ok. +changing_config(_Old,#{conf_call:=Fun}) -> + Fun(); +changing_config(_Old,Config) -> + maybe_send(changing_config), + {ok,Config}. + +maybe_send(Msg) -> + case whereis(callback_receiver) of + undefined -> ok; + Pid -> Pid ! Msg + end. + +log(_Log,#{log_call:=Fun}) -> + Fun(); +log(Log,Config) -> + TcProc = maps:get(tc_proc,Config,self()), + TcProc ! {Log,Config}, + ok. + +test_api(Level) -> + logger:Level(#{Level=>rep}), + ok = check_logged(Level,#{Level=>rep},#{}), + logger:Level(#{Level=>rep},#{my=>meta}), + ok = check_logged(Level,#{Level=>rep},#{my=>meta}), + logger:Level("~w: ~w",[Level,fa]), + ok = check_logged(Level,"~w: ~w",[Level,fa],#{}), + logger:Level("~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + logger:Level(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,x, + #{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}), + logger:Level(fun(x) -> #{Level=>fun_to_r,meta=>true} end,x, + #{my=>meta}), + ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}), + logger:Level(fun(x) -> <<"fun_to_s">> end,x,#{}), + ok = check_logged(Level,<<"fun_to_s">>,#{}), + logger:Level(F1=fun(x) -> {fun_to_bad} end,x,#{}), + ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{F1,x},{fun_to_bad}],#{}), + logger:Level(F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}), + ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{F2,x},{error,fun_that_crashes}],#{}), + ok. 
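+
+%% Note on the fun-valued messages tested in test_api/1 above and in the
+%% functions below: logger:Level(Fun,FunArg,Meta) is the lazy form of the
+%% API. The fun is applied to FunArg only when the event is allowed by the
+%% level checks, and it must return a report (map or key-value list), a
+%% {Format,Args} tuple or a string/binary, e.g.
+%%
+%%   logger:notice(fun(N) -> {"expensive: ~p",[lists:seq(1,N)]} end, 5, #{}).
+%%
+%% Anything else yields the LAZY_FUN ERROR/CRASH events asserted above.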
+ +test_log_function(Level) -> + logger:log(Level,#{Level=>rep}), + ok = check_logged(Level,#{Level=>rep},#{}), + logger:log(Level,#{Level=>rep},#{my=>meta}), + ok = check_logged(Level,#{Level=>rep},#{my=>meta}), + logger:log(Level,"~w: ~w",[Level,fa]), + ok = check_logged(Level,"~w: ~w",[Level,fa],#{}), + logger:log(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + logger:log(Level,fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end, + x, #{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}), + logger:log(Level,fun(x) -> #{Level=>fun_to_r,meta=>true} end, + x, #{my=>meta}), + ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}), + logger:log(Level,fun(x) -> <<"fun_to_s">> end,x,#{}), + ok = check_logged(Level,<<"fun_to_s">>,#{}), + logger:log(Level,F1=fun(x) -> {fun_to_bad} end,x,#{}), + ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{F1,x},{fun_to_bad}],#{}), + logger:log(Level,F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}), + ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{F2,x},{error,fun_that_crashes}],#{}), + ok. + +test_macros(emergency=Level) -> + ?LOG_EMERGENCY(#{Level=>rep}), + ok = check_logged(Level,#{Level=>rep},?MY_LOC(1)), + ?LOG_EMERGENCY(#{Level=>rep},#{my=>meta}), + ok = check_logged(Level,#{Level=>rep},(?MY_LOC(1))#{my=>meta}), + ?LOG_EMERGENCY("~w: ~w",[Level,fa]), + ok = check_logged(Level,"~w: ~w",[Level,fa],?MY_LOC(1)), + ?LOG_EMERGENCY("~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],(?MY_LOC(1))#{my=>meta}), + ?LOG_EMERGENCY(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end, + x, #{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta], + (?MY_LOC(3))#{my=>meta}), + ?LOG_EMERGENCY(fun(x) -> #{Level=>fun_to_r,meta=>true} end, x, #{my=>meta}), + ok = check_logged(Level,#{Level=>fun_to_r,meta=>true}, + (?MY_LOC(2))#{my=>meta}), + ?LOG_EMERGENCY(fun(x) -> <<"fun_to_s">> end,x,#{}), + ok = check_logged(Level,<<"fun_to_s">>,?MY_LOC(1)), + F1=fun(x) -> {fun_to_bad} end, + ?LOG_EMERGENCY(F1,x,#{}), + ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{F1,x},{fun_to_bad}],#{}), + F2=fun(x) -> erlang:error(fun_that_crashes) end, + ?LOG_EMERGENCY(F2,x,#{}), + ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{F2,x},{error,fun_that_crashes}],#{}), + ok. 
+ +test_log_macro(Level) -> + ?LOG(Level,#{Level=>rep}), + ok = check_logged(Level,#{Level=>rep},?MY_LOC(1)), + ?LOG(Level,#{Level=>rep},#{my=>meta}), + ok = check_logged(Level,#{Level=>rep},(?MY_LOC(1))#{my=>meta}), + ?LOG(Level,"~w: ~w",[Level,fa]), + ok = check_logged(Level,"~w: ~w",[Level,fa],?MY_LOC(1)), + ?LOG(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],(?MY_LOC(1))#{my=>meta}), + ?LOG(Level,fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end, + x, #{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta], + (?MY_LOC(3))#{my=>meta}), + ?LOG(Level,fun(x) -> #{Level=>fun_to_r,meta=>true} end, x, #{my=>meta}), + ok = check_logged(Level,#{Level=>fun_to_r,meta=>true}, + (?MY_LOC(2))#{my=>meta}), + ?LOG(Level,fun(x) -> <<"fun_to_s">> end,x,#{}), + ok = check_logged(Level,<<"fun_to_s">>,?MY_LOC(1)), + F1=fun(x) -> {fun_to_bad} end, + ?LOG(Level,F1,x,#{}), + ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{F1,x},{fun_to_bad}],#{}), + F2=fun(x) -> erlang:error(fun_that_crashes) end, + ?LOG(Level,F2,x,#{}), + ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{F2,x},{error,fun_that_crashes}],#{}), + ok. + +%%%----------------------------------------------------------------- +%%% Called by macro ?TRY(X) +my_try(Fun) -> + try Fun() catch C:R -> {C,R} end. + +check_config(crash) -> + erlang:error({badmatch,3}); +check_config(_) -> + ok. diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl new file mode 100644 index 0000000000..905c2c52c5 --- /dev/null +++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl @@ -0,0 +1,1679 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_disk_log_h_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). +-include_lib("kernel/src/logger_h_common.hrl"). +-include_lib("stdlib/include/ms_transform.hrl"). +-include_lib("kernel/include/file.hrl"). + +-define(check_no_log, [] = test_server:messages_get()). + +-define(check(Expected), + receive {log,Expected} -> + [] = test_server:messages_get() + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {got,test_server:messages_get()}}) + end). + +-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(bin(Msg), list_to_binary(Msg++"\n")). +-define(log_no(File,N), lists:concat([File,".",N])). +-define(domain,#{domain=>[?MODULE]}). + +suite() -> + [{timetrap,{seconds,30}}, + {ct_hooks,[logger_test_lib]}]. + +init_per_suite(Config) -> + timer:start(), % to avoid progress report + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. 
+ +init_per_testcase(TestHooksCase, Config) when + TestHooksCase == write_failure; + TestHooksCase == sync_failure -> + case (fun() -> ?TEST_HOOKS_TAB == undefined end)() of + true -> + {skip,"Define the TEST_HOOKS macro to run this test"}; + false -> + ct:print("********** ~w **********", [TestHooksCase]), + Config + end; +init_per_testcase(TestCase, Config) -> + ct:print("********** ~w **********", [TestCase]), + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [start_stop_handler, + create_log, + open_existing_log, + disk_log_opts, + default_formatter, + logging, + filter_config, + errors, + formatter_fail, + config_fail, + bad_input, + info_and_reset, + reconfig, + sync, + disk_log_full, + disk_log_wrap, + disk_log_events, + write_failure, + sync_failure, + op_switch_to_sync, + op_switch_to_drop, + op_switch_to_flush, + limit_burst_disabled, + limit_burst_enabled_one, + limit_burst_enabled_period, + kill_disabled, + qlen_kill_new, + %% qlen_kill_std, + mem_kill_new, + %% mem_kill_std, + restart_after, + handler_requests_under_load + ]. + +start_stop_handler(_Config) -> + ok = logger:add_handler(?MODULE, logger_disk_log_h, #{}), + {error,{already_exist,?MODULE}} = + logger:add_handler(?MODULE, logger_disk_log_h, #{}), + true = is_pid(whereis(h_proc_name())), + ok = logger:remove_handler(?MODULE), + timer:sleep(500), + undefined = whereis(h_proc_name()). +start_stop_handler(cleanup, _Config) -> + logger:remove_handler(?MODULE). + +create_log(Config) -> + PrivDir = ?config(priv_dir,Config), + %% test new handler + Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_A"])), + LogFile1 = filename:join(PrivDir, Name1), + ok = start_and_add(Name1, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile1}), + logger:notice("hello", ?domain), + logger_disk_log_h:filesync(Name1), + ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]), + try_read_file(?log_no(LogFile1,1), {ok,<<"hello\n">>}, 5000), + + %% test second handler + Name2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_B"])), + DLName = lists:concat([?FUNCTION_NAME,"_B_log"]), + LogFile2 = filename:join(PrivDir, DLName), + ok = start_and_add(Name2, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile2}), + logger:notice("dummy", ?domain), + logger_disk_log_h:filesync(Name2), + ct:pal("Checking contents of ~p", [?log_no(LogFile2,1)]), + try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000), + + remove_and_stop(Name1), + remove_and_stop(Name2), + try_read_file(?log_no(LogFile1,1), {ok,<<"hello\ndummy\n">>}, 1), + try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000), + ok. 
+ +open_existing_log(Config) -> + PrivDir = ?config(priv_dir,Config), + %% test new handler + HName = ?FUNCTION_NAME, + DLName = lists:concat([?FUNCTION_NAME,"_log"]), + LogFile1 = filename:join(PrivDir, DLName), + ok = start_and_add(HName, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile1}), + logger:notice("one", ?domain), + logger_disk_log_h:filesync(HName), + ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\n">>}, 5000), + logger:notice("two", ?domain), + ok = remove_and_stop(HName), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\n">>}, 5000), + + logger:notice("two and a half", ?domain), + + ok = start_and_add(HName, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile1}), + logger:notice("three", ?domain), + logger_disk_log_h:filesync(HName), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000), + remove_and_stop(HName), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000). + +disk_log_opts(Config) -> + Get = fun(Key, PL) -> proplists:get_value(Key, PL) end, + PrivDir = ?config(priv_dir,Config), + WName = list_to_atom(lists:concat([?FUNCTION_NAME,"_W"])), + WFile = lists:concat([?FUNCTION_NAME,"_W_log"]), + Size = length("12345"), + ConfigW = #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter => {?MODULE,no_nl}}, + WFileFull = filename:join(PrivDir, WFile), + DLOptsW = #{file => WFileFull, + type => wrap, + max_no_bytes => Size, + max_no_files => 2}, + ok = start_and_add(WName, ConfigW, DLOptsW), + WInfo1 = disk_log:info(WName), + ct:log("Fullname = ~s", [WFileFull]), + {WFileFull,wrap,{Size,2},1} = {Get(file,WInfo1),Get(type,WInfo1), + Get(size,WInfo1),Get(current_file,WInfo1)}, + logger:notice("123", ?domain), + logger_disk_log_h:filesync(WName), + timer:sleep(500), + 1 = Get(current_file, disk_log:info(WName)), + + logger:notice("45", ?domain), + logger_disk_log_h:filesync(WName), + timer:sleep(500), + 1 = Get(current_file, disk_log:info(WName)), + + logger:notice("6", ?domain), + logger_disk_log_h:filesync(WName), + timer:sleep(500), + 2 = Get(current_file, disk_log:info(WName)), + + logger:notice("7890", ?domain), + logger_disk_log_h:filesync(WName), + timer:sleep(500), + 2 = Get(current_file, disk_log:info(WName)), + + HName1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H1"])), + HFile1 = lists:concat([?FUNCTION_NAME,"_H1_log"]), + ConfigH = #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter => {?MODULE,no_nl}}, + HFile1Full = filename:join(PrivDir, HFile1), + DLOptsH1 = #{file => HFile1Full, + type => halt}, + ok = start_and_add(HName1, ConfigH, DLOptsH1), + HInfo1 = disk_log:info(HName1), + ct:log("Fullname = ~s", [HFile1Full]), + {HFile1Full,halt,infinity} = {Get(file,HInfo1),Get(type,HInfo1), + Get(size,HInfo1)}, + logger:notice("12345", ?domain), + logger_disk_log_h:filesync(HName1), + timer:sleep(500), + 1 = Get(no_written_items, disk_log:info(HName1)), + + HName2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H2"])), + HFile2 = lists:concat([?FUNCTION_NAME,"_H2_log"]), + HFile2Full = filename:join(PrivDir, HFile2), + DLOptsH2 = DLOptsH1#{file => HFile2Full, + max_no_bytes => 1000}, + ok = start_and_add(HName2, ConfigH, DLOptsH2), + HInfo3 = disk_log:info(HName2), + ct:log("Fullname = ~s", [HFile2Full]), + {HFile2Full,halt,1000} = {Get(file,HInfo3),Get(type,HInfo3), 
+                                  Get(size,HInfo3)},
+
+    remove_and_stop(WName),
+    remove_and_stop(HName1),
+    remove_and_stop(HName2),
+    ok.
+
+default_formatter(Config) ->
+    PrivDir = ?config(priv_dir,Config),
+    LogFile = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)),
+    HandlerConfig = #{config => #{file=>LogFile},
+                      filter_default=>log},
+    ct:pal("Log: ~p", [LogFile]),
+    ok = logger:add_handler(?MODULE, logger_disk_log_h, HandlerConfig),
+    ok = logger:set_handler_config(?MODULE,formatter,
+                                   {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}),
+    LogName = lists:concat([LogFile, ".1"]),
+    logger:notice("dummy"),
+    wait_until_written(LogName),
+    {ok,Bin} = file:read_file(LogName),
+    match = re:run(Bin, "=NOTICE REPORT====.*\ndummy", [{capture,none}]),
+    ok.
+default_formatter(cleanup, _Config) ->
+    logger:remove_handler(?MODULE).
+
+logging(Config) ->
+    PrivDir = ?config(priv_dir,Config),
+    %% test new handler
+    Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+    LogFile = filename:join(PrivDir, Name),
+    ok = start_and_add(Name, #{filter_default=>log,
+                               formatter=>{?MODULE,self()}},
+                       #{file => LogFile}),
+    MsgFormatter = fun(Term) -> {io_lib:format("Term:~p",[Term]),[]} end,
+    logger:notice([{x,y}], #{report_cb => MsgFormatter}),
+    logger:notice([{x,y}], #{}),
+    ct:pal("Checking contents of ~p", [?log_no(LogFile,1)]),
+    try_read_file(?log_no(LogFile,1), {ok,<<"Term:[{x,y}]\n    x: y\n">>}, 5000).
+
+logging(cleanup, _Config) ->
+    Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+    remove_and_stop(Name).
+
+filter_config(_Config) ->
+    ok = logger:add_handler(?MODULE,logger_disk_log_h,#{}),
+    {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE),
+    HConfig = maps:without([handler_pid,mode_tab],HConfig),
+
+    FakeFullHConfig = HConfig#{handler_pid=>self(),mode_tab=>erlang:make_ref()},
+    #{config:=HConfig} =
+        logger_disk_log_h:filter_config(Config#{config=>FakeFullHConfig}),
+    ok.
+
+filter_config(cleanup,_Config) ->
+    logger:remove_handler(?MODULE),
+    ok.
+
+errors(Config) ->
+    PrivDir = ?config(priv_dir,Config),
+    Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+    LogFile1 = filename:join(PrivDir,Name1),
+    HandlerConfig = #{config=>#{file=>LogFile1},
+                      filter_default=>log,
+                      formatter=>{?MODULE,self()}},
+    ok = logger:add_handler(Name1, logger_disk_log_h, HandlerConfig),
+    {error,{already_exist,Name1}} =
+        logger:add_handler(Name1, logger_disk_log_h, #{}),
+
+    %%! TODO:
+    %%! Check how bad log_opts are handled!
+
+    {error,{illegal_config_change,
+            #{config:=#{type:=wrap}},
+            #{config:=#{type:=halt}}}} =
+        logger:update_handler_config(Name1,
+                                     config,
+                                     #{type=>halt,
+                                       file=>LogFile1}),
+
+    {error,{illegal_config_change,
+            #{config:=#{file:=LogFile1}},
+            #{config:=#{file:="newfilename"}}}} =
+        logger:update_handler_config(Name1,
+                                     config,
+                                     #{file=>"newfilename"}),
+
+    %% Read-only fields may (accidentally) be included in the change,
+    %% but this has no effect
+    {ok,C} = logger:get_handler_config(Name1),
+    ok = logger:set_handler_config(Name1,config,
+                                   #{handler_pid=>self(),
+                                     mode_tab=>erlang:make_ref()}),
+    {ok,C} = logger:get_handler_config(Name1),
+
+
+    ok = logger:remove_handler(Name1),
+    {error,{not_found,Name1}} = logger:remove_handler(Name1),
+    ok.
+
+errors(cleanup, _Config) ->
+    Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+    _ = logger:remove_handler(Name1).
+ +formatter_fail(Config) -> + PrivDir = ?config(priv_dir,Config), + Name = ?FUNCTION_NAME, + LogFile = filename:join(PrivDir,Name), + ct:pal("Log = ~p", [LogFile]), + HandlerConfig = #{config => #{file=>LogFile}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}, + %% no formatter! + logger:add_handler(Name, logger_disk_log_h, HandlerConfig), + Pid = whereis(h_proc_name(Name)), + true = is_pid(Pid), + H = logger:get_handler_ids(), + true = lists:member(Name,H), + + %% Formatter is added automatically + {ok,#{formatter:={logger_formatter,_}}} = logger:get_handler_config(Name), + logger:notice(M1=?msg,?domain), + Got1 = try_match_file(?log_no(LogFile,1),"[0-9\\+\\-T:\\.]* notice: "++M1,5000), + + ok = logger:set_handler_config(Name,formatter,{nonexistingmodule,#{}}), + logger:notice(M2=?msg,?domain), + Got2 = try_match_file(?log_no(LogFile,1), + escape(Got1)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M2, + 5000), + + ok = logger:set_handler_config(Name,formatter,{?MODULE,crash}), + logger:notice(M3=?msg,?domain), + Got3 = try_match_file(?log_no(LogFile,1), + escape(Got2)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M3, + 5000), + + ok = logger:set_handler_config(Name,formatter,{?MODULE,bad_return}), + logger:notice(?msg,?domain), + try_match_file(?log_no(LogFile,1), + escape(Got3)++"FORMATTER ERROR: bad return value", + 5000), + + %% Check that handler is still alive and was never dead + Pid = whereis(h_proc_name(Name)), + H = logger:get_handler_ids(), + ok. + +formatter_fail(cleanup,_Config) -> + _ = logger:remove_handler(?FUNCTION_NAME), + ok. + +config_fail(_Config) -> + {error,{handler_not_added,{invalid_config,logger_disk_log_h,{bad,bad}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{config => #{bad => bad}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + + {error,{handler_not_added,{invalid_levels,{_,1,_}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{config => #{drop_mode_qlen=>1}}), + {error,{handler_not_added,{invalid_levels,{43,42,_}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{config => #{sync_mode_qlen=>43, + drop_mode_qlen=>42}}), + {error,{handler_not_added,{invalid_levels,{_,43,42}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{config => #{drop_mode_qlen=>43, + flush_qlen=>42}}), + + ok = logger:add_handler(?MODULE,logger_disk_log_h, + #{filter_default=>log, + formatter=>{?MODULE,self()}}), + %% can't change the disk log options for a log already in use + {error,{illegal_config_change,_,_}} = + logger:update_handler_config(?MODULE,config, + #{max_no_files=>2}), + %% can't change name of an existing handler + {error,{illegal_config_change,_,_}} = + logger:update_handler_config(?MODULE,id,bad), + %% incorrect values of OP params + {ok,#{config := HConfig}} = logger:get_handler_config(?MODULE), + {error,{invalid_levels,_}} = + logger:update_handler_config(?MODULE,config, + HConfig#{sync_mode_qlen=>100, + flush_qlen=>99}), + %% invalid name of config parameter + {error,{invalid_config,logger_disk_log_h,{filesync_rep_int,2000}}} = + logger:update_handler_config(?MODULE, config, + HConfig#{filesync_rep_int => 2000}), + ok. +config_fail(cleanup,_Config) -> + logger:remove_handler(?MODULE). + +bad_input(_Config) -> + {error,{badarg,{filesync,["BadType"]}}} = + logger_disk_log_h:filesync("BadType"), + {error,{badarg,{info,["BadType"]}}} = logger_disk_log_h:info("BadType"), + {error,{badarg,{reset,["BadType"]}}} = logger_disk_log_h:reset("BadType"). 
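+
+%% Note on the {invalid_levels,{SyncQLen,DropQLen,FlushQLen}} errors asserted
+%% in config_fail/1 above: the overload-protection thresholds must be
+%% ordered, roughly sync_mode_qlen =< drop_mode_qlen =< flush_qlen, so for
+%% example drop_mode_qlen=>1 is rejected because it falls below the default
+%% sync_mode_qlen.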
+ +info_and_reset(_Config) -> + ok = logger:add_handler(?MODULE,logger_disk_log_h, + #{filter_default=>log, + formatter=>{?MODULE,self()}}), + #{id := ?MODULE} = logger_disk_log_h:info(?MODULE), + ok = logger_disk_log_h:reset(?MODULE). +info_and_reset(cleanup,_Config) -> + logger:remove_handler(?MODULE). + +reconfig(Config) -> + Dir = ?config(priv_dir,Config), + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + #{id := ?MODULE, + sync_mode_qlen := ?SYNC_MODE_QLEN, + drop_mode_qlen := ?DROP_MODE_QLEN, + flush_qlen := ?FLUSH_QLEN, + burst_limit_enable := ?BURST_LIMIT_ENABLE, + burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT, + burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME, + overload_kill_enable := ?OVERLOAD_KILL_ENABLE, + overload_kill_qlen := ?OVERLOAD_KILL_QLEN, + overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE, + overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER, + filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL, + log_opts := #{type := ?DISK_LOG_TYPE, + max_no_files := ?DISK_LOG_MAX_NO_FILES, + max_no_bytes := ?DISK_LOG_MAX_NO_BYTES, + file := DiskLogFile}} = + logger_disk_log_h:info(?MODULE), + {ok,#{config := + #{sync_mode_qlen := ?SYNC_MODE_QLEN, + drop_mode_qlen := ?DROP_MODE_QLEN, + flush_qlen := ?FLUSH_QLEN, + burst_limit_enable := ?BURST_LIMIT_ENABLE, + burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT, + burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME, + overload_kill_enable := ?OVERLOAD_KILL_ENABLE, + overload_kill_qlen := ?OVERLOAD_KILL_QLEN, + overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE, + overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER, + filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL, + file := DiskLogFile, + max_no_files := ?DISK_LOG_MAX_NO_FILES, + max_no_bytes := ?DISK_LOG_MAX_NO_BYTES, + type := wrap} = HConfig0}} = + logger:get_handler_config(?MODULE), + + HConfig1 = HConfig0#{sync_mode_qlen => 1, + drop_mode_qlen => 2, + flush_qlen => 3, + burst_limit_enable => false, + burst_limit_max_count => 10, + burst_limit_window_time => 10, + overload_kill_enable => true, + overload_kill_qlen => 100000, + overload_kill_mem_size => 10000000, + overload_kill_restart_after => infinity, + filesync_repeat_interval => no_repeat}, + ok = logger:set_handler_config(?MODULE, config, HConfig1), + #{id := ?MODULE, + sync_mode_qlen := 1, + drop_mode_qlen := 2, + flush_qlen := 3, + burst_limit_enable := false, + burst_limit_max_count := 10, + burst_limit_window_time := 10, + overload_kill_enable := true, + overload_kill_qlen := 100000, + overload_kill_mem_size := 10000000, + overload_kill_restart_after := infinity, + filesync_repeat_interval := no_repeat} = + logger_disk_log_h:info(?MODULE), + {ok,#{config:=HConfig1}} = logger:get_handler_config(?MODULE), + + ok = logger:update_handler_config(?MODULE, config, + #{flush_qlen => ?FLUSH_QLEN}), + {ok,#{config:=C1}} = logger:get_handler_config(?MODULE), + ct:log("C1: ~p",[C1]), + C1 = HConfig1#{flush_qlen => ?FLUSH_QLEN}, + + ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}), + {ok,#{config:=C2}} = logger:get_handler_config(?MODULE), + ct:log("C2: ~p",[C2]), + C2 = HConfig0#{sync_mode_qlen => 1}, + + ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}), + {ok,#{config:=C3}} = logger:get_handler_config(?MODULE), + ct:log("C3: ~p",[C3]), + C3 = HConfig0#{drop_mode_qlen => 100}, + + ok = logger:update_handler_config(?MODULE, config, 
#{sync_mode_qlen => 1}),
+    {ok,#{config:=C4}} = logger:get_handler_config(?MODULE),
+    ct:log("C4: ~p",[C4]),
+    C4 = HConfig0#{sync_mode_qlen => 1,
+                   drop_mode_qlen => 100},
+
+    ok = logger:remove_handler(?MODULE),
+
+    File = filename:join(Dir, "logfile"),
+    ok = logger:add_handler(?MODULE,
+                            logger_disk_log_h,
+                            #{filter_default=>log,
+                              filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+                              formatter=>{?MODULE,self()},
+                              config=>
+                                  #{type => halt,
+                                    max_no_files => 1,
+                                    max_no_bytes => 1024,
+                                    file => File}}),
+    #{log_opts := #{type := halt,
+                    max_no_files := 1,
+                    max_no_bytes := 1024,
+                    file := File}} =
+        logger_disk_log_h:info(?MODULE),
+    {ok,#{config :=
+              #{type := halt,
+                max_no_files := 1,
+                max_no_bytes := 1024,
+                file := File}=HaltHConfig} = Config2} =
+        logger:get_handler_config(?MODULE),
+
+    ok = logger:update_handler_config(?MODULE, level, notice),
+    {ok,C5} = logger:get_handler_config(?MODULE),
+    ct:log("C5: ~p",[C5]),
+    C5 = Config2#{level => notice},
+
+    ok = logger:set_handler_config(?MODULE, level, info),
+    {ok,C6} = logger:get_handler_config(?MODULE),
+    ct:log("C6: ~p",[C6]),
+    C6 = Config2#{level => info},
+
+    %% You are not allowed to actively set the write-once fields
+    %% (type, max_no_files, max_no_bytes, file) at runtime.
+    {error, {illegal_config_change,_,_}} =
+        logger:set_handler_config(?MODULE,config,#{type=>wrap}),
+    {error, {illegal_config_change,_,_}} =
+        logger:set_handler_config(?MODULE,config,#{max_no_files=>2}),
+    {error, {illegal_config_change,_,_}} =
+        logger:set_handler_config(?MODULE,config,#{max_no_bytes=>2048}),
+    {error, {illegal_config_change,_,_}} =
+        logger:set_handler_config(?MODULE,config,#{file=>"otherfile.log"}),
+    {ok,C7} = logger:get_handler_config(?MODULE),
+    ct:log("C7: ~p",[C7]),
+    C7 = C6,
+
+    %% ... but if you don't specify the write-once fields, then
+    %% set_handler_config shall NOT reset them to their default values
+    ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+    {ok,#{config:=C8}} = logger:get_handler_config(?MODULE),
+    ct:log("C8: ~p",[C8]),
+    C8 = HaltHConfig#{sync_mode_qlen=>1},
+    ok.
+
+reconfig(cleanup, _Config) ->
+    logger:remove_handler(?MODULE).
+
+sync(Config) ->
+    Dir = ?config(priv_dir,Config),
+    File = filename:join(Dir, ?FUNCTION_NAME),
+    Log = lists:concat([File,".1"]),
+    ok = logger:add_handler(?MODULE,
+                            logger_disk_log_h,
+                            #{config => #{file => File},
+                              filter_default=>log,
+                              filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+                              formatter=>{?MODULE,nl}}),
+
+    start_tracer([{disk_log,blog,2},
+                  {logger_disk_log_h,disk_log_sync,2}],
+                 [{disk_log,blog,<<"first\n">>},
+                  {logger_disk_log_h,disk_log_sync}]),
+
+    logger:notice("first", ?domain),
+    %% wait for automatic disk_log_sync
+    check_tracer(?FILESYNC_REPEAT_INTERVAL*2),
+
+    %% check that if there's no repeated disk_log_sync active,
+    %% a disk_log_sync is still performed when the handler goes idle
+    {ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
+    HConfig1 = HConfig#{filesync_repeat_interval => no_repeat},
+    ok = logger:update_handler_config(?MODULE, config, HConfig1),
+
+    no_repeat = maps:get(filesync_repeat_interval,
+                         logger_disk_log_h:info(?MODULE)),
+    %% The following timer is to make sure the time from the last log
+    %% ("first") to the next ("second") is long enough, so that a flush is
+    %% triggered by the idle timeout between "second" and "third".
+ timer:sleep(?IDLE_DETECT_TIME_MSEC*2), + + start_tracer([{disk_log,blog,2}, + {logger_disk_log_h,disk_log_sync,2}], + [{disk_log,blog,<<"second\n">>}, + {logger_disk_log_h,disk_log_sync}, + {disk_log,blog,<<"third\n">>}, + {logger_disk_log_h,disk_log_sync}]), + + logger:notice("second", ?domain), + timer:sleep(?IDLE_DETECT_TIME_MSEC*2), + logger:notice("third", ?domain), + %% wait for automatic disk_log_sync + check_tracer(?IDLE_DETECT_TIME_MSEC*2), + + try_read_file(Log, {ok,<<"first\nsecond\nthird\n">>}, 1000), + + %% switch repeated disk_log_sync on and verify that the looping works + SyncInt = 1000, + WaitT = 4500, + OneSync = {logger_disk_log_h,handle_cast,repeated_disk_log_sync}, + %% receive 1 initial repeated_disk_log_sync, then 1 per sec + start_tracer([{logger_disk_log_h,handle_cast,2}], + [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]), + + HConfig2 = HConfig#{filesync_repeat_interval => SyncInt}, + ok = logger:update_handler_config(?MODULE, config, HConfig2), + + SyncInt = maps:get(filesync_repeat_interval, + logger_disk_log_h:info(?MODULE)), + timer:sleep(WaitT), + HConfig3 = HConfig#{filesync_repeat_interval => no_repeat}, + ok = logger:update_handler_config(?MODULE, config, HConfig3), + check_tracer(100), + ok. +sync(cleanup,_Config) -> + dbg:stop_clear(), + logger:remove_handler(?MODULE). + +disk_log_wrap(Config) -> + Get = fun(Key, PL) -> proplists:get_value(Key, PL) end, + Dir = ?config(priv_dir,Config), + File = filename:join(Dir, ?FUNCTION_NAME), + ct:pal("Log = ~p", [File]), + MaxFiles = 3, + MaxBytes = 5, + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}, + config=> + #{type => wrap, + max_no_files => MaxFiles, + max_no_bytes => MaxBytes, + file => File}}), + Info = disk_log:info(?MODULE), + {File,wrap,{MaxBytes,MaxFiles},1} = + {Get(file,Info),Get(type,Info),Get(size,Info),Get(current_file,Info)}, + Tester = self(), + TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) -> + Pid ! {trace,Mod,Func,Details}, + Pid + end, + {ok,_} = dbg:tracer(process, {TraceFun, Tester}), + {ok,_} = dbg:p(whereis(h_proc_name()), [c]), + {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []), + + Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,MaxBytes)], + ct:pal("String = ~p (~w)", [Text, erts_debug:size(Text)]), + %% fill first file + lists:foreach(fun(N) -> + Log = lists:concat([File,".",N]), + logger:notice(Text, ?domain), + wait_until_written(Log), + ct:pal("N = ~w", + [N = Get(current_file, + disk_log:info(?MODULE))]) + end, lists:seq(1,MaxFiles)), + + %% wait for trace messages + timer:sleep(1000), + dbg:stop_clear(), + Received = lists:flatmap(fun({trace,_M,handle_info, + [{disk_log,_Node,_Name,What},_]}) -> + [{trace,What}]; + ({log,_}) -> + [] + end, test_server:messages_get()), + ct:pal("Trace =~n~p", [Received]), + Received = [{trace,{wrap,0}} || _ <- lists:seq(1,MaxFiles-1)], + ok. + +disk_log_wrap(cleanup,_Config) -> + dbg:stop_clear(), + logger:remove_handler(?MODULE). 
+
+disk_log_full(Config) ->
+    Dir = ?config(priv_dir,Config),
+    File = filename:join(Dir, ?FUNCTION_NAME),
+    ct:pal("Log = ~p", [File]),
+    MaxBytes = 50,
+    ok = logger:add_handler(?MODULE,
+                            logger_disk_log_h,
+                            #{filter_default=>log,
+                              filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+                              formatter=>{?MODULE,self()},
+                              config=>
+                                  #{type => halt,
+                                    max_no_files => 1,
+                                    max_no_bytes => MaxBytes,
+                                    file => File}}),
+
+    Tester = self(),
+    TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+                       Pid ! {trace,Mod,Func,Details},
+                       Pid
+               end,
+    {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+    {ok,_} = dbg:p(whereis(h_proc_name()), [c]),
+    {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+
+    NoOfChars = 5,
+    Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,NoOfChars)],
+    [logger:notice(Text, ?domain) || _ <- lists:seq(1,trunc(MaxBytes/NoOfChars)+1)],
+
+    %% wait for trace messages
+    timer:sleep(2000),
+    dbg:stop_clear(),
+    Received = lists:flatmap(fun({trace,_M,handle_info,
+                                  [{disk_log,_Node,_Name,What},_]}) ->
+                                     [{trace,What}];
+                                ({log,_}) ->
+                                     []
+                             end, test_server:messages_get()),
+    ct:pal("Trace =~n~p", [Received]),
+    [{trace,full},
+     {trace,{error_status,{error,{full,_}}}}] = Received,
+    ok.
+disk_log_full(cleanup, _Config) ->
+    dbg:stop_clear(),
+    logger:remove_handler(?MODULE).
+
+disk_log_events(Config) ->
+    Node = node(),
+    Log = ?MODULE,
+    ok = logger:add_handler(?MODULE,
+                            logger_disk_log_h,
+                            #{filter_default=>log,
+                              filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+                              formatter=>{?MODULE,self()}}),
+
+    %% Events copied from disk_log API
+    Events =
+        [{disk_log, Node, Log, {wrap, 0}},
+         {disk_log, Node, Log, {truncated, 0}},
+         {disk_log, Node, Log, {read_only, 42}},
+         {disk_log, Node, Log, {blocked_log, 42}},
+         {disk_log, Node, Log, {format_external, 42}},
+         {disk_log, Node, Log, full},
+         {disk_log, Node, Log, {error_status, ok}}],
+
+    Tester = self(),
+    TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+                       Pid ! {trace,Mod,Func,Details},
+                       Pid
+               end,
+    {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+    {ok,_} = dbg:p(whereis(h_proc_name()), [c]),
+    {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+
+    [whereis(h_proc_name()) ! E || E <- Events],
+    %% wait for trace messages
+    timer:sleep(2000),
+    dbg:stop_clear(),
+    Received = lists:map(fun({trace,_M,handle_info,
+                              [Got,_]}) -> Got
+                         end, test_server:messages_get()),
+    ct:pal("Trace =~n~p", [Received]),
+    NoOfEvents = length(Events),
+    NoOfEvents = length(Received),
+    %% every event sent above must have reached the handler
+    lists:foreach(fun(Event) ->
+                          true = lists:member(Event, Received)
+                  end, Events),
+    ok.
+disk_log_events(cleanup, _Config) ->
+    dbg:stop_clear(),
+    logger:remove_handler(?MODULE).
+ +write_failure(Config) -> + Dir = ?config(priv_dir, Config), + File = filename:join(Dir, ?FUNCTION_NAME), + Log = lists:concat([File,".1"]), + ct:pal("Log = ~p", [Log]), + + Node = start_h_on_new_node(Config, File), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]), + HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]), + ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts, HState)]), + + ok = log_on_remote_node(Node, "Logged1"), + rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]), + ?check_no_log, + + SyncRepInt = case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of + true -> 5500; + false -> ?FILESYNC_REPEAT_INTERVAL + 500 + end, + + try_read_file(Log, {ok,<<"Logged1\n">>}, SyncRepInt), + + rpc:call(Node, ?MODULE, set_result, [disk_log_blog,{error,no_such_log}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + ?check({error,{?STANDARD_HANDLER,log,LogOpts,{error,no_such_log}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [disk_log_blog, + {error,{full,?STANDARD_HANDLER}}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + ?check({error,{?STANDARD_HANDLER,log,LogOpts, + {error,{full,?STANDARD_HANDLER}}}}), + + rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]), + ok = log_on_remote_node(Node, "Logged2"), + rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]), + ?check_no_log, + try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, SyncRepInt), + ok. +write_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. + + +sync_failure(Config) -> + Dir = ?config(priv_dir, Config), + FileName = lists:concat([?MODULE,"_",?FUNCTION_NAME]), + File = filename:join(Dir, FileName), + + + Node = start_h_on_new_node(Config, File), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]), + HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]), + LogOpts = maps:get(log_opts, HState), + + SyncInt = 500, + ok = rpc:call(Node, logger, update_handler_config, + [?STANDARD_HANDLER, config, + #{filesync_repeat_interval => SyncInt}]), + Info = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]), + SyncInt = maps:get(filesync_repeat_interval, Info), + + ok = log_on_remote_node(Node, "Logged1"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [disk_log_sync,{error,no_such_log}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + ?check({error,{?STANDARD_HANDLER,filesync,LogOpts,{error,no_such_log}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, + [disk_log_sync,{error,{blocked_log,?STANDARD_HANDLER}}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + ?check({error,{?STANDARD_HANDLER,filesync,LogOpts, + {error,{blocked_log,?STANDARD_HANDLER}}}}), + + rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]), + ok = log_on_remote_node(Node, "Logged2"), + ?check_no_log, + ok. 
+sync_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. + +start_h_on_new_node(Config, File) -> + {ok,_,Node} = + logger_test_lib:setup( + Config, + [{logger,[{handler,default,logger_disk_log_h, + #{ config => #{ file => File }}}]}]), + ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, + {?MODULE,nl}]), + Node. + +log_on_remote_node(Node,Msg) -> + _ = spawn_link(Node, + fun() -> erlang:group_leader(whereis(user),self()), + logger:notice(Msg) + end), + ok. + +%% functions for test hook macros to be called by rpc +set_internal_log(_Mod, _Func) -> + ?set_internal_log({_Mod,_Func}). +set_result(_Op, _Result) -> + ?set_result(_Op, _Result). +set_defaults() -> + ?set_defaults(). + +%% internal log function that sends the term to the test case process +internal_log(Type, Term) -> + [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester), + Tester ! {log,{Type,Term}}, + logger:internal_log(Type, Term), + ok. + + +%%%----------------------------------------------------------------- +%%% Overload protection tests + +op_switch_to_sync(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NumOfReqs = 500, + NewHConfig = + HConfig#{config => DLHConfig#{sync_mode_qlen => 2, + drop_mode_qlen => NumOfReqs+1, + flush_qlen => 2*NumOfReqs, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + Lines = count_lines(Log), + NumOfReqs = Lines, + ok = file_delete(Log), + ok. +op_switch_to_sync(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_drop() -> + [{timetrap,{seconds,180}}]. +op_switch_to_drop(Config) -> + Test = + fun() -> + {Log,HConfig,DLHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + NumOfReqs = 300, + Procs = 2, + Bursts = 10, + NewHConfig = + HConfig#{config => + DLHConfig#{sync_mode_qlen => 1, + drop_mode_qlen => 2, + flush_qlen => Procs*NumOfReqs*Bursts, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + %% It sometimes happens that the handler either gets + %% the requests in a slow enough pace so that dropping + %% never occurs. Therefore, lets generate a number of + %% bursts to increase the chance of message buildup. + [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) || + _ <- lists:seq(1, Bursts)], + Logged = count_lines(Log), + ok = stop_handler(?MODULE), + ct:pal("Number of messages dropped = ~w (~w)", + [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]), + true = (Logged < (Procs*NumOfReqs*Bursts)), + true = (Logged > 0), + _ = file_delete(Log), + ok + end, + %% As it's tricky to get the timing right in only one go, we perform the + %% test repeatedly, hoping that will generate a successful result. + case repeat_until_ok(Test, 10) of + {ok,{Failures,_Result}} -> + ct:log("Failed ~w times before success!", [Failures]); + {fails,Reason} -> + ct:fail(Reason) + end. +op_switch_to_drop(cleanup, _Config) -> + _ = stop_handler(?MODULE). + +op_switch_to_flush() -> + [{timetrap,{minutes,3}}]. 
+op_switch_to_flush(Config) -> + Test = + fun() -> + {Log,HConfig,DLHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + + %% NOTE: it's important that both async and sync + %% requests have been queued when the flush happens + %% (verify with coverage of flush_log_requests/2) + + NewHConfig = + HConfig#{config => + DLHConfig#{sync_mode_qlen => 2, + %% disable drop mode + drop_mode_qlen => 300, + flush_qlen => 300, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 1500, + Procs = 10, + Bursts = 10, + %% It sometimes happens that the handler either gets + %% the requests in a slow enough pace so that flushing + %% never occurs, or it gets all messages at once, + %% causing all messages to get flushed (no dropping of + %% sync messages gets tested). Therefore, lets + %% generate a number of bursts to increase the chance + %% of message buildup in some random fashion. + [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) || + _ <- lists:seq(1,Bursts)], + Logged = count_lines(Log), + ok= stop_handler(?MODULE), + ct:pal("Number of messages flushed/dropped = ~w (~w)", + [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]), + true = (Logged < (NumOfReqs*Procs*Bursts)), + true = (Logged > 0), + _ = file_delete(Log), + ok + end, + %% As it's tricky to get the timing right in only one go, we perform the + %% test repeatedly, hoping that will generate a successful result. + case repeat_until_ok(Test, 10) of + {ok,{Failures,_Result}} -> + ct:log("Failed ~w times before success!", [Failures]); + {fails,Reason} -> + ct:fail(Reason) + end. +op_switch_to_flush(cleanup, _Config) -> + _ = stop_handler(?MODULE). + + +limit_burst_disabled(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{config => DLHConfig#{burst_limit_enable => false, + burst_limit_max_count => 10, + burst_limit_window_time => 2000, + drop_mode_qlen => 200, + flush_qlen => 300}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + NumOfReqs = Logged, + ok = file_delete(Log), + ok. +limit_burst_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +limit_burst_enabled_one(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + NewHConfig = + HConfig#{config => DLHConfig#{burst_limit_enable => true, + burst_limit_max_count => ReqLimit, + burst_limit_window_time => 2000, + drop_mode_qlen => 200, + flush_qlen => 300}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ReqLimit = Logged, + ok = file_delete(Log), + ok. +limit_burst_enabled_one(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
+ +limit_burst_enabled_period(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + BurstTWin = 1000, + NewHConfig = + HConfig#{config => DLHConfig#{burst_limit_enable => true, + burst_limit_max_count => ReqLimit, + burst_limit_window_time => BurstTWin, + drop_mode_qlen => 20000, + flush_qlen => 20001}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + + Windows = 3, + Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + true = (Logged > (ReqLimit*Windows)) andalso + (Logged < (ReqLimit*(Windows+2))), + ok = file_delete(Log), + ok. +limit_burst_enabled_period(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +kill_disabled(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{config=>DLHConfig#{overload_kill_enable=>false, + overload_kill_qlen=>10, + overload_kill_mem_size=>100}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file_delete(Log), + true = is_pid(whereis(h_proc_name())), + ok. +kill_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +qlen_kill_new(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + Pid0 = whereis(h_proc_name()), + {_,Mem0} = process_info(Pid0, memory), + RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER, + NewHConfig = + HConfig#{config => + DLHConfig#{overload_kill_enable=>true, + overload_kill_qlen=>10, + overload_kill_mem_size=>Mem0+50000, + overload_kill_restart_after=>RestartAfter}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + MRef = erlang:monitor(process, Pid0), + NumOfReqs = 100, + Procs = 4, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice), + %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + receive + {'DOWN', MRef, _, _, Info} -> + case Info of + {shutdown,{overloaded,?MODULE,QLen,Mem}} -> + ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]); + killed -> + ct:pal("Slow shutdown, handler process was killed!", []) + end, + file_delete(Log), + {ok,_} = wait_for_process_up(RestartAfter * 3), + ok + after + 5000 -> + Info = logger_disk_log_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info]), + ct:fail("Handler not dead! It should not have survived this!") + end. +qlen_kill_new(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
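The op_switch_*, limit_burst_* and *_kill_* cases above exercise the handler's overload protection: roughly, below sync_mode_qlen the handler logs asynchronously, from sync_mode_qlen up to drop_mode_qlen it logs synchronously, from drop_mode_qlen up to flush_qlen it drops new events, and at flush_qlen it flushes its mailbox; burst_limit_* caps the number of accepted events per time window, and overload_kill_* terminates (and optionally restarts) the handler when the limits are exceeded. As a rough illustration only, not part of this patch and with an invented handler id and file name, such a handler could be configured like this:

    %% Editor's sketch (hypothetical handler id and file path); the
    %% option names are the ones exercised by the test cases above.
    ok = logger:add_handler(my_disk_log, logger_disk_log_h,
                            #{config => #{file => "log/my_disk_log",
                                          sync_mode_qlen => 20,
                                          drop_mode_qlen => 500,
                                          flush_qlen => 2000,
                                          burst_limit_enable => true,
                                          burst_limit_max_count => 500,
                                          burst_limit_window_time => 1000,
                                          overload_kill_enable => true,
                                          overload_kill_qlen => 20000,
                                          overload_kill_restart_after => 5000}}).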
+
+mem_kill_new(Config) ->
+    {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+    Pid0 = whereis(h_proc_name()),
+    {_,Mem0} = process_info(Pid0, memory),
+    RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+    NewHConfig =
+        HConfig#{config =>
+                     DLHConfig#{overload_kill_enable=>true,
+                                overload_kill_qlen=>50000,
+                                overload_kill_mem_size=>Mem0+500,
+                                overload_kill_restart_after=>RestartAfter}},
+    ok = logger:update_handler_config(?MODULE, NewHConfig),
+    MRef = erlang:monitor(process, Pid0),
+    NumOfReqs = 100,
+    Procs = 4,
+    send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+    %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+    receive
+        {'DOWN', MRef, _, _, Info} ->
+            case Info of
+                {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+                    ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+                killed ->
+                    ct:pal("Slow shutdown, handler process was killed!", [])
+            end,
+            file_delete(Log),
+            {ok,_} = wait_for_process_up(RestartAfter * 3),
+            ok
+    after
+        5000 ->
+            Info = logger_disk_log_h:info(?MODULE),
+            ct:pal("Handler state = ~p", [Info]),
+            ct:fail("Handler not dead! It should not have survived this!")
+    end.
+mem_kill_new(cleanup, _Config) ->
+    ok = stop_handler(?MODULE).
+
+restart_after() ->
+    [{timetrap,{minutes,2}}].
+restart_after(Config) ->
+    {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+    NewHConfig1 =
+        HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
+                                    overload_kill_qlen=>10,
+                                    overload_kill_restart_after=>infinity}},
+    ok = logger:update_handler_config(?MODULE, NewHConfig1),
+    MRef1 = erlang:monitor(process, whereis(h_proc_name())),
+    %% kill handler
+    send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
+    receive
+        {'DOWN', MRef1, _, _, _Reason1} ->
+            file_delete(Log),
+            error = wait_for_process_up(?OVERLOAD_KILL_RESTART_AFTER * 3),
+            ok
+    after
+        5000 ->
+            Info1 = logger_disk_log_h:info(?MODULE),
+            ct:pal("Handler state = ~p", [Info1]),
+            ct:fail("Handler not dead! It should not have survived this!")
+    end,
+
+    {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+    RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+    NewHConfig2 =
+        HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
+                                    overload_kill_qlen=>10,
+                                    overload_kill_restart_after=>RestartAfter}},
+    ok = logger:update_handler_config(?MODULE, NewHConfig2),
+    Pid0 = whereis(h_proc_name()),
+    MRef2 = erlang:monitor(process, Pid0),
+    %% kill handler
+    send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
+    receive
+        {'DOWN', MRef2, _, _, _Reason2} ->
+            file_delete(Log),
+            {ok,Pid1} = wait_for_process_up(RestartAfter * 3),
+            false = (Pid1 == Pid0),
+            ok
+    after
+        5000 ->
+            Info2 = logger_disk_log_h:info(?MODULE),
+            ct:pal("Handler state = ~p", [Info2]),
+            ct:fail("Handler not dead! It should not have survived this!")
+    end,
+    ok.
+restart_after(cleanup, _Config) ->
+    ok = stop_handler(?MODULE).
+
+%% send handler requests (sync, info, reset, change_config)
+%% during high load to verify that sync, dropping and flushing are
+%% handled correctly.
+handler_requests_under_load() ->
+    [{timetrap,{minutes,5}}].
+handler_requests_under_load(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{config => DLHConfig#{sync_mode_qlen => 2, + drop_mode_qlen => 1000, + flush_qlen => 2000, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]}, + {info,[]}, + {reset,[]}, + {change_config,[]}]) + end), + Procs = 100, + Sent = Procs * send_burst({n,5000}, {spawn,Procs,10}, {chars,79}, notice), + Pid ! {self(),finish}, + ReqResult = receive {Pid,Result} -> Result end, + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + FindError = fun(Res) -> + [E || E <- Res, + is_tuple(E) andalso (element(1,E) == error)] + end, + Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult], + NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult), + ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]), + ok = file_delete(Log). +handler_requests_under_load(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) -> + receive + {From,finish} -> + From ! {self(),Reqs} + after + TO -> + Result = + case Req of + change_config -> + logger:update_handler_config(HName, logger_disk_log_h, + #{overload_kill_enable => + false}); + Func -> + logger_disk_log_h:Func(HName) + end, + send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}]) + end. + +%%%----------------------------------------------------------------- +%%% +start_handler(Name, FuncName, Config) -> + Dir = ?config(priv_dir,Config), + File = filename:join(Dir, FuncName), + ct:pal("Logging to ~tp", [File]), + FullFile = lists:concat([File,".1"]), + _ = file_delete(FullFile), + ok = logger:add_handler(Name, + logger_disk_log_h, + #{config=>#{file => File, + max_no_files => 1, + max_no_bytes => 100000000}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([Name]), + formatter=>{?MODULE,op}}), + {ok,HConfig = #{config := DLHConfig}} = logger:get_handler_config(Name), + {FullFile,HConfig,DLHConfig}. + +stop_handler(Name) -> + ct:pal("Stopping handler ~p!", [Name]), + logger:remove_handler(Name). + +send_burst(NorT, Type, {chars,Sz}, Class) -> + Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)], + case NorT of + {n,N} -> + %% process_flag(priority, high), + send_n_burst(N, Type, Text, Class), + %% process_flag(priority, normal), + N; + {t,T} -> + ct:pal("Sending messages sequentially for ~w ms", [T]), + T0 = erlang:monotonic_time(millisecond), + send_t_burst(T0, T, Text, Class, 0) + end. + +send_n_burst(0, _, _Text, _Class) -> + ok; +send_n_burst(N, seq, Text, Class) -> + ok = logger:Class(Text, ?domain), + send_n_burst(N-1, seq, Text, Class); +send_n_burst(N, {spawn,Ps,TO}, Text, Class) -> + ct:pal("~w processes each sending ~w messages", [Ps,N]), + MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end, + monitor(process,spawn_link(per_proc_fun(N,Text,Class,X))) + end || X <- lists:seq(1,Ps)], + lists:foreach(fun(MRef) -> + receive + {'DOWN', MRef, _, _, _} -> + ok + end + end, MRefs), + ct:pal("Message burst sent", []), + ok. + +send_t_burst(T0, T, Text, Class, N) -> + T1 = erlang:monotonic_time(millisecond), + if (T1-T0) > T -> + N; + true -> + ok = logger:Class(Text, ?domain), + send_t_burst(T0, T, Text, Class, N+1) + end. 
+ +per_proc_fun(N,Text,Class,X) when X rem 2 == 0 -> + fun() -> + process_flag(priority,high), + send_n_burst(N, seq, Text, Class) + end; +per_proc_fun(N,Text,Class,_) -> + fun() -> + send_n_burst(N, seq, Text, Class) + end. + +%%%----------------------------------------------------------------- +%%% Formatter callback +%%% Using this to send the formatted string back to the test case +%%% process - so it can check for logged events. +format(_,bad_return) -> + bad_return; +format(_,crash) -> + erlang:error(formatter_crashed); +format(#{msg:={report,R},meta:=#{report_cb:=Fun}}=Log,Config) -> + format(Log#{msg=>Fun(R)},Config); +format(#{msg:={string,String0}},no_nl) -> + String = unicode:characters_to_list(String0), + String; +format(#{msg:={string,String0}},nl) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={string,String0}},op) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={report,#{label:={supervisor,progress}}}},op) -> + ""; +format(#{msg:={report,#{label:={gen_server,terminate}}}},op) -> + ""; +format(#{msg:={report,#{label:={proc_lib,crash}}}},op) -> + ""; +format(#{msg:={F,A}},OpOrPid) when is_list(F), is_list(A) -> + String = lists:flatten(io_lib:format(F,A)), + if is_pid(OpOrPid) -> OpOrPid ! {log,String}; + true -> ok + end, + String++"\n"; +format(#{msg:={string,String0}},Pid) -> + String = unicode:characters_to_list(String0), + Pid ! {log,String}, + String++"\n"; +format(Msg,Tag) -> + Error = {unexpected_format,Msg,Tag}, + erlang:display(Error), + exit(Error). + +remove(Handler, LogName) -> + logger_disk_log_h:remove(Handler, LogName), + HState = #{log_names := Logs} = logger_disk_log_h:info(), + false = maps:is_key(LogName, HState), + false = lists:member(LogName, Logs), + false = logger_config:exist(?LOGGER_TABLE, LogName), + {error,no_such_log} = disk_log:info(LogName), + ok. + +start_and_add(Name, Config, LogOpts) -> + HConfig = maps:get(config, Config, #{}), + HConfig1 = maps:merge(HConfig, LogOpts), + Config1 = Config#{config=>HConfig1}, + ct:pal("Adding handler ~w with: ~p", [Name,Config1]), + ok = logger:add_handler(Name, logger_disk_log_h, Config1), + Pid = whereis(h_proc_name(Name)), + true = is_pid(Pid), + Name = proplists:get_value(name, disk_log:info(Name)), + ok. + +remove_and_stop(Handler) -> + ok = logger:remove_handler(Handler), + timer:sleep(500), + undefined = whereis(h_proc_name(Handler)), + ok. + +try_read_file(FileName, Expected, Time) -> + try_read_file(FileName, Expected, Time, undefined). + +try_read_file(FileName, Expected, Time, _) when Time > 0 -> + case file:read_file(FileName) of + Expected -> + ok; + Error = {error,_Reason} -> + erlang:error(Error); + SomethingElse -> + ct:pal("try_read_file read unexpected: ~p~n", [SomethingElse]), + timer:sleep(500), + try_read_file(FileName, Expected, Time-500, SomethingElse) + end; + +try_read_file(_, _, _, Incorrect) -> + ct:pal("try_read_file got incorrect pattern: ~p~n", [Incorrect]), + erlang:error({error,not_matching_pattern,Incorrect}). + +try_match_file(FileName, Pattern, Time) -> + try_match_file(FileName, Pattern, Time, <<>>). 
+ +try_match_file(FileName, Pattern, Time, _) when Time > 0 -> + case file:read_file(FileName) of + {ok, Bin} -> + case re:run(Bin,Pattern,[{capture,none}]) of + match -> + unicode:characters_to_list(Bin); + _ -> + timer:sleep(100), + try_match_file(FileName, Pattern, Time-100, Bin) + end; + Error -> + erlang:error(Error) + end; +try_match_file(_,Pattern,_,Incorrect) -> + ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n", + [Pattern,Incorrect]), + erlang:error({error,not_matching_pattern,Pattern,Incorrect}). + +count_lines(File) -> + wait_until_written(File), + count_lines1(File). + +wait_until_written(File) -> + wait_until_written(File, -1). + +wait_until_written(File, Sz) -> + timer:sleep(2000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz}} -> + timer:sleep(1000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz}} -> + ok; + {ok,#file_info{size = Sz1}} -> + wait_until_written(File, Sz1) + end; + {ok,#file_info{size = Sz1}} -> + wait_until_written(File, Sz1) + end. + +count_lines1(File) -> + {_,Dev} = file:open(File, [read]), + Lines = count_lines2(Dev, 0), + file:close(Dev), + Lines. + +count_lines2(Dev, LC) -> + case file:read_line(Dev) of + {ok,"Handler logger_disk_log_h_SUITE " ++_} -> + %% Not counting handler info + count_lines2(Dev,LC); + {ok,_} -> + count_lines2(Dev,LC+1); + eof -> LC + end. + +repeat_until_ok(Fun, N) -> + repeat_until_ok(Fun, 0, N, undefined). + +repeat_until_ok(_Fun, Stop, Stop, Reason) -> + {fails,Reason}; + +repeat_until_ok(Fun, C, Stop, FirstReason) -> + if C > 0 -> timer:sleep(5000); + true -> ok + end, + try Fun() of + Result -> + {ok,{C,Result}} + catch + _:Reason:Stack -> + ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]), + if FirstReason == undefined -> + repeat_until_ok(Fun, C+1, Stop, {Reason,Stack}); + true -> + repeat_until_ok(Fun, C+1, Stop, FirstReason) + end + end. + +start_tracer(Trace,Expected) -> + Pid = self(), + dbg:tracer(process,{fun tracer/2,{Pid,Expected}}), + dbg:p(h_proc_name(),[c]), + tpl(Trace), + ok. + +tpl([{M,F,A}|Trace]) -> + {ok,Match} = dbg:tpl(M,F,A,c), + case lists:keyfind(matched,1,Match) of + {_,_,1} -> + ok; + _ -> + dbg:stop_clear(), + throw({skip,"Can't trace "++atom_to_list(M)++":"++ + atom_to_list(F)++"/"++integer_to_list(A)}) + end, + tpl(Trace); +tpl([]) -> + ok. + +tracer({trace,_,call,{logger_disk_log_h,handle_cast,[Op|_]},Caller}, + {Pid,[{Mod,Func,Op}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func,Op},Caller); +tracer({trace,_,call,{Mod=disk_log,Func=blog,[_,Data]},Caller}, {Pid,[{Mod,Func,Data}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func,Data},Caller); +tracer({trace,_,call,{Mod,Func,_},Caller}, {Pid,[{Mod,Func}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func},Caller); +tracer({trace,_,call,Call,Caller}, {Pid,Expected}) -> + ct:log("Tracer got unexpected: ~p~nCaller: ~p~nExpected: ~p~n",[Call,Caller,Expected]), + Pid ! {tracer_got_unexpected,Call,Expected}, + {Pid,Expected}. + +maybe_tracer_done(Pid,[],Got,Caller) -> + ct:log("Tracer got: ~p~nCaller: ~p~n",[Got,Caller]), + Pid ! tracer_done; +maybe_tracer_done(Pid,Expected,Got,Caller) -> + ct:log("Tracer got: ~p~nCaller: ~p~n",[Got,Caller]), + {Pid,Expected}. + +check_tracer(T) -> + receive + tracer_done -> + dbg:stop_clear(), + ok; + {tracer_got_unexpected,Got,Expected} -> + dbg:stop_clear(), + ct:fail({tracer_got_unexpected,Got,Expected}) + after T -> + dbg:stop_clear(), + ct:fail({timeout,tracer}) + end. 
+ +escape([$+|Rest]) -> + [$\\,$+|escape(Rest)]; +escape([H|T]) -> + [H|escape(T)]; +escape([]) -> + []. + +h_proc_name() -> + h_proc_name(?MODULE). +h_proc_name(Name) -> + list_to_atom(lists:concat([logger_disk_log_h,"_",Name])). + +wait_for_process_up(T) -> + wait_for_process_up(?MODULE, h_proc_name(), T). + +wait_for_process_up(Name, RegName, T) -> + N = (T div 500) + 1, + wait_for_process_up1(Name, RegName, N). + +wait_for_process_up1(_Name, _RegName, 0) -> + error; +wait_for_process_up1(Name, RegName, N) -> + timer:sleep(500), + case whereis(RegName) of + Pid when is_pid(Pid) -> + case logger:get_handler_config(Name) of + {ok,_} -> + %% ct:pal("Process ~p up (~p tries left)",[Name,N]), + {ok,Pid}; + _ -> + wait_for_process_up1(Name, RegName, N-1) + end; + undefined -> + %% ct:pal("Waiting for process ~p (~p tries left)",[Name,N]), + wait_for_process_up1(Name, RegName, N-1) + end. + +file_delete(Log) -> + file:delete(Log). diff --git a/lib/kernel/test/logger_env_var_SUITE.erl b/lib/kernel/test/logger_env_var_SUITE.erl new file mode 100644 index 0000000000..e8d1a313dc --- /dev/null +++ b/lib/kernel/test/logger_env_var_SUITE.erl @@ -0,0 +1,683 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_env_var_SUITE). + +-compile(export_all). + +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +-import(logger_test_lib,[setup/2,log/3,sync_and_read/3]). + +suite() -> + [{timetrap,{seconds,60}}, + {ct_hooks,[logger_test_lib]}]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +groups() -> + [{error_logger,[],[error_logger_tty, + error_logger_tty_sasl_compatible, + error_logger_false, + error_logger_false_progress, + error_logger_false_sasl_compatible, + error_logger_silent, + error_logger_silent_sasl_compatible, + error_logger_file]}, + {logger,[],[logger_file, + logger_file_sasl_compatible, + logger_file_log_progress, + logger_file_no_filter, + logger_file_no_filter_level, + logger_file_formatter, + logger_filters, + logger_filters_stop, + logger_module_level, + logger_disk_log, + logger_disk_log_formatter, + logger_undefined, + logger_many_handlers_default_first, + logger_many_handlers_default_last, + logger_many_handlers_default_last_broken_filter + ]}, + {bad,[],[bad_error_logger, + bad_level, + bad_sasl_compatibility]}]. + +all() -> + [default, + default_sasl_compatible, + sasl_compatible_false, + sasl_compatible_false_no_progress, + sasl_compatible, + all_logger_level, + {group,bad}, + {group,error_logger}, + {group,logger} + ]. 
+ +default(Config) -> + {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} = setup(Config,[]), + notice = maps:get(level,P), + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + false = exists(sasl,Hs), + [] = ML, + ok. + +default_sasl_compatible(Config) -> + {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} = + setup(Config,[{logger_sasl_compatible,true}]), + info = maps:get(level,P), + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + true = exists(sasl,Hs), + [] = ML, + ok. + +error_logger_tty(Config) -> + {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} = + setup(Config,[{error_logger,tty}]), + notice = maps:get(level,P), + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + false = exists(sasl,Hs), + [] = ML, + ok. + +error_logger_tty_sasl_compatible(Config) -> + {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} = + setup(Config, + [{error_logger,tty}, + {logger_sasl_compatible,true}]), + info = maps:get(level,P), + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + true = exists(sasl,Hs), + [] = ML, + ok. + +error_logger_false(Config) -> + {ok,#{handlers:=Hs,primary:=P,module_levels:=ML},_Node} = + setup(Config, + [{error_logger,false}, + {logger_level,notice}]), + false = exists(?STANDARD_HANDLER,Hs), + #{module:=logger_simple_h} = SimpleC = find(simple,Hs), + all = maps:get(level,SimpleC), + notice = maps:get(level,P), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters), + false = exists(sasl,Hs), + [] = ML, + ok. + +error_logger_false_progress(Config) -> + {ok,#{handlers:=Hs,primary:=P,module_levels:=ML},_Node} = + setup(Config, + [{error_logger,false}, + {logger_level,notice}]), + false = exists(?STANDARD_HANDLER,Hs), + #{module:=logger_simple_h} = SimpleC = find(simple,Hs), + all = maps:get(level,SimpleC), + notice = maps:get(level,P), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters), + false = exists(sasl,Hs), + [] = ML, + ok. + +error_logger_false_sasl_compatible(Config) -> + {ok,#{handlers:=Hs,primary:=P,module_levels:=ML},_Node} = + setup(Config, + [{error_logger,false}, + {logger_level,notice}, + {logger_sasl_compatible,true}]), + false = exists(?STANDARD_HANDLER,Hs), + #{module:=logger_simple_h} = SimpleC = find(simple,Hs), + all = maps:get(level,SimpleC), + info = maps:get(level,P), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,SimpleFilters), + true = exists(sasl,Hs), + [] = ML, + ok. + +error_logger_silent(Config) -> + {ok,#{handlers:=Hs},_Node} = setup(Config, + [{error_logger,silent}]), + false = exists(?STANDARD_HANDLER,Hs), + false = exists(simple,Hs), + false = exists(sasl,Hs), + ok. 
+ +error_logger_silent_sasl_compatible(Config) -> + {ok,#{handlers:=Hs},_Node} = setup(Config, + [{error_logger,silent}, + {logger_sasl_compatible,true}]), + false = exists(?STANDARD_HANDLER,Hs), + false = exists(simple,Hs), + true = exists(sasl,Hs), + ok. + + +error_logger_file(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,_,Node} = setup(Config, + [{error_logger,{file,Log}}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + ok. + + +logger_file(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{config=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + + notice = maps:get(level,P), + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + false = exists(sasl,Hs), + [] = ML, + ok. + +logger_file_sasl_compatible(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},Node} + = setup(Config, + [{logger_sasl_compatible,true}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{config=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + + info = maps:get(level,P), + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + true = exists(sasl,Hs), + [] = ML, + ok. + +logger_file_log_progress(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},Node} + = setup(Config, + [{logger_level,info}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{config=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 6,% progress in std logger + info), + + info = maps:get(level,P), + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + false = exists(sasl,Hs), + [] = ML, + ok. + +logger_file_no_filter(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filter_default=>log,filters=>[], + config=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 6),% progress in std logger + + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = exists(simple,Hs), + false = exists(sasl,Hs), + + ok. + +logger_file_no_filter_level(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filters=>[],level=>error, + config=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 0,% progress in std logger + error),% level + + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + error = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = exists(simple,Hs), + false = exists(sasl,Hs), + + ok. 
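The logger_file* cases above hand the logger configuration to the node through the kernel application environment at start-up; outside of a test, the same settings would normally live in a sys.config file. A rough sketch, not part of this patch and with an invented log file path, of an entry equivalent to the environment set up by logger_file_log_progress/1:

    %% Editor's sketch of a sys.config term (file path is made up).
    [{kernel,
      [{logger_level, info},
       {logger,
        [{handler, default, logger_std_h,
          #{config => #{type => {file, "log/erlang.log"}}}}]}]}].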
+ +logger_file_formatter(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filters=>[], + formatter=>{logger_formatter,#{}}, + config=>#{type=>{file,Log}}}}]}]), + check_single_log(Node,Log, + file,% dest + 6),% progress in std logger + + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = exists(simple,Hs), + false = exists(sasl,Hs), + + ok. + +logger_filters(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs,primary:=P},Node} + = setup(Config, + [{logger_level,info}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{config=>#{type=>{file,Log}}}}, + {filters,log,[{stop_progress,{fun logger_filters:progress/2,stop}}]} + ]}]), + check_default_log(Node,Log, + file,% dest + 0,% progress in std logger + info), + + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + false = exists(sasl,Hs), + LoggerFilters = maps:get(filters,P), + true = lists:keymember(stop_progress,1,LoggerFilters), + + ok. + +logger_filters_stop(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs,primary:=P},Node} + = setup(Config, + [{logger_level,info}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filters=>[], + config=>#{type=>{file,Log}}}}, + {filters,stop,[{log_error,{fun logger_filters:level/2,{log,gt,info}}}]} + ]}]), + check_default_log(Node,Log, + file,% dest + 0,% progress in std logger + info), + + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = exists(simple,Hs), + false = exists(sasl,Hs), + LoggerFilters = maps:get(filters,P), + true = lists:keymember(log_error,1,LoggerFilters), + + ok. + +logger_module_level(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs,module_levels:=ModuleLevels},Node} + = setup(Config, + [{logger_level,info}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{config=>#{type=>{file,Log}}}}, + {module_level,error,[supervisor]} + ]}]), + check_default_log(Node,Log, + file,% dest + 3,% progress in std logger + info), + + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + false = exists(sasl,Hs), + [{supervisor,error}] = ModuleLevels, + ok. + +logger_disk_log(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_disk_log_h, + #{config=>#{file=>Log}}}]}]), + check_default_log(Node,Log, + disk_log,% dest + 0),% progress in std logger + + #{module:=logger_disk_log_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + false = exists(sasl,Hs), + + ok. 
+ +logger_disk_log_formatter(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_disk_log_h, + #{filters=>[], + formatter=>{logger_formatter,#{}}, + config=>#{file=>Log}}}]}]), + check_single_log(Node,Log, + disk_log,% dest + 6),% progress in std logger + + #{module:=logger_disk_log_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = exists(simple,Hs), + false = exists(sasl,Hs), + + ok. + +logger_undefined(Config) -> + {ok,#{handlers:=Hs,primary:=P},_Node} = + setup(Config,[{logger,[{handler,?STANDARD_HANDLER,undefined}]}]), + false = exists(?STANDARD_HANDLER,Hs), + #{module:=logger_simple_h} = SimpleC = find(simple,Hs), + all = maps:get(level,SimpleC), + notice = maps:get(level,P), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters), + false = exists(sasl,Hs), + ok. + + +%% Test that we can add multiple handlers with the default first +logger_many_handlers_default_first(Config) -> + LogErr = file(Config,logger_many_handlers_default_first_error), + LogInfo = file(Config,logger_many_handlers_default_first_info), + + logger_many_handlers( + Config,[{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{level=>error, + filters=>[], + formatter=>{logger_formatter,#{}}, + config=>#{type=>{file,LogErr}}} + }, + {handler,info,logger_std_h, + #{level=>info, + filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}], + config=>#{type=>{file,LogInfo}}} + } + ]}, + {logger_level,info}], LogErr, LogInfo, 6). + +%% Test that we can add multiple handlers with the default last +logger_many_handlers_default_last(Config) -> + LogErr = file(Config,logger_many_handlers_default_last_error), + LogInfo = file(Config,logger_many_handlers_default_last_info), + logger_many_handlers( + Config,[{logger, + [{handler,info,logger_std_h, + #{level=>info, + filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}], + config=>#{type=>{file,LogInfo}}} + }, + {handler,?STANDARD_HANDLER,logger_std_h, + #{level=>error, + filters=>[], + formatter=>{logger_formatter,#{}}, + config=>#{type=>{file,LogErr}}} + } + ]}, + {logger_level,info}], LogErr, LogInfo, 7). + +%% Check that we can handle that an added logger has a broken filter +%% This used to cause a deadlock. +logger_many_handlers_default_last_broken_filter(Config) -> + LogErr = file(Config,logger_many_handlers_default_first_broken_filter_error), + LogInfo = file(Config,logger_many_handlers_default_first_broken_filter_info), + + logger_many_handlers( + Config,[{logger, + [{handler,info,logger_std_h, + #{level=>info, + filters=>[{broken,{fun logger_filters:level/2,broken_state}}, + {level,{fun logger_filters:level/2,{stop,gteq,error}}}], + config=>#{type=>{file,LogInfo}}} + }, + {handler,?STANDARD_HANDLER,logger_std_h, + #{level=>error, + filters=>[], + formatter=>{logger_formatter,#{}}, + config=>#{type=>{file,LogErr}}} + } + ]}, + {logger_level,info}], LogErr, LogInfo, 7). + +logger_many_handlers(Config, Env, LogErr, LogInfo, NumProgress) -> + {ok,_,Node} = setup(Config,Env), + check_single_log(Node,LogErr, + file,% dest + 0,% progress in std logger + error), % level + ok = rpc:call(Node,logger_std_h,filesync,[info]), + {ok, Bin} = file:read_file(LogInfo), + ct:log("Log content:~n~s",[Bin]), + match(Bin,<<"info:">>,NumProgress,info,info), + match(Bin,<<"notice:">>,1,notice,info), + match(Bin,<<"alert:">>,0,alert,info), + + ok. 
+ +sasl_compatible_false(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,_,Node} = setup(Config, + [{error_logger,{file,Log}}, + {logger_sasl_compatible,false}, + {logger_level,info}]), % to get progress + check_default_log(Node,Log, + file,% dest + 6,% progress in std logger + info), + ok. + +sasl_compatible_false_no_progress(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,_,Node} = setup(Config, + [{error_logger,{file,Log}}, + {logger_sasl_compatible,false}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + ok. + +sasl_compatible(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,_,Node} = setup(Config, + [{error_logger,{file,Log}}, + {sasl_compatible,true}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + ok. + +all_logger_level(Config) -> + [all_logger_level(Config,Level) || Level <- [none, + emergency, + alert, + critical, + error, + warning, + notice, + info, + debug, + all]], + ok. + +all_logger_level(Config,Level) -> + {ok,#{primary:=#{level:=Level}},Node} = setup(Config,[{logger_level,Level}]), + true = test_server:stop_node(Node), + ok. + +bad_error_logger(Config) -> + error = setup(Config,[{error_logger,baddest}]). + +bad_level(Config) -> + error = setup(Config,[{logger_level,badlevel}]). + +bad_sasl_compatibility(Config) -> + error = setup(Config,[{logger_sasl_compatible,badcomp}]). + +%%%----------------------------------------------------------------- +%%% Internal +file(Config,Func) -> + filename:join(proplists:get_value(priv_dir,Config), + lists:concat([Func,".log"])). + +check_default_log(Node,Log,Dest,NumProgress) -> + check_default_log(Node,Log,Dest,NumProgress,notice). +check_default_log(Node,Log,Dest,NumProgress,Level) -> + + {ok,Bin1,Bin2} = check_log(Node,Log,Dest), + + match(Bin1,<<"PROGRESS REPORT">>,NumProgress,info,Level), + match(Bin1,<<"ALERT REPORT">>,1,alert,Level), + match(Bin1,<<"INFO REPORT">>,0,notice,Level), + match(Bin1,<<"DEBUG REPORT">>,0,debug,Level), + + match(Bin2,<<"INFO REPORT">>,1,notice,Level), + match(Bin2,<<"DEBUG REPORT">>,0,debug,Level), + ok. + +check_single_log(Node,Log,Dest,NumProgress) -> + check_single_log(Node,Log,Dest,NumProgress,notice). +check_single_log(Node,Log,Dest,NumProgress,Level) -> + + {ok,Bin1,Bin2} = check_log(Node,Log,Dest), + + match(Bin1,<<"info:">>,NumProgress,info,Level), + match(Bin1,<<"alert:">>,1,alert,Level), + match(Bin1,<<"debug:">>,0,debug,Level), + + match(Bin2,<<"info:">>,NumProgress+1,info,Level), + match(Bin2,<<"debug:">>,0,debug,Level), + + ok. + +check_log(Node,Log,Dest) -> + + ok = log(Node,alert,["dummy1"]), + ok = log(Node,debug,["dummy1"]), + + %% Check that there are progress reports (supervisor and + %% application_controller) and an error report (the call above) in + %% the log. There should not be any info reports yet. + {ok,Bin1} = sync_and_read(Node,Dest,Log), + ct:log("Log content:~n~s",[Bin1]), + + %% Then stop sasl and see that the info report from + %% application_controller is there + ok = rpc:call(Node,application,stop,[sasl]), + {ok,Bin2} = sync_and_read(Node,Dest,Log), + ct:log("Log content:~n~s",[Bin2]), + {ok,Bin1,Bin2}. + +match(Bin,Pattern,0,_,_) -> + nomatch = re:run(Bin,Pattern,[{capture,none}]); +match(Bin,Pattern,N,LogLevel,ConfLevel) -> + case logger:compare_levels(LogLevel,ConfLevel) of + lt -> match(Bin,Pattern,0,LogLevel,ConfLevel); + _ -> + {match,M} = re:run(Bin,Pattern,[{capture,all},global]), + N = length(M) + end. 
+ +find(Id,Handlers) -> + case lists:search(fun(#{id:=Id0}) when Id0=:=Id-> true; + (_) -> false end, + Handlers) of + {value,Config} -> + Config; + false -> + false + end. + +exists(Id,Handlers) -> + case find(Id,Handlers) of + false -> + false; + _ -> + true + end. diff --git a/lib/kernel/test/logger_filters_SUITE.erl b/lib/kernel/test/logger_filters_SUITE.erl new file mode 100644 index 0000000000..11cce8fd20 --- /dev/null +++ b/lib/kernel/test/logger_filters_SUITE.erl @@ -0,0 +1,227 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_filters_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). + +-define(ndlog, + #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{}}). +-define(dlog(Domain), + #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{domain=>Domain}}). +-define(llog(Level), + #{level=>Level,msg=>{"Line: ~p",[?LINE]},meta=>#{}}). +-define(plog, + #{level=>info, + msg=>{report,#{label=>{?MODULE,progress}}}, + meta=>#{line=>?LINE}}). +-define(rlog(Node), + #{level=>info, + msg=>{"Line: ~p",[?LINE]}, + meta=>#{gl=>rpc:call(Node,erlang,whereis,[user])}}). + +-define(TRY(X), my_try(fun() -> X end)). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [domain, + level, + progress, + remote_gl]. 
+ +domain(_Config) -> + L1 = logger_filters:domain(L1=?dlog([]),{log,super,[]}), + stop = logger_filters:domain(?dlog([]),{stop,super,[]}), + L2 = logger_filters:domain(L2=?dlog([]),{log,sub,[]}), + stop = logger_filters:domain(?dlog([]),{stop,sub,[]}), + L3 = logger_filters:domain(L3=?dlog([]),{log,equal,[]}), + stop = logger_filters:domain(?dlog([]),{stop,equal,[]}), + ignore = logger_filters:domain(?dlog([]),{log,not_equal,[]}), + ignore = logger_filters:domain(?dlog([]),{stop,not_equal,[]}), + ignore = logger_filters:domain(?dlog([]),{log,undefined,[]}), + ignore = logger_filters:domain(?dlog([]),{stop,undefined,[]}), + + L4 = logger_filters:domain(L4=?dlog([a]),{log,super,[a,b]}), + stop = logger_filters:domain(?dlog([a]),{stop,super,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,sub,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,sub,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,equal,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,equal,[a,b]}), + L5 = logger_filters:domain(L5=?dlog([a]),{log,not_equal,[a,b]}), + stop = logger_filters:domain(?dlog([a]),{stop,not_equal,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,undefined,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,undefined,[a,b]}), + + ignore = logger_filters:domain(?dlog([a,b]),{log,super,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,super,[a]}), + L6 = logger_filters:domain(L6=?dlog([a,b]),{log,sub,[a]}), + stop = logger_filters:domain(?dlog([a,b]),{stop,sub,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{log,equal,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,equal,[a]}), + L7 = logger_filters:domain(L7=?dlog([a,b]),{log,not_equal,[a]}), + stop = logger_filters:domain(?dlog([a,b]),{stop,not_equal,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{log,undefined,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,undefined,[a]}), + + ignore = logger_filters:domain(?ndlog,{log,super,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,super,[a]}), + ignore = logger_filters:domain(?ndlog,{log,sub,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,sub,[a]}), + ignore = logger_filters:domain(?ndlog,{log,equal,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,equal,[a]}), + L8 = logger_filters:domain(L8=?ndlog,{log,not_equal,[a]}), + stop = logger_filters:domain(?ndlog,{stop,not_equal,[a]}), + L9 = logger_filters:domain(L9=?ndlog,{log,undefined,[a]}), + stop = logger_filters:domain(?ndlog,{stop,undefined,[a]}), + + L10 = logger_filters:domain(L10=?dlog([a,b,c,d]),{log,super,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,super,[a,b,c,d]}), + L11 = logger_filters:domain(L11=?dlog([a,b,c,d]),{log,sub,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,sub,[a,b,c,d]}), + L12 = logger_filters:domain(L12=?dlog([a,b,c,d]),{log,equal,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,not_equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,not_equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,undefined,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,undefined,[a,b,c,d]}), + + %% A domain field in meta which is not a list is allowed by the + %% filter, but since MatchDomain is always a list of atoms, only + %% Action=not_equal can ever match. 
+ ignore = logger_filters:domain(?dlog(dummy),{log,super,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,super,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,sub,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,sub,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,equal,[a,b,c,d]}), + L13 = logger_filters:domain(L13=?dlog(dummy),{log,not_equal,[a,b,c,d]}), + stop = logger_filters:domain(?dlog(dummy),{stop,not_equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,undefined,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,undefined,[a,b,c,d]}), + + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,bad)), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{bad,super,[]})), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,bad,[]})), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,super,bad})), + + ok. + +level(_Config) -> + ignore = logger_filters:level(?llog(info),{log,lt,info}), + ignore = logger_filters:level(?llog(info),{stop,lt,info}), + ignore = logger_filters:level(?llog(info),{log,gt,info}), + ignore = logger_filters:level(?llog(info),{stop,gt,info}), + L1 = logger_filters:level(L1=?llog(info),{log,lteq,info}), + stop = logger_filters:level(?llog(info),{stop,lteq,info}), + L2 = logger_filters:level(L2=?llog(info),{log,gteq,info}), + stop = logger_filters:level(?llog(info),{stop,gteq,info}), + L3 = logger_filters:level(L3=?llog(info),{log,eq,info}), + stop = logger_filters:level(?llog(info),{stop,eq,info}), + ignore = logger_filters:level(?llog(info),{log,neq,info}), + ignore = logger_filters:level(?llog(info),{stop,neq,info}), + + ignore = logger_filters:level(?llog(error),{log,lt,info}), + ignore = logger_filters:level(?llog(error),{stop,lt,info}), + L4 = logger_filters:level(L4=?llog(error),{log,gt,info}), + stop = logger_filters:level(?llog(error),{stop,gt,info}), + ignore = logger_filters:level(?llog(error),{log,lteq,info}), + ignore = logger_filters:level(?llog(error),{stop,lteq,info}), + L5 = logger_filters:level(L5=?llog(error),{log,gteq,info}), + stop = logger_filters:level(?llog(error),{stop,gteq,info}), + ignore = logger_filters:level(?llog(error),{log,eq,info}), + ignore = logger_filters:level(?llog(error),{stop,eq,info}), + L6 = logger_filters:level(L6=?llog(error),{log,neq,info}), + stop = logger_filters:level(?llog(error),{stop,neq,info}), + + L7 = logger_filters:level(L7=?llog(info),{log,lt,error}), + stop = logger_filters:level(?llog(info),{stop,lt,error}), + ignore = logger_filters:level(?llog(info),{log,gt,error}), + ignore = logger_filters:level(?llog(info),{stop,gt,error}), + L8 = logger_filters:level(L8=?llog(info),{log,lteq,error}), + stop = logger_filters:level(?llog(info),{stop,lteq,error}), + ignore = logger_filters:level(?llog(info),{log,gteq,error}), + ignore = logger_filters:level(?llog(info),{stop,gteq,error}), + ignore = logger_filters:level(?llog(info),{log,eq,error}), + ignore = logger_filters:level(?llog(info),{stop,eq,error}), + L9 = logger_filters:level(L9=?llog(info),{log,neq,error}), + stop = logger_filters:level(?llog(info),{stop,neq,error}), + + {error,badarg} = ?TRY(logger_filters:level(?llog(info),bad)), + {error,badarg} = ?TRY(logger_filters:level(?llog(info),{bad,eq,info})), + {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,bad,info})), + {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,eq,bad})), + + ok. 
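Outside of this suite the filters are not called by hand; they are installed on a handler (or as primary filters) together with their extra argument. A sketch of that installation, using the same {Fun, Extra} tuples as the error_logger handler setup in logger_legacy_SUITE further down; the handler id default and the filter ids are only examples:

    %% Pass events whose domain matches [otp] under the 'super' comparison,
    %% drop progress reports and events from remote group leaders.
    ok = logger:add_handler_filter(default, otp_domain,
                                   {fun logger_filters:domain/2, {log, super, [otp]}}),
    ok = logger:add_handler_filter(default, no_progress,
                                   {fun logger_filters:progress/2, stop}),
    ok = logger:add_handler_filter(default, no_remote,
                                   {fun logger_filters:remote_gl/2, stop}).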
+ +progress(_Config) -> + L1 = logger_filters:progress(L1=?plog,log), + stop = logger_filters:progress(?plog,stop), + ignore = logger_filters:progress(?ndlog,log), + ignore = logger_filters:progress(?ndlog,stop), + + {error,badarg} = ?TRY(logger_filters:progress(?plog,bad)), + + ok. + +remote_gl(_Config) -> + {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]), + L1 = logger_filters:remote_gl(L1=?rlog(Node),log), + stop = logger_filters:remote_gl(?rlog(Node),stop), + ignore = logger_filters:remote_gl(?ndlog,log), + ignore = logger_filters:remote_gl(?ndlog,stop), + + {error,badarg} = ?TRY(logger_filters:remote_gl(?rlog(Node),bad)), + ok. + +remote_gl(cleanup,_Config) -> + [test_server:stop_node(N) || N<-nodes()]. + +%%%----------------------------------------------------------------- +%%% Called by macro ?TRY(X) +my_try(Fun) -> + try Fun() catch C:R -> {C,R} end. diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl new file mode 100644 index 0000000000..8c13f0f908 --- /dev/null +++ b/lib/kernel/test/logger_formatter_SUITE.erl @@ -0,0 +1,886 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_formatter_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). + +-define(TRY(X), my_try(fun() -> X end)). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [default, + legacy_header, + error_logger_notice_header, + single_line, + template, + format_msg, + report_cb, + max_size, + depth, + chars_limit, + format_mfa, + format_time, + level_or_msg_in_meta, + faulty_log, + faulty_config, + faulty_msg, + check_config, + update_config]. + +default(_Config) -> + String1 = format(info,{"~p",[term]},#{},#{}), + ct:log(String1), + [_DateTime,"info:","term\n"] = string:lexemes(String1," "), + + Time = timestamp(), + ExpectedTimestamp = default_time_format(Time), + String2 = format(info,{"~p",[term]},#{time=>Time},#{}), + ct:log(String2), + " info: term\n" = string:prefix(String2,ExpectedTimestamp), + ok. 
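All of the formatter cases below go through a local format/4 helper (defined near the end of this file) that simply calls logger_formatter:format/2 on a hand-built event and flattens the result. For orientation, a direct call looks roughly like this; the event values and the template are illustrative only:

    Event = #{level => info,
              msg   => {"~p items processed", [3]},
              meta  => #{time => erlang:system_time(microsecond)}},
    Chardata = logger_formatter:format(Event,
                                       #{single_line => true,
                                         template => [time, " ", level, ": ", msg, "\n"]}),
    io:put_chars(Chardata).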
+ +legacy_header(_Config) -> + Time = timestamp(), + String1 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>true, + single_line=>false}), + ct:log(String1), + "=INFO REPORT==== "++Rest = String1, + [Timestamp,"\nterm\n"] = string:lexemes(Rest," ="), + [D,M,Y,H,Min,S,Micro] = string:lexemes(Timestamp,"-:."), + integer(D,31), + integer(Y,2018,infinity), + integer(H,23), + integer(Min,59), + integer(S,59), + integer(Micro,999999), + true = lists:member(M,["Jan","Feb","Mar","Apr","May","Jun", + "Jul","Aug","Sep","Oct","Nov","Dec"]), + + String2 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>false, + single_line=>false}), + ct:log(String2), + ExpectedTimestamp = default_time_format(Time), + " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp), + + String3 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>bad, + single_line=>false}), + ct:log(String3), + String3 = String2, + + String4 = format(info,{"~p",[term]},#{time=>Time}, + #{legacy_header=>true, + single_line=>true}), % <---ignored + ct:log(String4), + String4 = String1, + + String5 = format(info,{"~p",[term]},#{}, % <--- no time + #{legacy_header=>true, + single_line=>false}), + ct:log(String5), + "=INFO REPORT==== "++_ = String5, + ok. + +error_logger_notice_header(_Config) -> + Meta1 = #{error_logger=>#{tag => info_report,type => std_info}}, + String1 = format(notice,{"~p",[term]},Meta1, + #{legacy_header=>true, + error_logger_notice_header=>notice}), + ct:log(String1), + "=NOTICE REPORT==== "++_ = String1, + + String2 = format(notice,{"~p",[term]},Meta1, + #{legacy_header=>true, + error_logger_notice_header=>info}), + ct:log(String2), + "=INFO REPORT==== "++_ = String2, + + String3 = format(notice,{"~p",[term]},#{}, + #{legacy_header=>true, + error_logger_notice_header=>notice}), + ct:log(String3), + "=NOTICE REPORT==== "++_ = String3, + + String4 = format(notice,{"~p",[term]},#{}, + #{legacy_header=>true, + error_logger_notice_header=>info}), + ct:log(String4), + "=NOTICE REPORT==== "++_ = String4, + + ok. + +single_line(_Config) -> + Time = timestamp(), + ExpectedTimestamp = default_time_format(Time), + String1 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>true}), + ct:log(String1), + " info: term\n" = string:prefix(String1,ExpectedTimestamp), + + String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>false}), + ct:log(String2), + " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp), + + String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>bad}), + + + %% Test that no extra commas/spaces are added when removing + %% newlines, especially not after "=>" in a map association (as + %% was the case in OTP-21.0, when the only single_line adjustment + %% was done by regexp replacement of "\n" by ", "). + Prefix = + "Some characters to fill the line ------------------------------------- ", + String3 = format(info,{"~s~p~n~s~p~n",[Prefix, + lists:seq(1,10), + Prefix, + #{a=>map,with=>a,few=>accociations}]}, + #{time=>Time}, + #{single_line=>true}), + ct:log(String3), + match = re:run(String3,"\\[1,2,3,4,5,6,7,8,9,10\\]",[{capture,none}]), + match = re:run(String3, + "#{a => map,few => accociations,with => a}", + [{capture,none}]), + + %% This part is added to make sure that the previous test made + %% sense, i.e. that there would actually be newlines inside the + %% list and map. 
+ String4 = format(info,{"~s~p~n~s~p~n",[Prefix, + lists:seq(1,10), + Prefix, + #{a=>map,with=>a,few=>accociations}]}, + #{time=>Time}, + #{single_line=>false}), + ct:log(String4), + match = re:run(String4,"\\[1,2,3,\n",[global,{capture,none}]), + {match,Match4} = re:run(String4,"=>\n",[global,{capture,all}]), + 3 = length(Match4), + + %% Test that big metadata fields do not get line breaks + String5 = format(info,"", + #{mymeta=>lists:seq(1,100)}, + #{single_line=>true,template=>[mymeta,"\n"]}), + ct:log(String5), + [_] = string:lexemes(String5,"\n"), + + %% Ensure that the previous test made sense, i.e. that the + %% metadata field does produce multiple lines if + %% single_line==false. + String6 = format(info,"", + #{mymeta=>lists:seq(1,100)}, + #{single_line=>false,template=>[mymeta,"\n"]}), + ct:log(String6), + [_,_|_] = string:lexemes(String6,"\n"), + + ok. + +template(_Config) -> + Time = timestamp(), + + Template1 = [msg], + String1 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template1}), + ct:log(String1), + "term" = String1, + + Template2 = [msg,unknown], + String2 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template2}), + ct:log(String2), + "term" = String2, + + Template3 = ["string"], + String3 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template3}), + ct:log(String3), + "string" = String3, + + Template4 = ["string\nnewline"], + String4 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template4, + single_line=>true}), + ct:log(String4), + "string\nnewline" = String4, + + Template5 = [], + String5 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template5}), + ct:log(String5), + "" = String5, + + Ref6 = erlang:make_ref(), + Meta6 = #{atom=>some_atom, + integer=>632, + list=>[list,"string",4321,#{},{tuple}], + mfa=>{mod,func,0}, + pid=>self(), + ref=>Ref6, + string=>"some string", + time=>Time, + tuple=>{1,atom,"list"}, + nested=>#{subkey=>subvalue}}, + Template6 = lists:join(";",lists:sort(maps:keys(maps:remove(nested,Meta6))) ++ + [[nested,subkey]]), + String6 = format(info,{"~p",[term]},Meta6,#{template=>Template6, + single_line=>true}), + ct:log(String6), + SelfStr = pid_to_list(self()), + RefStr6 = ref_to_list(Ref6), + ListStr = "[list,\"string\",4321,#{},{tuple}]", + ExpectedTime6 = default_time_format(Time), + ["some_atom", + "632", + ListStr, + "mod:func/0", + SelfStr, + RefStr6, + "some string", + ExpectedTime6, + "{1,atom,\"list\"}", + "subvalue"] = string:lexemes(String6,";"), + + Meta7 = #{time=>Time, + nested=>#{key1=>#{subkey1=>value1}, + key2=>value2}}, + Template7 = lists:join(";",[nested, + [nested,key1], + [nested,key1,subkey1], + [nested,key2], + [nested,key2,subkey2], + [nested,key3], + [nested,key3,subkey3]]), + String7 = format(info,{"~p",[term]},Meta7,#{template=>Template7, + single_line=>true}), + ct:log(String7), + [MultipleKeysStr7, + "#{subkey1 => value1}", + "value1", + "value2", + "", + "", + ""] = string:split(String7,";",all), + %% Order of keys is not fixed + case MultipleKeysStr7 of + "#{key2 => value2,key1 => #{subkey1 => value1}}" -> ok; + "#{key1 => #{subkey1 => value1},key2 => value2}" -> ok; + _ -> ct:fail({full_nested_map_unexpected,MultipleKeysStr7}) + end, + + Meta8 = #{time=>Time, + nested=>#{key1=>#{subkey1=>value1}, + key2=>value2}}, + Template8 = + lists:join( + ";", + [{nested,["exist:",nested],["noexist"]}, + {[nested,key1],["exist:",[nested,key1]],["noexist"]}, + {[nested,key1,subkey1],["exist:",[nested,key1,subkey1]],["noexist"]}, + {[nested,key2],["exist:",[nested,key2]],["noexist"]}, + 
{[nested,key2,subkey2],["exist:",[nested,key2,subkey2]],["noexist"]}, + {[nested,key3],["exist:",[nested,key3]],["noexist"]}, + {[nested,key3,subkey3],["exist:",[nested,key3,subkey3]],["noexist"]}]), + String8 = format(info,{"~p",[term]},Meta8,#{template=>Template8, + single_line=>true}), + ct:log(String8), + [MultipleKeysStr8, + "exist:#{subkey1 => value1}", + "exist:value1", + "exist:value2", + "noexist", + "noexist", + "noexist"] = string:split(String8,";",all), + %% Order of keys is not fixed + case MultipleKeysStr8 of + "exist:#{key2 => value2,key1 => #{subkey1 => value1}}" -> ok; + "exist:#{key1 => #{subkey1 => value1},key2 => value2}" -> ok; + _ -> ct:fail({full_nested_map_unexpected,MultipleKeysStr8}) + end, + + ok. + +format_msg(_Config) -> + Template = [msg], + + String1 = format(info,{"~p",[term]},#{},#{template=>Template}), + ct:log(String1), + "term" = String1, + + String2 = format(info,{"list",[term]},#{},#{template=>Template}), + ct:log(String2), + "FORMAT ERROR: \"list\" - [term]" = String2, + + String3 = format(info,{report,term},#{},#{template=>Template}), + ct:log(String3), + "term" = String3, + + String4 = format(info,{report,term}, + #{report_cb=>fun(_)-> {"formatted",[]} end}, + #{template=>Template}), + ct:log(String4), + "formatted" = String4, + + String5 = format(info,{report,term}, + #{report_cb=>fun(_)-> faulty_return end}, + #{template=>Template}), + ct:log(String5), + "REPORT_CB/1 ERROR: term; Returned: faulty_return" = String5, + + String6 = format(info,{report,term}, + #{report_cb=>fun(_)-> erlang:error(fun_crashed) end}, + #{template=>Template}), + ct:log(String6), + "REPORT_CB/1 CRASH: term; Reason: {error,fun_crashed,"++_ = String6, + + String7 = format(info,{report,term}, + #{report_cb=>fun(_,_)-> ['not',a,string] end}, + #{template=>Template}), + ct:log(String7), + "REPORT_CB/2 ERROR: term; Returned: ['not',a,string]" = String7, + + String8 = format(info,{report,term}, + #{report_cb=>fun(_,_)-> faulty_return end}, + #{template=>Template}), + ct:log(String8), + "REPORT_CB/2 ERROR: term; Returned: faulty_return" = String8, + + String9 = format(info,{report,term}, + #{report_cb=>fun(_,_)-> erlang:error(fun_crashed) end}, + #{template=>Template}), + ct:log(String9), + "REPORT_CB/2 CRASH: term; Reason: {error,fun_crashed,"++_ = String9, + + %% strings are not formatted + String10 = format(info,{string,"string"}, + #{report_cb=>fun(_)-> {"formatted",[]} end}, + #{template=>Template}), + ct:log(String10), + "string" = String10, + + String11 = format(info,{string,['not',printable,list]}, + #{report_cb=>fun(_)-> {"formatted",[]} end}, + #{template=>Template}), + ct:log("~ts",[String11]), % avoiding ct_log crash + "FORMAT ERROR: \"~ts\" - [['not',printable,list]]" = String11, + + String12 = format(info,{string,"string"},#{},#{template=>Template}), + ct:log(String12), + "string" = String12, + + ok. + +report_cb(_Config) -> + Template = [msg], + MetaFun = fun(_) -> {"meta_rcb",[]} end, + ConfigFun = fun(_) -> {"config_rcb",[]} end, + "term" = format(info,{report,term},#{},#{template=>Template}), + "meta_rcb" = + format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template}), + "config_rcb" = + format(info,{report,term},#{},#{template=>Template, + report_cb=>ConfigFun}), + "config_rcb" = + format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template, + report_cb=>ConfigFun}), + ok. 
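The format_msg and report_cb cases above pin down how a report (msg = {report, Term}) becomes text: a report callback, taken from the event metadata or overridden in the formatter config, must return {Format, Args} (the two-argument form returns a string instead), and the one in the formatter config takes precedence over the one in metadata. A sketch of a one-argument callback; the report keys and the function name are made up:

    %% In some module: turn a structured report into {Format, Args}.
    my_report_cb(#{op := Op, duration := Ms}) ->
        {"operation ~w took ~w ms", [Op, Ms]};
    my_report_cb(Other) ->
        {"~p", [Other]}.

    %% Used per event via metadata ...
    logger:info(#{op => backup, duration => 42}, #{report_cb => fun my_report_cb/1}).
    %% ... or for all reports by putting report_cb in the logger_formatter config.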
+ +max_size(_Config) -> + Cfg = #{template=>[msg], + single_line=>false}, + "12345678901234567890" = + format(info,{"12345678901234567890",[]},#{},Cfg), + %% application:set_env(kernel,logger_max_size,11), + %% "12345678901234567890" = % min value is 50, so this is not limited + %% format(info,{"12345678901234567890",[]},#{},Cfg), + %% "12345678901234567890123456789012345678901234567..." = % 50 + %% format(info, + %% {"123456789012345678901234567890123456789012345678901234567890", + %% []}, + %% #{}, + %% Cfg), + %% application:set_env(kernel,logger_max_size,53), + %% "12345678901234567890123456789012345678901234567890..." = %53 + %% format(info, + %% {"123456789012345678901234567890123456789012345678901234567890", + %% []}, + %% #{}, + %% Cfg), + "123456789012..." = + format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>15}), + "12345678901234567890" = + format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>unlimited}), + %% Check that one newline at the end of the line is kept (if it exists) + "12345678901...\n" = + format(info,{"12345678901234567890\n",[]},#{},Cfg#{max_size=>15}), + "12345678901...\n" = + format(info,{"12345678901234567890",[]},#{},Cfg#{template=>[msg,"\n"], + max_size=>15}), + ok. +max_size(cleanup,_Config) -> + application:unset_env(kernel,logger_max_size), + ok. + +depth(_Config) -> + Template = [msg], + "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template}), + application:set_env(kernel,error_logger_format_depth,11), + "[1,2,3,4,5,6,7,8,9,0|...]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template}), + "[1,2,3,4,5,6,7,8,9,0,1,2|...]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template, + depth=>13}), + "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template, + depth=>unlimited}), + ok. +depth(cleanup,_Config) -> + application:unset_env(kernel,error_logger_format_depth), + ok. 
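Worth noting from the depth case above: when no depth is given in the formatter configuration, logger_formatter falls back to the legacy kernel environment variable that error_logger used, which is what the application:set_env/3 calls in the test rely on. In a system configuration that fallback would look like this (the value 11 is arbitrary):

    %% sys.config
    [{kernel, [{error_logger_format_depth, 11}]}].

    %% or at runtime, as the test does:
    application:set_env(kernel, error_logger_format_depth, 11).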
+ +chars_limit(_Config) -> + FA = {"LoL: ~p~nL: ~p~nMap: ~p~n", + [lists:duplicate(10,lists:seq(1,100)), + lists:seq(1,100), + maps:from_list(lists:zip(lists:seq(1,100), + lists:duplicate(100,value)))]}, + Meta = #{time=>timestamp()}, + Template = [time," - ", msg, "\n"], + FC = #{template=>Template, + depth=>unlimited, + max_size=>unlimited, + chars_limit=>unlimited, + single_line=>true}, + CL1 = 80, + String1 = format(info,FA,Meta,FC#{chars_limit=>CL1}), + L1 = string:length(String1), + ct:log("String1: ~p~nLength1: ~p~n",[lists:flatten(String1),L1]), + true = L1 > CL1, + true = L1 < CL1 + 15, + + String2 = format(info,FA,Meta,FC#{chars_limit=>CL1,depth=>10}), + L2 = string:length(String2), + ct:log("String2: ~p~nLength2: ~p~n",[lists:flatten(String2),L2]), + String2 = String1, + + CL3 = 200, + String3 = format(info,FA,Meta,FC#{chars_limit=>CL3}), + L3 = string:length(String3), + ct:log("String3: ~p~nLength3: ~p~n",[lists:flatten(String3),L3]), + true = L3 > CL3, + true = L3 < CL3 + 15, + + String4 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10}), + L4 = string:length(String4), + ct:log("String4: ~p~nLength4: ~p~n",[lists:flatten(String4),L4]), + true = L4 > CL3, + true = L4 < CL3 + 15, + + %% Test that max_size truncates the string which is limited by + %% depth and chars_limit + MS5 = 150, + String5 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10,max_size=>MS5}), + L5 = string:length(String5), + ct:log("String5: ~p~nLength5: ~p~n",[String5,L5]), + L5 = MS5, + true = lists:prefix(lists:sublist(String5,L5-4),String4), + + %% Test that chars_limit limits string also + Str = "123456789012345678901234567890123456789012345678901234567890123456789", + CL6 = 80, + String6 = format(info,{string,Str},Meta,FC#{chars_limit=>CL6}), + L6 = string:length(String6), + ct:log("String6: ~p~nLength6: ~p~n",[String6,L6]), + L6 = CL6, + + ok. + +format_mfa(_Config) -> + Template = [mfa], + + Meta1 = #{mfa=>{mod,func,0}}, + String1 = format(info,{"~p",[term]},Meta1,#{template=>Template}), + ct:log(String1), + "mod:func/0" = String1, + + Meta2 = #{mfa=>{mod,func,[]}}, + String2 = format(info,{"~p",[term]},Meta2,#{template=>Template}), + ct:log(String2), + "mod:func/0" = String2, + + Meta3 = #{mfa=>"mod:func/0"}, + String3 = format(info,{"~p",[term]},Meta3,#{template=>Template}), + ct:log(String3), + "mod:func/0" = String3, + + Meta4 = #{mfa=>othermfa}, + String4 = format(info,{"~p",[term]},Meta4,#{template=>Template}), + ct:log(String4), + "othermfa" = String4, + + ok. 
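The depth, chars_limit and max_size cases above exercise three separate limits: depth truncates individual terms while they are being formatted, chars_limit is a soft limit handed to the io_lib formatting functions, and max_size finally cuts the already rendered string (keeping a trailing newline if there was one). A formatter configuration combining them could look like this; handler id and numbers are only examples:

    ok = logger:update_formatter_config(default,
                                        #{single_line => true,
                                          depth       => 20,    %% deep terms end in |...
                                          chars_limit => 4096,  %% soft limit on formatting work
                                          max_size    => 512}). %% hard cap on the final string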
+ +format_time(_Config) -> + Time = timestamp(), + Meta = #{time=>Time}, + FC = #{template=>[time]}, + Msg = {string,""}, + ExpectedLocal = default_time_format(Time,false), + ExpectedUtc = default_time_format(Time,true), + + %% default - local time + ExpectedLocal = format(info,Msg,Meta,FC), + + %% time_offset config parameter to formatter + ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}), + ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}), + + %% stdlib utc_log works when time_offset parameter is not set + application:set_env(stdlib,utc_log,true), + ExpectedUtc = format(info,Msg,Meta,FC), + + %% sasl utc_log overwrites stdlib utc_log + application:set_env(sasl,utc_log,false), + ExpectedLocal = format(info,Msg,Meta,FC), + + %% sasl utc_log overwrites stdlib utc_log + application:set_env(sasl,utc_log,true), + application:set_env(stdlib,utc_log,false), + ExpectedUtc = format(info,Msg,Meta,FC), + + %% time_offset config parameter to formatter + %% overwrites sasl and stdlib utc_log + application:set_env(sasl,utc_log,false), + ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}), + + %% time_offset config parameter to formatter + %% overwrites sasl and stdlib utc_log + application:set_env(sasl,utc_log,true), + application:set_env(stdlib,utc_log,true), + ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}), + + %% time_designator config parameter to formatter + ExpectedLocalS = default_time_format(Time,false,$\s), + ExpectedUtcS = default_time_format(Time,true,$\s), + + ExpectedLocalS = format(info,Msg,Meta,FC#{time_offset=>"", + time_designator=>$\s}), + ExpectedUtcS = format(info,Msg,Meta,FC#{time_offset=>"Z", + time_designator=>$\s}), + + ok. + +format_time(cleanup,_Config) -> + application:unset_env(sasl,utc_log), + application:unset_env(stdlib,utc_log), + ok. + +level_or_msg_in_meta(_Config) -> + %% The template contains atoms to pick out values from meta, + %% or level/msg to add these from the log event. What if you have + %% a key named 'level' or 'msg' in meta and want to display + %% its value? + %% For now we simply ignore Meta on this and display the + %% actual level and msg from the log event. + + Meta = #{level=>mylevel, + msg=>"metamsg"}, + Template = [level,";",msg], + String = format(info,{"~p",[term]},Meta,#{template=>Template}), + ct:log(String), + "info;term" = String, % so mylevel and "metamsg" are ignored + + ok. + +faulty_log(_Config) -> + %% Unexpected log (should be type logger:log_event()) - print error + {error, + function_clause, + {logger_formatter,format,[_,_],_}} = + ?TRY(logger_formatter:format(unexp_log,#{})), + ok. + +faulty_config(_Config) -> + {error, + function_clause, + {logger_formatter,format,[_,_],_}} = + ?TRY(logger_formatter:format(#{level=>info, + msg=>{"~p",[term]}, + meta=>#{time=>timestamp()}}, + unexp_config)), + ok. + +faulty_msg(_Config) -> + {error, + function_clause, + {logger_formatter,_,_,_}} = + ?TRY(logger_formatter:format(#{level=>info, + msg=>term, + meta=>#{time=>timestamp()}}, + #{})), + ok. + +-define(cfgerr(X), {error,{invalid_formatter_config,logger_formatter,X}}). 
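format_time above fixes the precedence for the timestamp's offset: an explicit time_offset in the formatter configuration wins, otherwise the sasl utc_log application variable decides, and only when that is unset does stdlib utc_log apply. The expected strings are produced by calendar:system_time_to_rfc3339/2, as in the suite's default_time_format/3 helper; roughly:

    Time = erlang:system_time(microsecond),
    %% UTC, 'T' as time designator, e.g. "2018-05-14T09:16:47.466655Z"
    Utc = calendar:system_time_to_rfc3339(Time, [{unit, microsecond},
                                                 {offset, "Z"},
                                                 {time_designator, $T}]),
    %% local time: the same call with an empty offset string
    Local = calendar:system_time_to_rfc3339(Time, [{unit, microsecond},
                                                   {offset, ""},
                                                   {time_designator, $T}]).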
+check_config(_Config) -> + ok = logger_formatter:check_config(#{}), + ?cfgerr(bad) = logger_formatter:check_config(bad), + + C1 = #{chars_limit => 1, + depth => 1, + legacy_header => true, + error_logger_notice_header => info, + max_size => 1, + report_cb => fun(R) -> {"~p",[R]} end, + single_line => false, + template => [], + time_designator => $T, + time_offset => 0}, + ok = logger_formatter:check_config(C1), + + ok = logger_formatter:check_config(#{chars_limit => unlimited}), + ?cfgerr({chars_limit,bad}) = + logger_formatter:check_config(#{chars_limit => bad}), + + ok = logger_formatter:check_config(#{depth => unlimited}), + ?cfgerr({depth,bad}) = + logger_formatter:check_config(#{depth => bad}), + + ok = logger_formatter:check_config(#{legacy_header => false}), + ?cfgerr({legacy_header,bad}) = + logger_formatter:check_config(#{legacy_header => bad}), + + ok = logger_formatter:check_config(#{error_logger_notice_header => notice}), + ?cfgerr({error_logger_notice_header,bad}) = + logger_formatter:check_config(#{error_logger_notice_header => bad}), + + ok = logger_formatter:check_config(#{max_size => unlimited}), + ?cfgerr({max_size,bad}) = + logger_formatter:check_config(#{max_size => bad}), + + ok = + logger_formatter:check_config(#{report_cb => fun(_,_) -> "" end}), + ?cfgerr({report_cb,F}) = + logger_formatter:check_config(#{report_cb => F=fun(_,_,_) -> {"",[]} end}), + ?cfgerr({report_cb,bad}) = + logger_formatter:check_config(#{report_cb => bad}), + + ok = logger_formatter:check_config(#{single_line => true}), + ?cfgerr({single_line,bad}) = + logger_formatter:check_config(#{single_line => bad}), + + Ts = [[key], + [[key1,key2]], + [{key,[key],[]}], + [{[key1,key2],[[key1,key2]],["noexist"]}], + ["string"]], + [begin + ct:log("check template: ~p",[T]), + ok = logger_formatter:check_config(#{template => T}) + end + || T <- Ts], + + ETs = [bad, + [{key,bad}], + [{key,[key],bad}], + [{key,[key],"bad"}], + "bad", + [[key,$a,$b,$c]], + [[$a,$b,$c,key]]], + [begin + ct:log("check template: ~p",[T]), + {error,{invalid_formatter_template,logger_formatter,T}} = + logger_formatter:check_config(#{template => T}) + end + || T <- ETs], + + ?cfgerr({time_designator,bad}) = + logger_formatter:check_config(#{time_designator => bad}), + ?cfgerr({time_designator,"b"}) = + logger_formatter:check_config(#{time_designator => "b"}), + + ok = logger_formatter:check_config(#{time_offset => -1}), + ok = logger_formatter:check_config(#{time_offset => "+02:00"}), + ok = logger_formatter:check_config(#{time_offset => "-23:59"}), + ok = logger_formatter:check_config(#{time_offset => "+24:00"}), + ok = logger_formatter:check_config(#{time_offset => "-25:00"}), + ?cfgerr({time_offset,bad}) = + logger_formatter:check_config(#{time_offset => bad}), + ?cfgerr({time_offset,"02:00"}) = + logger_formatter:check_config(#{time_offset => "02:00"}), + ?cfgerr({time_offset,"+02"}) = + logger_formatter:check_config(#{time_offset => "+02"}), + + ok. 
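check_config/1 is the validation hook that logger runs when a formatter is set or updated, so a bad key or value is rejected with {error,{invalid_formatter_config,...}} before it can break formatting at log time; the update_config case below sees exactly that error come back from logger:update_formatter_config/3. A small sketch of guarding a change explicitly (the config values are examples):

    NewConf = #{single_line => true,
                time_offset => "+02:00",
                template    => [time, " ", msg, "\n"]},
    case logger_formatter:check_config(NewConf) of
        ok ->
            logger:update_formatter_config(default, NewConf);
        {error, {invalid_formatter_config, logger_formatter, Faulty}} ->
            {rejected, Faulty}
    end.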
+ +%% Test that formatter config can be changed, and that the default +%% template is updated accordingly +update_config(_Config) -> + {error,{not_found,?MODULE}} = logger:update_formatter_config(?MODULE,#{}), + + logger:add_handler_filter(default,silence,{fun(_,_) -> stop end,ok}), + ok = logger:add_handler(?MODULE,?MODULE,#{}), + D = lists:seq(1,1000), + logger:notice("~p~n",[D]), + {Lines1,C1} = check_log(), + [ct:log(L) || L <- Lines1], + ct:log("~p",[C1]), + [Line1] = Lines1, + [_Time,"notice: "++D1] = string:split(Line1," "), + true = length(D1)>3000, + true = #{}==C1, + + ok = logger:update_formatter_config(?MODULE,single_line,false), + logger:notice("~p~n",[D]), + {Lines2,C2} = check_log(), + [ct:log(L) || L <- Lines2], + ct:log("~p",[C2]), + true = length(Lines2)>50, + true = #{single_line=>false}==C2, + + ok = logger:update_formatter_config(?MODULE,#{legacy_header=>true}), + logger:notice("~p~n",[D]), + {Lines3,C3} = check_log(), + [ct:log(L) || L <- Lines3], + ct:log("~p",[C3]), + ["=NOTICE REPORT==== "++_|D3] = Lines3, + true = length(D3)>50, + true = #{legacy_header=>true,single_line=>false}==C3, + + ok = logger:update_formatter_config(?MODULE,single_line,true), + logger:notice("~p~n",[D]), + {Lines4,C4} = check_log(), + [ct:log(L) || L <- Lines4], + ct:log("~p",[C4]), + ["=NOTICE REPORT==== "++_,D4] = Lines4, + true = length(D4)>3000, + true = #{legacy_header=>true,single_line=>true}==C4, + + %% Finally, check that error_logger_notice_header works, default=info + error_logger:info_msg("~p",[D]), + {Lines5,C5} = check_log(), + [ct:log(L) || L <- Lines5], + ct:log("~p",[C5]), + ["=INFO REPORT==== "++_,_D5] = Lines5, + + ok=logger:update_formatter_config(?MODULE,error_logger_notice_header,notice), + error_logger:info_msg("~p",[D]), + {Lines6,C6} = check_log(), + [ct:log(L) || L <- Lines6], + ct:log("~p",[C6]), + ["=NOTICE REPORT==== "++_,_D6] = Lines6, + + {error,{invalid_formatter_config,bad}} = + logger:update_formatter_config(?MODULE,bad), + {error,{invalid_formatter_config,logger_formatter,{depth,bad}}} = + logger:update_formatter_config(?MODULE,depth,bad), + + ok. + +update_config(cleanup,_Config) -> + _ = logger:remove_handler(?MODULE), + _ = logger:remove_handler_filter(default,silence), + ok. + +%%%----------------------------------------------------------------- +%%% Internal +format(Level,Msg,Meta,Config) -> + format(#{level=>Level,msg=>Msg,meta=>add_time(Meta)},Config). + +format(Log,Config) -> + lists:flatten(logger_formatter:format(Log,Config)). + +default_time_format(Timestamp) -> + default_time_format(Timestamp,false). + +default_time_format(Timestamp,Utc) -> + default_time_format(Timestamp,Utc,$T). + +default_time_format(Timestamp,Utc,Sep) -> + Offset = if Utc -> "Z"; + true -> "" + end, + calendar:system_time_to_rfc3339(Timestamp,[{unit,microsecond}, + {time_designator,Sep}, + {offset,Offset}]). + +integer(Str) -> + is_integer(list_to_integer(Str)). +integer(Str,Max) -> + integer(Str,0,Max). +integer(Str,Min,Max) -> + Int = list_to_integer(Str), + Int >= Min andalso Int =<Max. + +%%%----------------------------------------------------------------- +%%% Called by macro ?TRY(X) +my_try(Fun) -> + try Fun() catch C:R:S -> {C,R,hd(S)} end. + +timestamp() -> + erlang:system_time(microsecond). + +%% necessary? +add_time(#{time:=_}=Meta) -> + Meta; +add_time(Meta) -> + Meta#{time=>timestamp()}. + +%%%----------------------------------------------------------------- +%%% handler callback +log(Log,#{formatter:={M,C}}) -> + put(log,{M:format(Log,C),C}), + ok. 
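The log/2 function above is what lets this suite act as its own handler: a logger handler module only has to export log(LogEvent, Config), and here it just runs the configured formatter and stashes the result in the process dictionary for the check_log/0 helper to pick up. A standalone handler doing the same thing towards standard error might look like this (module name is hypothetical):

    -module(stderr_h).
    -export([log/2]).

    %% Mandatory handler callback: format the event with the formatter from the
    %% handler configuration and write it out.
    log(LogEvent, #{formatter := {FModule, FConfig}}) ->
        io:put_chars(standard_error, FModule:format(LogEvent, FConfig)).

    %% Installed with:
    %%   logger:add_handler(stderr, stderr_h, #{formatter => {logger_formatter, #{}}}).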
+ +check_log() -> + {S,C} = erase(log), + {string:lexemes(S,"\n"),C}. diff --git a/lib/kernel/test/logger_legacy_SUITE.erl b/lib/kernel/test/logger_legacy_SUITE.erl new file mode 100644 index 0000000000..c3cab07d81 --- /dev/null +++ b/lib/kernel/test/logger_legacy_SUITE.erl @@ -0,0 +1,288 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_legacy_SUITE). + +-compile(export_all). +-compile({nowarn_deprecated_function,[{gen_fsm,start,3}, + {gen_fsm,send_all_state_event,2}]}). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +%%%----------------------------------------------------------------- +%%% This test suite test that log events from within OTP can be +%%% delivered to legacy error_logger event handlers on the same format +%%% as before 'logger' was introduced. +%%% +%%% Before changing the expected format of any of the log events in +%%% this suite, please make sure that the backwards incompatibility it +%%% introduces is ok. +%%% ----------------------------------------------------------------- + +-define(check(Expected), + receive Expected -> + [] = test_server:messages_get() + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {got,test_server:messages_get()}}) + end). +-define(check_no_flush(Expected), + receive Expected -> + ok + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {got,test_server:messages_get()}}) + end). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + logger:add_handler(error_logger,error_logger, + #{level=>info,filter_default=>stop}), + Config. + +end_per_suite(_Config) -> + logger:remove_handler(error_logger), + ok. + +init_per_group(std, Config) -> + ok = logger:set_handler_config( + error_logger,filters, + [{domain,{fun logger_filters:domain/2,{log,super,[otp]}}}]), + Config; +init_per_group(sasl, Config) -> + %% Since default level is notice, and progress reports are info, + %% we need to raise the global logger level to info in order to + %% receive these. + ok = logger:set_primary_config(level,info), + ok = logger:set_handler_config( + error_logger,filters, + [{domain,{fun logger_filters:domain/2,{log,super,[otp,sasl]}}}]), + + %% cth_log_redirect checks if sasl is started before displaying + %% any sasl reports - so just to see the real sasl reports in tc + %% log: + {ok,Apps} = application:ensure_all_started(sasl), + [{stop_apps,Apps}|Config]; +init_per_group(_Group, Config) -> + Config. + +end_per_group(sasl, Config) -> + Apps = ?config(stop_apps,Config), + [application:stop(App) || App <- Apps], + ok = logger:set_primary_config(level,notice), + ok; +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + error_logger:add_report_handler(?MODULE,{event_handler,self()}), + Config. 
+ +end_per_testcase(Case, Config) -> + %% Using gen_event directly here, instead of + %% error_logger:delete_report_handler. This is to avoid + %% automatically stopping the error_logger process due to removing + %% the last handler. + gen_event:delete_handler(error_logger,?MODULE,[]), + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + [{std,[],[gen_server, + gen_event, + gen_fsm, + gen_statem]}, + {sasl,[],[sasl_reports, + supervisor_handle_info]}]. + +all() -> + [{group,std}, + {group,sasl}]. + +gen_server(_Config) -> + {ok,Pid} = gen_server:start(?MODULE,gen_server,[]), + Msg = fun() -> erlang:error({badmatch,b}) end, + Pid ! Msg, + ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}), + ok = gen_server:cast(Pid,Msg), + ?check({error,"** Generic server ~tp terminating"++_, + [Pid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}). + +gen_event(_Config) -> + {ok,Pid} = gen_event:start(), + ok = gen_event:add_handler(Pid,?MODULE,gen_event), + Msg = fun() -> erlang:error({badmatch,b}) end, + Pid ! Msg, + ?check({warning_msg,"** Undefined handle_info in ~tp"++_,[?MODULE,Msg]}), + gen_event:notify(Pid,Msg), + ?check({error,"** gen_event handler ~p crashed."++_, + [?MODULE,Pid,Msg,gen_event,{{badmatch,b},_}]}). + +gen_fsm(_Config) -> + {ok,Pid} = gen_fsm:start(?MODULE,gen_fsm,[]), + Msg = fun() -> erlang:error({badmatch,b}) end, + Pid ! Msg, + ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}), + gen_fsm:send_all_state_event(Pid,Msg), + ?check({error,"** State machine ~tp terminating"++_, + [Pid,Msg,mystate,gen_fsm,{{badmatch,b},_}]}). + +gen_statem(_Config) -> + {ok,Pid} = gen_statem:start(?MODULE,gen_statem,[]), + Msg = fun() -> erlang:error({badmatch,b}) end, + Pid ! Msg, + ?check({error,"** State machine ~tp terminating"++_, + [Pid,{info,Msg},{mystate,gen_statem},error,{badmatch,b}|_]}). 
+ +sasl_reports(Config) -> + App = {application,?MODULE,[{description, ""}, + {vsn, "1.0"}, + {modules, [?MODULE]}, + {registered, []}, + {applications, []}, + {mod, {?MODULE, []}}]}, + AppStr = io_lib:format("~p.",[App]), + Dir = ?config(priv_dir,Config), + AppFile = filename:join(Dir,?MODULE_STRING++".app"), + ok = file:write_file(AppFile,AppStr), + true = code:add_patha(Dir), + ok = application:start(?MODULE), + SupName = sup_name(), + Pid = whereis(SupName), + [{ch,ChPid,_,_}] = supervisor:which_children(Pid), + Node = node(), + ?check_no_flush({info_report,progress,[{application,?MODULE}, + {started_at,Node}]}), + ?check({info_report,progress,[{supervisor,{local,SupName}}, + {started,[{pid,ChPid}|_]}]}), + ok = gen_server:cast(ChPid, fun() -> + spawn_link(fun() -> receive x->ok end end) + end), + Msg = fun() -> erlang:error({badmatch,b}) end, + ok = gen_server:cast(ChPid,Msg), + ?check_no_flush({error,"** Generic server ~tp terminating"++_, + [ChPid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}), + ?check_no_flush({error_report,crash_report, + [[{initial_call,_}, + {pid,ChPid}, + {registered_name,[]}, + {error_info,{error,{badmatch,b},_}}, + {ancestors,_}, + {message_queue_len,_}, + {messages,_}, + {links,[Pid,Neighbour]}, + {dictionary,_}, + {trap_exit,_}, + {status,_}, + {heap_size,_}, + {stack_size,_}, + {reductions,_}], + [{neighbour,[{pid,Neighbour}, + {registered_name,_}, + {initial_call,_}, + {current_function,_}, + {ancestors,_}, + {message_queue_len,_}, + {links,[ChPid]}, + {trap_exit,_}, + {status,_}, + {heap_size,_}, + {stack_size,_}, + {reductions,_}, + {current_stacktrace,_}]}]]}), + ?check_no_flush({error_report,supervisor_report, + [{supervisor,{local,SupName}}, + {errorContext,child_terminated}, + {reason,{{badmatch,b},_}}, + {offender,[{pid,ChPid}|_]}]}), + ?check({info_report,progress,[{supervisor,{local,SupName}}, + {started,_}]}), + + ok = application:stop(?MODULE), + ?check({info_report,std_info,[{application,?MODULE}, + {exited,stopped}, + {type,temporary}]}). + +sasl_reports(cleanup,_Config) -> + application:stop(?MODULE). + +supervisor_handle_info(_Config) -> + {ok,Pid} = supervisor:start_link({local,sup_name()},?MODULE,supervisor), + ?check({info_report,progress,[{supervisor,_},{started,_}]}), + Pid ! msg, + ?check({error,"Supervisor received unexpected message: ~tp~n",[msg]}). + +supervisor_handle_info(cleanup,_Config) -> + Pid = whereis(sup_name()), + unlink(Pid), + exit(Pid,shutdown). + +%%%----------------------------------------------------------------- +%%% Callbacks for error_logger event handler, gen_server, gen_statem, +%%% gen_fsm, gen_event, supervisor and application. +start(_,_) -> + supervisor:start_link({local,sup_name()},?MODULE,supervisor). + +init(supervisor) -> + {ok,{#{},[#{id=>ch,start=>{gen_server,start_link,[?MODULE,gen_server,[]]}}]}}; +init(StateMachine) when StateMachine==gen_statem; StateMachine==gen_fsm -> + {ok,mystate,StateMachine}; +init(State) -> + {ok,State}. + +%% error_logger event handler +handle_event({Tag,_Gl,{_Pid,Type,Report}},{_,Pid}=State) -> + Pid ! {Tag,Type,Report}, + {ok,State}; +%% other gen_event +handle_event(Fun,State) when is_function(Fun) -> + Fun(), + {next_state,State}. + +%% gen_fsm +handle_event(Fun,State,Data) when is_function(Fun) -> + Fun(), + {next_state,State,Data}. + +%% gen_statem +handle_event(info,Fun,State,Data) when is_function(Fun) -> + Fun(), + {next_state,State,Data}. + +%% gen_server +handle_cast(Fun,State) when is_function(Fun) -> + Fun(), + {noreply,State}. 
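The handle_event/2 clauses above double as the legacy receiver this suite asserts on: error_logger events reach a report handler as {Tag, GroupLeader, {Pid, Type, Report}}, which is the pre-logger format the suite is protecting. A free-standing report handler built on the same pattern might look like this (module name and output are illustrative):

    -module(my_legacy_handler).
    -behaviour(gen_event).
    -export([init/1, handle_event/2, handle_call/2]).

    init(Arg) -> {ok, Arg}.

    %% error_logger reports arrive in the legacy three-tuple format.
    handle_event({error_report, _Gl, {Pid, Type, Report}}, State) ->
        io:format("report ~w from ~w: ~p~n", [Type, Pid, Report]),
        {ok, State};
    handle_event(_Other, State) ->
        {ok, State}.

    handle_call(_Request, State) -> {ok, ok, State}.

    %% Installed with error_logger:add_report_handler(my_legacy_handler, []).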
+ +%% gen_statem +callback_mode() -> + handle_event_function. + +%%%----------------------------------------------------------------- +%%% Internal +sup_name() -> + list_to_atom(?MODULE_STRING++"_sup"). diff --git a/lib/kernel/test/logger_simple_h_SUITE.erl b/lib/kernel/test/logger_simple_h_SUITE.erl new file mode 100644 index 0000000000..e0ad792bdb --- /dev/null +++ b/lib/kernel/test/logger_simple_h_SUITE.erl @@ -0,0 +1,209 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_simple_h_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +-import(logger_test_lib, [setup/2, log/3, sync_and_read/3]). + +-define(check_no_log,[] = test_server:messages_get()). +-define(check(Expected), + receive {log,Expected} -> + [] = test_server:messages_get() + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {expected,Expected}, + {got,test_server:messages_get()}}) + end). + +-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}). +-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]). + +suite() -> + [{timetrap,{seconds,30}}, + {ct_hooks, [logger_test_lib]}]. + +init_per_suite(Config) -> + Hs0 = logger:get_handler_config(), + Hs = lists:keydelete(cth_log_redirect,1,Hs0), + [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs], + Env = [{App,Key,application:get_env(App,Key)} || + {App,Key} <- [{kernel,logger_level}]], + [{env,Env},{logger,Hs}|Config]. + +end_per_suite(Config) -> + [application:set_env(App,Key,Val) || {App,Key,Val} <- ?config(env,Config)], + Hs = ?config(logger,Config), + [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs], + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [start_stop, + replace_default, + replace_file, + replace_disk_log + ]. + +start_stop(_Config) -> + undefined = whereis(logger_simple_h), + register(logger_simple_h,self()), + {error,_} = logger:add_handler(simple, + logger_simple_h, + #{filter_default=>log}), + unregister(logger_simple_h), + ok = logger:add_handler(simple,logger_simple_h,#{filter_default=>log}), + Pid = whereis(logger_simple_h), + true = is_pid(Pid), + ok = logger:remove_handler(simple), + false = is_pid(whereis(logger_simple_h)), + ok. +start_stop(cleanup,_Config) -> + logger:remove_handler(simple). + +%% This testcase just tests that it does not crash, the default handler prints +%% to stdout which we cannot read from in a detached slave. 
+replace_default(Config) -> + + {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]), + log(Node, emergency, [?str]), + log(Node, alert, [?str,[]]), + log(Node, error, [?map_rep]), + log(Node, info, [?keyval_rep]), + log(Node, info, [?keyval_rep++[not_key_val]]), + rpc:call(Node, error_logger, error_report, [some_type,?map_rep]), + rpc:call(Node, error_logger, warning_report, ["some_type",?map_rep]), + log(Node, critical, [?str,[?keyval_rep]]), + log(Node, notice, [["fake",string,"line:",?LINE]]), + + ok = rpc:call(Node, logger, add_handlers, [kernel]), + + ok. + +replace_file(Config) -> + + {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]), + log(Node, emergency, [M1=?str]), + log(Node, alert, [M2=?str,[]]), + log(Node, error, [?map_rep]), + log(Node, warning, [?keyval_rep]), + log(Node, warning, [?keyval_rep++[not_key_val]]), + log(Node, critical, [?str,[?keyval_rep]]), + log(Node, notice, [["fake",string,"line:",?LINE]]), + + File = filename:join(proplists:get_value(priv_dir,Config), + atom_to_list(?FUNCTION_NAME)++".log"), + + ok = rpc:call(Node, logger, add_handlers, + [[{handler, default, logger_std_h, + #{ config => #{ type => {file, File} }, + formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]), + + {ok,Bin} = sync_and_read(Node, file, File), + Lines = [unicode:characters_to_list(L) || + L <- binary:split(Bin,<<"\n">>,[global,trim])], + ["=EMERGENCY REPORT===="++_, + M1, + "=ALERT REPORT===="++_, + M2, + "=ERROR REPORT===="++_, + _, + _, + "=WARNING REPORT===="++_, + _, + _, + "=WARNING REPORT===="++_, + _, + _, + _, + "=CRITICAL REPORT===="++_, + _, + _, + "=NOTICE REPORT===="++_, + _ + ] = Lines, + ok. + +replace_disk_log(Config) -> + + {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]), + log(Node, emergency, [M1=?str]), + log(Node, alert, [M2=?str,[]]), + log(Node, error, [?map_rep]), + log(Node, warning, [?keyval_rep]), + log(Node, warning, [?keyval_rep++[not_key_val]]), + log(Node, critical, [?str,[?keyval_rep]]), + log(Node, notice, [["fake",string,"line:",?LINE]]), + + File = filename:join(proplists:get_value(priv_dir,Config), + atom_to_list(?FUNCTION_NAME)++".log"), + + ok = rpc:call(Node, logger, add_handlers, + [[{handler, default, logger_disk_log_h, + #{ config => #{ file => File }, + formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]), + {ok,Bin} = sync_and_read(Node, disk_log, File), + Lines = [unicode:characters_to_list(L) || + L <- binary:split(Bin,<<"\n">>,[global,trim])], + ["=EMERGENCY REPORT===="++_, + M1, + "=ALERT REPORT===="++_, + M2, + "=ERROR REPORT===="++_, + _, + _, + "=WARNING REPORT===="++_, + _, + _, + "=WARNING REPORT===="++_, + _, + _, + _, + "=CRITICAL REPORT===="++_, + _, + _, + "=NOTICE REPORT===="++_, + _ + ] = Lines, + ok. diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl new file mode 100644 index 0000000000..b6a09f4980 --- /dev/null +++ b/lib/kernel/test/logger_std_h_SUITE.erl @@ -0,0 +1,1718 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_std_h_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). +-include_lib("kernel/src/logger_h_common.hrl"). +-include_lib("stdlib/include/ms_transform.hrl"). +-include_lib("kernel/include/file.hrl"). + +-define(check_no_log, [] = test_server:messages_get()). +-define(check(Expected), + receive + {log,Expected} -> + [] = test_server:messages_get() + after 5000 -> + ct:fail({report_not_received, + {line,?LINE}, + {expected,Expected}, + {got,test_server:messages_get()}}) + end). + +-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(bin(Msg), list_to_binary(Msg++"\n")). +-define(domain,#{domain=>[?MODULE]}). + +suite() -> + [{timetrap,{seconds,30}}, + {ct_hooks,[logger_test_lib]}]. + +init_per_suite(Config) -> + timer:start(), % to avoid progress report + {ok,#{formatter:=OrigFormatter}} = + logger:get_handler_config(?STANDARD_HANDLER), + [{formatter,OrigFormatter}|Config]. + +end_per_suite(Config) -> + {OrigMod,OrigConf} = proplists:get_value(formatter,Config), + logger:set_handler_config(?STANDARD_HANDLER,formatter,{OrigMod,OrigConf}), + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(TestHooksCase, Config) when + TestHooksCase == write_failure; + TestHooksCase == sync_failure -> + case (fun() -> ?TEST_HOOKS_TAB == undefined end)() of + true -> + {skip,"Define the TEST_HOOKS macro to run this test"}; + false -> + ct:print("********** ~w **********", [TestHooksCase]), + Config + end; +init_per_testcase(OPCase, Config) when + OPCase == qlen_kill_new; + OPCase == restart_after -> + case re:run(erlang:system_info(system_version), + "dirty-schedulers-TEST", + [{capture,none}]) of + match -> + {skip,"Overload protection test skipped on dirty-schedulers-TEST"}; + nomatch -> + ct:print("********** ~w **********", [OPCase]), + Config + end; +init_per_testcase(TestCase, Config) -> + ct:print("********** ~w **********", [TestCase]), + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [add_remove_instance_tty, + add_remove_instance_standard_io, + add_remove_instance_standard_error, + add_remove_instance_file1, + add_remove_instance_file2, + default_formatter, + filter_config, + errors, + formatter_fail, + config_fail, + crash_std_h_to_file, + crash_std_h_to_disk_log, + bad_input, + info_and_reset, + reconfig, + file_opts, + sync, + write_failure, + sync_failure, + op_switch_to_sync_file, + op_switch_to_sync_tty, + op_switch_to_drop_file, + op_switch_to_drop_tty, + op_switch_to_flush_file, + op_switch_to_flush_tty, + limit_burst_disabled, + limit_burst_enabled_one, + limit_burst_enabled_period, + kill_disabled, + qlen_kill_new, + qlen_kill_std, + mem_kill_new, + mem_kill_std, + restart_after, + handler_requests_under_load + ]. 
+ +add_remove_instance_tty(_Config) -> + {error,{handler_not_added,{invalid_config,logger_std_h,{type,tty}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{config => #{type => tty}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + ok. + +add_remove_instance_standard_io(_Config) -> + add_remove_instance_nofile(standard_io). +add_remove_instance_standard_io(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_standard_error(_Config) -> + add_remove_instance_nofile(standard_error). +add_remove_instance_standard_error(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_file1(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,"stdlog1.txt"), + Type = {file,Log}, + add_remove_instance_file(Log, Type). +add_remove_instance_file1(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_file2(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,"stdlog2.txt"), + Type = {file,Log,[raw,append]}, + add_remove_instance_file(Log, Type). +add_remove_instance_file2(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_file(Log, Type) -> + ok = logger:add_handler(?MODULE, + logger_std_h, + #{config => #{type => Type}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + Pid = whereis(h_proc_name()), + true = is_pid(Pid), + logger:notice(M1=?msg,?domain), + ?check(M1), + B1 = ?bin(M1), + try_read_file(Log, {ok,B1}, filesync_rep_int()), + ok = logger:remove_handler(?MODULE), + timer:sleep(500), + undefined = whereis(h_proc_name()), + logger:notice(?msg,?domain), + ?check_no_log, + try_read_file(Log, {ok,B1}, filesync_rep_int()), + ok. + +default_formatter(_Config) -> + ok = logger:set_handler_config(?STANDARD_HANDLER,formatter, + {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}), + ct:capture_start(), + logger:notice(M1=?msg), + timer:sleep(100), + ct:capture_stop(), + [Msg] = ct:capture_get(), + match = re:run(Msg,"=NOTICE REPORT====.*\n"++M1,[{capture,none}]), + ok. + +filter_config(_Config) -> + ok = logger:add_handler(?MODULE,logger_std_h,#{}), + {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE), + HConfig = maps:without([handler_pid,mode_tab],HConfig), + + FakeFullHConfig = HConfig#{handler_pid=>self(),mode_tab=>erlang:make_ref()}, + #{config:=HConfig} = + logger_std_h:filter_config(Config#{config=>FakeFullHConfig}), + ok. + +filter_config(cleanup,_Config) -> + logger:remove_handler(?MODULE), + ok. 
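The add_remove_instance cases above cover the handler types logger_std_h accepts: standard_io, standard_error, and {file, Name} (optionally with file modes), while plain tty is rejected at add time. Adding a file-backed instance outside the test would look roughly like this; the handler id, path and formatter options are examples:

    ok = logger:add_handler(my_file_h, logger_std_h,
                            #{config    => #{type => {file, "log/my.log"}},
                              formatter => {logger_formatter, #{single_line => true}}}),
    logger:notice("handler is up"),
    %% force the buffered output to disk, as the tests do before reading the file
    ok = logger_std_h:filesync(my_file_h).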
+ +errors(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,?FUNCTION_NAME), + + ok = logger:add_handler(?MODULE,logger_std_h,#{}), + {error,{already_exist,?MODULE}} = + logger:add_handler(?MODULE,logger_std_h,#{}), + + {error,{not_found,no_such_name}} = logger:remove_handler(no_such_name), + + ok = logger:remove_handler(?MODULE), + {error,{not_found,?MODULE}} = logger:remove_handler(?MODULE), + + {error, + {handler_not_added, + {invalid_config,logger_std_h,{type,faulty_type}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{config => #{type => faulty_type}}), + + case os:type() of + {win32,_} -> + %% No use in testing file access on windows + ok; + _ -> + NoDir = lists:concat(["/",?MODULE,"_dir"]), + {error, + {handler_not_added,{{open_failed,NoDir,eacces},_}}} = + logger:add_handler(myh2,logger_std_h, + #{config=>#{type=>{file,NoDir}}}) + end, + + {error, + {handler_not_added,{{open_failed,Log,_},_}}} = + logger:add_handler(myh3,logger_std_h, + #{config=>#{type=>{file,Log,[bad_file_opt]}}}), + + ok = logger:notice(?msg). + +errors(cleanup,_Config) -> + logger:remove_handler(?MODULE). + +formatter_fail(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,?FUNCTION_NAME), + + %% no formatter + ok = logger:add_handler(?MODULE, + logger_std_h, + #{config => #{type => {file,Log}}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}), + Pid = whereis(h_proc_name()), + true = is_pid(Pid), + H = logger:get_handler_ids(), + true = lists:member(?MODULE,H), + + %% Formatter is added automatically + {ok,#{formatter:={logger_formatter,_}}} = logger:get_handler_config(?MODULE), + logger:notice(M1=?msg,?domain), + Got1 = try_match_file(Log,"[0-9\\+\\-T:\\.]* notice: "++M1,5000), + + ok = logger:set_handler_config(?MODULE,formatter,{nonexistingmodule,#{}}), + logger:notice(M2=?msg,?domain), + Got2 = try_match_file(Log, + escape(Got1)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M2, + 5000), + + ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,crash}), + logger:notice(M3=?msg,?domain), + Got3 = try_match_file(Log, + escape(Got2)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M3, + 5000), + + ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,bad_return}), + logger:notice(?msg,?domain), + try_match_file(Log, + escape(Got3)++"FORMATTER ERROR: bad return value", + 5000), + + %% Check that handler is still alive and was never dead + Pid = whereis(h_proc_name()), + H = logger:get_handler_ids(), + + ok. + +formatter_fail(cleanup,_Config) -> + logger:remove_handler(?MODULE). 
+ +config_fail(_Config) -> + {error,{handler_not_added,{invalid_config,logger_std_h,{bad,bad}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{config => #{bad => bad}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + {error,{handler_not_added,{invalid_config,logger_std_h, + {restart_type,bad}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{config => #{restart_type => bad}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + {error,{handler_not_added,{invalid_levels,{_,1,_}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{config => #{drop_mode_qlen=>1}}), + {error,{handler_not_added,{invalid_levels,{43,42,_}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{config => #{sync_mode_qlen=>43, + drop_mode_qlen=>42}}), + {error,{handler_not_added,{invalid_levels,{_,43,42}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{config => #{drop_mode_qlen=>43, + flush_qlen=>42}}), + + ok = logger:add_handler(?MODULE,logger_std_h, + #{filter_default=>log, + formatter=>{?MODULE,self()}}), + {error,{illegal_config_change,#{config:=#{type:=_}},#{config:=#{type:=_}}}} = + logger:set_handler_config(?MODULE,config, + #{type=>{file,"file"}}), + + {error,{invalid_levels,_}} = + logger:set_handler_config(?MODULE,config, + #{sync_mode_qlen=>100, + flush_qlen=>99}), + {error,{invalid_config,logger_std_h,{filesync_rep_int,2000}}} = + logger:set_handler_config(?MODULE, config, + #{filesync_rep_int => 2000}), + + %% Read-only fields may (accidentially) be included in the change, + %% but it won't take effect + {ok,C} = logger:get_handler_config(?MODULE), + ok = logger:set_handler_config(?MODULE,config, + #{handler_pid=>self(), + mode_tab=>erlang:make_ref()}), + {ok,C} = logger:get_handler_config(?MODULE), + + ok. + +config_fail(cleanup,_Config) -> + logger:remove_handler(?MODULE). + +crash_std_h_to_file(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])), + crash_std_h(Config,?FUNCTION_NAME, + [{handler,default,logger_std_h, + #{ config => #{ type => {file, Log} }}}], + file, Log). +crash_std_h_to_file(cleanup,_Config) -> + crash_std_h(cleanup). + +crash_std_h_to_disk_log(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])), + crash_std_h(Config,?FUNCTION_NAME, + [{handler,default,logger_disk_log_h, + #{ config => #{ file => Log }}}], + disk_log,Log). +crash_std_h_to_disk_log(cleanup,_Config) -> + crash_std_h(cleanup). 
+ +crash_std_h(Config,Func,Var,Type,Log) -> + Dir = ?config(priv_dir,Config), + SysConfig = filename:join(Dir,lists:concat([?MODULE,"_",Func,".config"])), + ok = file:write_file(SysConfig, io_lib:format("[{kernel,[{logger,~p}]}].",[Var])), + Pa = filename:dirname(code:which(?MODULE)), + Name = lists:concat([?MODULE,"_",Func]), + Args = lists:concat([" -config ",filename:rootname(SysConfig)," -pa ",Pa]), + ct:pal("Starting ~p with ~tp", [Name,Args]), + %% Start a node which prints kernel logs to the destination specified by Type + {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]), + HProcName = + case Type of + file -> ?name_to_reg_name(logger_std_h,?STANDARD_HANDLER); + disk_log -> ?name_to_reg_name(logger_disk_log_h,?STANDARD_HANDLER) + end, + Pid = rpc:call(Node,erlang,whereis,[HProcName]), + ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, + {?MODULE,self()}]), + ok = log_on_remote_node(Node,"dummy1"), + ?check("dummy1"), + {ok,Bin1} = sync_and_read(Node,Type,Log), + <<"dummy1\n">> = binary:part(Bin1,{byte_size(Bin1),-7}), + + %% Kill the logger_std_h process + exit(Pid, kill), + + %% Wait a bit, then check that it is gone + timer:sleep(2000), + undefined = rpc:call(Node,erlang,whereis,[HProcName]), + + %% Check that file is not empty + {ok,Bin2} = sync_and_read(Node,Type,Log), + <<"dummy1\n">> = binary:part(Bin2,{byte_size(Bin2),-7}), + ok. + +%% Can not use rpc:call here, since the code would execute on a +%% process with group_leader on this (the calling) node, and thus +%% logger would send the log event to the logger process here instead +%% of logging it itself. +log_on_remote_node(Node,Msg) -> + _ = spawn_link(Node, + fun() -> erlang:group_leader(whereis(user),self()), + logger:notice(Msg) + end), + ok. + + +crash_std_h(cleanup) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. + +sync_and_read(Node,disk_log,Log) -> + rpc:call(Node,logger_disk_log_h,filesync,[?STANDARD_HANDLER]), + case file:read_file(Log ++ ".1") of + {ok,<<>>} -> + timer:sleep(5000), + file:read_file(Log ++ ".1"); + Ok -> + Ok + end; +sync_and_read(Node,file,Log) -> + rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]), + case file:read_file(Log) of + {ok,<<>>} -> + timer:sleep(5000), + file:read_file(Log); + Ok -> + Ok + end. + +bad_input(_Config) -> + {error,{badarg,{filesync,["BadType"]}}} = logger_std_h:filesync("BadType"), + {error,{badarg,{info,["BadType"]}}} = logger_std_h:info("BadType"), + {error,{badarg,{reset,["BadType"]}}} = logger_std_h:reset("BadType"). + + +info_and_reset(_Config) -> + #{id := ?STANDARD_HANDLER} = logger_std_h:info(?STANDARD_HANDLER), + ok = logger_std_h:reset(?STANDARD_HANDLER). 
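The comment above log_on_remote_node/2 states the routing rule these remote-node tests rely on: a log event follows the group leader of the process that logs. A minimal sketch of the two alternatives, with illustrative function names (log_via_rpc/2 would route the event back to the calling node, while log_locally_on/2 keeps it on Node):

%% Illustrative only: a process spawned by rpc:call/4 inherits the
%% caller's group leader, so the remote logger forwards the event back
%% to the calling node instead of writing it with the remote handler.
log_via_rpc(Node, Msg) ->
    rpc:call(Node, logger, notice, [Msg]).

%% Same idea as log_on_remote_node/2 above: rebind the group leader to
%% the remote 'user' process so the event is handled on Node itself.
log_locally_on(Node, Msg) ->
    _ = spawn_link(Node,
                   fun() ->
                           erlang:group_leader(whereis(user), self()),
                           logger:notice(Msg)
                   end),
    ok.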
+ +reconfig(Config) -> + Dir = ?config(priv_dir,Config), + ok = logger:add_handler(?MODULE, + logger_std_h, + #{config => #{type => standard_io}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + #{id := ?MODULE, + type := standard_io, + file_ctrl_pid := FileCtrlPid, + sync_mode_qlen := ?SYNC_MODE_QLEN, + drop_mode_qlen := ?DROP_MODE_QLEN, + flush_qlen := ?FLUSH_QLEN, + burst_limit_enable := ?BURST_LIMIT_ENABLE, + burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT, + burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME, + overload_kill_enable := ?OVERLOAD_KILL_ENABLE, + overload_kill_qlen := ?OVERLOAD_KILL_QLEN, + overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE, + overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER, + filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} = DefaultInfo = + logger_std_h:info(?MODULE), + + {ok, + #{config:= + #{type := standard_io, + sync_mode_qlen := ?SYNC_MODE_QLEN, + drop_mode_qlen := ?DROP_MODE_QLEN, + flush_qlen := ?FLUSH_QLEN, + burst_limit_enable := ?BURST_LIMIT_ENABLE, + burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT, + burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME, + overload_kill_enable := ?OVERLOAD_KILL_ENABLE, + overload_kill_qlen := ?OVERLOAD_KILL_QLEN, + overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE, + overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER, + filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} = + DefaultHConf}} + = logger:get_handler_config(?MODULE), + + ok = logger:set_handler_config(?MODULE, config, + #{sync_mode_qlen => 1, + drop_mode_qlen => 2, + flush_qlen => 3, + burst_limit_enable => false, + burst_limit_max_count => 10, + burst_limit_window_time => 10, + overload_kill_enable => true, + overload_kill_qlen => 100000, + overload_kill_mem_size => 10000000, + overload_kill_restart_after => infinity, + filesync_repeat_interval => no_repeat}), + #{id := ?MODULE, + type := standard_io, + file_ctrl_pid := FileCtrlPid, + sync_mode_qlen := 1, + drop_mode_qlen := 2, + flush_qlen := 3, + burst_limit_enable := false, + burst_limit_max_count := 10, + burst_limit_window_time := 10, + overload_kill_enable := true, + overload_kill_qlen := 100000, + overload_kill_mem_size := 10000000, + overload_kill_restart_after := infinity, + filesync_repeat_interval := no_repeat} = Info = logger_std_h:info(?MODULE), + + {ok,#{config := + #{type := standard_io, + sync_mode_qlen := 1, + drop_mode_qlen := 2, + flush_qlen := 3, + burst_limit_enable := false, + burst_limit_max_count := 10, + burst_limit_window_time := 10, + overload_kill_enable := true, + overload_kill_qlen := 100000, + overload_kill_mem_size := 10000000, + overload_kill_restart_after := infinity, + filesync_repeat_interval := no_repeat} = HConf}} = + logger:get_handler_config(?MODULE), + + ok = logger:update_handler_config(?MODULE, config, + #{flush_qlen => ?FLUSH_QLEN}), + {ok,#{config:=C1}} = logger:get_handler_config(?MODULE), + ct:log("C1: ~p",[C1]), + C1 = HConf#{flush_qlen => ?FLUSH_QLEN}, + + ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}), + {ok,#{config:=C2}} = logger:get_handler_config(?MODULE), + ct:log("C2: ~p",[C2]), + C2 = DefaultHConf#{sync_mode_qlen => 1}, + + ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}), + {ok,#{config:=C3}} = logger:get_handler_config(?MODULE), + ct:log("C3: ~p",[C3]), + C3 = DefaultHConf#{drop_mode_qlen => 100}, + + ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}), + {ok,#{config:=C4}} = 
logger:get_handler_config(?MODULE),
+    ct:log("C4: ~p",[C4]),
+    C4 = DefaultHConf#{sync_mode_qlen => 1,
+                       drop_mode_qlen => 100},
+
+    ok = logger:remove_handler(?MODULE),
+
+    File = filename:join(Dir,lists:concat([?FUNCTION_NAME,".log"])),
+    ok = logger:add_handler(?MODULE,
+                            logger_std_h,
+                            #{config => #{type => {file,File}},
+                              filter_default=>log,
+                              filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+                              formatter=>{?MODULE,self()}}),
+
+    {ok,#{config:=#{filesync_repeat_interval:=FSI}=FileHConfig}} =
+        logger:get_handler_config(?MODULE),
+    ok = logger:update_handler_config(?MODULE,config,
+                                      #{filesync_repeat_interval=>FSI+2000}),
+    {ok,#{config:=C5}} = logger:get_handler_config(?MODULE),
+    ct:log("C5: ~p",[C5]),
+    C5 = FileHConfig#{filesync_repeat_interval=>FSI+2000},
+
+    %% You are not allowed to actively set 'type' at runtime, since
+    %% this is a write-once field.
+    {error, {illegal_config_change,_,_}} =
+        logger:set_handler_config(?MODULE,config,#{type=>standard_io}),
+    {ok,#{config:=C6}} = logger:get_handler_config(?MODULE),
+    ct:log("C6: ~p",[C6]),
+    C6 = C5,
+
+    %% ... but if you don't specify 'type', then set_handler_config shall
+    %% NOT reset it to its default value
+    ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+    {ok,#{config:=C7}} = logger:get_handler_config(?MODULE),
+    ct:log("C7: ~p",[C7]),
+    C7 = FileHConfig#{sync_mode_qlen=>1},
+    ok.
+
+reconfig(cleanup, _Config) ->
+    logger:remove_handler(?MODULE).
+
+
+file_opts(Config) ->
+    Dir = ?config(priv_dir,Config),
+    Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
+    BadFileOpts = [raw],
+    BadType = {file,Log,BadFileOpts},
+    {error,{handler_not_added,{{open_failed,Log,enoent},_}}} =
+        logger:add_handler(?MODULE, logger_std_h,
+                           #{config => #{type => BadType}}),
+
+    OkFileOpts = [raw,append],
+    OkType = {file,Log,OkFileOpts},
+    ok = logger:add_handler(?MODULE,
+                            logger_std_h,
+                            #{config => #{type => OkType},
+                              filter_default=>log,
+                              filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+                              formatter=>{?MODULE,self()}}),
+
+    #{type := OkType} = logger_std_h:info(?MODULE),
+    logger:notice(M1=?msg,?domain),
+    ?check(M1),
+    B1 = ?bin(M1),
+    try_read_file(Log, {ok,B1}, filesync_rep_int()),
+    ok.
+file_opts(cleanup, _Config) ->
+    logger:remove_handler(?MODULE).
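file_opts/1 above exercises the extended file type accepted by logger_std_h, {file,Name,Modes}, where Modes are file open modes. A hedged sketch of how an application could use it (handler id and path are made up for illustration):

%% Assumes the {file, Name, Modes} type exercised by file_opts/1 above;
%% here the log file is opened in raw append mode.
ok = logger:add_handler(my_app_log, logger_std_h,
                        #{config => #{type => {file, "log/my_app.log",
                                               [raw, append]}}}).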
+
+
+sync(Config) ->
+    Dir = ?config(priv_dir,Config),
+    Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
+    Type = {file,Log},
+    ok = logger:add_handler(?MODULE,
+                            logger_std_h,
+                            #{config => #{type => Type},
+                              filter_default=>log,
+                              filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+                              formatter=>{?MODULE,nl}}),
+
+    %% check repeated filesync happens
+    start_tracer([{logger_std_h, write_to_dev, 5},
+                  {logger_std_h, sync_dev, 4},
+                  {file, datasync, 1}],
+                 [{logger_std_h, write_to_dev, <<"first\n">>},
+                  {logger_std_h, sync_dev},
+                  {file,datasync}]),
+
+    logger:notice("first", ?domain),
+    %% wait for automatic filesync
+    check_tracer(filesync_rep_int()*2),
+
+    %% check that explicit filesync is only done once
+    start_tracer([{logger_std_h, write_to_dev, 5},
+                  {logger_std_h, sync_dev, 4},
+                  {file, datasync, 1}],
+                 [{logger_std_h, write_to_dev, <<"second\n">>},
+                  {logger_std_h, sync_dev},
+                  {file,datasync},
+                  {no_more,500}
+                 ]),
+    logger:notice("second", ?domain),
+    %% do explicit sync
+    logger_std_h:filesync(?MODULE),
+    %% a second sync should be ignored
+    logger_std_h:filesync(?MODULE),
+    check_tracer(100),
+
+    %% check that if there's no repeated filesync active,
+    %% a filesync is still performed when the handler goes idle
+    ok = logger:update_handler_config(?MODULE, config,
+                                      #{filesync_repeat_interval => no_repeat}),
+    no_repeat = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
+    %% The following timer is to make sure the time from last log
+    %% ("second") to next ("third") is long enough, so that a flush is
+    %% triggered by the idle timeout between "third" and "fourth".
+    timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+    start_tracer([{logger_std_h, write_to_dev, 5},
+                  {logger_std_h, sync_dev, 4},
+                  {file, datasync, 1}],
+                 [{logger_std_h, write_to_dev, <<"third\n">>},
+                  {logger_std_h, sync_dev},
+                  {file,datasync},
+                  {logger_std_h, write_to_dev, <<"fourth\n">>},
+                  {logger_std_h, sync_dev},
+                  {file,datasync}]),
+    logger:notice("third", ?domain),
+    %% wait for automatic filesync
+    timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+    logger:notice("fourth", ?domain),
+    %% wait for automatic filesync
+    check_tracer(?IDLE_DETECT_TIME_MSEC*2),
+
+    %% switch repeated filesync on and verify that the looping works
+    SyncInt = 1000,
+    WaitT = 4500,
+    OneSync = {logger_std_h,handle_cast,repeated_filesync},
+    %% receive 1 initial repeated_filesync, then 1 per sec
+    start_tracer([{logger_std_h,handle_cast,2}],
+                 [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+
+    ok = logger:update_handler_config(?MODULE, config,
+                                      #{filesync_repeat_interval => SyncInt}),
+    SyncInt = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
+    timer:sleep(WaitT),
+    ok = logger:update_handler_config(?MODULE, config,
+                                      #{filesync_repeat_interval => no_repeat}),
+    check_tracer(100),
+    ok.
+sync(cleanup, _Config) ->
+    dbg:stop_clear(),
+    logger:remove_handler(?MODULE).
+ +write_failure(Config) -> + Dir = ?config(priv_dir, Config), + File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), + Log = filename:join(Dir, File), + Node = start_std_h_on_new_node(Config, Log), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [file_write,ok]), + + ok = log_on_remote_node(Node, "Logged1"), + rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]), + ?check_no_log, + try_read_file(Log, {ok,<<"Logged1\n">>}, filesync_rep_int()), + + rpc:call(Node, ?MODULE, set_result, [file_write,{error,terminated}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + ?check({error,{?STANDARD_HANDLER,write,Log,{error,terminated}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [file_write,{error,eacces}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + ?check({error,{?STANDARD_HANDLER,write,Log,{error,eacces}}}), + + rpc:call(Node, ?MODULE, set_result, [file_write,ok]), + ok = log_on_remote_node(Node, "Logged2"), + rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]), + ?check_no_log, + try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, filesync_rep_int()), + ok. +write_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. + +sync_failure(Config) -> + Dir = ?config(priv_dir, Config), + File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), + Log = filename:join(Dir, File), + Node = start_std_h_on_new_node(Config, Log), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]), + + SyncInt = 500, + ok = rpc:call(Node, logger, update_handler_config, + [?STANDARD_HANDLER, config, + #{filesync_repeat_interval => SyncInt}]), + Info = rpc:call(Node, logger_std_h, info, [?STANDARD_HANDLER]), + SyncInt = maps:get(filesync_repeat_interval, Info), + + ok = log_on_remote_node(Node, "Logged1"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,terminated}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,terminated}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,eacces}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,eacces}}}), + + rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]), + ok = log_on_remote_node(Node, "Logged2"), + ?check_no_log, + ok. +sync_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. + +start_std_h_on_new_node(Config, Log) -> + {ok,_,Node} = + logger_test_lib:setup( + Config, + [{logger,[{handler,default,logger_std_h, + #{ config => #{ type => {file,Log}}}}]}]), + ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, + {?MODULE,nl}]), + Node. + +%% functions for test hook macros to be called by rpc +set_internal_log(_Mod, _Func) -> + ?set_internal_log({_Mod,_Func}). +set_result(_Op, _Result) -> + ?set_result(_Op, _Result). +set_defaults() -> + ?set_defaults(). 
+
+%% internal log function that sends the term to the test case process
+internal_log(Type, Term) ->
+    [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester),
+    Tester ! {log,{Type,Term}},
+    logger:internal_log(Type, Term),
+    ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Overload protection tests
+
+op_switch_to_sync_file(Config) ->
+    {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+    NumOfReqs = 500,
+    NewHConfig =
+        HConfig#{config => StdHConfig#{sync_mode_qlen => 2,
+                                       drop_mode_qlen => NumOfReqs+1,
+                                       flush_qlen => 2*NumOfReqs,
+                                       burst_limit_enable => false}},
+    ok = logger:update_handler_config(?MODULE, NewHConfig),
+    %% TRecvPid = start_op_trace(),
+    send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+    Lines = count_lines(Log),
+    %% true = analyse_trace(TRecvPid,
+    %%                      fun(Events) -> find_mode(async,Events) end),
+    %% true = analyse_trace(TRecvPid,
+    %%                      fun(Events) -> find_mode(sync,Events) end),
+    %% true = analyse_trace(TRecvPid,
+    %%                      fun(Events) -> find_switch(async,sync,Events) end),
+    %% false = analyse_trace(TRecvPid,
+    %%                       fun(Events) -> find_mode(drop,Events) end),
+    %% false = analyse_trace(TRecvPid,
+    %%                       fun(Events) -> find_mode(flush,Events) end),
+    %% stop_op_trace(TRecvPid),
+    NumOfReqs = Lines,
+    ok = file_delete(Log),
+    ok.
+op_switch_to_sync_file(cleanup, _Config) ->
+    ok = stop_handler(?MODULE).
+
+op_switch_to_sync_tty(Config) ->
+    {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+    NumOfReqs = 500,
+    NewHConfig =
+        HConfig#{config => StdHConfig#{sync_mode_qlen => 3,
+                                       drop_mode_qlen => NumOfReqs+1,
+                                       flush_qlen => 2*NumOfReqs,
+                                       burst_limit_enable => false}},
+    ok = logger:update_handler_config(?MODULE, NewHConfig),
+    send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+    ok.
+op_switch_to_sync_tty(cleanup, _Config) ->
+    ok = stop_handler(?MODULE).
+
+op_switch_to_drop_file() ->
+    [{timetrap,{seconds,180}}].
+op_switch_to_drop_file(Config) ->
+    Test =
+        fun() ->
+                {Log,HConfig,StdHConfig} =
+                    start_handler(?MODULE, ?FUNCTION_NAME, Config),
+                NumOfReqs = 300,
+                Procs = 2,
+                Bursts = 10,
+                NewHConfig =
+                    HConfig#{config =>
+                                 StdHConfig#{sync_mode_qlen => 1,
+                                             drop_mode_qlen => 2,
+                                             flush_qlen =>
+                                                 Procs*NumOfReqs*Bursts,
+                                             burst_limit_enable => false}},
+                ok = logger:update_handler_config(?MODULE, NewHConfig),
+                %% It sometimes happens that the handler receives the
+                %% requests at a slow enough pace that dropping
+                %% never occurs. Therefore, let's generate a number of
+                %% bursts to increase the chance of message buildup.
+                [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) ||
+                    _ <- lists:seq(1, Bursts)],
+                Logged = count_lines(Log),
+                ok = stop_handler(?MODULE),
+                ct:pal("Number of messages dropped = ~w (~w)",
+                       [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]),
+                true = (Logged < (Procs*NumOfReqs*Bursts)),
+                true = (Logged > 0),
+                _ = file_delete(Log),
+                ok
+        end,
+    %% As it's tricky to get the timing right in only one go, we perform the
+    %% test repeatedly, hoping that this will generate a successful result.
+    case repeat_until_ok(Test, 10) of
+        {ok,{Failures,_Result}} ->
+            ct:log("Failed ~w times before success!", [Failures]);
+        {fails,Reason} ->
+            ct:fail(Reason)
+    end.
+op_switch_to_drop_file(cleanup, _Config) ->
+    _ = stop_handler(?MODULE).
+
+op_switch_to_drop_tty(Config) ->
+    {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+    NumOfReqs = 300,
+    Procs = 2,
+    NewHConfig =
+        HConfig#{config => StdHConfig#{sync_mode_qlen => 1,
+                                       drop_mode_qlen => 2,
+                                       flush_qlen =>
+                                           Procs*NumOfReqs+1,
+                                       burst_limit_enable => false}},
+    ok = logger:update_handler_config(?MODULE, NewHConfig),
+    send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+    ok.
+op_switch_to_drop_tty(cleanup, _Config) ->
+    ok = stop_handler(?MODULE).
+
+op_switch_to_flush_file() ->
+    [{timetrap,{minutes,5}}].
+op_switch_to_flush_file(Config) ->
+    Test =
+        fun() ->
+                {Log,HConfig,StdHConfig} =
+                    start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+                %% NOTE: it's important that both async and sync
+                %% requests have been queued when the flush happens
+                %% (verify with coverage of flush_log_requests/2)
+
+                NewHConfig =
+                    HConfig#{config =>
+                                 StdHConfig#{sync_mode_qlen => 2,
+                                             %% disable drop mode
+                                             drop_mode_qlen => 300,
+                                             flush_qlen => 300,
+                                             burst_limit_enable => false}},
+                ok = logger:update_handler_config(?MODULE, NewHConfig),
+                NumOfReqs = 1500,
+                Procs = 10,
+                Bursts = 10,
+                %% It sometimes happens that the handler either receives
+                %% the requests at a slow enough pace that flushing
+                %% never occurs, or it gets all messages at once,
+                %% causing all messages to get flushed (no dropping of
+                %% sync messages gets tested). Therefore, let's
+                %% generate a number of bursts to increase the chance
+                %% of message buildup in some random fashion.
+                [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) ||
+                    _ <- lists:seq(1,Bursts)],
+                Logged = count_lines(Log),
+                ok = stop_handler(?MODULE),
+                ct:pal("Number of messages flushed/dropped = ~w (~w)",
+                       [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]),
+                true = (Logged < (NumOfReqs*Procs*Bursts)),
+                true = (Logged > 0),
+                _ = file_delete(Log),
+                ok
+        end,
+    %% As it's tricky to get the timing right in only one go, we perform the
+    %% test repeatedly, hoping that this will generate a successful result.
+    case repeat_until_ok(Test, 10) of
+        {ok,{Failures,_Result}} ->
+            ct:log("Failed ~w times before success!", [Failures]);
+        {fails,Reason} ->
+            ct:fail(Reason)
+    end.
+op_switch_to_flush_file(cleanup, _Config) ->
+    _ = stop_handler(?MODULE).
+
+op_switch_to_flush_tty() ->
+    [{timetrap,{minutes,5}}].
+op_switch_to_flush_tty(Config) ->
+    {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+
+    %% it's important that both async and sync requests have been queued
+    %% when the flush happens (verify with coverage of flush_log_requests/2)
+
+    NewHConfig =
+        HConfig#{config => StdHConfig#{sync_mode_qlen => 2,
+                                       %% disable drop mode
+                                       drop_mode_qlen => 100,
+                                       flush_qlen => 100,
+                                       burst_limit_enable => false}},
+    ok = logger:update_handler_config(?MODULE, NewHConfig),
+    NumOfReqs = 1000,
+    Procs = 100,
+    send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+    ok.
+op_switch_to_flush_tty(cleanup, _Config) ->
+    ok = stop_handler(?MODULE).
+ +limit_burst_disabled(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{config => StdHConfig#{burst_limit_enable => false, + burst_limit_max_count => 10, + burst_limit_window_time => 2000, + drop_mode_qlen => 200, + flush_qlen => 300}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + NumOfReqs = Logged, + ok = file_delete(Log), + ok. +limit_burst_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +limit_burst_enabled_one(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + NewHConfig = + HConfig#{config => StdHConfig#{burst_limit_enable => true, + burst_limit_max_count => ReqLimit, + burst_limit_window_time => 2000, + drop_mode_qlen => 200, + flush_qlen => 300}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ReqLimit = Logged, + ok = file_delete(Log), + ok. +limit_burst_enabled_one(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +limit_burst_enabled_period(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + BurstTWin = 1000, + NewHConfig = + HConfig#{config => StdHConfig#{burst_limit_enable => true, + burst_limit_max_count => ReqLimit, + burst_limit_window_time => BurstTWin, + drop_mode_qlen => 20000, + flush_qlen => 20001}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + + Windows = 3, + Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + true = (Logged > (ReqLimit*Windows)) andalso + (Logged < (ReqLimit*(Windows+2))), + ok = file_delete(Log), + ok. +limit_burst_enabled_period(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +kill_disabled(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{config=>StdHConfig#{overload_kill_enable=>false, + overload_kill_qlen=>10, + overload_kill_mem_size=>100}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file_delete(Log), + true = is_pid(whereis(h_proc_name())), + ok. +kill_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
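The three limit_burst_* cases above all drive the same handler options. For reference, a sketch of enabling burst limiting on the default handler (values are illustrative, not recommendations):

%% Allow at most 500 events per 1000 ms window; events beyond that are
%% dropped by the handler, which is the behaviour the tests above count.
ok = logger:update_handler_config(default, config,
                                  #{burst_limit_enable => true,
                                    burst_limit_max_count => 500,
                                    burst_limit_window_time => 1000}).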
+ +qlen_kill_new(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + Pid0 = whereis(h_proc_name()), + {_,Mem0} = process_info(Pid0, memory), + RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER, + NewHConfig = + HConfig#{config=>StdHConfig#{overload_kill_enable=>true, + overload_kill_qlen=>10, + overload_kill_mem_size=>Mem0+50000, + overload_kill_restart_after=>RestartAfter}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + MRef = erlang:monitor(process, Pid0), + NumOfReqs = 100, + Procs = 4, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice), + %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + receive + {'DOWN', MRef, _, _, Info} -> + case Info of + {shutdown,{overloaded,?MODULE,QLen,Mem}} -> + ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]); + killed -> + ct:pal("Slow shutdown, handler process was killed!", []) + end, + file_delete(Log), + {ok,_} = wait_for_process_up(RestartAfter * 3), + ok + after + 5000 -> + Info = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info]), + ct:fail("Handler not dead! It should not have survived this!") + end. +qlen_kill_new(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +%% choke the standard handler on remote node to verify the termination +%% works as expected +qlen_kill_std(_Config) -> + %%! HERE + %% Dir = ?config(priv_dir, Config), + %% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), + %% Log = filename:join(Dir, File), + %% Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log), + %% ok = rpc:call(Node, logger, update_handler_config, + %% [?STANDARD_HANDLER, config, + %% #{overload_kill_enable=>true, + %% overload_kill_qlen=>10, + %% overload_kill_mem_size=>100000}]), + {skip,"Not done yet"}. + +mem_kill_new(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + Pid0 = whereis(h_proc_name()), + {_,Mem0} = process_info(Pid0, memory), + RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER, + NewHConfig = + HConfig#{config=>StdHConfig#{overload_kill_enable=>true, + overload_kill_qlen=>50000, + overload_kill_mem_size=>Mem0+500, + overload_kill_restart_after=>RestartAfter}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + MRef = erlang:monitor(process, Pid0), + NumOfReqs = 100, + Procs = 4, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice), + %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + receive + {'DOWN', MRef, _, _, Info} -> + case Info of + {shutdown,{overloaded,?MODULE,QLen,Mem}} -> + ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]); + killed -> + ct:pal("Slow shutdown, handler process was killed!", []) + end, + file_delete(Log), + {ok,_} = wait_for_process_up(RestartAfter * 3), + ok + after + 5000 -> + Info = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info]), + ct:fail("Handler not dead! It should not have survived this!") + end. +mem_kill_new(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +%% choke the standard handler on remote node to verify the termination +%% works as expected +mem_kill_std(_Config) -> + {skip,"Not done yet"}. + +restart_after() -> + [{timetrap,{minutes,2}}]. 
+restart_after(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig1 = + HConfig#{config=>StdHConfig#{overload_kill_enable=>true, + overload_kill_qlen=>10, + overload_kill_restart_after=>infinity}}, + ok = logger:update_handler_config(?MODULE, NewHConfig1), + MRef1 = erlang:monitor(process, whereis(h_proc_name())), + %% kill handler + send_burst({n,100}, {spawn,4,0}, {chars,79}, notice), + receive + {'DOWN', MRef1, _, _, _Reason1} -> + file_delete(Log), + error = wait_for_process_up(?OVERLOAD_KILL_RESTART_AFTER * 3), + ok + after + 5000 -> + Info1 = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info1]), + ct:fail("Handler not dead! It should not have survived this!") + end, + + {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER, + + NewHConfig2 = + HConfig#{config=>StdHConfig#{overload_kill_enable=>true, + overload_kill_qlen=>10, + overload_kill_restart_after=>RestartAfter}}, + ok = logger:update_handler_config(?MODULE, NewHConfig2), + Pid0 = whereis(h_proc_name()), + MRef2 = erlang:monitor(process, Pid0), + %% kill handler + send_burst({n,100}, {spawn,4,0}, {chars,79}, notice), + receive + {'DOWN', MRef2, _, _, _Reason2} -> + file_delete(Log), + {ok,Pid1} = wait_for_process_up(RestartAfter * 3), + false = (Pid1 == Pid0), + ok + after + 5000 -> + Info2 = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info2]), + ct:fail("Handler not dead! It should not have survived this!") + end, + ok. +restart_after(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +%% send handler requests (sync, info, reset, change_config) +%% during high load to verify that sync, dropping and flushing is +%% handled correctly. +handler_requests_under_load() -> + [{timetrap,{minutes,3}}]. +handler_requests_under_load(Config) -> + {Log,HConfig,StdHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{config => StdHConfig#{sync_mode_qlen => 2, + drop_mode_qlen => 1000, + flush_qlen => 2000, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]}, + {info,[]}, + {reset,[]}, + {change_config,[]}]) + end), + Sent = send_burst({t,10000}, seq, {chars,79}, notice), + Pid ! {self(),finish}, + ReqResult = receive {Pid,Result} -> Result end, + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + FindError = fun(Res) -> + [E || E <- Res, + is_tuple(E) andalso (element(1,E) == error)] + end, + Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult], + NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult), + ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]), + ok = file_delete(Log). +handler_requests_under_load(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) -> + receive + {From,finish} -> + From ! {self(),Reqs} + after + TO -> + Result = + case Req of + change_config -> + logger:update_handler_config(HName, config, + #{overload_kill_enable => + false}); + Func -> + logger_std_h:Func(HName) + end, + send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}]) + end. 
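The kill/restart cases above exercise the handler's overload-kill options. A short sketch of the corresponding configuration, with illustrative values:

%% If the handler's queue grows past 10000 messages or its memory use
%% passes ~30 MB, it terminates itself and is restarted after 5000 ms,
%% which is the behaviour restart_after/1 above verifies.
ok = logger:update_handler_config(default, config,
                                  #{overload_kill_enable => true,
                                    overload_kill_qlen => 10000,
                                    overload_kill_mem_size => 30000000,
                                    overload_kill_restart_after => 5000}).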
+ + +%%%----------------------------------------------------------------- +%%% +start_handler(Name, TTY, Config) when TTY == standard_io; + TTY == standard_error-> + ok = logger:add_handler(Name, + logger_std_h, + #{config => #{type => TTY}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([Name]), + formatter=>{?MODULE,op}}), + {ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name), + {HConfig,StdHConfig}; + +start_handler(Name, FuncName, Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir, lists:concat([FuncName,".log"])), + ct:pal("Logging to ~tp", [Log]), + Type = {file,Log}, + _ = file_delete(Log), + ok = logger:add_handler(Name, + logger_std_h, + #{config => #{type => Type}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([Name]), + formatter=>{?MODULE,op}}), + {ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name), + {Log,HConfig,StdHConfig}. + +stop_handler(Name) -> + R = logger:remove_handler(Name), + ct:pal("Handler ~p stopped! Result: ~p", [Name,R]), + R. + +count_lines(File) -> + wait_until_written(File, -1), + count_lines1(File). + +wait_until_written(File, Sz) -> + timer:sleep(2000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz}} -> + timer:sleep(1000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz}} -> + ok; + {ok,#file_info{size = Sz1}} -> + wait_until_written(File, Sz1) + end; + {ok,#file_info{size = Sz1}} -> + wait_until_written(File, Sz1) + end. + +count_lines1(File) -> + {_,Dev} = file:open(File, [read]), + Lines = count_lines2(Dev, 0), + file:close(Dev), + Lines. + +count_lines2(Dev, LC) -> + case file:read_line(Dev) of + {ok,"Handler logger_std_h_SUITE " ++_} -> + %% Not counting handler info + count_lines2(Dev,LC); + {ok,_} -> + count_lines2(Dev,LC+1); + eof -> LC + end. + +send_burst(NorT, Type, {chars,Sz}, Class) -> + Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)], + case NorT of + {n,N} -> + %% process_flag(priority, high), + send_n_burst(N, Type, Text, Class), + %% process_flag(priority, normal), + N; + {t,T} -> + ct:pal("Sending messages sequentially for ~w ms", [T]), + T0 = erlang:monotonic_time(millisecond), + send_t_burst(T0, T, Text, Class, 0) + end. + +send_n_burst(0, _, _Text, _Class) -> + ok; +send_n_burst(N, seq, Text, Class) -> + ok = logger:Class(Text, ?domain), + send_n_burst(N-1, seq, Text, Class); +send_n_burst(N, {spawn,Ps,TO}, Text, Class) -> + ct:pal("~w processes each sending ~w messages", [Ps,N]), + MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end, + monitor(process,spawn_link(per_proc_fun(N,Text,Class,X))) + end || X <- lists:seq(1,Ps)], + lists:foreach(fun(MRef) -> + receive + {'DOWN', MRef, _, _, _} -> + ok + end + end, MRefs), + ct:pal("Message burst sent", []), + ok. + +send_t_burst(T0, T, Text, Class, N) -> + T1 = erlang:monotonic_time(millisecond), + if (T1-T0) > T -> + N; + true -> + ok = logger:Class(Text, ?domain), + send_t_burst(T0, T, Text, Class, N+1) + end. + +per_proc_fun(N,Text,Class,X) when X rem 2 == 0 -> + fun() -> + process_flag(priority,high), + send_n_burst(N, seq, Text, Class) + end; +per_proc_fun(N,Text,Class,_) -> + fun() -> + send_n_burst(N, seq, Text, Class) + end. + +%%%----------------------------------------------------------------- +%%% Formatter callback +%%% Using this to send the formatted string back to the test case +%%% process - so it can check for logged events. 
+format(_,bad_return) -> + bad_return; +format(_,crash) -> + erlang:error(formatter_crashed); +format(#{msg:={string,String0}},no_nl) -> + String = unicode:characters_to_list(String0), + String; +format(#{msg:={string,String0}},nl) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={string,String0}},op) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={report,#{label:={supervisor,progress}}}},op) -> + ""; +format(#{msg:={report,#{label:={gen_server,terminate}}}},op) -> + ""; +format(#{msg:={report,#{label:={proc_lib,crash}}}},op) -> + ""; +format(#{msg:={F,A}},OpOrPid) when is_list(F), is_list(A) -> + String = lists:flatten(io_lib:format(F,A)), + if is_pid(OpOrPid) -> OpOrPid ! {log,String}; + true -> ok + end, + String++"\n"; +format(#{msg:={string,String0}},Pid) -> + String = unicode:characters_to_list(String0), + Pid ! {log,String}, + String++"\n". + +add_remove_instance_nofile(Type) -> + ok = logger:add_handler(?MODULE,logger_std_h, + #{config => #{type => Type}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + Pid = whereis(h_proc_name()), + true = is_pid(Pid), + group_leader(group_leader(),Pid), % to get printouts in test log + logger:notice(M1=?msg,?domain), + ?check(M1), + %% check that sync doesn't do damage even if not relevant + ok = logger_std_h:filesync(?MODULE), + ok = logger:remove_handler(?MODULE), + timer:sleep(500), + undefined = whereis(h_proc_name()), + logger:notice(?msg,?domain), + ?check_no_log, + ok. + +logger_std_h_remove() -> + logger:remove_handler(?MODULE). +logger_std_h_remove(Id) -> + logger:remove_handler(Id). + +try_read_file(FileName, Expected, Time) when Time > 0 -> + case file:read_file(FileName) of + Expected -> + ok; + Error = {error,_Reason} -> + ct:pal("Can't read ~tp: ~tp", [FileName,Error]), + erlang:error(Error); + Got -> + ct:pal("try_read_file got ~tp", [Got]), + timer:sleep(500), + try_read_file(FileName, Expected, Time-500) + end; +try_read_file(FileName, Expected, _) -> + ct:pal("Missing pattern ~tp in ~tp", [Expected,FileName]), + erlang:error({error,missing_expected_pattern}). + +try_match_file(FileName, Pattern, Time) -> + try_match_file(FileName, Pattern, Time, <<>>). + +try_match_file(FileName, Pattern, Time, _) when Time > 0 -> + case file:read_file(FileName) of + {ok, Bin} -> + case re:run(Bin,Pattern,[{capture,none}]) of + match -> + unicode:characters_to_list(Bin); + _ -> + timer:sleep(100), + try_match_file(FileName, Pattern, Time-100, Bin) + end; + Error -> + erlang:error(Error) + end; +try_match_file(_,Pattern,_,Incorrect) -> + ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n", + [Pattern,Incorrect]), + erlang:error({error,not_matching_pattern,Pattern,Incorrect}). + +repeat_until_ok(Fun, N) -> + repeat_until_ok(Fun, 0, N, undefined). + +repeat_until_ok(_Fun, Stop, Stop, Reason) -> + {fails,Reason}; + +repeat_until_ok(Fun, C, Stop, FirstReason) -> + if C > 0 -> timer:sleep(5000); + true -> ok + end, + try Fun() of + Result -> + {ok,{C,Result}} + catch + _:Reason:Stack -> + ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]), + if FirstReason == undefined -> + repeat_until_ok(Fun, C+1, Stop, {Reason,Stack}); + true -> + repeat_until_ok(Fun, C+1, Stop, FirstReason) + end + end. + + +%%%----------------------------------------------------------------- +%%% +start_op_trace() -> + TraceFun = fun({trace,_,call,{_Mod,Func,Details}}, Pid) -> + Pid ! 
{trace_call,Func,Details}, + Pid; + ({trace,_,return_from,{_Mod,Func,_},RetVal}, Pid) -> + Pid ! {trace_return,Func,RetVal}, + Pid + end, + TRecvPid = spawn_link(fun() -> trace_receiver(5000) end), + {ok,_} = dbg:tracer(process, {TraceFun, TRecvPid}), + + {ok,_} = dbg:p(whereis(h_proc_name()), [c]), + {ok,_} = dbg:p(self(), [c]), + + MS1 = dbg:fun2ms(fun([_]) -> return_trace() end), + {ok,_} = dbg:tp(logger_h_common, check_load, 1, MS1), + + {ok,_} = dbg:tpl(logger_h_common, flush_log_requests, 2, []), + + MS2 = dbg:fun2ms(fun([_,mode]) -> return_trace() end), + {ok,_} = dbg:tpl(ets, lookup, 2, MS2), + + ct:pal("Tracing started!", []), + TRecvPid. + +stop_op_trace(TRecvPid) -> + dbg:stop_clear(), + unlink(TRecvPid), + exit(TRecvPid, kill), + ok. + +find_mode(flush, Events) -> + lists:any(fun({trace_call,flush_log_requests,[_,_]}) -> true; + (_) -> false + end, Events); +find_mode(Mode, Events) -> + lists:keymember([{mode,Mode}], 3, Events). + +%% find_switch(_From, To, Events) -> +%% try lists:foldl(fun({trace_return,check_load,{To,_,_,_}}, +%% {trace_call,check_load,[#{mode := From}]}) -> +%% throw(match); +%% (Event, _) -> +%% Event +%% end, undefined, Events) of +%% _ -> false +%% catch +%% throw:match -> true +%% end. + +analyse_trace(TRecvPid, TestFun) -> + TRecvPid ! {test,self(),TestFun}, + receive + {result,TRecvPid,Result} -> + Result + after + 60000 -> + fails + end. + +trace_receiver(IdleT) -> + Msgs = receive_until_idle(IdleT, 5, []), + ct:pal("~w trace events generated", [length(Msgs)]), + analyse(Msgs). + +receive_until_idle(IdleT, WaitN, Msgs) -> + receive + Msg = {trace_call,_,_} -> + receive_until_idle(IdleT, 5, [Msg | Msgs]); + Msg = {trace_return,_,_} -> + receive_until_idle(IdleT, 5, [Msg | Msgs]) + after + IdleT -> + if WaitN == 0 -> + Msgs; + true -> + receive_until_idle(IdleT, WaitN-1, Msgs) + end + end. + +analyse(Msgs) -> + receive + {test,From,TestFun} -> + From ! {result,self(),TestFun(Msgs)}, + analyse(Msgs) + end. + +start_tracer(Trace,Expected) -> + Pid = self(), + FileCtrlPid = maps:get(file_ctrl_pid, logger_std_h:info(?MODULE)), + dbg:tracer(process,{fun tracer/2,{Pid,Expected}}), + dbg:p(whereis(h_proc_name()),[c]), + dbg:p(FileCtrlPid,[c]), + tpl(Trace), + ok. + +tpl([{M,F,A}|Trace]) -> + {ok,Match} = dbg:tpl(M,F,A,[]), + case lists:keyfind(matched,1,Match) of + {_,_,1} -> + ok; + _ -> + dbg:stop_clear(), + throw({skip,"Can't trace "++atom_to_list(M)++":"++ + atom_to_list(F)++"/"++integer_to_list(A)}) + end, + tpl(Trace); +tpl([]) -> + ok. + +tracer({trace,_,call,{logger_std_h,handle_cast,[Op|_]}}, + {Pid,[{Mod,Func,Op}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func,Op}); +tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[_,Data,_,_,_]}}, + {Pid,[{Mod,Func,Data}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func,Data}); +tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func}); +tracer({trace,_,call,Call}, {Pid,Expected}) -> + ct:log("Tracer got unexpected: ~p~nExpected: ~p~n",[Call,Expected]), + Pid ! {tracer_got_unexpected,Call,Expected}, + {Pid,Expected}. + +maybe_tracer_done(Pid,[]=Expected,Got) -> + ct:log("Tracer got: ~p~n",[Got]), + Pid ! {tracer_done,0}, + {Pid,Expected}; +maybe_tracer_done(Pid,[{no_more,T}]=Expected,Got) -> + ct:log("Tracer got: ~p~n",[Got]), + Pid ! {tracer_done,T}, + {Pid,Expected}; +maybe_tracer_done(Pid,Expected,Got) -> + ct:log("Tracer got: ~p~n",[Got]), + {Pid,Expected}. 
+ +check_tracer(T) -> + check_tracer(T,fun() -> ct:fail({timeout,tracer}) end). +check_tracer(T,TimeoutFun) -> + receive + {tracer_done,Delay} -> + %% Possibly wait Delay ms to check that no unexpected + %% traces are received + check_tracer(Delay,fun() -> ok end); + {tracer_got_unexpected,Got,Expected} -> + dbg:stop_clear(), + ct:fail({tracer_got_unexpected,Got,Expected}) + after T -> + dbg:stop_clear(), + TimeoutFun() + end. + +escape([$+|Rest]) -> + [$\\,$+|escape(Rest)]; +escape([H|T]) -> + [H|escape(T)]; +escape([]) -> + []. + +h_proc_name() -> + h_proc_name(?MODULE). +h_proc_name(Name) -> + ?name_to_reg_name(logger_std_h,Name). + +wait_for_process_up(T) -> + wait_for_process_up(?MODULE, h_proc_name(), T). + +wait_for_process_up(Name, RegName, T) -> + N = (T div 500) + 1, + wait_for_process_up1(Name, RegName, N). + +wait_for_process_up1(_Name, _RegName, 0) -> + error; +wait_for_process_up1(Name, RegName, N) -> + timer:sleep(500), + case whereis(RegName) of + Pid when is_pid(Pid) -> + case logger:get_handler_config(Name) of + {ok,_} -> + %% ct:pal("Process ~p up (~p tries left)",[Name,N]), + {ok,Pid}; + _ -> + wait_for_process_up1(Name, RegName, N-1) + end; + undefined -> + %% ct:pal("Waiting for process ~p (~p tries left)",[Name,N]), + wait_for_process_up1(Name, RegName, N-1) + end. + +filesync_rep_int() -> + case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of + true -> 5500; + false -> ?FILESYNC_REPEAT_INTERVAL + 500 + end. + + +file_delete(Log) -> + file:delete(Log). + diff --git a/lib/kernel/test/logger_test_lib.erl b/lib/kernel/test/logger_test_lib.erl new file mode 100644 index 0000000000..81eb9ce5eb --- /dev/null +++ b/lib/kernel/test/logger_test_lib.erl @@ -0,0 +1,82 @@ +% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_test_lib). + +-include_lib("kernel/src/logger_internal.hrl"). + +-export([setup/2, log/3, sync_and_read/3]). + +-export([init/2, + pre_init_per_suite/3, pre_init_per_testcase/4, + post_end_per_testcase/5, post_end_per_suite/3]). + +setup(Config,Vars) -> + FuncStr = lists:concat([proplists:get_value(suite, Config), "_", + proplists:get_value(tc, Config)]), + ConfigFileName = filename:join(proplists:get_value(priv_dir, Config), FuncStr), + file:write_file(ConfigFileName ++ ".config", io_lib:format("[{kernel, ~p}].",[Vars])), + case test_server:start_node(proplists:get_value(tc, Config), slave, + [{args, ["-pa ",filename:dirname(code:which(?MODULE)), + " -boot start_sasl -kernel start_timer true " + "-config ",ConfigFileName]}]) of + {ok, Node} -> + L = rpc:call(Node, logger, get_config, []), + ct:log("~p",[L]), + {ok, L, Node}; + {error, Reason} -> + ct:log("Failed to start node: ~p",[Reason]), + error + end. + +log(Node, F, A) -> + log(Node, logger, F, A). +log(Node, M, F, A) -> + MD = #{ gl => rpc:call(Node, erlang, whereis, [logger]) }, + rpc:call(Node, M, F, A ++ [MD]). 
+ +sync_and_read(Node,disk_log,Log) -> + rpc:call(Node,logger_disk_log_h,filesync,[?STANDARD_HANDLER]), + file:read_file(Log ++ ".1"); +sync_and_read(Node, file,Log) -> + ok = rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]), + file:read_file(Log). + + +init(_, _) -> + {ok, []}. + +pre_init_per_suite(_Suite, Config, State) -> + {[{nodes, nodes()} | Config], State}. + +pre_init_per_testcase(Suite, TC, Config, State) -> + cleanup(Config), + {[{suite, Suite}, {tc, TC} | Config], State}. + +post_end_per_testcase(_, _TC, Config, Res, State) -> + cleanup(Config), + {Res, State}. + +post_end_per_suite(_, Config, State) -> + cleanup(Config), + {Config, State}. + +cleanup(Config) -> + [test_server:stop_node(N) || N <- nodes(), + not lists:member(N, proplists:get_value(nodes, Config))]. diff --git a/lib/kernel/test/os_SUITE.erl b/lib/kernel/test/os_SUITE.erl index 53a9e168ef..710b9b115c 100644 --- a/lib/kernel/test/os_SUITE.erl +++ b/lib/kernel/test/os_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -22,10 +22,12 @@ -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2, init_per_testcase/2,end_per_testcase/2]). --export([space_in_cwd/1, quoting/1, cmd_unicode/1, space_in_name/1, bad_command/1, +-export([space_in_cwd/1, quoting/1, cmd_unicode/1, + null_in_command/1, space_in_name/1, bad_command/1, find_executable/1, unix_comment_in_command/1, deep_list_command/1, large_output_command/1, background_command/0, background_command/1, - message_leak/1, close_stdin/0, close_stdin/1, perf_counter_api/1]). + message_leak/1, close_stdin/0, close_stdin/1, max_size_command/1, + perf_counter_api/1]). -include_lib("common_test/include/ct.hrl"). @@ -34,10 +36,11 @@ suite() -> {timetrap,{minutes,1}}]. all() -> - [space_in_cwd, quoting, cmd_unicode, space_in_name, bad_command, + [space_in_cwd, quoting, cmd_unicode, null_in_command, + space_in_name, bad_command, find_executable, unix_comment_in_command, deep_list_command, large_output_command, background_command, message_leak, - close_stdin, perf_counter_api]. + close_stdin, max_size_command, perf_counter_api]. groups() -> []. @@ -125,6 +128,14 @@ cmd_unicode(Config) when is_list(Config) -> [] = receive_all(), ok. +null_in_command(Config) -> + {Ok, Error} = case os:type() of + {win32,_} -> {"dir", "di\0r"}; + _ -> {"ls", "l\0s"} + end, + true = is_list(try os:cmd(Ok) catch Class0:_ -> Class0 end), + error = try os:cmd(Error) catch Class1:_ -> Class1 end, + ok. %% Test that program with a space in its name can be executed. space_in_name(Config) when is_list(Config) -> @@ -216,8 +227,8 @@ find_executable(Config) when is_list(Config) -> DataDir = proplists:get_value(data_dir, Config), %% Smoke test. - case lib:progname() of - erl -> + case ct:get_progname() of + "erl" -> ErlPath = os:find_executable("erl"), true = is_list(ErlPath), true = filelib:is_regular(ErlPath); @@ -312,6 +323,19 @@ close_stdin(Config) -> "-1" = os:cmd(Fds). 
+max_size_command(_Config) -> + + Res20 = os:cmd("cat /dev/zero", #{ max_size => 20 }), + 20 = length(Res20), + + Res0 = os:cmd("cat /dev/zero", #{ max_size => 0 }), + 0 = length(Res0), + + Res32768 = os:cmd("cat /dev/zero", #{ max_size => 32768 }), + 32768 = length(Res32768), + + ResHello = string:trim(os:cmd("echo hello", #{ max_size => 20 })), + 5 = length(ResHello). %% Test that the os:perf_counter api works as expected perf_counter_api(_Config) -> @@ -364,7 +388,7 @@ comp(Expected, Got) -> ct:fail(failed) end. -%% Like lib:nonl/1, but strips \r as well as \n. +%% strips \n and \r\n from end of string strip_nl([$\r, $\n]) -> []; strip_nl([$\n]) -> []; diff --git a/lib/kernel/test/pdict_SUITE.erl b/lib/kernel/test/pdict_SUITE.erl index d105952df9..3685e51c10 100644 --- a/lib/kernel/test/pdict_SUITE.erl +++ b/lib/kernel/test/pdict_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1999-2016. All Rights Reserved. +%% Copyright Ericsson AB 1999-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -33,6 +33,7 @@ init_per_group/2,end_per_group/2, mixed/1, literals/1, + destructive/1, simple/1, complicated/1, heavy/1, simple_all_keys/1, info/1]). -export([init_per_testcase/2, end_per_testcase/2]). -export([other_process/2]). @@ -52,6 +53,7 @@ suite() -> all() -> [simple, complicated, heavy, simple_all_keys, info, literals, + destructive, mixed]. groups() -> @@ -367,6 +369,36 @@ match_keys(All) -> ok. +%% Test destructive put optimization of immed values +%% does not affect get/0 or process_info. +destructive(_Config) -> + Keys = lists:seq(1,100), + [put(Key, 17) || Key <- Keys], + Get1 = get(), + {dictionary,PI1} = process_info(self(), dictionary), + + [begin + {Key, 17} = lists:keyfind(Key, 1, Get1), + {Key, 17} = lists:keyfind(Key, 1, PI1) + end + || Key <- Keys], + + [17 = put(Key, 42) || Key <- Keys], % Mutate + + Get2 = get(), + {dictionary,PI2} = process_info(self(), dictionary), + + [begin + {Key, 17} = lists:keyfind(Key, 1, Get1), + {Key, 17} = lists:keyfind(Key, 1, PI1), + {Key, 42} = lists:keyfind(Key, 1, Get2), + {Key, 42} = lists:keyfind(Key, 1, PI2) + + end + || Key <- Keys], + + ok. + %% Do random mixed put/erase to test grow/shrink %% Written for a temporary bug in gc during shrink mixed(_Config) -> diff --git a/lib/kernel/test/prim_file_SUITE.erl b/lib/kernel/test/prim_file_SUITE.erl index 2f4330c217..2f465a15bc 100644 --- a/lib/kernel/test/prim_file_SUITE.erl +++ b/lib/kernel/test/prim_file_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2000-2017. All Rights Reserved. +%% Copyright Ericsson AB 2000-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -21,38 +21,23 @@ -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2, init_per_testcase/2, end_per_testcase/2, read_write_file/1, free_memory/0]). --export([cur_dir_0a/1, cur_dir_0b/1, - cur_dir_1a/1, cur_dir_1b/1, - make_del_dir_a/1, make_del_dir_b/1, - pos1/1, pos2/1]). --export([close/1, - delete_a/1, delete_b/1]). --export([ open1/1, modes/1]). 
--export([ - file_info_basic_file_a/1, file_info_basic_file_b/1, - file_info_basic_directory_a/1, file_info_basic_directory_b/1, - file_info_bad_a/1, file_info_bad_b/1, - file_info_times_a/1, file_info_times_b/1, - file_write_file_info_a/1, file_write_file_info_b/1, - file_read_file_info_opts/1, file_write_file_info_opts/1, - file_write_read_file_info_opts/1 - ]). --export([rename_a/1, rename_b/1, - access/1, truncate/1, datasync/1, sync/1, +-export([cur_dir_0/1, cur_dir_1/1, + make_del_dir/1, pos1/1, pos2/1]). +-export([close/1, delete/1]). +-export([open1/1, modes/1]). +-export([file_info_basic_file/1, file_info_basic_directory/1, file_info_bad/1, + file_info_times/1, file_write_file_info/1, + file_read_file_info_opts/1, file_write_file_info_opts/1, + file_write_read_file_info_opts/1]). +-export([rename/1, access/1, truncate/1, datasync/1, sync/1, read_write/1, pread_write/1, append/1, exclusive/1]). --export([ e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]). +-export([e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]). --export([ read_not_really_compressed/1, - read_compressed/1, write_compressed/1, - compress_errors/1]). - --export([ - make_link_a/1, make_link_b/1, - read_link_info_for_non_link/1, - symlinks_a/1, symlinks_b/1, - list_dir_limit/1, - list_dir_error/1, - list_dir/1]). +-export([make_link/1, read_link_info_for_non_link/1, + symlinks/1, + list_dir_limit/1, + list_dir_error/1, + list_dir/1]). -export([advise/1]). -export([large_write/1]). @@ -67,29 +52,16 @@ -define(PRIM_FILE, prim_file). -%% Calls ?PRIM_FILE:F with arguments A and an optional handle H -%% as first argument, unless the handle is [], i.e no handle. -%% This is a macro to give the compiler and thereby -%% the cross reference tool the possibility to interprete -%% the call, since M, F, A (or [H | A]) can all be known at -%% compile time. --define(PRIM_FILE_call(F, H, A), - case H of - [] -> apply(?PRIM_FILE, F, A); - _ -> apply(?PRIM_FILE, F, [H | A]) - end). - suite() -> []. all() -> [read_write_file, {group, dirs}, {group, files}, - delete_a, delete_b, rename_a, rename_b, {group, errors}, - {group, compression}, {group, links}, list_dir_limit, list_dir]. + delete, rename, {group, errors}, {group, links}, + list_dir_limit, list_dir]. groups() -> [{dirs, [], - [make_del_dir_a, make_del_dir_b, cur_dir_0a, cur_dir_0b, - cur_dir_1a, cur_dir_1b]}, + [make_del_dir, cur_dir_0, cur_dir_1]}, {files, [], [{group, open}, {group, pos}, {group, file_info}, truncate, sync, datasync, advise, large_write, allocate]}, @@ -98,22 +70,14 @@ groups() -> append, exclusive]}, {pos, [], [pos1, pos2]}, {file_info, [], - [file_info_basic_file_a, file_info_basic_file_b, - file_info_basic_directory_a, - file_info_basic_directory_b, file_info_bad_a, - file_info_bad_b, file_info_times_a, file_info_times_b, - file_write_file_info_a, file_write_file_info_b, - file_read_file_info_opts, file_write_file_info_opts, - file_write_read_file_info_opts + [file_info_basic_file,file_info_basic_directory, file_info_bad, + file_info_times, file_write_file_info, file_read_file_info_opts, + file_write_file_info_opts, file_write_read_file_info_opts ]}, {errors, [], [e_delete, e_rename, e_make_dir, e_del_dir]}, - {compression, [], - [read_compressed, read_not_really_compressed, - write_compressed, compress_errors]}, {links, [], - [make_link_a, make_link_b, read_link_info_for_non_link, - symlinks_a, symlinks_b, list_dir_error]}]. + [make_link, read_link_info_for_non_link, symlinks, list_dir_error]}]. 
init_per_testcase(large_write, Config) -> {ok, Started} = application:ensure_all_started(os_mon), @@ -246,39 +210,27 @@ read_write_file(Config) when is_list(Config) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -make_del_dir_a(Config) when is_list(Config) -> - make_del_dir(Config, [], "_a"). - -make_del_dir_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = make_del_dir(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - %% Just to make sure the state of the server makes a difference - {error, einval} = ?PRIM_FILE_call(get_cwd, Handle, []), - Result. - -make_del_dir(Config, Handle, Suffix) -> +make_del_dir(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir,Config), NewDir = filename:join(RootDir, atom_to_list(?MODULE) - ++"_mk-dir"++Suffix), - ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), - {error, eexist} = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), - ok = ?PRIM_FILE_call(del_dir, Handle, [NewDir]), - {error, enoent} = ?PRIM_FILE_call(del_dir, Handle, [NewDir]), + ++"_mk-dir"), + ok = ?PRIM_FILE:make_dir(NewDir), + {error, eexist} = ?PRIM_FILE:make_dir(NewDir), + ok = ?PRIM_FILE:del_dir(NewDir), + {error, enoent} = ?PRIM_FILE:del_dir(NewDir), %% Make sure we are not in a directory directly under test_server %% as that would result in eacces errors when trying to delete '..', %% because there are processes having that directory as current. - ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), - {ok, CurrentDir} = ?PRIM_FILE_call(get_cwd, Handle, []), + ok = ?PRIM_FILE:make_dir(NewDir), + {ok, CurrentDir} = ?PRIM_FILE:get_cwd(), case {os:type(), length(NewDir) >= 260 } of {{win32,_}, true} -> io:format("Skip set_cwd for windows path longer than 260 (MAX_PATH)\n", []), io:format("\nNewDir = ~p\n", [NewDir]); _ -> - ok = ?PRIM_FILE_call(set_cwd, Handle, [NewDir]) + ok = ?PRIM_FILE:set_cwd(NewDir) end, try %% Check that we get an error when trying to create... @@ -286,14 +238,14 @@ make_del_dir(Config, Handle, Suffix) -> NewDir2 = filename:join(RootDir, atom_to_list(?MODULE) ++"_mk-dir-noexist/foo"), - {error, enoent} = ?PRIM_FILE_call(make_dir, Handle, [NewDir2]), + {error, enoent} = ?PRIM_FILE:make_dir(NewDir2), %% a nameless directory - {error, enoent} = ?PRIM_FILE_call(make_dir, Handle, [""]), + {error, enoent} = ?PRIM_FILE:make_dir(""), %% a directory with illegal name - {error, badarg} = ?PRIM_FILE_call(make_dir, Handle, ['mk-dir']), + {error, badarg} = ?PRIM_FILE:make_dir('mk-dir'), %% a directory with illegal name, even if it's a (bad) list - {error, badarg} = ?PRIM_FILE_call(make_dir, Handle, [[1,2,3,{}]]), + {error, badarg} = ?PRIM_FILE:make_dir([1,2,3,{}]), %% Maybe this isn't an error, exactly, but worth mentioning anyway: %% ok = ?PRIM_FILE:make_dir([$f,$o,$o,0,$b,$a,$r])), @@ -306,125 +258,101 @@ make_del_dir(Config, Handle, Suffix) -> %% Try deleting some bad directories %% Deleting the parent directory to the current, sounds dangerous, huh? %% Don't worry ;-) the parent directory should never be empty, right? 
- case ?PRIM_FILE_call(del_dir, Handle, [".."]) of + case ?PRIM_FILE:del_dir("..") of {error, eexist} -> ok; {error, eacces} -> ok; %OpenBSD {error, einval} -> ok %FreeBSD end, - {error, enoent} = ?PRIM_FILE_call(del_dir, Handle, [""]), - {error, badarg} = ?PRIM_FILE_call(del_dir, Handle, [[3,2,1,{}]]) + {error, enoent} = ?PRIM_FILE:del_dir(""), + {error, badarg} = ?PRIM_FILE:del_dir([3,2,1,{}]) after - ok = ?PRIM_FILE_call(set_cwd, Handle, [CurrentDir]) + ok = ?PRIM_FILE:set_cwd(CurrentDir) end, ok. -cur_dir_0a(Config) when is_list(Config) -> - cur_dir_0(Config, []). - -cur_dir_0b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = cur_dir_0(Config, Handle), - ok = ?PRIM_FILE:stop(Handle), - Result. - -cur_dir_0(Config, Handle) -> +cur_dir_0(Config) when is_list(Config) -> %% Find out the current dir, and cd to it ;-) - {ok,BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []), + {ok,BaseDir} = ?PRIM_FILE:get_cwd(), Dir1 = BaseDir ++ "", %% Check that it's a string - ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]), - DirName = atom_to_list(?MODULE) ++ - case Handle of - [] -> - "_curdir"; - _ -> - "_curdir_h" - end, + ok = ?PRIM_FILE:set_cwd(Dir1), + DirName = atom_to_list(?MODULE) ++ "_curdir", %% Make a new dir, and cd to that RootDir = proplists:get_value(priv_dir,Config), NewDir = filename:join(RootDir, DirName), - ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), + ok = ?PRIM_FILE:make_dir(NewDir), case {os:type(), length(NewDir) >= 260} of {{win32,_}, true} -> io:format("Skip set_cwd for windows path longer than 260 (MAX_PATH):\n"), io:format("\nNewDir = ~p\n", [NewDir]); _ -> io:format("cd to ~s",[NewDir]), - ok = ?PRIM_FILE_call(set_cwd, Handle, [NewDir]), + ok = ?PRIM_FILE:set_cwd(NewDir), %% Create a file in the new current directory, and check that it %% really is created there UncommonName = "uncommon.fil", {ok,Fd} = ?PRIM_FILE:open(UncommonName, [read, write]), ok = ?PRIM_FILE:close(Fd), - {ok,NewDirFiles} = ?PRIM_FILE_call(list_dir, Handle, ["."]), + {ok,NewDirFiles} = ?PRIM_FILE:list_dir("."), true = lists:member(UncommonName,NewDirFiles), %% Delete the directory and return to the old current directory %% and check that the created file isn't there (too!) expect({error, einval}, {error, eacces}, {error, eexist}, - ?PRIM_FILE_call(del_dir, Handle, [NewDir])), - ?PRIM_FILE_call(delete, Handle, [UncommonName]), - {ok,[]} = ?PRIM_FILE_call(list_dir, Handle, ["."]), - ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]), + ?PRIM_FILE:del_dir(NewDir)), + ?PRIM_FILE:delete(UncommonName), + {ok,[]} = ?PRIM_FILE:list_dir("."), + ok = ?PRIM_FILE:set_cwd(Dir1), io:format("cd back to ~s",[Dir1]), - ok = ?PRIM_FILE_call(del_dir, Handle, [NewDir]), - {error, enoent} = ?PRIM_FILE_call(set_cwd, Handle, [NewDir]), - ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]), + ok = ?PRIM_FILE:del_dir(NewDir), + {error, enoent} = ?PRIM_FILE:set_cwd(NewDir), + ok = ?PRIM_FILE:set_cwd(Dir1), io:format("cd back to ~s",[Dir1]), - {ok,OldDirFiles} = ?PRIM_FILE_call(list_dir, Handle, ["."]), + {ok,OldDirFiles} = ?PRIM_FILE:list_dir("."), false = lists:member(UncommonName,OldDirFiles) end, %% Try doing some bad things {error, badarg} = - ?PRIM_FILE_call(set_cwd, Handle, [{foo,bar}]), + ?PRIM_FILE:set_cwd({foo,bar}), {error, enoent} = - ?PRIM_FILE_call(set_cwd, Handle, [""]), + ?PRIM_FILE:set_cwd(""), {error, enoent} = - ?PRIM_FILE_call(set_cwd, Handle, [".......a......"]), + ?PRIM_FILE:set_cwd(".......a......"), {ok,BaseDir} = - ?PRIM_FILE_call(get_cwd, Handle, []), %% Still there? 
+ ?PRIM_FILE:get_cwd(), %% Still there? %% On Windows, there should only be slashes, no backslashes, %% in the return value of get_cwd(). %% (The test is harmless on Unix, because filenames usually %% don't contain backslashes.) - {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []), + {ok, BaseDir} = ?PRIM_FILE:get_cwd(), false = lists:member($\\, BaseDir), ok. %% Tests ?PRIM_FILE:get_cwd/1. -cur_dir_1a(Config) when is_list(Config) -> - cur_dir_1(Config, []). - -cur_dir_1b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = cur_dir_1(Config, Handle), - ok = ?PRIM_FILE:stop(Handle), - Result. - -cur_dir_1(Config, Handle) -> +cur_dir_1(Config) when is_list(Config) -> case os:type() of {win32, _} -> - win_cur_dir_1(Config, Handle); + win_cur_dir_1(Config); _ -> {error, enotsup} = - ?PRIM_FILE_call(get_cwd, Handle, ["d:"]) + ?PRIM_FILE:get_cwd("d:") end, ok. -win_cur_dir_1(_Config, Handle) -> - {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []), +win_cur_dir_1(_Config) -> + {ok, BaseDir} = ?PRIM_FILE:get_cwd(), %% Get the drive letter from the current directory, %% and try to get current directory for that drive. [Drive, $:|_] = BaseDir, - {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, [[Drive, $:]]), + {ok, BaseDir} = ?PRIM_FILE:get_cwd([Drive, $:]), io:format("BaseDir = ~s\n", [BaseDir]), %% Unfortunately, there is no way to move away from the @@ -446,12 +374,12 @@ open1(Config) when is_list(Config) -> Name = filename:join(NewDir, "foo1.fil"), {ok,Fd1} = ?PRIM_FILE:open(Name, [read, write]), {ok,Fd2} = ?PRIM_FILE:open(Name, [read]), - Str = "{a,tuple}.\n", - Length = length(Str), - ?PRIM_FILE:write(Fd1,Str), + Bin = list_to_binary("{a,tuple}.\n"), + Length = byte_size(Bin), + ?PRIM_FILE:write(Fd1,Bin), {ok,0} = ?PRIM_FILE:position(Fd1,bof), - {ok, Str} = ?PRIM_FILE:read(Fd1,Length), - {ok, Str} = ?PRIM_FILE:read(Fd2,Length), + {ok, Bin} = ?PRIM_FILE:read(Fd1,Length), + {ok, Bin} = ?PRIM_FILE:read(Fd2,Length), ok = ?PRIM_FILE:close(Fd2), {ok,0} = ?PRIM_FILE:position(Fd1,bof), ok = ?PRIM_FILE:truncate(Fd1), @@ -471,13 +399,13 @@ modes(Config) when is_list(Config) -> ++"_open_modes"), ok = ?PRIM_FILE:make_dir(NewDir), Name1 = filename:join(NewDir, "foo1.fil"), - Marker = "hello, world", - Length = length(Marker), + Marker = <<"hello, world">>, + Length = byte_size(Marker), %% write {ok, Fd1} = ?PRIM_FILE:open(Name1, [write]), ok = ?PRIM_FILE:write(Fd1, Marker), - ok = ?PRIM_FILE:write(Fd1, ".\n"), + ok = ?PRIM_FILE:write(Fd1, <<".\n">>), ok = ?PRIM_FILE:close(Fd1), %% read @@ -496,12 +424,6 @@ modes(Config) when is_list(Config) -> {ok, Marker} = ?PRIM_FILE:read(Fd4, Length), ok = ?PRIM_FILE:close(Fd4), - %% read and binary - BinaryMarker = list_to_binary(Marker), - {ok, Fd5} = ?PRIM_FILE:open(Name1, [read, binary]), - {ok, BinaryMarker} = ?PRIM_FILE:read(Fd5, Length), - ok = ?PRIM_FILE:close(Fd5), - ok. 
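In the open1 and modes hunks above the expected data switches from strings to binaries, and the separate [read, binary] re-open disappears, matching the assumption that read/2 on the rewritten prim_file hands back binaries. A small hedged sketch of that round trip; the file name argument is illustrative:

    %% Write iodata, then read the same bytes back as a binary.
    demo_binary_roundtrip(Name) ->
        Data = <<"hello, world">>,
        {ok, Fd} = prim_file:open(Name, [read, write]),
        ok = prim_file:write(Fd, Data),
        {ok, 0} = prim_file:position(Fd, bof),
        {ok, Data} = prim_file:read(Fd, byte_size(Data)),
        ok = prim_file:close(Fd),
        ok = prim_file:delete(Name).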
close(Config) when is_list(Config) -> @@ -528,9 +450,9 @@ access(Config) when is_list(Config) -> Name = filename:join(RootDir, atom_to_list(?MODULE) ++"_access.fil"), - Str = "ABCDEFGH", + Bin = <<"ABCDEFGH">>, {ok,Fd1} = ?PRIM_FILE:open(Name, [write]), - ?PRIM_FILE:write(Fd1,Str), + ?PRIM_FILE:write(Fd1,Bin), ok = ?PRIM_FILE:close(Fd1), %% Check that we can't write when in read only mode {ok,Fd2} = ?PRIM_FILE:open(Name, [read]), @@ -542,7 +464,7 @@ access(Config) when is_list(Config) -> end, ok = ?PRIM_FILE:close(Fd2), {ok, Fd3} = ?PRIM_FILE:open(Name, [read]), - {ok, Str} = ?PRIM_FILE:read(Fd3,length(Str)), + {ok, Bin} = ?PRIM_FILE:read(Fd3,byte_size(Bin)), ok = ?PRIM_FILE:close(Fd3), ok. @@ -564,7 +486,7 @@ read_write(Config) when is_list(Config) -> ok. read_write_test(File) -> - Marker = "hello, world", + Marker = <<"hello, world">>, ok = ?PRIM_FILE:write(File, Marker), {ok, 0} = ?PRIM_FILE:position(File, 0), {ok, Marker} = ?PRIM_FILE:read(File, 100), @@ -590,15 +512,15 @@ pread_write(Config) when is_list(Config) -> ok. pread_write_test(File) -> - Marker = "hello, world", - Len = length(Marker), + Marker = <<"hello, world">>, + Len = byte_size(Marker), ok = ?PRIM_FILE:write(File, Marker), {ok, Marker} = ?PRIM_FILE:pread(File, 0, 100), eof = ?PRIM_FILE:pread(File, 100, 1), ok = ?PRIM_FILE:pwrite(File, Len, Marker), {ok, Marker} = ?PRIM_FILE:pread(File, Len, 100), eof = ?PRIM_FILE:pread(File, 100, 1), - MM = Marker ++ Marker, + MM = <<Marker/binary,Marker/binary>>, {ok, MM} = ?PRIM_FILE:pread(File, 0, 100), ok = ?PRIM_FILE:close(File), ok. @@ -655,24 +577,24 @@ pos1(Config) when is_list(Config) -> atom_to_list(?MODULE) ++"_pos1.fil"), {ok, Fd1} = ?PRIM_FILE:open(Name, [write]), - ?PRIM_FILE:write(Fd1,"ABCDEFGH"), + ?PRIM_FILE:write(Fd1,<<"ABCDEFGH">>), ok = ?PRIM_FILE:close(Fd1), {ok, Fd2} = ?PRIM_FILE:open(Name, [read]), %% Start pos is first char io:format("Relative positions"), - {ok, "A"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1), {ok, 2} = ?PRIM_FILE:position(Fd2,{cur,1}), - {ok, "C"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"C">>} = ?PRIM_FILE:read(Fd2,1), {ok, 0} = ?PRIM_FILE:position(Fd2,{cur,-3}), - {ok, "A"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1), %% Backwards from first char should be an error {ok,0} = ?PRIM_FILE:position(Fd2,{cur,-1}), {error, einval} = ?PRIM_FILE:position(Fd2,{cur,-1}), %% Reset position and move again {ok, 0} = ?PRIM_FILE:position(Fd2,0), {ok, 2} = ?PRIM_FILE:position(Fd2,{cur,2}), - {ok, "C"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"C">>} = ?PRIM_FILE:read(Fd2,1), %% Go a lot forwards {ok, 13} = ?PRIM_FILE:position(Fd2,{cur,10}), eof = ?PRIM_FILE:read(Fd2,1), @@ -684,27 +606,27 @@ pos1(Config) when is_list(Config) -> {ok, 8} = ?PRIM_FILE:position(Fd2,cur), eof = ?PRIM_FILE:read(Fd2,1), {ok, 7} = ?PRIM_FILE:position(Fd2,7), - {ok, "H"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"H">>} = ?PRIM_FILE:read(Fd2,1), {ok, 0} = ?PRIM_FILE:position(Fd2,0), - {ok, "A"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1), {ok, 3} = ?PRIM_FILE:position(Fd2,3), - {ok, "D"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1), {ok, 12} = ?PRIM_FILE:position(Fd2,12), eof = ?PRIM_FILE:read(Fd2,1), {ok, 3} = ?PRIM_FILE:position(Fd2,3), - {ok, "D"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1), %% Try the {bof,X} notation {ok, 3} = ?PRIM_FILE:position(Fd2,{bof,3}), - {ok, "D"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1), %% Try eof positions io:format("EOF positions"), 
{ok, 8} = ?PRIM_FILE:position(Fd2,{eof,0}), eof = ?PRIM_FILE:read(Fd2,1), {ok, 7} = ?PRIM_FILE:position(Fd2,{eof,-1}), - {ok, "H"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"H">>} = ?PRIM_FILE:read(Fd2,1), {ok, 0} = ?PRIM_FILE:position(Fd2,{eof,-8}), - {ok, "A"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1), {error, einval} = ?PRIM_FILE:position(Fd2,{eof,-9}), ok. @@ -714,7 +636,7 @@ pos2(Config) when is_list(Config) -> atom_to_list(?MODULE) ++"_pos2.fil"), {ok,Fd1} = ?PRIM_FILE:open(Name, [write]), - ?PRIM_FILE:write(Fd1,"ABCDEFGH"), + ?PRIM_FILE:write(Fd1,<<"ABCDEFGH">>), ok = ?PRIM_FILE:close(Fd1), {ok, Fd2} = ?PRIM_FILE:open(Name, [read]), {error, einval} = ?PRIM_FILE:position(Fd2,-1), @@ -722,35 +644,25 @@ pos2(Config) when is_list(Config) -> %% Make sure that we still can search after an error. {ok, 0} = ?PRIM_FILE:position(Fd2, 0), {ok, 3} = ?PRIM_FILE:position(Fd2, {bof,3}), - {ok, "D"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1), io:format("DONE"), ok. - -file_info_basic_file_a(Config) when is_list(Config) -> - file_info_basic_file(Config, [], "_a"). - -file_info_basic_file_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = file_info_basic_file(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -file_info_basic_file(Config, Handle, Suffix) -> +file_info_basic_file(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir, Config), %% Create a short file. Name = filename:join(RootDir, atom_to_list(?MODULE) - ++"_basic_test"++Suffix++".fil"), + ++"_basic_test"".fil"), {ok,Fd1} = ?PRIM_FILE:open(Name, [write]), ?PRIM_FILE:write(Fd1, "foo bar"), ok = ?PRIM_FILE:close(Fd1), %% Test that the file has the expected attributes. %% The times are tricky, so we will save them to a separate test case. - {ok, FileInfo} = ?PRIM_FILE_call(read_file_info, Handle, [Name]), + {ok, FileInfo} = ?PRIM_FILE:read_file_info(Name), #file_info{size = Size, type = Type, access = Access, atime = AccessTime, mtime = ModifyTime} = FileInfo, @@ -768,39 +680,30 @@ file_info_basic_file(Config, Handle, Suffix) -> ok. -file_info_basic_directory_a(Config) when is_list(Config) -> - file_info_basic_directory(Config, []). - -file_info_basic_directory_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = file_info_basic_directory(Config, Handle), - ok = ?PRIM_FILE:stop(Handle), - Result. - -file_info_basic_directory(Config, Handle) -> +file_info_basic_directory(Config) when is_list(Config) -> %% Note: filename:join/1 removes any trailing slash, %% which is essential for ?PRIM_FILE:read_file_info/1 to work on %% platforms such as Windows95. RootDir = filename:join([proplists:get_value(priv_dir, Config)]), %% Test that the RootDir directory has the expected attributes. - test_directory(RootDir, read_write, Handle), + test_directory(RootDir, read_write), %% Note that on Windows file systems, "/" or "c:/" are *NOT* directories. %% Therefore, test that ?PRIM_FILE:read_file_info/1 behaves %% as if they were directories. case os:type() of {win32, _} -> - test_directory("/", read_write, Handle), - test_directory("c:/", read_write, Handle), - test_directory("c:\\", read_write, Handle); + test_directory("/", read_write), + test_directory("c:/", read_write), + test_directory("c:\\", read_write); _ -> - test_directory("/", read, Handle) + test_directory("/", read) end, ok. 
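The file_info cases above lose the handle argument and the name suffixes in the same way; read_file_info/1 is called straight on prim_file. A hedged sketch of the kind of check they perform; the path is illustrative and the #file_info{} record needs kernel/include/file.hrl:

    -include_lib("kernel/include/file.hrl").

    %% Write a short file and inspect its #file_info{} record.
    demo_file_info(Name) ->
        ok = prim_file:write_file(Name, <<"foo bar">>),
        {ok, #file_info{size = 7, type = regular, access = Access}} =
            prim_file:read_file_info(Name),
        true = lists:member(Access, [read, read_write]),
        ok = prim_file:delete(Name).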
-test_directory(Name, ExpectedAccess, Handle) -> - {ok, FileInfo} = ?PRIM_FILE_call(read_file_info, Handle, [Name]), +test_directory(Name, ExpectedAccess) -> + {ok, FileInfo} = ?PRIM_FILE:read_file_info(Name), #file_info{size = Size, type = Type, access = Access, atime = AccessTime, mtime = ModifyTime} = FileInfo, @@ -824,45 +727,24 @@ all_integers([]) -> %% Try something nonexistent. -file_info_bad_a(Config) when is_list(Config) -> - file_info_bad(Config, []). - -file_info_bad_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = file_info_bad(Config, Handle), - ok = ?PRIM_FILE:stop(Handle), - Result. - -file_info_bad(Config, Handle) -> +file_info_bad(Config) when is_list(Config) -> RootDir = filename:join([proplists:get_value(priv_dir, Config)]), - {error, enoent} = - ?PRIM_FILE_call( - read_file_info, Handle, - [filename:join(RootDir, - atom_to_list(?MODULE)++"_nonexistent")]), + NonExistent = filename:join(RootDir, atom_to_list(?MODULE)++"_nonexistent"), + {error, enoent} = ?PRIM_FILE:read_file_info(NonExistent), ok. %% Test that the file times behave as they should. -file_info_times_a(Config) when is_list(Config) -> - file_info_times(Config, [], "_a"). - -file_info_times_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = file_info_times(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -file_info_times(Config, Handle, Suffix) -> +file_info_times(Config) when is_list(Config) -> %% We have to try this twice, since if the test runs across the change %% of a month the time diff calculations will fail. But it won't happen %% if you run it twice in succession. test_server:m_out_of_n( 1,2, - fun() -> file_info_int(Config, Handle, Suffix) end), + fun() -> file_info_int(Config) end), ok. -file_info_int(Config, Handle, Suffix) -> +file_info_int(Config) -> %% Note: filename:join/1 removes any trailing slash, %% which is essential for ?PRIM_FILE:read_file_info/1 to work on %% platforms such as Windows95. @@ -872,14 +754,14 @@ file_info_int(Config, Handle, Suffix) -> Name = filename:join(RootDir, atom_to_list(?MODULE) - ++"_file_info"++Suffix++".fil"), + ++"_file_info.fil"), {ok,Fd1} = ?PRIM_FILE:open(Name, [write]), ?PRIM_FILE:write(Fd1,"foo"), %% check that the file got a modify date max a few seconds away from now {ok, #file_info{type = regular, atime = AccTime1, mtime = ModTime1}} = - ?PRIM_FILE_call(read_file_info, Handle, [Name]), + ?PRIM_FILE:read_file_info(Name), Now = erlang:localtime(), io:format("Now ~p",[Now]), io:format("Open file Acc ~p Mod ~p",[AccTime1,ModTime1]), @@ -897,7 +779,7 @@ file_info_int(Config, Handle, Suffix) -> ok = ?PRIM_FILE:close(Fd1), {ok, #file_info{size = Size, type = regular, access = Access, atime = AccTime2, mtime = ModTime2}} = - ?PRIM_FILE_call(read_file_info, Handle, [Name]), + ?PRIM_FILE:read_file_info(Name), io:format("Closed file Acc ~p Mod ~p",[AccTime2,ModTime2]), true = time_dist(ModTime1, ModTime2) >= 0, @@ -909,7 +791,7 @@ file_info_int(Config, Handle, Suffix) -> {ok, #file_info{size = DSize, type = directory, access = DAccess, atime = AccTime3, mtime = ModTime3}} = - ?PRIM_FILE_call(read_file_info, Handle, [RootDir]), + ?PRIM_FILE:read_file_info(RootDir), %% this dir was modified only a few secs ago io:format("Dir Acc ~p; Mod ~p; Now ~p", [AccTime3, ModTime3, Now]), @@ -936,16 +818,7 @@ filter_atime(Atime, Config) -> %% Test the write_file_info/2 function. -file_write_file_info_a(Config) when is_list(Config) -> - file_write_file_info(Config, [], "_a"). 
- -file_write_file_info_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = file_write_file_info(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -file_write_file_info(Config, Handle, Suffix) -> +file_write_file_info(Config) when is_list(Config) -> RootDir = get_good_directory(Config), io:format("RootDir = ~p", [RootDir]), @@ -955,16 +828,16 @@ file_write_file_info(Config, Handle, Suffix) -> Name = filename:join(RootDir, atom_to_list(?MODULE) - ++"_write_file_info_ro"++Suffix), + ++"_write_file_info_ro"), ok = ?PRIM_FILE:write_file(Name, "hello"), Time = {{1997, 01, 02}, {12, 35, 42}}, Info = #file_info{mode=8#400, atime=Time, mtime=Time, ctime=Time}, - ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, Info]), + ok = ?PRIM_FILE:write_file_info(Name, Info), %% Read back the times. {ok, ActualInfo} = - ?PRIM_FILE_call(read_file_info, Handle, [Name]), + ?PRIM_FILE:read_file_info(Name), #file_info{mode=_Mode, atime=ActAtime, mtime=Time, ctime=ActCtime} = ActualInfo, FilteredAtime = filter_atime(Time, Config), @@ -980,14 +853,11 @@ file_write_file_info(Config, Handle, Suffix) -> {error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"), %% Make the file writable again. - - ?PRIM_FILE_call(write_file_info, Handle, - [Name, #file_info{mode=8#600}]), + ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#600}), ok = ?PRIM_FILE:write_file(Name, "hello again"), %% And unwritable. - ?PRIM_FILE_call(write_file_info, Handle, - [Name, #file_info{mode=8#400}]), + ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#400}), {error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"), %% Write the times again. @@ -995,9 +865,9 @@ file_write_file_info(Config, Handle, Suffix) -> NewTime = {{1997, 02, 15}, {13, 18, 20}}, NewInfo = #file_info{atime=NewTime, mtime=NewTime, ctime=NewTime}, - ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, NewInfo]), + ok = ?PRIM_FILE:write_file_info(Name, NewInfo), {ok, ActualInfo2} = - ?PRIM_FILE_call(read_file_info, Handle, [Name]), + ?PRIM_FILE:read_file_info(Name), #file_info{atime=NewActAtime, mtime=NewTime, ctime=NewActCtime} = ActualInfo2, NewFilteredAtime = filter_atime(NewTime, Config), @@ -1012,14 +882,12 @@ file_write_file_info(Config, Handle, Suffix) -> %% Make the file writeable again, so that we can remove the %% test suites ... :-) - ?PRIM_FILE_call(write_file_info, Handle, - [Name, #file_info{mode=8#600}]), + ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#600}), ok. %% Test the write_file_info/3 function. file_write_file_info_opts(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), RootDir = get_good_directory(Config), io:format("RootDir = ~p", [RootDir]), @@ -1028,7 +896,7 @@ file_write_file_info_opts(Config) when is_list(Config) -> lists:foreach(fun ({FI, Opts}) -> - ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI, Opts]) + ok = ?PRIM_FILE:write_file_info(Name, FI, Opts) end, [ {#file_info{ mode=8#600, atime = Time, mtime = Time, ctime = Time}, Opts} || Opts <- [[{time, posix}]], @@ -1038,7 +906,7 @@ file_write_file_info_opts(Config) when is_list(Config) -> %% REM: determine date range dependent on time_t = Uint32 | Sint32 | Sint64 | Uint64 %% Determine time_t on os:type()? 
lists:foreach(fun ({FI, Opts}) -> - ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI, Opts]) + ok = ?PRIM_FILE:write_file_info(Name, FI, Opts) end, [ {#file_info{ mode=8#400, atime = Time, mtime = Time, ctime = Time}, Opts} || Opts <- [[{time, universal}],[{time, local}]], Time <- [ @@ -1050,11 +918,9 @@ file_write_file_info_opts(Config) when is_list(Config) -> {{2037,2,3},{23,59,59}}, erlang:localtime() ]]), - ok = ?PRIM_FILE:stop(Handle), ok. file_read_file_info_opts(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), RootDir = get_good_directory(Config), io:format("RootDir = ~p", [RootDir]), @@ -1063,41 +929,38 @@ file_read_file_info_opts(Config) when is_list(Config) -> lists:foreach(fun (Opts) -> - {ok,_} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts]) + {ok,_} = ?PRIM_FILE:read_file_info(Name, Opts) end, [[{time, Type}] || Type <- [local, universal, posix]]), - ok = ?PRIM_FILE:stop(Handle), ok. %% Test the write and read back *_file_info/3 functions. file_write_read_file_info_opts(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), RootDir = get_good_directory(Config), io:format("RootDir = ~p", [RootDir]), Name = filename:join(RootDir, atom_to_list(?MODULE) ++"_read_write_file_info_opts"), ok = ?PRIM_FILE:write_file(Name, "hello_opts2"), - ok = file_write_read_file_info_opts(Handle, Name, {{1989, 04, 28}, {19,30,22}}, [{time, local}]), - ok = file_write_read_file_info_opts(Handle, Name, {{1989, 04, 28}, {19,30,22}}, [{time, universal}]), + ok = file_write_read_file_info_opts(Name, {{1989, 04, 28}, {19,30,22}}, [{time, local}]), + ok = file_write_read_file_info_opts(Name, {{1989, 04, 28}, {19,30,22}}, [{time, universal}]), %% will not work on platforms with unsigned time_t - %ok = file_write_read_file_info_opts(Handle, Name, {{1930, 04, 28}, {19,30,22}}, [{time, local}]), - %ok = file_write_read_file_info_opts(Handle, Name, {{1930, 04, 28}, {19,30,22}}, [{time, universal}]), - ok = file_write_read_file_info_opts(Handle, Name, 1, [{time, posix}]), + %ok = file_write_read_file_info_opts(Name, {{1930, 04, 28}, {19,30,22}}, [{time, local}]), + %ok = file_write_read_file_info_opts(Name, {{1930, 04, 28}, {19,30,22}}, [{time, universal}]), + ok = file_write_read_file_info_opts(Name, 1, [{time, posix}]), %% will not work on platforms with unsigned time_t - %ok = file_write_read_file_info_opts(Handle, Name, -1, [{time, posix}]), - %ok = file_write_read_file_info_opts(Handle, Name, -300000, [{time, posix}]), - ok = file_write_read_file_info_opts(Handle, Name, 300000, [{time, posix}]), - ok = file_write_read_file_info_opts(Handle, Name, 0, [{time, posix}]), + %ok = file_write_read_file_info_opts(Name, -1, [{time, posix}]), + %ok = file_write_read_file_info_opts(Name, -300000, [{time, posix}]), + ok = file_write_read_file_info_opts(Name, 300000, [{time, posix}]), + ok = file_write_read_file_info_opts(Name, 0, [{time, posix}]), - ok = ?PRIM_FILE:stop(Handle), ok. 
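The *_file_info_opts cases above no longer start and stop a prim_file server around the calls; the {time, posix | universal | local} options go straight into read_file_info/2 and write_file_info/3. A hedged sketch of the option round trip, using a POSIX timestamp the suite itself treats as portable; the path is illustrative:

    -include_lib("kernel/include/file.hrl").

    %% Set mtime as a POSIX timestamp and read it back in the same format.
    demo_time_opts(Name) ->
        ok = prim_file:write_file(Name, <<"opts demo">>),
        {ok, FI} = prim_file:read_file_info(Name, [{time, posix}]),
        ok = prim_file:write_file_info(Name, FI#file_info{mtime = 300000},
                                       [{time, posix}]),
        {ok, #file_info{mtime = 300000}} =
            prim_file:read_file_info(Name, [{time, posix}]),
        ok = prim_file:delete(Name).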
-file_write_read_file_info_opts(Handle, Name, Mtime, Opts) -> - {ok, FI} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts]), +file_write_read_file_info_opts(Name, Mtime, Opts) -> + {ok, FI} = ?PRIM_FILE:read_file_info(Name, Opts), FI2 = FI#file_info{ mtime = Mtime }, - ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI2, Opts]), - {ok, FI3} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts]), + ok = ?PRIM_FILE:write_file_info(Name, FI2, Opts), + {ok, FI3} = ?PRIM_FILE:read_file_info(Name, Opts), io:format("Expecting mtime = ~p, got ~p~n", [FI2#file_info.mtime, FI3#file_info.mtime]), FI2 = FI3, ok. @@ -1175,8 +1038,8 @@ advise(Config) when is_list(Config) -> atom_to_list(?MODULE) ++"_advise.fil"), - Line1 = "Hello\n", - Line2 = "World!\n", + Line1 = <<"Hello\n">>, + Line2 = <<"World!\n">>, {ok, Fd} = ?PRIM_FILE:open(Advise, [write]), ok = ?PRIM_FILE:advise(Fd, 0, 0, normal), @@ -1226,7 +1089,7 @@ advise(Config) when is_list(Config) -> {ok, Fd9} = ?PRIM_FILE:open(Advise, [read]), Offset = 0, %% same as a 0 length in some implementations - Length = length(Line1) + length(Line2), + Length = byte_size(Line1) + byte_size(Line2), ok = ?PRIM_FILE:advise(Fd9, Offset, Length, sequential), {ok, Line1} = ?PRIM_FILE:read_line(Fd9), {ok, Line2} = ?PRIM_FILE:read_line(Fd9), @@ -1250,23 +1113,18 @@ do_large_write(Name) -> Chunk = <<0:ChunkSize/unit:8>>, Data = zip_data(lists:duplicate(Chunks, Chunk), Interleave), Size = Chunks * ChunkSize + Chunks, % 4 G + 32 - Wordsize = erlang:system_info(wordsize), - case prim_file:write_file(Name, Data) of - ok when Wordsize =:= 8 -> - {ok,#file_info{size=Size}} = file:read_file_info(Name), - {ok,Fd} = prim_file:open(Name, [read]), - check_large_write(Fd, ChunkSize, 0, Interleave); - {error,einval} when Wordsize =:= 4 -> - ok - end. + ok = ?PRIM_FILE:write_file(Name, Data), + {ok,#file_info{size=Size}} = file:read_file_info(Name), + {ok,Fd} = ?PRIM_FILE:open(Name, [read]), + check_large_write(Fd, ChunkSize, 0, Interleave). check_large_write(Fd, ChunkSize, Pos, [X|Interleave]) -> Pos1 = Pos + ChunkSize, - {ok,Pos1} = prim_file:position(Fd, {cur,ChunkSize}), - {ok,[X]} = prim_file:read(Fd, 1), + {ok,Pos1} = ?PRIM_FILE:position(Fd, {cur,ChunkSize}), + {ok,<<X>>} = ?PRIM_FILE:read(Fd, 1), check_large_write(Fd, ChunkSize, Pos1+1, Interleave); check_large_write(Fd, _, _, []) -> - eof = prim_file:read(Fd, 1), + eof = ?PRIM_FILE:read(Fd, 1), ok. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -1338,71 +1196,53 @@ allocate_and_assert(Fd, Offset, Length) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -delete_a(Config) when is_list(Config) -> - delete(Config, [], "_a"). - -delete_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = delete(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. 
- -delete(Config, Handle, Suffix) -> +delete(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir,Config), Name = filename:join(RootDir, atom_to_list(?MODULE) - ++"_delete"++Suffix++".fil"), + ++"_delete.fil"), {ok, Fd1} = ?PRIM_FILE:open(Name, [write]), ?PRIM_FILE:write(Fd1,"ok.\n"), ok = ?PRIM_FILE:close(Fd1), %% Check that the file is readable {ok, Fd2} = ?PRIM_FILE:open(Name, [read]), ok = ?PRIM_FILE:close(Fd2), - ok = ?PRIM_FILE_call(delete, Handle, [Name]), + ok = ?PRIM_FILE:delete(Name), %% Check that the file is not readable anymore {error, _} = ?PRIM_FILE:open(Name, [read]), %% Try deleting a nonexistent file - {error, enoent} = ?PRIM_FILE_call(delete, Handle, [Name]), + {error, enoent} = ?PRIM_FILE:delete(Name), ok. -rename_a(Config) when is_list(Config) -> - rename(Config, [], "_a"). - -rename_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = rename(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -rename(Config, Handle, Suffix) -> +rename(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir,Config), - FileName1 = atom_to_list(?MODULE)++"_rename"++Suffix++".fil", - FileName2 = atom_to_list(?MODULE)++"_rename"++Suffix++".ful", + FileName1 = atom_to_list(?MODULE)++"_rename.fil", + FileName2 = atom_to_list(?MODULE)++"_rename.ful", Name1 = filename:join(RootDir, FileName1), Name2 = filename:join(RootDir, FileName2), {ok,Fd1} = ?PRIM_FILE:open(Name1, [write]), ok = ?PRIM_FILE:close(Fd1), %% Rename, and check that it really changed name - ok = ?PRIM_FILE_call(rename, Handle, [Name1, Name2]), + ok = ?PRIM_FILE:rename(Name1, Name2), {error, _} = ?PRIM_FILE:open(Name1, [read]), {ok, Fd2} = ?PRIM_FILE:open(Name2, [read]), ok = ?PRIM_FILE:close(Fd2), %% Try renaming something to itself - ok = ?PRIM_FILE_call(rename, Handle, [Name2, Name2]), + ok = ?PRIM_FILE:rename(Name2, Name2), %% Try renaming something that doesn't exist {error, enoent} = - ?PRIM_FILE_call(rename, Handle, [Name1, Name2]), + ?PRIM_FILE:rename(Name1, Name2), %% Try renaming to something else than a string {error, badarg} = - ?PRIM_FILE_call(rename, Handle, [Name1, foobar]), + ?PRIM_FILE:rename(Name1, foobar), %% Move between directories DirName1 = filename:join(RootDir, atom_to_list(?MODULE) - ++"_rename_dir"++Suffix), + ++"_rename_dir"), DirName2 = filename:join(RootDir, atom_to_list(?MODULE) - ++"_second_rename_dir"++Suffix), + ++"_second_rename_dir"), Name1foo = filename:join(DirName1, "foo.fil"), Name2foo = filename:join(DirName2, "foo.fil"), Name2bar = filename:join(DirName2, "bar.dir"), @@ -1410,21 +1250,21 @@ rename(Config, Handle, Suffix) -> %% The name has to include the full file name, path is not enough expect( {error, eexist}, {error, eisdir}, - ?PRIM_FILE_call(rename, Handle, [Name2, DirName1])), + ?PRIM_FILE:rename(Name2, DirName1)), ok = - ?PRIM_FILE_call(rename, Handle, [Name2, Name1foo]), + ?PRIM_FILE:rename(Name2, Name1foo), %% Now rename the directory - ok = ?PRIM_FILE_call(rename, Handle, [DirName1, DirName2]), + ok = ?PRIM_FILE:rename(DirName1, DirName2), %% And check that the file is there now {ok,Fd3} = ?PRIM_FILE:open(Name2foo, [read]), ok = ?PRIM_FILE:close(Fd3), %% Try some dirty things now: move the directory into itself {error, Msg1} = - ?PRIM_FILE_call(rename, Handle, [DirName2, Name2bar]), + ?PRIM_FILE:rename(DirName2, Name2bar), io:format("Errmsg1: ~p",[Msg1]), %% move dir into a file in itself {error, Msg2} = - ?PRIM_FILE_call(rename, Handle, [DirName2, Name2foo]), + ?PRIM_FILE:rename(DirName2, Name2foo), 
io:format("Errmsg2: ~p",[Msg2]), ok. @@ -1460,13 +1300,14 @@ e_delete(Config) when is_list(Config) -> case os:type() of {win32, _} -> %% Remove a character device. - {error, eacces} = ?PRIM_FILE:delete("nul"); + expect({error, eacces}, {error, einval}, + ?PRIM_FILE:delete("nul")); _ -> ?PRIM_FILE:write_file_info( Base, #file_info {mode=0}), {error, eacces} = ?PRIM_FILE:delete(Afile), ?PRIM_FILE:write_file_info( - Base, #file_info {mode=8#600}) + Base, #file_info {mode=8#700}) end, ok. @@ -1602,7 +1443,7 @@ e_make_dir(Config) when is_list(Config) -> ?PRIM_FILE:write_file_info(Base, #file_info {mode=0}), {error, eacces} = ?PRIM_FILE:make_dir(filename:join(Base, "xxxx")), - ?PRIM_FILE:write_file_info(Base, #file_info {mode=8#600}) + ?PRIM_FILE:write_file_info(Base, #file_info {mode=8#700}) end, ok. @@ -1652,170 +1493,24 @@ e_del_dir(Config) when is_list(Config) -> ?PRIM_FILE:write_file_info(Base, #file_info {mode=0}), {error, eacces} = ?PRIM_FILE:del_dir(ADirectory), ?PRIM_FILE:write_file_info( - Base, #file_info {mode=8#600}) - end, - ok. - - -%% Trying reading and positioning from a compressed file. - -read_compressed(Config) when is_list(Config) -> - Data = proplists:get_value(data_dir, Config), - Real = filename:join(Data, "realmen.html.gz"), - {ok, Fd} = ?PRIM_FILE:open(Real, [read, compressed]), - try_read_file(Fd). - -%% Trying reading and positioning from an uncompressed file, -%% but with the compressed flag given. - -read_not_really_compressed(Config) when is_list(Config) -> - Data = proplists:get_value(data_dir, Config), - Priv = proplists:get_value(priv_dir, Config), - - %% The file realmen.html might have got CRs added (by WinZip). - %% Remove them, or the file positions will not be correct. - - Real = filename:join(Data, "realmen.html"), - RealPriv = filename:join(Priv, - atom_to_list(?MODULE)++"_realmen.html"), - {ok, RealDataBin} = ?PRIM_FILE:read_file(Real), - RealData = remove_crs(binary_to_list(RealDataBin), []), - ok = ?PRIM_FILE:write_file(RealPriv, RealData), - {ok, Fd} = ?PRIM_FILE:open(RealPriv, [read, compressed]), - try_read_file(Fd). - -remove_crs([$\r|Rest], Result) -> - remove_crs(Rest, Result); -remove_crs([C|Rest], Result) -> - remove_crs(Rest, [C|Result]); -remove_crs([], Result) -> - lists:reverse(Result). - -try_read_file(Fd) -> - %% Seek to the current position (nothing should happen). - - {ok, 0} = ?PRIM_FILE:position(Fd, 0), - {ok, 0} = ?PRIM_FILE:position(Fd, {cur, 0}), - - %% Read a few lines from a compressed file. - - ShouldBe = "<TITLE>Real Programmers Don't Use PASCAL</TITLE>\n", - {ok, ShouldBe} = ?PRIM_FILE:read(Fd, length(ShouldBe)), - - %% Now seek forward. - - {ok, 381} = ?PRIM_FILE:position(Fd, 381), - Back = "Back in the good old days -- the \"Golden Era\" " ++ - "of computers, it was\n", - {ok, Back} = ?PRIM_FILE:read(Fd, length(Back)), - - %% Try to search forward relative to the current position. - - {ok, CurPos} = ?PRIM_FILE:position(Fd, {cur, 0}), - RealPos = 4273, - {ok, RealPos} = ?PRIM_FILE:position(Fd, {cur, RealPos-CurPos}), - RealProg = "<LI> Real Programmers aren't afraid to use GOTOs.\n", - {ok, RealProg} = ?PRIM_FILE:read(Fd, length(RealProg)), - - %% Seek backward. - - AfterTitle = length("<TITLE>"), - {ok, AfterTitle} = ?PRIM_FILE:position(Fd, AfterTitle), - Title = "Real Programmers Don't Use PASCAL</TITLE>\n", - {ok, Title} = ?PRIM_FILE:read(Fd, length(Title)), - - %% Done. - - ?PRIM_FILE:close(Fd), - ok. 
- -write_compressed(Config) when is_list(Config) -> - Priv = proplists:get_value(priv_dir, Config), - MyFile = filename:join(Priv, - atom_to_list(?MODULE)++"_test.gz"), - - %% Write a file. - - {ok, Fd} = ?PRIM_FILE:open(MyFile, [write, compressed]), - {ok, 0} = ?PRIM_FILE:position(Fd, 0), - Prefix = "hello\n", - End = "end\n", - ok = ?PRIM_FILE:write(Fd, Prefix), - {ok, 143} = ?PRIM_FILE:position(Fd, 143), - ok = ?PRIM_FILE:write(Fd, End), - ok = ?PRIM_FILE:close(Fd), - - %% Read the file and verify the contents. - - {ok, Fd1} = ?PRIM_FILE:open(MyFile, [read, compressed]), - {ok, Prefix} = ?PRIM_FILE:read(Fd1, length(Prefix)), - Second = lists:duplicate(143-length(Prefix), 0) ++ End, - {ok, Second} = ?PRIM_FILE:read(Fd1, length(Second)), - ok = ?PRIM_FILE:close(Fd1), - - %% Ensure that the file is compressed. - - TotalSize = 143 + length(End), - case ?PRIM_FILE:read_file_info(MyFile) of - {ok, #file_info{size=Size}} when Size < TotalSize -> - ok; - {ok, #file_info{size=Size}} when Size == TotalSize -> - ct:fail(file_not_compressed) + Base, #file_info {mode=8#700}) end, - - %% Write again to ensure that the file is truncated. - - {ok, Fd2} = ?PRIM_FILE:open(MyFile, [write, compressed]), - NewString = "aaaaaaaaaaa", - ok = ?PRIM_FILE:write(Fd2, NewString), - ok = ?PRIM_FILE:close(Fd2), - {ok, Fd3} = ?PRIM_FILE:open(MyFile, [read, compressed]), - {ok, NewString} = ?PRIM_FILE:read(Fd3, 1024), - ok = ?PRIM_FILE:close(Fd3), - - ok. - -compress_errors(Config) when is_list(Config) -> - Data = proplists:get_value(data_dir, Config), - {error, enoent} = ?PRIM_FILE:open("non_existing__", - [compressed, read]), - {error, einval} = ?PRIM_FILE:open("non_existing__", - [compressed, read, write]), - - %% Read a corrupted .gz file. - - Corrupted = filename:join(Data, "corrupted.gz"), - {ok, Fd} = ?PRIM_FILE:open(Corrupted, [read, compressed]), - {error, eio} = ?PRIM_FILE:read(Fd, 100), - ?PRIM_FILE:close(Fd), - ok. -%% Test creating a hard link. -make_link_a(Config) when is_list(Config) -> - make_link(Config, [], "_a"). - -%% Test creating a hard link. -make_link_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = make_link(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -make_link(Config, Handle, Suffix) -> +make_link(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir, Config), NewDir = filename:join(RootDir, atom_to_list(?MODULE) - ++"_make_link"++Suffix), - ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), + ++"_make_link"), + ok = ?PRIM_FILE:make_dir(NewDir), Name = filename:join(NewDir, "a_file"), ok = ?PRIM_FILE:write_file(Name, "some contents\n"), Alias = filename:join(NewDir, "an_alias"), Result = - case ?PRIM_FILE_call(make_link, Handle, [Name, Alias]) of + case ?PRIM_FILE:make_link(Name, Alias) of {error, enotsup} -> {skipped, "Links not supported on this platform"}; ok -> @@ -1826,12 +1521,12 @@ make_link(Config, Handle, Suffix) -> %% since they are not used on symbolic links. {ok, Info} = - ?PRIM_FILE_call(read_link_info, Handle, [Name]), + ?PRIM_FILE:read_link_info(Name), {ok, Info} = - ?PRIM_FILE_call(read_link_info, Handle, [Alias]), + ?PRIM_FILE:read_link_info(Alias), #file_info{links = 2, type = regular} = Info, {error, eexist} = - ?PRIM_FILE_call(make_link, Handle, [Name, Alias]), + ?PRIM_FILE:make_link(Name, Alias), ok end, @@ -1843,30 +1538,19 @@ read_link_info_for_non_link(Config) when is_list(Config) -> {ok, #file_info{type=directory}} = ?PRIM_FILE:read_link_info("."), ok. 
-%% Test operations on symbolic links (for Unix). -symlinks_a(Config) when is_list(Config) -> - symlinks(Config, [], "_a"). - -%% Test operations on symbolic links (for Unix). -symlinks_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = symlinks(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -symlinks(Config, Handle, Suffix) -> +symlinks(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir, Config), NewDir = filename:join(RootDir, atom_to_list(?MODULE) - ++"_make_symlink"++Suffix), - ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), + ++"_make_symlink"), + ok = ?PRIM_FILE:make_dir(NewDir), Name = filename:join(NewDir, "a_plain_file"), ok = ?PRIM_FILE:write_file(Name, "some stupid content\n"), Alias = filename:join(NewDir, "a_symlink_alias"), Result = - case ?PRIM_FILE_call(make_symlink, Handle, [Name, Alias]) of + case ?PRIM_FILE:make_symlink(Name, Alias) of {error, enotsup} -> {skipped, "Links not supported on this platform"}; {error, eperm} -> @@ -1874,20 +1558,20 @@ symlinks(Config, Handle, Suffix) -> {skipped, "Windows user not privileged to create links"}; ok -> {ok, Info1} = - ?PRIM_FILE_call(read_file_info, Handle, [Name]), + ?PRIM_FILE:read_file_info(Name), {ok, Info1} = - ?PRIM_FILE_call(read_file_info, Handle, [Alias]), + ?PRIM_FILE:read_file_info(Alias), {ok, Info1} = - ?PRIM_FILE_call(read_link_info, Handle, [Name]), + ?PRIM_FILE:read_link_info(Name), #file_info{links = 1, type = regular} = Info1, {ok, Info2} = - ?PRIM_FILE_call(read_link_info, Handle, [Alias]), + ?PRIM_FILE:read_link_info(Alias), #file_info{links=1, type=symlink} = Info2, {ok, Name} = - ?PRIM_FILE_call(read_link, Handle, [Alias]), + ?PRIM_FILE:read_link(Alias), {ok, Name} = - ?PRIM_FILE_call(read_link_all, Handle, [Alias]), + ?PRIM_FILE:read_link_all(Alias), %% If all is good, delete dir again (avoid hanging dir on windows) rm_rf(?PRIM_FILE,NewDir), ok @@ -1907,10 +1591,9 @@ list_dir_limit(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir, Config), NewDir = filename:join(RootDir, atom_to_list(?MODULE)++"_list_dir_limit"), - {ok, Handle1} = ?PRIM_FILE:start(), - ok = ?PRIM_FILE_call(make_dir, Handle1, [NewDir]), + ok = ?PRIM_FILE:make_dir(NewDir), Ref = erlang:start_timer(MaxTime*1000, self(), []), - Result = list_dir_limit_loop(NewDir, Handle1, Ref, MaxNumber, 0), + Result = list_dir_limit_loop(NewDir, Ref, MaxNumber, 0), Time = case erlang:cancel_timer(Ref) of false -> MaxTime; T -> MaxTime - (T div 1000) @@ -1920,21 +1603,18 @@ list_dir_limit(Config) when is_list(Config) -> {error, _Reason, N} -> N; _ -> 0 end, - {ok, Handle2} = ?PRIM_FILE:start(), - list_dir_limit_cleanup(NewDir, Handle2, Number, 0), - ok = ?PRIM_FILE:stop(Handle1), - ok = ?PRIM_FILE:stop(Handle2), + list_dir_limit_cleanup(NewDir, Number, 0), {ok, Number} = Result, {comment, "Created " ++ integer_to_list(Number) ++ " files in " ++ integer_to_list(Time) ++ " seconds."}. 
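The link, symlink, and list_dir_limit cases above shed their server handles as well; make_link/2 and read_link_info/1 become plain prim_file calls, with a skip when the file system has no link support. A hedged sketch of the hard-link check; paths are illustrative and #file_info{} again needs kernel/include/file.hrl:

    %% Create a hard link and confirm both names report two links.
    demo_make_link(Name, Alias) ->
        ok = prim_file:write_file(Name, <<"some contents\n">>),
        case prim_file:make_link(Name, Alias) of
            {error, enotsup} ->
                {skipped, "links not supported on this file system"};
            ok ->
                {ok, #file_info{links = 2, type = regular}} =
                    prim_file:read_link_info(Name),
                {ok, #file_info{links = 2, type = regular}} =
                    prim_file:read_link_info(Alias),
                ok
        end.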
-list_dir_limit_loop(Dir, Handle, _Ref, N, Cnt) when Cnt >= N -> - list_dir_check(Dir, Handle, Cnt); -list_dir_limit_loop(Dir, Handle, Ref, N, Cnt) -> +list_dir_limit_loop(Dir, _Ref, N, Cnt) when Cnt >= N -> + list_dir_check(Dir, Cnt); +list_dir_limit_loop(Dir, Ref, N, Cnt) -> receive {timeout, Ref, []} -> - list_dir_check(Dir, Handle, Cnt) + list_dir_check(Dir, Cnt) after 0 -> Name = integer_to_list(Cnt), case ?PRIM_FILE:write_file(filename:join(Dir, Name), Name) of @@ -1942,23 +1622,23 @@ list_dir_limit_loop(Dir, Handle, Ref, N, Cnt) -> Next = Cnt + 1, case Cnt rem 100 of 0 -> - case list_dir_check(Dir, Handle, Next) of + case list_dir_check(Dir, Next) of {ok, Next} -> list_dir_limit_loop( - Dir, Handle, Ref, N, Next); + Dir, Ref, N, Next); Other -> Other end; _ -> - list_dir_limit_loop(Dir, Handle, Ref, N, Next) + list_dir_limit_loop(Dir, Ref, N, Next) end; {error, Reason} -> {error, Reason, Cnt} end end. -list_dir_check(Dir, Handle, Cnt) -> - case ?PRIM_FILE:list_dir(Handle, Dir) of +list_dir_check(Dir, Cnt) -> + case ?PRIM_FILE:list_dir(Dir) of {ok, ListDir} -> case length(ListDir) of Cnt -> @@ -1975,18 +1655,18 @@ list_dir_check(Dir, Handle, Cnt) -> %% Deletes N files while ignoring errors, then continues deleting %% as long as they exist. -list_dir_limit_cleanup(Dir, Handle, N, Cnt) when Cnt >= N -> +list_dir_limit_cleanup(Dir, N, Cnt) when Cnt >= N -> Name = integer_to_list(Cnt), - case ?PRIM_FILE:delete(Handle, filename:join(Dir, Name)) of + case ?PRIM_FILE:delete(filename:join(Dir, Name)) of ok -> - list_dir_limit_cleanup(Dir, Handle, N, Cnt+1); + list_dir_limit_cleanup(Dir, N, Cnt+1); _ -> ok end; -list_dir_limit_cleanup(Dir, Handle, N, Cnt) -> +list_dir_limit_cleanup(Dir, N, Cnt) -> Name = integer_to_list(Cnt), - ?PRIM_FILE:delete(Handle, filename:join(Dir, Name)), - list_dir_limit_cleanup(Dir, Handle, N, Cnt+1). + ?PRIM_FILE:delete(filename:join(Dir, Name)), + list_dir_limit_cleanup(Dir, N, Cnt+1). %%% %%% Test list_dir() on a non-existing pathname. @@ -1995,7 +1675,7 @@ list_dir_limit_cleanup(Dir, Handle, N, Cnt) -> list_dir_error(Config) -> Priv = proplists:get_value(priv_dir, Config), NonExisting = filename:join(Priv, "non-existing-dir"), - {error,enoent} = prim_file:list_dir(NonExisting), + {error,enoent} = ?PRIM_FILE:list_dir(NonExisting), ok. %%% @@ -2063,7 +1743,7 @@ do_run_large_file_test(Config, Run, Name0) -> {'DOWN',Mref,_,_,_} -> ok; {Tester,done} -> ok end, - prim_file:delete(Name) + ?PRIM_FILE:delete(Name) end), %% Run the test case. @@ -2108,12 +1788,25 @@ free_memory() -> {value, {buffered_memory, Buffed}} -> Buffed; false -> 0 end), - TotFree div (1024*1024) + usable_mem(TotFree) div (1024*1024) catch error : undef -> ct:fail({"os_mon not built"}) end. +usable_mem(Memory) -> + case test_server:is_valgrind() of + true -> + %% Valgrind uses extra memory for the V- and A-bits. + %% http://valgrind.org/docs/manual/mc-manual.html#mc-manual.value + %% Docs says it uses "compression to represent the V bits compactly" + %% but let's be conservative and cut usable memory in half. + Memory div 2; + false -> + Memory + end. + + %%%----------------------------------------------------------------- %%% Utilities rm_rf(Mod,Dir) -> diff --git a/lib/kernel/test/sendfile_SUITE.erl b/lib/kernel/test/sendfile_SUITE.erl index bfa564c32c..0c0b1cbcb6 100644 --- a/lib/kernel/test/sendfile_SUITE.erl +++ b/lib/kernel/test/sendfile_SUITE.erl @@ -23,30 +23,41 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("kernel/include/file.hrl"). --compile(export_all). 
- -all() -> [{group,async_threads}, - {group,no_async_threads}]. - -groups() -> - [{async_threads,[],tcs()}, - {no_async_threads,[],tcs()}]. - -tcs() -> - [t_sendfile_small - ,t_sendfile_big_all - ,t_sendfile_big_size - ,t_sendfile_many_small - ,t_sendfile_partial - ,t_sendfile_offset - ,t_sendfile_sendafter - ,t_sendfile_recvafter - ,t_sendfile_recvafter_remoteclose - ,t_sendfile_sendduring - ,t_sendfile_recvduring - ,t_sendfile_closeduring - ,t_sendfile_crashduring - ]. +-export([all/0, init_per_suite/1, end_per_suite/1, init_per_testcase/2]). + +-export([sendfile_server/2, sendfile_do_recv/2, init/1, handle_event/2]). + +-export( + [t_sendfile_small/1, + t_sendfile_big_all/1, + t_sendfile_big_size/1, + t_sendfile_many_small/1, + t_sendfile_partial/1, + t_sendfile_offset/1, + t_sendfile_sendafter/1, + t_sendfile_recvafter/1, + t_sendfile_recvafter_remoteclose/1, + t_sendfile_sendduring/1, + t_sendfile_recvduring/1, + t_sendfile_closeduring/1, + t_sendfile_crashduring/1, + t_sendfile_arguments/1]). + +all() -> + [t_sendfile_small, + t_sendfile_big_all, + t_sendfile_big_size, + t_sendfile_many_small, + t_sendfile_partial, + t_sendfile_offset, + t_sendfile_sendafter, + t_sendfile_recvafter, + t_sendfile_recvafter_remoteclose, + t_sendfile_sendduring, + t_sendfile_recvduring, + t_sendfile_closeduring, + t_sendfile_crashduring, + t_sendfile_arguments]. init_per_suite(Config) -> case {os:type(),os:version()} of @@ -72,28 +83,18 @@ init_per_suite(Config) -> end_per_suite(Config) -> file:delete(proplists:get_value(big_file, Config)). -init_per_group(async_threads,Config) -> - case erlang:system_info(thread_pool_size) of - 0 -> - {skip,"No async threads"}; - _ -> - [{sendfile_opts,[{use_threads,true}]}|Config] - end; -init_per_group(no_async_threads,Config) -> - [{sendfile_opts,[{use_threads,false}]}|Config]. - -end_per_group(_,_Config) -> - ok. - init_per_testcase(TC,Config) when TC == t_sendfile_recvduring; TC == t_sendfile_sendduring -> Filename = proplists:get_value(small_file, Config), Send = fun(Sock) -> {_Size, Data} = sendfile_file_info(Filename), - {ok,D} = file:open(Filename, [raw,binary,read]), - prim_file:sendfile(D, Sock, 0, 0, 0, - [],[],[]), + {ok,Fd} = file:open(Filename, [raw,binary,read]), + %% Determine whether the driver has native support by + %% hitting the raw module directly; file:sendfile/5 will + %% land in the fallback if it doesn't. + RawModule = Fd#file_descriptor.module, + {ok, _Ignored} = RawModule:sendfile(Fd,Sock,0,0,0,[],[],[]), Data end, @@ -105,9 +106,8 @@ init_per_testcase(TC,Config) when TC == t_sendfile_recvduring; ct:log("Error: ~p",[Error]), {skip,"Not supported"} end; -init_per_testcase(_Tc,Config) -> - Config ++ [{sendfile_opts,[{use_threads,false}]}]. - +init_per_testcase(_TC,Config) -> + Config. 
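With the async_threads/no_async_threads groups gone, init_per_testcase above only probes for native sendfile support by reaching the raw module through the #file_descriptor{} record, and the remaining cases fall back to a [] default for sendfile_opts. A hedged sketch of the plain API the cases ultimately exercise; host, port, and file name are illustrative:

    %% Stream a whole file over a freshly connected TCP socket.
    demo_sendfile(Filename, Host, Port) ->
        {ok, Sock} = gen_tcp:connect(Host, Port,
                                     [binary, {packet, 0}, {active, false}]),
        {ok, Fd} = file:open(Filename, [read, raw, binary]),
        %% Offset 0 and length 0 mean "send everything"; [] keeps default options.
        {ok, _BytesSent} = file:sendfile(Fd, Sock, 0, 0, []),
        ok = file:close(Fd),
        ok = gen_tcp:close(Sock).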
t_sendfile_small(Config) when is_list(Config) -> Filename = proplists:get_value(small_file, Config), @@ -124,7 +124,7 @@ t_sendfile_small(Config) when is_list(Config) -> t_sendfile_many_small(Config) when is_list(Config) -> Filename = proplists:get_value(small_file, Config), FileOpts = proplists:get_value(file_opts, Config, []), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), error_logger:add_report_handler(?MODULE,[self()]), @@ -151,7 +151,7 @@ t_sendfile_many_small(Config) when is_list(Config) -> t_sendfile_big_all(Config) when is_list(Config) -> Filename = proplists:get_value(big_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {ok, #file_info{size = Size}} = @@ -165,7 +165,7 @@ t_sendfile_big_all(Config) when is_list(Config) -> t_sendfile_big_size(Config) -> Filename = proplists:get_value(big_file, Config), FileOpts = proplists:get_value(file_opts, Config, []), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), SendAll = fun(Sock) -> {ok, #file_info{size = Size}} = @@ -180,7 +180,7 @@ t_sendfile_big_size(Config) -> t_sendfile_partial(Config) -> Filename = proplists:get_value(small_file, Config), FileOpts = proplists:get_value(file_opts, Config, []), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), SendSingle = fun(Sock) -> {_Size, <<Data:5/binary,_/binary>>} = @@ -217,7 +217,7 @@ t_sendfile_partial(Config) -> t_sendfile_offset(Config) -> Filename = proplists:get_value(small_file, Config), FileOpts = proplists:get_value(file_opts, Config, []), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {_Size, <<_:5/binary,Data:3/binary,_/binary>> = AllData} = @@ -233,7 +233,7 @@ t_sendfile_offset(Config) -> t_sendfile_sendafter(Config) -> Filename = proplists:get_value(small_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {Size, Data} = sendfile_file_info(Filename), @@ -246,7 +246,7 @@ t_sendfile_sendafter(Config) -> t_sendfile_recvafter(Config) -> Filename = proplists:get_value(small_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {Size, Data} = sendfile_file_info(Filename), @@ -279,7 +279,7 @@ t_sendfile_recvafter_remoteclose(Config) -> t_sendfile_sendduring(Config) -> Filename = proplists:get_value(big_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {ok, #file_info{size = Size}} = @@ -296,7 +296,7 @@ t_sendfile_sendduring(Config) -> t_sendfile_recvduring(Config) -> Filename = proplists:get_value(big_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {ok, #file_info{size = Size}} = @@ -315,7 +315,7 @@ t_sendfile_recvduring(Config) -> t_sendfile_closeduring(Config) -> Filename = proplists:get_value(big_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = 
proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock,SFServPid) -> spawn_link(fun() -> @@ -345,7 +345,7 @@ t_sendfile_closeduring(Config) -> t_sendfile_crashduring(Config) -> Filename = proplists:get_value(big_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), error_logger:add_report_handler(?MODULE,[self()]), @@ -373,6 +373,36 @@ t_sendfile_crashduring(Config) -> end end. +t_sendfile_arguments(Config) -> + Filename = proplists:get_value(small_file, Config), + + {ok, Listener} = gen_tcp:listen(0, + [{packet, 0}, {active, false}, {reuseaddr, true}]), + {ok, Port} = inet:port(Listener), + + ErrorCheck = + fun(Reason, Offset, Length, Opts) -> + {ok, Sender} = gen_tcp:connect({127, 0, 0, 1}, Port, + [{packet, 0}, {active, false}]), + {ok, Receiver} = gen_tcp:accept(Listener), + {ok, Fd} = file:open(Filename, [read, raw]), + {error, Reason} = file:sendfile(Fd, Sender, Offset, Length, Opts), + gen_tcp:close(Receiver), + gen_tcp:close(Sender), + file:close(Fd) + end, + + ErrorCheck(einval, -1, 0, []), + ErrorCheck(einval, 0, -1, []), + ErrorCheck(badarg, gurka, 0, []), + ErrorCheck(badarg, 0, gurka, []), + ErrorCheck(badarg, 0, 0, gurka), + ErrorCheck(badarg, 0, 0, [{chunk_size, gurka}]), + + gen_tcp:close(Listener), + + ok. + %% Generic sendfile server code sendfile_send(Send) -> sendfile_send({127,0,0,1},Send). diff --git a/lib/kernel/test/seq_trace_SUITE.erl b/lib/kernel/test/seq_trace_SUITE.erl index be23a1933f..cf4bf11328 100644 --- a/lib/kernel/test/seq_trace_SUITE.erl +++ b/lib/kernel/test/seq_trace_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2016. All Rights Reserved. +%% Copyright Ericsson AB 1998-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -25,7 +25,7 @@ -export([token_set_get/1, tracer_set_get/1, print/1, send/1, distributed_send/1, recv/1, distributed_recv/1, trace_exit/1, distributed_exit/1, call/1, port/1, - match_set_seq_token/1, gc_seq_token/1]). + match_set_seq_token/1, gc_seq_token/1, label_capability_mismatch/1]). %% internal exports -export([simple_tracer/2, one_time_receiver/0, one_time_receiver/1, @@ -47,7 +47,7 @@ all() -> [token_set_get, tracer_set_get, print, send, distributed_send, recv, distributed_recv, trace_exit, distributed_exit, call, port, match_set_seq_token, - gc_seq_token]. + gc_seq_token, label_capability_mismatch]. groups() -> []. 
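label_capability_mismatch joins all() above because, in the hunks that follow, seq_trace labels are no longer limited to small integers and may be dropped on the way to an older node. A hedged sketch of setting and reading back a term label locally, assuming a node from this release:

    %% A seq_trace label can be an arbitrary term on nodes that support it.
    demo_term_label() ->
        seq_trace:reset_trace(),
        Label = {my_label, make_ref()},
        seq_trace:set_token(label, Label),
        {label, Label} = seq_trace:get_token(label),
        seq_trace:reset_trace(),
        ok.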
@@ -90,8 +90,8 @@ do_token_set_get(TsType) -> %% Test that initial seq_trace is disabled [] = seq_trace:get_token(), %% Test setting and reading the different fields - 0 = seq_trace:set_token(label,17), - {label,17} = seq_trace:get_token(label), + 0 = seq_trace:set_token(label,{my_label,1}), + {label,{my_label,1}} = seq_trace:get_token(label), false = seq_trace:set_token(print,true), {print,true} = seq_trace:get_token(print), false = seq_trace:set_token(send,true), @@ -101,12 +101,12 @@ do_token_set_get(TsType) -> false = seq_trace:set_token(TsType,true), {TsType,true} = seq_trace:get_token(TsType), %% Check the whole token - {Flags,17,0,Self,0} = seq_trace:get_token(), % all flags are set + {Flags,{my_label,1},0,Self,0} = seq_trace:get_token(), % all flags are set %% Test setting and reading the 'serial' field {0,0} = seq_trace:set_token(serial,{3,5}), {serial,{3,5}} = seq_trace:get_token(serial), %% Check the whole token, test that a whole token can be set and get - {Flags,17,5,Self,3} = seq_trace:get_token(), + {Flags,{my_label,1},5,Self,3} = seq_trace:get_token(), seq_trace:set_token({Flags,19,7,Self,5}), {Flags,19,7,Self,5} = seq_trace:get_token(), %% Check that receive timeout does not reset token @@ -166,11 +166,13 @@ do_send(TsType) -> seq_trace:reset_trace(), start_tracer(), Receiver = spawn(?MODULE,one_time_receiver,[]), + Label = make_ref(), + seq_trace:set_token(label,Label), set_token_flags([send, TsType]), Receiver ! send, Self = self(), seq_trace:reset_trace(), - [{0,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1), + [{Label,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1), check_ts(TsType, Ts). distributed_send(Config) when is_list(Config) -> @@ -184,14 +186,19 @@ do_distributed_send(TsType) -> seq_trace:reset_trace(), start_tracer(), Receiver = spawn(Node,?MODULE,one_time_receiver,[]), + + %% Make sure complex labels survive the trip. + Label = make_ref(), + seq_trace:set_token(label,Label), set_token_flags([send,TsType]), + Receiver ! send, Self = self(), seq_trace:reset_trace(), stop_node(Node), - [{0,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1), + [{Label,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1), check_ts(TsType, Ts). - + recv(Config) when is_list(Config) -> lists:foreach(fun do_recv/1, ?TIMESTAMP_MODES). @@ -220,7 +227,12 @@ do_distributed_recv(TsType) -> seq_trace:reset_trace(), rpc:call(Node,?MODULE,start_tracer,[]), Receiver = spawn(Node,?MODULE,one_time_receiver,[]), + + %% Make sure complex labels survive the trip. + Label = make_ref(), + seq_trace:set_token(label,Label), set_token_flags(['receive',TsType]), + Receiver ! 'receive', %% let the other process receive the message: receive after 1 -> ok end, @@ -229,7 +241,7 @@ do_distributed_recv(TsType) -> Result = rpc:call(Node,?MODULE,stop_tracer,[1]), stop_node(Node), ok = io:format("~p~n",[Result]), - [{0,{'receive',_,Self,Receiver,'receive'}, Ts}] = Result, + [{Label,{'receive',_,Self,Receiver,'receive'}, Ts}] = Result, check_ts(TsType, Ts). trace_exit(Config) when is_list(Config) -> @@ -240,7 +252,12 @@ do_trace_exit(TsType) -> start_tracer(), Receiver = spawn_link(?MODULE, one_time_receiver, [exit]), process_flag(trap_exit, true), + + %% Make sure complex labels survive the trip. + Label = make_ref(), + seq_trace:set_token(label,Label), set_token_flags([send, TsType]), + Receiver ! 
{before, exit}, %% let the other process receive the message: receive @@ -254,8 +271,8 @@ do_trace_exit(TsType) -> Result = stop_tracer(2), seq_trace:reset_trace(), ok = io:format("~p~n", [Result]), - [{0, {send, {0,1}, Self, Receiver, {before, exit}}, Ts0}, - {0, {send, {1,2}, Receiver, Self, + [{Label, {send, {0,1}, Self, Receiver, {before, exit}}, Ts0}, + {Label, {send, {1,2}, Receiver, Self, {'EXIT', Receiver, {exit, {before, exit}}}}, Ts1}] = Result, check_ts(TsType, Ts0), check_ts(TsType, Ts1). @@ -291,6 +308,74 @@ do_distributed_exit(TsType) -> {'EXIT', Receiver, {exit, {before, exit}}}}, Ts}] = Result, check_ts(TsType, Ts). +label_capability_mismatch(Config) when is_list(Config) -> + Releases = ["20_latest"], + Available = [Rel || Rel <- Releases, test_server:is_release_available(Rel)], + case Available of + [] -> {skipped, "No incompatible releases available"}; + _ -> + lists:foreach(fun do_incompatible_labels/1, Available), + lists:foreach(fun do_compatible_labels/1, Available), + ok + end. + +do_incompatible_labels(Rel) -> + Cookie = atom_to_list(erlang:get_cookie()), + {ok, Node} = test_server:start_node( + list_to_atom(atom_to_list(?MODULE)++"_"++Rel), peer, + [{args, " -setcookie "++Cookie}, {erl, [{release, Rel}]}]), + + {_,Dir} = code:is_loaded(?MODULE), + Mdir = filename:dirname(Dir), + true = rpc:call(Node,code,add_patha,[Mdir]), + seq_trace:reset_trace(), + rpc:call(Node,?MODULE,start_tracer,[]), + Receiver = spawn(Node,?MODULE,one_time_receiver,[]), + + %% This node does not support arbitrary labels, so it must fail with a + %% timeout as the token is dropped silently. + seq_trace:set_token(label,make_ref()), + seq_trace:set_token('receive',true), + + Receiver ! 'receive', + %% let the other process receive the message: + receive after 10 -> ok end, + seq_trace:reset_trace(), + + {error,timeout} = rpc:call(Node,?MODULE,stop_tracer,[1]), + stop_node(Node), + ok. + +do_compatible_labels(Rel) -> + Cookie = atom_to_list(erlang:get_cookie()), + {ok, Node} = test_server:start_node( + list_to_atom(atom_to_list(?MODULE)++"_"++Rel), peer, + [{args, " -setcookie "++Cookie}, {erl, [{release, Rel}]}]), + + {_,Dir} = code:is_loaded(?MODULE), + Mdir = filename:dirname(Dir), + true = rpc:call(Node,code,add_patha,[Mdir]), + seq_trace:reset_trace(), + rpc:call(Node,?MODULE,start_tracer,[]), + Receiver = spawn(Node,?MODULE,one_time_receiver,[]), + + %% This node does not support arbitrary labels, but small integers should + %% still work. + Label = 1234, + seq_trace:set_token(label,Label), + seq_trace:set_token('receive',true), + + Receiver ! 'receive', + %% let the other process receive the message: + receive after 10 -> ok end, + Self = self(), + seq_trace:reset_trace(), + Result = rpc:call(Node,?MODULE,stop_tracer,[1]), + stop_node(Node), + ok = io:format("~p~n",[Result]), + [{Label,{'receive',_,Self,Receiver,'receive'}, _}] = Result, + ok. + call(doc) -> "Tests special forms {is_seq_trace} and {get_seq_token} " "in trace match specs."; @@ -698,6 +783,24 @@ do_shrink(N) -> erlang:garbage_collect(), do_shrink(N-1). +%% Test that messages from a port does not clear the token +port_clean_token(Config) when is_list(Config) -> + seq_trace:reset_trace(), + Label = make_ref(), + seq_trace:set_token(label, Label), + {label,Label} = seq_trace:get_token(label), + + %% Create a port and get messages from it + %% We use os:cmd as a convenience as it does + %% open_port, port_command, port_close and receives replies. 
+ %% Maybe it is not ideal to rely on the internal implementation + %% of os:cmd but it will have to do. + os:cmd("ls"), + + %% Make sure that the seq_trace token is still there + {label,Label} = seq_trace:get_token(label), + + ok. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% diff --git a/lib/kernel/test/wrap_log_reader_SUITE.erl b/lib/kernel/test/wrap_log_reader_SUITE.erl index 40a016aed0..59b088ca73 100644 --- a/lib/kernel/test/wrap_log_reader_SUITE.erl +++ b/lib/kernel/test/wrap_log_reader_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2016. All Rights Reserved. +%% Copyright Ericsson AB 1998-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -428,13 +428,14 @@ stop() -> ok = wrap_log_test:stop(), dl_wait(). -%% Give disk logs opened by 'logger' and 'wlt' time to close after +%% Give disk logs opened by 'wlr_logger' and 'wlt' time to close after %% receiving EXIT signals. dl_wait() -> case disk_log:accessible_logs() of {[], []} -> ok; - _ -> + _X -> + erlang:display(_X), timer:sleep(100), dl_wait() end. @@ -507,27 +508,27 @@ add_ext(Name, Ext) -> %% disk_log. open(Log, File, Where) -> - logger ! {open, self(), Log, File}, + wlr_logger ! {open, self(), Log, File}, rec1(ok, Where). open_ext(Log, File, Where) -> - logger ! {open_ext, self(), Log, File}, + wlr_logger ! {open_ext, self(), Log, File}, rec1(ok, Where). close(Log) -> - logger ! {close, self(), Log}, + wlr_logger ! {close, self(), Log}, rec(ok, ?LINE). sync(Log) -> - logger ! {sync, self(), Log}, + wlr_logger ! {sync, self(), Log}, rec(ok, ?LINE). log_terms(File, Terms) -> - logger ! {log_terms, self(), File, Terms}, + wlr_logger ! {log_terms, self(), File, Terms}, rec(ok, ?LINE). blog_terms(File, Terms) -> - logger ! {blog_terms, self(), File, Terms}, + wlr_logger ! {blog_terms, self(), File, Terms}, rec(ok, ?LINE). rec1(M, Where) -> diff --git a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl index 38449b6bb3..d2bac40192 100644 --- a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl +++ b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2016. All Rights Reserved. +%% Copyright Ericsson AB 1998-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -36,9 +36,9 @@ -endif. init() -> - spawn(fun() -> start(logger) end), + spawn(fun() -> start(wlr_logger) end), spawn(fun() -> start2(wlt) end), - wait_registered(logger), + wait_registered(wlr_logger), wait_registered(wlt), ok. @@ -52,9 +52,9 @@ wait_registered(Name) -> end. stop() -> - catch logger ! exit, + catch wlr_logger ! exit, catch wlt ! exit, - wait_unregistered(logger), + wait_unregistered(wlr_logger), wait_unregistered(wlt), ok. @@ -82,47 +82,47 @@ loop() -> {open, Pid, Name, File} -> R = disk_log:open([{name, Name}, {type, wrap}, {file, File}, {size, {?fsize, ?fno}}]), - ?format("logger: open ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: open ~p -> ~p~n", [Name, R]), Pid ! R, loop(); {open_ext, Pid, Name, File} -> R = disk_log:open([{name, Name}, {type, wrap}, {file, File}, {format, external}, {size, {?fsize, ?fno}}]), - ?format("logger: open ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: open ~p -> ~p~n", [Name, R]), Pid ! 
R, loop(); {close, Pid, Name} -> R = disk_log:close(Name), - ?format("logger: close ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: close ~p -> ~p~n", [Name, R]), Pid ! R, loop(); {sync, Pid, Name} -> R = disk_log:sync(Name), - ?format("logger: sync ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: sync ~p -> ~p~n", [Name, R]), Pid ! R, loop(); {log_terms, Pid, Name, Terms} -> R = disk_log:log_terms(Name, Terms), - ?format("logger: log_terms ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: log_terms ~p -> ~p~n", [Name, R]), Pid ! R, loop(); {blog_terms, Pid, Name, Terms} -> R = disk_log:blog_terms(Name, Terms), - ?format("logger: blog_terms ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: blog_terms ~p -> ~p~n", [Name, R]), Pid ! R, loop(); exit -> - ?format("Stopping logger~n", []), + ?format("Stopping wlr_logger~n", []), exit(normal); _Else -> - ?format("logger: ignored: ~p~n", [_Else]), + ?format("wlr_logger: ignored: ~p~n", [_Else]), loop() end. diff --git a/lib/kernel/test/zlib_SUITE.erl b/lib/kernel/test/zlib_SUITE.erl index 4b67fce9a8..52ae1b3ae6 100644 --- a/lib/kernel/test/zlib_SUITE.erl +++ b/lib/kernel/test/zlib_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2005-2016. All Rights Reserved. +%% Copyright Ericsson AB 2005-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -21,60 +21,56 @@ -module(zlib_SUITE). -include_lib("common_test/include/ct.hrl"). - --compile(export_all). - --define(error(Format,Args), - put(test_server_loc,{?MODULE,?LINE}), - error(Format,Args,?MODULE,?LINE)). - -%% Learn erts team how to really write tests ;-) --define(m(ExpectedRes,Expr), - fun() -> - ACtual1 = (catch (Expr)), - try case ACtual1 of - ExpectedRes -> ACtual1 - end - catch - error:{case_clause,ACtuAl} -> - ?error("Not Matching Actual result was:~n ~p ~n", - [ACtuAl]), - ACtuAl - end - end()). - --define(BARG, {'EXIT',{badarg,[{zlib,_,_,_}|_]}}). --define(DATA_ERROR, {'EXIT',{data_error,[{zlib,_,_,_}|_]}}). - -init_per_testcase(_Func, Config) -> - Config. - -end_per_testcase(_Func, _Config) -> - ok. - -error(Format, Args, File, Line) -> - io:format("~p:~p: ERROR: " ++ Format, [File,Line|Args]), - group_leader() ! {failed, File, Line}. - -%% Hopefully I don't need this to get it to work with the testserver.. -%% Fail = #'REASON'{file = filename:basename(File), -%% line = Line, -%% desc = Args}, -%% case global:whereis_name(mnesia_test_case_sup) of -%% undefined -> -%% ignore; -%% Pid -> -%% Pid ! Fail -%% %% global:send(mnesia_test_case_sup, Fail), -%% end, -%% log("<>ERROR<>~n" ++ Format, Args, File, Line). +-include_lib("common_test/include/ct_event.hrl"). + +-export([suite/0, all/0, groups/0]). + +%% API group +-export([api_open_close/1]). +-export([api_deflateInit/1, api_deflateSetDictionary/1, api_deflateReset/1, + api_deflateParams/1, api_deflate/1, api_deflateEnd/1]). +-export([api_inflateInit/1, api_inflateReset/1, api_inflate2/1, api_inflate3/1, + api_inflateChunk/1, api_safeInflate/1, api_inflateEnd/1]). +-export([api_inflateSetDictionary/1, api_inflateGetDictionary/1]). +-export([api_crc32/1, api_adler32/1]). +-export([api_un_compress/1, api_un_zip/1, api_g_un_zip/1]). + +%% Examples group +-export([intro/1]). + +%% Usage group +-export([zip_usage/1, gz_usage/1, gz_usage2/1, compress_usage/1, + dictionary_usage/1, large_deflate/1, crc/1, adler/1, + only_allow_owner/1, sub_heap_binaries/1]). 
+ +%% Bench group +-export([inflate_bench_zeroed/1, inflate_bench_rand/1, + deflate_bench_zeroed/1, deflate_bench_rand/1, + chunk_bench_zeroed/1, chunk_bench_rand/1]). + +%% Others +-export([smp/1, otp_9981/1, otp_7359/1]). + +-define(m(Guard, Expression), + fun() -> + Actual = (catch (Expression)), + case Actual of + Guard -> Actual; + _Other -> + ct:fail("Failed to match ~p, actual result was ~p", + [??Guard, Actual]) + end + end()). + +-define(EXIT(Reason), {'EXIT',{Reason,[{_,_,_,_}|_]}}). suite() -> [{ct_hooks,[ts_install_cth]}, {timetrap,{minutes,1}}]. all() -> - [{group, api}, {group, examples}, {group, func}, smp, + [{group, api}, {group, examples}, {group, func}, + {group, bench}, smp, otp_9981, otp_7359]. @@ -84,28 +80,19 @@ groups() -> api_deflateSetDictionary, api_deflateReset, api_deflateParams, api_deflate, api_deflateEnd, api_inflateInit, api_inflateSetDictionary, api_inflateGetDictionary, - api_inflateSync, api_inflateReset, api_inflate, api_inflateChunk, - api_inflateEnd, api_setBufsz, api_getBufsz, api_crc32, - api_adler32, api_getQSize, api_un_compress, api_un_zip, + api_inflateReset, api_inflate2, api_inflate3, api_inflateChunk, + api_safeInflate, api_inflateEnd, api_crc32, + api_adler32, api_un_compress, api_un_zip, api_g_un_zip]}, {examples, [], [intro]}, {func, [], [zip_usage, gz_usage, gz_usage2, compress_usage, - dictionary_usage, large_deflate, crc, adler]}]. - -init_per_suite(Config) -> - Config. - -end_per_suite(_Config) -> - ok. - -init_per_group(_GroupName, Config) -> - Config. - -end_per_group(_GroupName, Config) -> - Config. - - + dictionary_usage, large_deflate, crc, adler, + only_allow_owner, sub_heap_binaries]}, + {bench, + [inflate_bench_zeroed, inflate_bench_rand, + deflate_bench_zeroed, deflate_bench_rand, + chunk_bench_zeroed, chunk_bench_rand]}]. %% Test open/0 and close/1. api_open_close(Config) when is_list(Config) -> @@ -113,7 +100,7 @@ api_open_close(Config) when is_list(Config) -> Fd2 = zlib:open(), ?m(false,Fd1 == Fd2), ?m(ok,zlib:close(Fd1)), - ?m(?BARG, zlib:close(Fd1)), + ?m(?EXIT(not_initialized), zlib:close(Fd1)), ?m(ok,zlib:close(Fd2)), %% Make sure that we don't get any EXIT messages if trap_exit is enabled. @@ -128,9 +115,11 @@ api_open_close(Config) when is_list(Config) -> %% Test deflateInit/2 and /6. 
api_deflateInit(Config) when is_list(Config) -> Z1 = zlib:open(), - ?m(?BARG, zlib:deflateInit(gurka, none)), - ?m(?BARG, zlib:deflateInit(gurka, gurka)), - ?m(?BARG, zlib:deflateInit(Z1, gurka)), + + ?m(?EXIT(badarg), zlib:deflateInit(gurka, none)), + + ?m(?EXIT(bad_compression_level), zlib:deflateInit(gurka, gurka)), + ?m(?EXIT(bad_compression_level), zlib:deflateInit(Z1, gurka)), Levels = [none, default, best_speed, best_compression] ++ lists:seq(0,9), lists:foreach(fun(Level) -> Z = zlib:open(), @@ -138,20 +127,30 @@ api_deflateInit(Config) when is_list(Config) -> ?m(ok,zlib:close(Z)) end, Levels), %% /6 - ?m(?BARG, zlib:deflateInit(Z1,gurka,deflated,-15,8,default)), - - ?m(?BARG, zlib:deflateInit(Z1,default,undefined,-15,8,default)), - - ?m(?BARG, zlib:deflateInit(Z1,default,deflated,48,8,default)), - ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-20,8,default)), - ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-7,8,default)), - ?m(?BARG, zlib:deflateInit(Z1,default,deflated,7,8,default)), - - ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-15,0,default)), - ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-15,10,default)), - - ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-15,8,0)), - ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-15,8,undefined)), + ?m(?EXIT(bad_compression_level), + zlib:deflateInit(Z1,gurka,deflated,-15,8,default)), + + ?m(?EXIT(bad_compression_method), + zlib:deflateInit(Z1,default,undefined,-15,8,default)), + + ?m(?EXIT(bad_compression_strategy), + zlib:deflateInit(Z1,default,deflated,-15,8,0)), + ?m(?EXIT(bad_compression_strategy), + zlib:deflateInit(Z1,default,deflated,-15,8,undefined)), + + ?m(?EXIT(bad_windowbits), + zlib:deflateInit(Z1,default,deflated,48,8,default)), + ?m(?EXIT(bad_windowbits), + zlib:deflateInit(Z1,default,deflated,-20,8,default)), + ?m(?EXIT(bad_windowbits), + zlib:deflateInit(Z1,default,deflated,-7,8,default)), + ?m(?EXIT(bad_windowbits), + zlib:deflateInit(Z1,default,deflated,7,8,default)), + + ?m(?EXIT(bad_memlevel), + zlib:deflateInit(Z1,default,deflated,-15,0,default)), + ?m(?EXIT(bad_memlevel), + zlib:deflateInit(Z1,default,deflated,-15,10,default)), lists:foreach(fun(Level) -> Z = zlib:open(), @@ -167,7 +166,7 @@ api_deflateInit(Config) when is_list(Config) -> ?m(ok, zlib:deflateInit(Z12,default,deflated,-Wbits,8,default)), ?m(ok,zlib:close(Z11)), ?m(ok,zlib:close(Z12)) - end, lists:seq(8, 15)), + end, lists:seq(9, 15)), lists:foreach(fun(MemLevel) -> Z = zlib:open(), @@ -183,7 +182,11 @@ api_deflateInit(Config) when is_list(Config) -> ?m(ok,zlib:close(Z)) end, Strategies), ?m(ok, zlib:deflateInit(Z1,default,deflated,-15,8,default)), - ?m({'EXIT',_}, zlib:deflateInit(Z1,none,deflated,-15,8,default)), %% ?? + + %% Let it crash for any reason; we don't care about the order in which the + %% parameters are checked. + ?m(?EXIT(_), zlib:deflateInit(Z1,none,deflated,-15,8,default)), + ?m(ok, zlib:close(Z1)). %% Test deflateSetDictionary. 
@@ -192,17 +195,17 @@ api_deflateSetDictionary(Config) when is_list(Config) -> ?m(ok, zlib:deflateInit(Z1, default)), ?m(Id when is_integer(Id), zlib:deflateSetDictionary(Z1, <<1,1,2,3,4,5,1>>)), ?m(Id when is_integer(Id), zlib:deflateSetDictionary(Z1, [1,1,2,3,4,5,1])), - ?m(?BARG, zlib:deflateSetDictionary(Z1, gurka)), - ?m(?BARG, zlib:deflateSetDictionary(Z1, 128)), - ?m(_, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, none)), - ?m({'EXIT',{stream_error,_}},zlib:deflateSetDictionary(Z1,<<1,1,2,3,4,5,1>>)), + ?m(?EXIT(badarg), zlib:deflateSetDictionary(Z1, gurka)), + ?m(?EXIT(badarg), zlib:deflateSetDictionary(Z1, 128)), + ?m(L when is_list(L), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, none)), + ?m(?EXIT(stream_error), zlib:deflateSetDictionary(Z1,<<1,1,2,3,4,5,1>>)), ?m(ok, zlib:close(Z1)). %% Test deflateReset. api_deflateReset(Config) when is_list(Config) -> Z1 = zlib:open(), ?m(ok, zlib:deflateInit(Z1, default)), - ?m(_, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, none)), + ?m(L when is_list(L), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, none)), ?m(ok, zlib:deflateReset(Z1)), ?m(ok, zlib:deflateReset(Z1)), %% FIXME how do I make this go wrong?? @@ -210,12 +213,46 @@ api_deflateReset(Config) when is_list(Config) -> %% Test deflateParams. api_deflateParams(Config) when is_list(Config) -> + Levels = [none, default, best_speed, best_compression] ++ lists:seq(0, 9), + Strategies = [filtered, huffman_only, rle, default], + Z1 = zlib:open(), ?m(ok, zlib:deflateInit(Z1, default)), - ?m(_, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, none)), - ?m(ok, zlib:deflateParams(Z1, best_compression, huffman_only)), - ?m(_, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, sync)), - ?m(ok, zlib:close(Z1)). + + ApiTest = + fun(Level, Strategy) -> + ?m(ok, zlib:deflateParams(Z1, Level, Strategy)), + ?m(ok, zlib:deflateReset(Z1)) + end, + + [ ApiTest(Level, Strategy) || Level <- Levels, Strategy <- Strategies ], + + ?m(ok, zlib:close(Z1)), + + FlushTest = + fun FlushTest(Size, Level, Strategy) -> + Z = zlib:open(), + ok = zlib:deflateInit(Z, default), + Data = gen_determ_rand_bytes(Size), + case zlib:deflate(Z, Data, none) of + [<<120, 156>>] -> + %% All data is present in the internal zlib state, and will + %% be flushed on deflateParams. + + ok = zlib:deflateParams(Z, Level, Strategy), + Compressed = [<<120, 156>>, zlib:deflate(Z, <<>>, finish)], + Data = zlib:uncompress(Compressed), + zlib:close(Z), + + FlushTest(Size + (1 bsl 10), Level, Strategy); + _Other -> + ok + end + end, + + [ FlushTest(1, Level, Strategy) || Level <- Levels, Strategy <- Strategies ], + + ok. %% Test deflate. api_deflate(Config) when is_list(Config) -> @@ -231,11 +268,13 @@ api_deflate(Config) when is_list(Config) -> ?m(B when is_list(B), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, full)), ?m(B when is_list(B), zlib:deflate(Z1, <<>>, finish)), - ?m(?BARG, zlib:deflate(gurka, <<1,1,1,1,1,1,1,1,1>>, full)), - ?m(?BARG, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, asdj)), - ?m(?BARG, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, 198)), + ?m(?EXIT(badarg), zlib:deflate(gurka, <<1,1,1,1,1,1,1,1,1>>, full)), + + ?m(?EXIT(bad_flush_mode), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, asdj)), + ?m(?EXIT(bad_flush_mode), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, 198)), + %% Causes problems ERROR REPORT - ?m(?BARG, zlib:deflate(Z1, [asdj,asd], none)), + ?m(?EXIT(badarg), zlib:deflate(Z1, [asdj,asd], none)), ?m(ok, zlib:close(Z1)). 
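The reworked api_deflateParams test above depends on zlib:deflateParams/3 flushing whatever is buffered in the stream before the new level and strategy take effect, so a mid-stream parameter switch does not lose data. A minimal sketch of that round trip, assuming nothing beyond the calls already used in this suite (the helper name deflate_params_roundtrip is invented for illustration):

    %% Sketch only: compress two copies of Data, switching parameters between
    %% them, then verify that zlib:uncompress/1 recovers both copies.
    deflate_params_roundtrip(Data) when is_binary(Data) ->
        Z = zlib:open(),
        ok = zlib:deflateInit(Z, default),
        Part1 = zlib:deflate(Z, Data, none),
        ok = zlib:deflateParams(Z, best_compression, huffman_only),
        Part2 = zlib:deflate(Z, Data, finish),
        ok = zlib:deflateEnd(Z),
        ok = zlib:close(Z),
        Expected = <<Data/binary, Data/binary>>,
        Expected = zlib:uncompress(iolist_to_binary([Part1, Part2])),
        ok.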
@@ -244,11 +283,11 @@ api_deflateEnd(Config) when is_list(Config) -> Z1 = zlib:open(), ?m(ok, zlib:deflateInit(Z1, default)), ?m(ok, zlib:deflateEnd(Z1)), - ?m({'EXIT', {einval,_}}, zlib:deflateEnd(Z1)), %% ?? - ?m(?BARG, zlib:deflateEnd(gurka)), + ?m(?EXIT(not_initialized), zlib:deflateEnd(Z1)), + ?m(?EXIT(badarg), zlib:deflateEnd(gurka)), ?m(ok, zlib:deflateInit(Z1, default)), ?m(B when is_list(B), zlib:deflate(Z1, <<"Kilroy was here">>)), - ?m({'EXIT', {data_error,_}}, zlib:deflateEnd(Z1)), + ?m(?EXIT(data_error), zlib:deflateEnd(Z1)), ?m(ok, zlib:deflateInit(Z1, default)), ?m(B when is_list(B), zlib:deflate(Z1, <<"Kilroy was here">>)), ?m(B when is_list(B), zlib:deflate(Z1, <<"Kilroy was here">>, finish)), @@ -259,9 +298,9 @@ api_deflateEnd(Config) when is_list(Config) -> %% Test inflateInit /1 and /2. api_inflateInit(Config) when is_list(Config) -> Z1 = zlib:open(), - ?m(?BARG, zlib:inflateInit(gurka)), + ?m(?EXIT(badarg), zlib:inflateInit(gurka)), ?m(ok, zlib:inflateInit(Z1)), - ?m({'EXIT',{einval,_}}, zlib:inflateInit(Z1, 15)), %% ?? + ?m(?EXIT(already_initialized), zlib:inflateInit(Z1, 15)), lists:foreach(fun(Wbits) -> Z11 = zlib:open(), ?m(ok, zlib:inflateInit(Z11,Wbits)), @@ -270,33 +309,34 @@ api_inflateInit(Config) when is_list(Config) -> ?m(ok,zlib:close(Z11)), ?m(ok,zlib:close(Z12)) end, lists:seq(8,15)), - ?m(?BARG, zlib:inflateInit(gurka, -15)), - ?m(?BARG, zlib:inflateInit(Z1, 7)), - ?m(?BARG, zlib:inflateInit(Z1, -7)), - ?m(?BARG, zlib:inflateInit(Z1, 48)), - ?m(?BARG, zlib:inflateInit(Z1, -16)), + ?m(?EXIT(badarg), zlib:inflateInit(gurka, -15)), + ?m(?EXIT(bad_windowbits), zlib:inflateInit(Z1, 7)), + ?m(?EXIT(bad_windowbits), zlib:inflateInit(Z1, -7)), + ?m(?EXIT(bad_windowbits), zlib:inflateInit(Z1, 48)), + ?m(?EXIT(bad_windowbits), zlib:inflateInit(Z1, -16)), ?m(ok, zlib:close(Z1)). %% Test inflateSetDictionary. api_inflateSetDictionary(Config) when is_list(Config) -> Z1 = zlib:open(), ?m(ok, zlib:inflateInit(Z1)), - ?m(?BARG, zlib:inflateSetDictionary(gurka,<<1,1,1,1,1>>)), - ?m(?BARG, zlib:inflateSetDictionary(Z1,102)), - ?m(?BARG, zlib:inflateSetDictionary(Z1,gurka)), + ?m(?EXIT(badarg), zlib:inflateSetDictionary(gurka,<<1,1,1,1,1>>)), + ?m(?EXIT(badarg), zlib:inflateSetDictionary(Z1,102)), + ?m(?EXIT(badarg), zlib:inflateSetDictionary(Z1,gurka)), Dict = <<1,1,1,1,1>>, - ?m({'EXIT',{stream_error,_}}, zlib:inflateSetDictionary(Z1,Dict)), + ?m(?EXIT(stream_error), zlib:inflateSetDictionary(Z1,Dict)), ?m(ok, zlib:close(Z1)). %% Test inflateGetDictionary. api_inflateGetDictionary(Config) when is_list(Config) -> Z1 = zlib:open(), + zlib:inflateInit(Z1), IsOperationSupported = case catch zlib:inflateGetDictionary(Z1) of - {'EXIT',{einval,_}} -> true; - {'EXIT',{enotsup,_}} -> false + ?EXIT(not_supported) -> false; + _ -> true end, - _ = zlib:close(Z1), + zlib:close(Z1), api_inflateGetDictionary_if_supported(IsOperationSupported). 
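The ?EXIT matches above illustrate the move away from a blanket badarg/einval: each failure now raises an ordinary Erlang error with a descriptive reason such as not_initialized, bad_windowbits or data_error. A hedged sketch of how calling code can rely on that (the function name is invented; the asserted reason comes straight from the api_deflateEnd/api_inflateEnd tests):

    %% Sketch only: ending a stream that was never initialized raises
    %% 'not_initialized' rather than a generic badarg.
    error_reason_example() ->
        Z = zlib:open(),
        Reason = try zlib:inflateEnd(Z) catch error:R -> R end,
        not_initialized = Reason,
        ok = zlib:close(Z).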
api_inflateGetDictionary_if_supported(false) -> @@ -306,64 +346,53 @@ api_inflateGetDictionary_if_supported(true) -> Z1 = zlib:open(), ?m(ok, zlib:deflateInit(Z1)), Dict = <<"foobar barfoo foo bar far boo">>, - ?m(_, zlib:deflateSetDictionary(Z1, Dict)), + Checksum = zlib:deflateSetDictionary(Z1, Dict), Payload = <<"foobarbarbar">>, Compressed = zlib:deflate(Z1, Payload, finish), ?m(ok, zlib:close(Z1)), - % Decompress and test dictionary extraction + % Decompress and test dictionary extraction with inflate/2 Z2 = zlib:open(), ?m(ok, zlib:inflateInit(Z2)), ?m(<<>>, iolist_to_binary(zlib:inflateGetDictionary(Z2))), - ?m({'EXIT',{stream_error,_}}, zlib:inflateSetDictionary(Z2, Dict)), - ?m({'EXIT',{{need_dictionary,_},_}}, zlib:inflate(Z2, Compressed)), + ?m(?EXIT(stream_error), zlib:inflateSetDictionary(Z2, Dict)), + ?m(?EXIT({need_dictionary,Checksum}), zlib:inflate(Z2, Compressed)), ?m(ok, zlib:inflateSetDictionary(Z2, Dict)), ?m(Dict, iolist_to_binary(zlib:inflateGetDictionary(Z2))), - ?m(Payload, iolist_to_binary(zlib:inflate(Z2, Compressed))), + Payload = iolist_to_binary(zlib:inflate(Z2, [])), ?m(ok, zlib:close(Z2)), - ?m(?BARG, zlib:inflateSetDictionary(Z2, Dict)), - ok. + ?m(?EXIT(not_initialized), zlib:inflateSetDictionary(Z2, Dict)), -%% Test inflateSync. -api_inflateSync(Config) when is_list(Config) -> - {skip,"inflateSync/1 sucks"}. -%% Z1 = zlib:open(), -%% ?m(ok, zlib:deflateInit(Z1)), -%% B1list0 = zlib:deflate(Z1, "gurkan gurra ger galna tunnor", full), -%% B2 = zlib:deflate(Z1, "grodan boll", finish), -%% io:format("~p\n", [B1list0]), -%% io:format("~p\n", [B2]), -%% ?m(ok, zlib:deflateEnd(Z1)), -%% B1 = clobber(14, list_to_binary(B1list0)), -%% Compressed = list_to_binary([B1,B2]), -%% io:format("~p\n", [Compressed]), - -%% ?m(ok, zlib:inflateInit(Z1)), -%% ?m(?BARG, zlib:inflateSync(gurka)), -%% ?m({'EXIT',{data_error,_}}, zlib:inflate(Z1, Compressed)), -%% ?m(ok, zlib:inflateSync(Z1)), -%% Ubs = zlib:inflate(Z1, []), -%% <<"grodan boll">> = list_to_binary(Ubs), -%% ?m(ok, zlib:close(Z1)). - -clobber(N, Bin) when is_binary(Bin) -> - T = list_to_tuple(binary_to_list(Bin)), - Byte = case element(N, T) of - 255 -> 254; - B -> B+1 - end, - list_to_binary(tuple_to_list(setelement(N, T, Byte))). + %% ... And do the same for inflate/3 + Z3 = zlib:open(), + ?m(ok, zlib:inflateInit(Z3)), + ?m(<<>>, iolist_to_binary(zlib:inflateGetDictionary(Z3))), + ?m(?EXIT(stream_error), zlib:inflateSetDictionary(Z3, Dict)), + + {need_dictionary, Checksum, _Output = []} = + zlib:inflate(Z3, Compressed, [{exception_on_need_dict, false}]), + + ?m(ok, zlib:inflateSetDictionary(Z3, Dict)), + ?m(Dict, iolist_to_binary(zlib:inflateGetDictionary(Z3))), + + Payload = iolist_to_binary( + zlib:inflate(Z3, [], [{exception_on_need_dict, false}])), + + ?m(ok, zlib:close(Z3)), + ?m(?EXIT(not_initialized), zlib:inflateSetDictionary(Z3, Dict)), + + ok. %% Test inflateReset. api_inflateReset(Config) when is_list(Config) -> Z1 = zlib:open(), ?m(ok, zlib:inflateInit(Z1)), - ?m(?BARG, zlib:inflateReset(gurka)), + ?m(?EXIT(badarg), zlib:inflateReset(gurka)), ?m(ok, zlib:inflateReset(Z1)), ?m(ok, zlib:close(Z1)). -%% Test inflate. 
-api_inflate(Config) when is_list(Config) -> +%% Test inflate/2 +api_inflate2(Config) when is_list(Config) -> Data = [<<1,2,2,3,3,3,4,4,4,4>>], Compressed = zlib:compress(Data), Z1 = zlib:open(), @@ -373,12 +402,32 @@ api_inflate(Config) when is_list(Config) -> ?m(ok, zlib:inflateEnd(Z1)), ?m(ok, zlib:inflateInit(Z1)), ?m(Data, zlib:inflate(Z1, Compressed)), - ?m(?BARG, zlib:inflate(gurka, Compressed)), - ?m(?BARG, zlib:inflate(Z1, 4384)), - ?m(?BARG, zlib:inflate(Z1, [atom_list])), + ?m(?EXIT(badarg), zlib:inflate(gurka, Compressed)), + ?m(?EXIT(badarg), zlib:inflate(Z1, 4384)), + ?m(?EXIT(badarg), zlib:inflate(Z1, [atom_list])), ?m(ok, zlib:inflateEnd(Z1)), ?m(ok, zlib:inflateInit(Z1)), - ?m({'EXIT',{data_error,_}}, zlib:inflate(Z1, <<2,1,2,1,2>>)), + ?m(?EXIT(data_error), zlib:inflate(Z1, <<2,1,2,1,2>>)), + ?m(ok, zlib:close(Z1)). + +%% Test inflate/3; same as inflate/2 but with the default options inverted. +api_inflate3(Config) when is_list(Config) -> + Data = [<<1,2,2,3,3,3,4,4,4,4>>], + Options = [{exception_on_need_dict, false}], + Compressed = zlib:compress(Data), + Z1 = zlib:open(), + ?m(ok, zlib:inflateInit(Z1)), + ?m([], zlib:inflate(Z1, <<>>, Options)), + ?m(Data, zlib:inflate(Z1, Compressed)), + ?m(ok, zlib:inflateEnd(Z1)), + ?m(ok, zlib:inflateInit(Z1)), + ?m(Data, zlib:inflate(Z1, Compressed, Options)), + ?m(?EXIT(badarg), zlib:inflate(gurka, Compressed, Options)), + ?m(?EXIT(badarg), zlib:inflate(Z1, 4384, Options)), + ?m(?EXIT(badarg), zlib:inflate(Z1, [atom_list], Options)), + ?m(ok, zlib:inflateEnd(Z1)), + ?m(ok, zlib:inflateInit(Z1)), + ?m(?EXIT(data_error), zlib:inflate(Z1, <<2,1,2,1,2>>, Options)), ?m(ok, zlib:close(Z1)). %% Test inflateChunk. @@ -388,69 +437,109 @@ api_inflateChunk(Config) when is_list(Config) -> Part1 = binary:part(Data, 0, ChunkSize), Part2 = binary:part(Data, ChunkSize, ChunkSize), Part3 = binary:part(Data, ChunkSize * 2, ChunkSize), + Compressed = zlib:compress(Data), Z1 = zlib:open(), + zlib:setBufSize(Z1, ChunkSize), + ?m(ok, zlib:inflateInit(Z1)), - ?m([], zlib:inflateChunk(Z1, <<>>)), - ?m({more, Part1}, zlib:inflateChunk(Z1, Compressed)), - ?m({more, Part2}, zlib:inflateChunk(Z1)), - ?m(Part3, zlib:inflateChunk(Z1)), - ?m(ok, zlib:inflateEnd(Z1)), + 0 = iolist_size(zlib:inflateChunk(Z1, <<>>)), + + {more, Part1AsIOList} = zlib:inflateChunk(Z1, Compressed), + {more, Part2AsIOList} = zlib:inflateChunk(Z1), + {more, Part3AsIOList} = zlib:inflateChunk(Z1), + + [] = zlib:inflateChunk(Z1), + [] = zlib:inflateChunk(Z1), + [] = zlib:inflateChunk(Z1), + + ?m(Part1, iolist_to_binary(Part1AsIOList)), + ?m(Part2, iolist_to_binary(Part2AsIOList)), + ?m(Part3, iolist_to_binary(Part3AsIOList)), + + ?m(ok, zlib:inflateEnd(Z1)), ?m(ok, zlib:inflateInit(Z1)), - ?m({more, Part1}, zlib:inflateChunk(Z1, Compressed)), + + ?m({more, Part1AsIOList}, zlib:inflateChunk(Z1, Compressed)), ?m(ok, zlib:inflateReset(Z1)), - zlib:setBufSize(Z1, size(Data)), - ?m(Data, zlib:inflateChunk(Z1, Compressed)), - ?m(ok, zlib:inflateEnd(Z1)), + zlib:setBufSize(Z1, byte_size(Data) + 1), + + DataAsIOList = zlib:inflateChunk(Z1, Compressed), + ?m(Data, iolist_to_binary(DataAsIOList)), + ?m(ok, zlib:inflateEnd(Z1)), ?m(ok, zlib:inflateInit(Z1)), - ?m(?BARG, zlib:inflateChunk(gurka, Compressed)), - ?m(?BARG, zlib:inflateChunk(Z1, 4384)), - ?m({'EXIT',{data_error,_}}, zlib:inflateEnd(Z1)), + + ?m(?EXIT(badarg), zlib:inflateChunk(gurka, Compressed)), + ?m(?EXIT(badarg), zlib:inflateChunk(Z1, 4384)), + + ?m(?EXIT(data_error), zlib:inflateEnd(Z1)), + ?m(ok, zlib:close(Z1)). 
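api_inflateChunk above (and throughput_bench_chunk further down) drive decompression in buffer-sized steps: zlib:inflateChunk/2 starts on new input, zlib:inflateChunk/1 continues, and {more, Data} signals that output remains for the current input. A sketch of that loop as a stand-alone helper, using only calls exercised in this suite (helper names invented):

    %% Sketch only: decompress Compressed without ever producing more output
    %% per call than the stream's configured buffer size.
    inflate_in_chunks(Compressed) ->
        Z = zlib:open(),
        ok = zlib:inflateInit(Z),
        Output = chunk_loop(Z, zlib:inflateChunk(Z, Compressed), []),
        ok = zlib:inflateEnd(Z),
        ok = zlib:close(Z),
        iolist_to_binary(Output).

    chunk_loop(Z, {more, Part}, Acc) ->
        chunk_loop(Z, zlib:inflateChunk(Z), [Acc, Part]);
    chunk_loop(_Z, Last, Acc) ->
        [Acc, Last].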
-%% Test inflateEnd. -api_inflateEnd(Config) when is_list(Config) -> +%% Test safeInflate as a mirror of inflateChunk, but ignore the stuff about +%% exact chunk sizes. +api_safeInflate(Config) when is_list(Config) -> + Data = << <<(I rem 150)>> || I <- lists:seq(1, 20 bsl 10) >>, + Compressed = zlib:compress(Data), Z1 = zlib:open(), - ?m({'EXIT',{einval,_}}, zlib:inflateEnd(Z1)), - ?m(ok, zlib:inflateInit(Z1)), - ?m(?BARG, zlib:inflateEnd(gurka)), - ?m({'EXIT',{data_error,_}}, zlib:inflateEnd(Z1)), - ?m({'EXIT',{einval,_}}, zlib:inflateEnd(Z1)), + ?m(ok, zlib:inflateInit(Z1)), - ?m(B when is_list(B), zlib:inflate(Z1, zlib:compress("abc"))), + + SafeInflateLoop = + fun + Loop({continue, Chunk}, Output) -> + Loop(zlib:safeInflate(Z1, []), [Output, Chunk]); + Loop({finished, Chunk}, Output) -> + [Output, Chunk] + end, + + Decompressed = SafeInflateLoop(zlib:safeInflate(Z1, Compressed), []), + Data = iolist_to_binary(Decompressed), + ?m(ok, zlib:inflateEnd(Z1)), - ?m(ok, zlib:close(Z1)). + ?m(ok, zlib:inflateInit(Z1)), -%% Test getBufsz. -api_getBufsz(Config) when is_list(Config) -> - Z1 = zlib:open(), - ?m(Val when is_integer(Val), zlib:getBufSize(Z1)), - ?m(?BARG, zlib:getBufSize(gurka)), - ?m(ok, zlib:close(Z1)). + {continue, Partial} = zlib:safeInflate(Z1, Compressed), + PBin = iolist_to_binary(Partial), + PSize = byte_size(PBin), + <<PBin:PSize/binary, Rest/binary>> = Data, -%% Test setBufsz. -api_setBufsz(Config) when is_list(Config) -> - Z1 = zlib:open(), - ?m(?BARG, zlib:setBufSize(Z1, gurka)), - ?m(?BARG, zlib:setBufSize(gurka, 1232330)), - Sz = ?m( Val when is_integer(Val), zlib:getBufSize(Z1)), - ?m(ok, zlib:setBufSize(Z1, Sz*2)), - DSz = Sz*2, - ?m(DSz, zlib:getBufSize(Z1)), + ?m(ok, zlib:inflateReset(Z1)), + + {continue, Partial} = zlib:safeInflate(Z1, Compressed), + PBin = iolist_to_binary(Partial), + PSize = byte_size(PBin), + <<PBin:PSize/binary, Rest/binary>> = Data, + + ?m(ok, zlib:inflateReset(Z1)), + + SafeInflateLoop(zlib:safeInflate(Z1, Compressed), []), + + ?m({finished, []}, zlib:safeInflate(Z1, Compressed)), + ?m({finished, []}, zlib:safeInflate(Z1, Compressed)), + + ?m(ok, zlib:inflateReset(Z1)), + ?m(?EXIT(badarg), zlib:safeInflate(gurka, Compressed)), + ?m(?EXIT(badarg), zlib:safeInflate(Z1, 4384)), + ?m(?EXIT(data_error), zlib:inflateEnd(Z1)), ?m(ok, zlib:close(Z1)). -%%% Debug function ?? -%% Test getQSize. -api_getQSize(Config) when is_list(Config) -> +%% Test inflateEnd. +api_inflateEnd(Config) when is_list(Config) -> Z1 = zlib:open(), - Q = ?m(Val when is_integer(Val), zlib:getQSize(Z1)), - io:format("QSize ~p ~n", [Q]), - ?m(?BARG, zlib:getQSize(gurka)), + ?m(?EXIT(not_initialized), zlib:inflateEnd(Z1)), + ?m(ok, zlib:inflateInit(Z1)), + ?m(?EXIT(badarg), zlib:inflateEnd(gurka)), + ?m(?EXIT(data_error), zlib:inflateEnd(Z1)), + ?m(?EXIT(not_initialized), zlib:inflateEnd(Z1)), + ?m(ok, zlib:inflateInit(Z1)), + ?m(B when is_list(B), zlib:inflate(Z1, zlib:compress("abc"))), + ?m(ok, zlib:inflateEnd(Z1)), ?m(ok, zlib:close(Z1)). %% Test crc32. 
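The api_safeInflate test above follows the same pattern with zlib:safeInflate/2, which tags its result {continue, Chunk} or {finished, Chunk} instead. A compact sketch of that loop on its own (helper names invented), for callers that want decompression in bounded steps:

    safe_uncompress(Compressed) ->
        Z = zlib:open(),
        ok = zlib:inflateInit(Z),
        Output = safe_loop(Z, zlib:safeInflate(Z, Compressed), []),
        ok = zlib:inflateEnd(Z),
        ok = zlib:close(Z),
        iolist_to_binary(Output).

    safe_loop(Z, {continue, Chunk}, Acc) ->
        safe_loop(Z, zlib:safeInflate(Z, []), [Acc, Chunk]);
    safe_loop(_Z, {finished, Chunk}, Acc) ->
        [Acc, Chunk].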
@@ -458,8 +547,8 @@ api_crc32(Config) when is_list(Config) -> Z1 = zlib:open(), ?m(ok, zlib:deflateInit(Z1,best_speed,deflated,-15,8,default)), Bin = <<1,1,1,1,1,1,1,1,1>>, - Compressed1 = ?m(_, zlib:deflate(Z1, Bin, none)), - Compressed2 = ?m(_, zlib:deflate(Z1, <<>>, finish)), + Compressed1 = ?m(L when is_list(L), zlib:deflate(Z1, Bin, none)), + Compressed2 = ?m(L when is_list(L), zlib:deflate(Z1, <<>>, finish)), Compressed = list_to_binary(Compressed1 ++ Compressed2), CRC1 = ?m( CRC1 when is_integer(CRC1), zlib:crc32(Z1)), ?m(CRC1 when is_integer(CRC1), zlib:crc32(Z1,Bin)), @@ -467,15 +556,15 @@ api_crc32(Config) when is_list(Config) -> ?m(CRC2 when is_integer(CRC2), zlib:crc32(Z1,Compressed)), CRC2 = ?m(CRC2 when is_integer(CRC2), zlib:crc32(Z1,0,Compressed)), ?m(CRC3 when CRC2 /= CRC3, zlib:crc32(Z1,234,Compressed)), - ?m(?BARG, zlib:crc32(gurka)), - ?m(?BARG, zlib:crc32(Z1, not_a_binary)), - ?m(?BARG, zlib:crc32(gurka, <<1,1,2,4,4>>)), - ?m(?BARG, zlib:crc32(Z1, 2298929, not_a_binary)), - ?m(?BARG, zlib:crc32(Z1, not_an_int, <<123,123,123,35,231>>)), - ?m(?BARG, zlib:crc32_combine(Z1, not_an_int, 123123, 123)), - ?m(?BARG, zlib:crc32_combine(Z1, noint, 123123, 123)), - ?m(?BARG, zlib:crc32_combine(Z1, 123123, noint, 123)), - ?m(?BARG, zlib:crc32_combine(Z1, 123123, 123, noint)), + ?m(?EXIT(badarg), zlib:crc32(gurka)), + ?m(?EXIT(badarg), zlib:crc32(Z1, not_a_binary)), + ?m(?EXIT(badarg), zlib:crc32(gurka, <<1,1,2,4,4>>)), + ?m(?EXIT(badarg), zlib:crc32(Z1, 2298929, not_a_binary)), + ?m(?EXIT(badarg), zlib:crc32(Z1, not_an_int, <<123,123,123,35,231>>)), + ?m(?EXIT(badarg), zlib:crc32_combine(Z1, not_an_int, 123123, 123)), + ?m(?EXIT(badarg), zlib:crc32_combine(Z1, noint, 123123, 123)), + ?m(?EXIT(badarg), zlib:crc32_combine(Z1, 123123, noint, 123)), + ?m(?EXIT(badarg), zlib:crc32_combine(Z1, 123123, 123, noint)), ?m(ok, zlib:deflateEnd(Z1)), ?m(ok, zlib:close(Z1)). 
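The api_crc32 assertions above only cover argument validation, so the purpose of crc32_combine/4 may be easier to see in a worked example. A sketch assuming standard zlib combine semantics (the checksum of two buffers concatenated, given their individual checksums and the length of the second buffer) rather than anything asserted by this suite:

    crc_combine_example() ->
        Z = zlib:open(),
        A = <<"Kilroy ">>,
        B = <<"was here">>,
        CrcA  = zlib:crc32(Z, A),
        CrcB  = zlib:crc32(Z, B),
        CrcAB = zlib:crc32(Z, <<A/binary, B/binary>>),
        CrcAB = zlib:crc32_combine(Z, CrcA, CrcB, byte_size(B)),
        ok = zlib:close(Z).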
@@ -484,74 +573,129 @@ api_adler32(Config) when is_list(Config) -> Z1 = zlib:open(), ?m(ok, zlib:deflateInit(Z1,best_speed,deflated,-15,8,default)), Bin = <<1,1,1,1,1,1,1,1,1>>, - Compressed1 = ?m(_, zlib:deflate(Z1, Bin, none)), - Compressed2 = ?m(_, zlib:deflate(Z1, <<>>, finish)), + Compressed1 = ?m(L when is_list(L), zlib:deflate(Z1, Bin, none)), + Compressed2 = ?m(L when is_list(L), zlib:deflate(Z1, <<>>, finish)), Compressed = list_to_binary(Compressed1 ++ Compressed2), ?m(ADLER1 when is_integer(ADLER1), zlib:adler32(Z1,Bin)), ?m(ADLER1 when is_integer(ADLER1), zlib:adler32(Z1,binary_to_list(Bin))), ADLER2 = ?m(ADLER2 when is_integer(ADLER2), zlib:adler32(Z1,Compressed)), ?m(ADLER2 when is_integer(ADLER2), zlib:adler32(Z1,1,Compressed)), ?m(ADLER3 when ADLER2 /= ADLER3, zlib:adler32(Z1,234,Compressed)), - ?m(?BARG, zlib:adler32(Z1, not_a_binary)), - ?m(?BARG, zlib:adler32(gurka, <<1,1,2,4,4>>)), - ?m(?BARG, zlib:adler32(Z1, 2298929, not_a_binary)), - ?m(?BARG, zlib:adler32(Z1, not_an_int, <<123,123,123,35,231>>)), - ?m(?BARG, zlib:adler32_combine(Z1, noint, 123123, 123)), - ?m(?BARG, zlib:adler32_combine(Z1, 123123, noint, 123)), - ?m(?BARG, zlib:adler32_combine(Z1, 123123, 123, noint)), + ?m(?EXIT(badarg), zlib:adler32(Z1, not_a_binary)), + ?m(?EXIT(badarg), zlib:adler32(gurka, <<1,1,2,4,4>>)), + ?m(?EXIT(badarg), zlib:adler32(Z1, 2298929, not_a_binary)), + ?m(?EXIT(badarg), zlib:adler32(Z1, not_an_int, <<123,123,123,35,231>>)), + ?m(?EXIT(badarg), zlib:adler32_combine(Z1, noint, 123123, 123)), + ?m(?EXIT(badarg), zlib:adler32_combine(Z1, 123123, noint, 123)), + ?m(?EXIT(badarg), zlib:adler32_combine(Z1, 123123, 123, noint)), ?m(ok, zlib:deflateEnd(Z1)), ?m(ok, zlib:close(Z1)). %% Test compress. api_un_compress(Config) when is_list(Config) -> - ?m(?BARG,zlib:compress(not_a_binary)), + ?m(?EXIT(badarg),zlib:compress(not_a_binary)), Bin = <<1,11,1,23,45>>, Comp = zlib:compress(Bin), - ?m(?BARG,zlib:uncompress(not_a_binary)), - ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<171,171,171,171,171>>)), - ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<>>)), - ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<120>>)), - ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<120,156>>)), - ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<120,156,3>>)), - ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<120,156,3,0>>)), - ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<0,156,3,0,0,0,0,1>>)), + ?m(?EXIT(badarg),zlib:uncompress(not_a_binary)), + ?m(?EXIT(data_error), zlib:uncompress(<<171,171,171,171,171>>)), + ?m(?EXIT(data_error), zlib:uncompress(<<>>)), + ?m(?EXIT(data_error), zlib:uncompress(<<120>>)), + ?m(?EXIT(data_error), zlib:uncompress(<<120,156>>)), + ?m(?EXIT(data_error), zlib:uncompress(<<120,156,3>>)), + ?m(?EXIT(data_error), zlib:uncompress(<<120,156,3,0>>)), + ?m(?EXIT(data_error), zlib:uncompress(<<0,156,3,0,0,0,0,1>>)), ?m(Bin, zlib:uncompress(binary_to_list(Comp))), ?m(Bin, zlib:uncompress(Comp)). %% Test zip. 
api_un_zip(Config) when is_list(Config) -> - ?m(?BARG,zlib:zip(not_a_binary)), + ?m(?EXIT(badarg),zlib:zip(not_a_binary)), Bin = <<1,11,1,23,45>>, Comp = zlib:zip(Bin), ?m(Comp, zlib:zip(binary_to_list(Bin))), - ?m(?BARG,zlib:unzip(not_a_binary)), - ?m({'EXIT',{data_error,_}}, zlib:unzip(<<171,171,171,171,171>>)), - ?m({'EXIT',{data_error,_}}, zlib:unzip(<<>>)), + ?m(?EXIT(badarg),zlib:unzip(not_a_binary)), + ?m(?EXIT(data_error), zlib:unzip(<<171,171,171,171,171>>)), + ?m(?EXIT(data_error), zlib:unzip(<<>>)), ?m(Bin, zlib:unzip(Comp)), ?m(Bin, zlib:unzip(binary_to_list(Comp))), %% OTP-6396 - B = <<131,104,19,100,0,13,99,95,99,105,100,95,99,115,103,115,110,95,50,97,1,107,0,4,208,161,246,29,107,0,3,237,166,224,107,0,6,66,240,153,0,2,10,1,0,8,97,116,116,97,99,104,101,100,104,2,100,0,22,117,112,100,97,116,101,95,112,100,112,95,99,111,110,116,101,120,116,95,114,101,113,107,0,114,69,3,12,1,11,97,31,113,150,64,104,132,61,64,104,12,3,197,31,113,150,64,104,132,61,64,104,12,1,11,97,31,115,150,64,104,116,73,64,104,0,0,0,0,0,0,65,149,16,61,65,149,16,61,1,241,33,4,5,0,33,4,4,10,6,10,181,4,10,6,10,181,38,15,99,111,109,109,97,110,100,1,114,45,97,112,110,45,49,3,99,111,109,5,109,110,99,57,57,6,109,99,99,50,52,48,4,103,112,114,115,8,0,104,2,104,2,100,0,8,97,99,116,105,118,97,116,101,104,23,100,0,11,112,100,112,95,99,111,110,116,1,120,116,100,0,7,112,114,105,109,97,114,121,97,1,100,0,9,117,110,100,101,102,105,110,101,100,97,1,97,4,97,4,97,7,100,0,9,117,110,100,101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,110,10100,100,0,9,117,110,100,101,102,105,110,101,100,100,0,5,102,97,108,115,101,100,0,9,117,110,100,101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,1,101,100,97,0,100,0,9,117,110,100,101,102,105,110,101,100,107,0,4,16,0,1,144,107,0,4,61,139,186,181,107,0,4,10,8,201,49,100,0,9,117,110,100,101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,0,101,100,100,0,9,117,110,100,101,102,105,110,101,100,104,2,104,3,98,0,0,7,214,97,11,97,20,104,3,97,17,97,16,97,21,106,108,0,0,0,3,104,2,97,1,104,2,104,3,98,0,0,7,214,97,11,97,20,104,3,97,17,97,167,20,104,2,97,4,104,2,104,3,98,0,0,7,214,97,11,97,20,104,3,97,17,97,16,97,21,104,2,97,10,104,2,104,3,98,0,0,7,214,97,11,97,20,104,3,97,17,97,16,97,26,106,100,0,5,118,101,114,57,57,100,0,9,117,110,0,101,102,105,110,101,100,107,0,2,0,244,107,0,4,10,6,102,195,107,0,4,10,6,102,195,100,0,9,117,110,100,101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,110,101,100,107,0,125,248,143,0,203,25115,157,116,65,185,65,172,55,87,164,88,225,50,203,251,115,157,116,65,185,65,172,55,87,164,88,225,50,0,0,82,153,50,0,200,98,87,148,237,193,185,65,149,167,69,144,14,16,153,50,3,81,70,94,13,109,193,1,120,5,181,113,198,118,50,3,81,70,94,13,109,193,185,120,5,181,113,198,118,153,3,81,70,94,13,109,193,185,120,5,181,113,198,118,153,50,16,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,113,92,2,119,128,0,0,108,0,0,1,107,0,114,69,3,12,1,11,97,31,113,150,64,104,132,61,64,104,12,3,11,97,31,113,150,64,104,132,61,64,104,12,1,11,97,31,115,150,64,104,116,73,64,104,0,0,0,0,0,0,65,149,16,61,65,149,16,61,1,241,33,4,0,33,4,4,10,6,10,181,4,10,6,10,181,38,15,99,111,109,109,97,110,100,101,114,45,97,112,110,45,49,3,99,111,109,5,109,110,99,57,57,6,109,99,99,50,52,48,4,103,112,114,115,8,0,106>>, + B = + <<131,104,19,100,0,13,99,95,99,105,100,95,99,115,103,115,110,95,50,97, + 1,107,0,4,208,161,246,29,107,0,3,237,166,224,107,0,6,66,240,153,0,2, + 10,1,0,8,97,116,116,97,99,104,101,100,104,2,100,0,22,117,112,100,97, + 
116,101,95,112,100,112,95,99,111,110,116,101,120,116,95,114,101,113, + 107,0,114,69,3,12,1,11,97,31,113,150,64,104,132,61,64,104,12,3,197, + 31,113,150,64,104,132,61,64,104,12,1,11,97,31,115,150,64,104,116,73, + 64,104,0,0,0,0,0,0,65,149,16,61,65,149,16,61,1,241,33,4,5,0,33,4,4,10 + ,6,10,181,4,10,6,10,181,38,15,99,111,109,109,97,110,100,1,114,45,97, + 112,110,45,49,3,99,111,109,5,109,110,99,57,57,6,109,99,99,50,52,48,4, + 103,112,114,115,8,0,104,2,104,2,100,0,8,97,99,116,105,118,97,116,101, + 104,23,100,0,11,112,100,112,95,99,111,110,116,1,120,116,100,0,7,112, + 114,105,109,97,114,121,97,1,100,0,9,117,110,100,101,102,105,110,101, + 100,97,1,97,4,97,4,97,7,100,0,9,117,110,100,101,102,105,110,101,100, + 100,0,9,117,110,100,101,102,105,110,10100,100,0,9,117,110,100,101, + 102,105,110,101,100,100,0,5,102,97,108,115,101,100,0,9,117,110,100, + 101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,110,101,100, + 100,0,9,117,110,100,101,102,105,1,101,100,97,0,100,0,9,117,110,100, + 101,102,105,110,101,100,107,0,4,16,0,1,144,107,0,4,61,139,186,181, + 107,0,4,10,8,201,49,100,0,9,117,110,100,101,102,105,110,101,100,100, + 0,9,117,110,100,101,102,105,0,101,100,100,0,9,117,110,100,101,102, + 105,110,101,100,104,2,104,3,98,0,0,7,214,97,11,97,20,104,3,97,17,97, + 16,97,21,106,108,0,0,0,3,104,2,97,1,104,2,104,3,98,0,0,7,214,97,11, + 97,20,104,3,97,17,97,167,20,104,2,97,4,104,2,104,3,98,0,0,7,214,97, + 11,97,20,104,3,97,17,97,16,97,21,104,2,97,10,104,2,104,3,98,0,0,7, + 214,97,11,97,20,104,3,97,17,97,16,97,26,106,100,0,5,118,101,114,57, + 57,100,0,9,117,110,0,101,102,105,110,101,100,107,0,2,0,244,107,0,4, + 10,6,102,195,107,0,4,10,6,102,195,100,0,9,117,110,100,101,102,105, + 110,101,100,100,0,9,117,110,100,101,102,105,110,101,100,107,0,125, + 248,143,0,203,25115,157,116,65,185,65,172,55,87,164,88,225,50,203, + 251,115,157,116,65,185,65,172,55,87,164,88,225,50,0,0,82,153,50,0, + 200,98,87,148,237,193,185,65,149,167,69,144,14,16,153,50,3,81,70,94, + 13,109,193,1,120,5,181,113,198,118,50,3,81,70,94,13,109,193,185,120, + 5,181,113,198,118,153,3,81,70,94,13,109,193,185,120,5,181,113,198, + 118,153,50,16,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,113,92,2,119,128,0,0, + 108,0,0,1,107,0,114,69,3,12,1,11,97,31,113,150,64,104,132,61,64,104, + 12,3,11,97,31,113,150,64,104,132,61,64,104,12,1,11,97,31,115,150,64, + 104,116,73,64,104,0,0,0,0,0,0,65,149,16,61,65,149,16,61,1,241,33,4,0, + 33,4,4,10,6,10,181,4,10,6,10,181,38,15,99,111,109,109,97,110,100,101, + 114,45,97,112,110,45,49,3,99,111,109,5,109,110,99,57,57,6,109,99,99, + 50,52,48,4,103,112,114,115,8,0,106>>, + Z = zlib:zip(B), ?m(B, zlib:unzip(Z)). %% Test gunzip. api_g_un_zip(Config) when is_list(Config) -> - ?m(?BARG,zlib:gzip(not_a_binary)), + ?m(?EXIT(badarg),zlib:gzip(not_a_binary)), Bin = <<1,11,1,23,45>>, Comp = zlib:gzip(Bin), + ?m(Comp, zlib:gzip(binary_to_list(Bin))), - ?m(?BARG, zlib:gunzip(not_a_binary)), - ?m(?DATA_ERROR, zlib:gunzip(<<171,171,171,171,171>>)), - ?m(?DATA_ERROR, zlib:gunzip(<<>>)), + ?m(?EXIT(badarg), zlib:gunzip(not_a_binary)), + ?m(?EXIT(data_error), zlib:gunzip(<<171,171,171,171,171>>)), + ?m(?EXIT(data_error), zlib:gunzip(<<>>)), ?m(Bin, zlib:gunzip(Comp)), ?m(Bin, zlib:gunzip(binary_to_list(Comp))), + %% RFC 1952: + %% + %% "A gzip file consists of a series of "members" (compressed data + %% sets). [...] The members simply appear one after another in the file, + %% with no additional information before, between, or after them." 
+ Concatenated = <<Bin/binary, Bin/binary>>, + ?m(Concatenated, zlib:gunzip([Comp, Comp])), + + %% Don't explode if the uncompressed size is a perfect multiple of the + %% internal inflate chunk size. + ChunkSizedData = <<0:16384/unit:8>>, + ?m(ChunkSizedData, zlib:gunzip(zlib:gzip(ChunkSizedData))), + %% Bad CRC; bad length. BadCrc = bad_crc_data(), - ?m({'EXIT',{data_error,_}},(catch zlib:gunzip(BadCrc))), + ?m(?EXIT(data_error),(catch zlib:gunzip(BadCrc))), BadLen = bad_len_data(), - ?m({'EXIT',{data_error,_}},(catch zlib:gunzip(BadLen))), + ?m(?EXIT(data_error),(catch zlib:gunzip(BadLen))), ok. bad_crc_data() -> @@ -594,30 +738,15 @@ intro(Config) when is_list(Config) -> large_deflate(Config) when is_list(Config) -> large_deflate_do(). large_deflate_do() -> - Z = zlib:open(), - Plain = rand_bytes(zlib:getBufSize(Z)*5), - ok = zlib:deflateInit(Z), - _ZlibHeader = zlib:deflate(Z, [], full), - Deflated = zlib:deflate(Z, Plain, full), - ?m(ok, zlib:close(Z)), - ?m(Plain, zlib:unzip(list_to_binary([Deflated, 3, 0]))). - -rand_bytes(Sz) -> - L = <<8,2,3,6,1,2,3,2,3,4,8,7,3,7,2,3,4,7,5,8,9,3>>, - rand_bytes(erlang:md5(L),Sz). - -rand_bytes(Bin, Sz) when byte_size(Bin) >= Sz -> - <<Res:Sz/binary, _/binary>> = Bin, - Res; -rand_bytes(Bin, Sz) -> - rand_bytes(<<(erlang:md5(Bin))/binary, Bin/binary>>, Sz). - + Plain = gen_determ_rand_bytes(64 bsl 10), + Deflated = zlib:zip(Plain), + ?m(Plain, zlib:unzip(Deflated)). %% Test a standard compressed zip file. zip_usage(Config) when is_list(Config) -> zip_usage(zip_usage({get_arg,Config})); zip_usage({get_arg,Config}) -> - Out = conf(data_dir,Config), + Out = get_data_dir(Config), {ok,ZIP} = file:read_file(filename:join(Out,"zipdoc.zip")), {ok,ORIG} = file:read_file(filename:join(Out,"zipdoc")), {run,ZIP,ORIG}; @@ -672,13 +801,13 @@ zip_usage({run,ZIP,ORIG}) -> ?m(ok, zlib:deflateInit(Z, default, deflated, -15, 8, default)), C2 = zlib:deflate(Z, ORIG, finish), - ?m(true, C1 == list_to_binary(C2)), + ?m(ORIG, zlib:unzip(C2)), ?m(ok, zlib:deflateEnd(Z)), ?m(ok, zlib:deflateInit(Z, none, deflated, -15, 8, filtered)), ?m(ok, zlib:deflateParams(Z, default, default)), C3 = zlib:deflate(Z, ORIG, finish), - ?m(true, C1 == list_to_binary(C3)), + ?m(ORIG, zlib:unzip(C3)), ?m(ok, zlib:deflateEnd(Z)), ok = zlib:close(Z), @@ -688,7 +817,7 @@ zip_usage({run,ZIP,ORIG}) -> gz_usage(Config) when is_list(Config) -> gz_usage(gz_usage({get_arg,Config})); gz_usage({get_arg,Config}) -> - Out = conf(data_dir,Config), + Out = get_data_dir(Config), {ok,GZIP} = file:read_file(filename:join(Out,"zipdoc.1.gz")), {ok,ORIG} = file:read_file(filename:join(Out,"zipdoc")), {ok,GZIP2} = file:read_file(filename:join(Out,"zipdoc.txt.gz")), @@ -709,7 +838,7 @@ gz_usage2(Config) -> case os:find_executable("gzip") of Name when is_list(Name) -> Z = zlib:open(), - Out = conf(data_dir,Config), + Out = get_data_dir(Config), {ok,ORIG} = file:read_file(filename:join(Out,"zipdoc")), Compressed = zlib:gzip(ORIG), GzOutFile = filename:join(Out,"out.gz"), @@ -737,7 +866,7 @@ gz_usage2(Config) -> compress_usage(Config) when is_list(Config) -> compress_usage(compress_usage({get_arg,Config})); compress_usage({get_arg,Config}) -> - Out = conf(data_dir,Config), + Out = get_data_dir(Config), {ok,C1} = file:read_file(filename:join(Out,"png-compressed.zlib")), {run,C1}; compress_usage({run,C1}) -> @@ -792,7 +921,7 @@ compress_usage({run,C1}) -> crc(Config) when is_list(Config) -> crc(crc({get_arg,Config})); crc({get_arg,Config}) -> - Out = conf(data_dir,Config), + Out = get_data_dir(Config), {ok,C1} = 
file:read_file(filename:join(Out,"zipdoc")), {run,C1}; crc({run,C1}) -> @@ -821,7 +950,7 @@ crc({run,C1}) -> adler(Config) when is_list(Config) -> adler(adler({get_arg,Config})); adler({get_arg,Config}) -> - Out = conf(data_dir,Config), + Out = get_data_dir(Config), File1 = filename:join(Out,"zipdoc"), {ok,C1} = file:read_file(File1), {run,C1}; @@ -869,10 +998,14 @@ dictionary_usage({run}) -> %% Now uncompress. Z2 = zlib:open(), ?m(ok, zlib:inflateInit(Z2)), - {'EXIT',{{need_dictionary,DictID},_}} = (catch zlib:inflate(Z2, Compressed)), + + ?m(?EXIT({need_dictionary, DictID}), zlib:inflate(Z2, Compressed)), + ?m(ok, zlib:inflateSetDictionary(Z2, Dict)), ?m(ok, zlib:inflateSetDictionary(Z2, binary_to_list(Dict))), + Uncompressed = ?m(B when is_list(B), zlib:inflate(Z2, [])), + ?m(ok, zlib:inflateEnd(Z2)), ?m(ok, zlib:close(Z2)), ?m(Data, list_to_binary(Uncompressed)). @@ -882,33 +1015,73 @@ split_bin(<<Part:1997/binary,Rest/binary>>, Acc) -> split_bin(Last,Acc) -> lists:reverse([Last|Acc]). +only_allow_owner(Config) when is_list(Config) -> + Z = zlib:open(), + Owner = self(), + + ?m(ok, zlib:inflateInit(Z)), + ?m(ok, zlib:inflateReset(Z)), + + {Pid, Ref} = spawn_monitor( + fun() -> + ?m(?EXIT(not_on_controlling_process), zlib:inflateReset(Z)), + Owner ! '$transfer_ownership', + receive + '$ownership_transferred' -> + ?m(ok, zlib:inflateReset(Z)) + after 200 -> + ct:fail("Never received transfer signal.") + end + end), + ownership_transfer_check(Z, Pid, Ref). + +ownership_transfer_check(Z, WorkerPid, Ref) -> + receive + '$transfer_ownership' -> + zlib:set_controlling_process(Z, WorkerPid), + WorkerPid ! '$ownership_transferred', + ownership_transfer_check(Z, WorkerPid, Ref); + {'DOWN', Ref, process, WorkerPid, normal} -> + ok; + {'DOWN', Ref, process, WorkerPid, Reason} -> + ct:fail("Spawned worker crashed with reason ~p.", [Reason]) + after 200 -> + ct:fail("Spawned worker timed out.") + end. + +sub_heap_binaries(Config) when is_list(Config) -> + Compressed = zlib:compress(<<"gurka">>), + ConfLen = erlang:length(Config), + + HeapBin = <<ConfLen:8/integer, Compressed/binary>>, + <<_:8/integer, SubHeapBin/binary>> = HeapBin, + + ?m(<<"gurka">>, zlib:uncompress(SubHeapBin)), + ok. %% Check concurrent access to zlib driver. smp(Config) -> - case erlang:system_info(smp_support) of - true -> - NumOfProcs = lists:min([8,erlang:system_info(schedulers)]), - io:format("smp starting ~p workers\n",[NumOfProcs]), - - %% Tests to run in parallel. - Funcs = [zip_usage, gz_usage, compress_usage, dictionary_usage, - crc, adler], - - %% We get all function arguments here to avoid repeated parallel - %% file read access. - FnAList = lists:map(fun(F) -> {F,?MODULE:F({get_arg,Config})} - end, Funcs), - - Pids = [spawn_link(?MODULE, worker, [rand:uniform(9999), - list_to_tuple(FnAList), - self()]) - || _ <- lists:seq(1,NumOfProcs)], - wait_pids(Pids); - - false -> - {skipped,"No smp support"} - end. + NumOfProcs = lists:min([8,erlang:system_info(schedulers)]), + io:format("smp starting ~p workers\n",[NumOfProcs]), + + %% Tests to run in parallel. + Funcs = + [zip_usage, gz_usage, compress_usage, dictionary_usage, + crc, adler], + + %% We get all function arguments here to avoid repeated parallel + %% file read access. + UsageArgs = + list_to_tuple([{F, ?MODULE:F({get_arg,Config})} || F <- Funcs]), + Parent = self(), + + WorkerFun = + fun() -> + worker(rand:uniform(9999), UsageArgs, Parent) + end, + Pids = [spawn_link(WorkerFun) || _ <- lists:seq(1, NumOfProcs)], + wait_pids(Pids). 
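only_allow_owner above encodes the ownership rule behind not_on_controlling_process: a zlib handle may only be driven from its controlling process until ownership is moved with zlib:set_controlling_process/2. A minimal sketch of such a hand-over, using only calls that appear in the test (process and message names invented):

    ownership_handover_example() ->
        Z = zlib:open(),
        ok = zlib:inflateInit(Z),
        Owner = self(),
        Worker = spawn(fun() ->
                           receive handed_over -> ok end,
                           ok = zlib:inflateReset(Z),   %% allowed after hand-over
                           ok = zlib:close(Z),
                           Owner ! done
                       end),
        zlib:set_controlling_process(Z, Worker),
        Worker ! handed_over,
        receive done -> ok end.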
worker(Seed, FnATpl, Parent) -> io:format("smp worker ~p, seed=~p~n",[self(),Seed]), @@ -999,43 +1172,98 @@ otp_9981(Config) when is_list(Config) -> Ports = lists:sort(erlang:ports()), ok. +-define(BENCH_SIZE, (16 bsl 20)). + +-define(DECOMPRESS_BENCH(Name, What, Data), + Name(Config) when is_list(Config) -> + Uncompressed = Data, + Compressed = zlib:compress(Uncompressed), + What(Compressed, byte_size(Uncompressed))). + +-define(COMPRESS_BENCH(Name, What, Data), + Name(Config) when is_list(Config) -> + Compressed = Data, + What(Compressed, byte_size(Compressed))). +?DECOMPRESS_BENCH(inflate_bench_zeroed, throughput_bench_inflate, + <<0:(8 * ?BENCH_SIZE)>>). +?DECOMPRESS_BENCH(inflate_bench_rand, throughput_bench_inflate, + gen_determ_rand_bytes(?BENCH_SIZE)). + +?DECOMPRESS_BENCH(chunk_bench_zeroed, throughput_bench_chunk, + <<0:(8 * ?BENCH_SIZE)>>). +?DECOMPRESS_BENCH(chunk_bench_rand, throughput_bench_chunk, + gen_determ_rand_bytes(?BENCH_SIZE)). + +?COMPRESS_BENCH(deflate_bench_zeroed, throughput_bench_deflate, + <<0:(8 * ?BENCH_SIZE)>>). +?COMPRESS_BENCH(deflate_bench_rand, throughput_bench_deflate, + gen_determ_rand_bytes(?BENCH_SIZE)). + +throughput_bench_inflate(Compressed, Size) -> + Z = zlib:open(), + zlib:inflateInit(Z), + + submit_throughput_results(Size, + fun() -> + zlib:inflate(Z, Compressed) + end). + +throughput_bench_deflate(Uncompressed, Size) -> + Z = zlib:open(), + zlib:deflateInit(Z), + + submit_throughput_results(Size, + fun() -> + zlib:deflate(Z, Uncompressed, finish) + end). + +throughput_bench_chunk(Compressed, Size) -> + Z = zlib:open(), + zlib:inflateInit(Z), + + ChunkLoop = + fun + Loop({more, _}) -> Loop(zlib:inflateChunk(Z)); + Loop(_) -> ok + end, + + submit_throughput_results(Size, + fun() -> + ChunkLoop(zlib:inflateChunk(Z, Compressed)) + end). + +submit_throughput_results(Size, Fun) -> + TimeTaken = measure_perf_counter(Fun, millisecond), + + KBPS = trunc((Size bsr 10) / (TimeTaken / 1000)), + ct_event:notify(#event{ name = benchmark_data, data = [{value,KBPS}] }), + {comment, io_lib:format("~p ms, ~p KBPS", [TimeTaken, KBPS])}. + +measure_perf_counter(Fun, Unit) -> + Start = os:perf_counter(Unit), + Fun(), + os:perf_counter(Unit) - Start. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%% Helps with testing directly %%%%%%%%%%%%% -conf(What,Config) -> - try proplists:get_value(What,Config) of - undefined -> - "./zlib_SUITE_data"; - Dir -> - Dir +get_data_dir(Config) -> + try proplists:get_value(data_dir,Config) of + undefined -> + "./zlib_SUITE_data"; + Dir -> + Dir catch - _:_ -> "./zlib_SUITE_data" + _:_ -> "./zlib_SUITE_data" end. -t() -> t([all]). - -t(What) when not is_list(What) -> - t([What]); -t(What) -> - lists:foreach(fun(T) -> - try ?MODULE:T([]) - catch _E:_R -> - Line = get(test_server_loc), - io:format("Failed ~p:~p ~p ~p ~p~n", - [T,Line,_E,_R, erlang:get_stacktrace()]) - end - end, expand(What)). - -expand(All) -> - lists:reverse(expand(All,[])). -expand([H|T], Acc) -> - case ?MODULE:H(suite) of - [] -> expand(T,[H|Acc]); - Cs -> - R = expand(Cs, Acc), - expand(T, R) - end; -expand([], Acc) -> Acc. - +%% Generates a bunch of statistically random bytes using the size as seed. +gen_determ_rand_bytes(Size) -> + gen_determ_rand_bytes(Size, erlang:md5_init(), <<>>). 
+gen_determ_rand_bytes(Size, _Context, Acc) when Size =< 0 -> + Acc; +gen_determ_rand_bytes(Size, Context0, Acc) when Size > 0 -> + Context = erlang:md5_update(Context0, <<Size/integer>>), + Checksum = erlang:md5_final(Context), + gen_determ_rand_bytes(Size - 16, Context, <<Acc/binary, Checksum/binary>>). diff --git a/lib/kernel/test/zzz_SUITE.erl b/lib/kernel/test/zzz_SUITE.erl new file mode 100644 index 0000000000..59c7fd7404 --- /dev/null +++ b/lib/kernel/test/zzz_SUITE.erl @@ -0,0 +1,37 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2006-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(zzz_SUITE). + +%% The sole purpose of this test suite is for things we want to run last +%% before the VM terminates. + +-export([all/0]). + +-export([lc_graph/1]). + + +all() -> + [lc_graph]. + +lc_graph(_Config) -> + %% Create "lc_graph" file in current working dir + %% if lock checker is enabled. + erts_debug:lc_graph(), + ok. diff --git a/lib/kernel/vsn.mk b/lib/kernel/vsn.mk index 4edecd8969..df95174c9f 100644 --- a/lib/kernel/vsn.mk +++ b/lib/kernel/vsn.mk @@ -1 +1 @@ -KERNEL_VSN = 5.3 +KERNEL_VSN = 6.1 |