55 files changed, 12105 insertions, 11 deletions
diff --git a/bootstrap/lib/kernel/ebin/logger.beam b/bootstrap/lib/kernel/ebin/logger.beam Binary files differnew file mode 100644 index 0000000000..698505d097 --- /dev/null +++ b/bootstrap/lib/kernel/ebin/logger.beam diff --git a/bootstrap/lib/kernel/ebin/logger_backend.beam b/bootstrap/lib/kernel/ebin/logger_backend.beam Binary files differnew file mode 100644 index 0000000000..ccc03d104c --- /dev/null +++ b/bootstrap/lib/kernel/ebin/logger_backend.beam diff --git a/bootstrap/lib/kernel/ebin/logger_config.beam b/bootstrap/lib/kernel/ebin/logger_config.beam Binary files differnew file mode 100644 index 0000000000..0fc49960b4 --- /dev/null +++ b/bootstrap/lib/kernel/ebin/logger_config.beam diff --git a/bootstrap/lib/kernel/ebin/logger_disk_log_h.beam b/bootstrap/lib/kernel/ebin/logger_disk_log_h.beam Binary files differnew file mode 100644 index 0000000000..c8f20360fd --- /dev/null +++ b/bootstrap/lib/kernel/ebin/logger_disk_log_h.beam diff --git a/bootstrap/lib/kernel/ebin/logger_filters.beam b/bootstrap/lib/kernel/ebin/logger_filters.beam Binary files differnew file mode 100644 index 0000000000..0e8c97c98e --- /dev/null +++ b/bootstrap/lib/kernel/ebin/logger_filters.beam diff --git a/bootstrap/lib/kernel/ebin/logger_formatter.beam b/bootstrap/lib/kernel/ebin/logger_formatter.beam Binary files differnew file mode 100644 index 0000000000..ce8308c503 --- /dev/null +++ b/bootstrap/lib/kernel/ebin/logger_formatter.beam diff --git a/bootstrap/lib/kernel/ebin/logger_h_common.beam b/bootstrap/lib/kernel/ebin/logger_h_common.beam Binary files differnew file mode 100644 index 0000000000..ad27475332 --- /dev/null +++ b/bootstrap/lib/kernel/ebin/logger_h_common.beam diff --git a/bootstrap/lib/kernel/ebin/logger_server.beam b/bootstrap/lib/kernel/ebin/logger_server.beam Binary files differnew file mode 100644 index 0000000000..61f50c8bef --- /dev/null +++ b/bootstrap/lib/kernel/ebin/logger_server.beam diff --git a/bootstrap/lib/kernel/ebin/logger_simple.beam b/bootstrap/lib/kernel/ebin/logger_simple.beam Binary files differnew file mode 100644 index 0000000000..be8017391f --- /dev/null +++ b/bootstrap/lib/kernel/ebin/logger_simple.beam diff --git a/bootstrap/lib/kernel/ebin/logger_std_h.beam b/bootstrap/lib/kernel/ebin/logger_std_h.beam Binary files differnew file mode 100644 index 0000000000..6258544c15 --- /dev/null +++ b/bootstrap/lib/kernel/ebin/logger_std_h.beam diff --git a/bootstrap/lib/kernel/ebin/logger_sup.beam b/bootstrap/lib/kernel/ebin/logger_sup.beam Binary files differnew file mode 100644 index 0000000000..9bef3c861c --- /dev/null +++ b/bootstrap/lib/kernel/ebin/logger_sup.beam diff --git a/bootstrap/lib/kernel/include/logger.hrl b/bootstrap/lib/kernel/include/logger.hrl new file mode 100644 index 0000000000..2143ccd297 --- /dev/null +++ b/bootstrap/lib/kernel/include/logger.hrl @@ -0,0 +1,49 @@ +-ifndef(LOGGER_HRL). +-define(LOGGER_HRL,true). +-define(LOG_EMERGENCY(A),?DO_LOG(emergency,[A])). +-define(LOG_EMERGENCY(A,B),?DO_LOG(emergency,[A,B])). +-define(LOG_EMERGENCY(A,B,C),?DO_LOG(emergency,[A,B,C])). + +-define(LOG_ALERT(A),?DO_LOG(alert,[A])). +-define(LOG_ALERT(A,B),?DO_LOG(alert,[A,B])). +-define(LOG_ALERT(A,B,C),?DO_LOG(alert,[A,B,C])). + +-define(LOG_CRITICAL(A),?DO_LOG(critical,[A])). +-define(LOG_CRITICAL(A,B),?DO_LOG(critical,[A,B])). +-define(LOG_CRITICAL(A,B,C),?DO_LOG(critical,[A,B,C])). + +-define(LOG_ERROR(A),?DO_LOG(error,[A])). +-define(LOG_ERROR(A,B),?DO_LOG(error,[A,B])). +-define(LOG_ERROR(A,B,C),?DO_LOG(error,[A,B,C])). 
+ +-define(LOG_WARNING(A),?DO_LOG(warning,[A])). +-define(LOG_WARNING(A,B),?DO_LOG(warning,[A,B])). +-define(LOG_WARNING(A,B,C),?DO_LOG(warning,[A,B,C])). + +-define(LOG_NOTICE(A),?DO_LOG(notice,[A])). +-define(LOG_NOTICE(A,B),?DO_LOG(notice,[A,B])). +-define(LOG_NOTICE(A,B,C),?DO_LOG(notice,[A,B,C])). + +-define(LOG_INFO(A),?DO_LOG(info,[A])). +-define(LOG_INFO(A,B),?DO_LOG(info,[A,B])). +-define(LOG_INFO(A,B,C),?DO_LOG(info,[A,B,C])). + +-define(LOG_DEBUG(A),?DO_LOG(debug,[A])). +-define(LOG_DEBUG(A,B),?DO_LOG(debug,[A,B])). +-define(LOG_DEBUG(A,B,C),?DO_LOG(debug,[A,B,C])). + +-define(LOCATION,#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY}, + line=>?LINE, + file=>?FILE}). + +%%%----------------------------------------------------------------- +%%% Internal, i.e. not intended for direct use in code - use above +%%% macros instead! +-define(DO_LOG(Level,Args), + case logger:allow(Level,?MODULE) of + true -> + apply(logger,macro_log,[?LOCATION,Level|Args]); + false -> + ok + end). +-endif. diff --git a/lib/kernel/doc/src/Makefile b/lib/kernel/doc/src/Makefile index 2413541082..10bdf46119 100644 --- a/lib/kernel/doc/src/Makefile +++ b/lib/kernel/doc/src/Makefile @@ -56,6 +56,10 @@ XML_REF3_FILES = application.xml \ inet.xml \ inet_res.xml \ init_stub.xml \ + logger.xml \ + logger_std_h.xml \ + logger_filters.xml \ + logger_formatter.xml \ net_adm.xml \ net_kernel.xml \ os.xml \ @@ -70,11 +74,17 @@ XML_REF4_FILES = app.xml config.xml XML_REF6_FILES = kernel_app.xml -XML_PART_FILES = -XML_CHAPTER_FILES = notes.xml +XML_PART_FILES = part.xml +XML_CHAPTER_FILES = \ + notes.xml \ + introduction_chapter.xml \ + logger_chapter.xml BOOK_FILES = book.xml +IMAGE_FILES = \ + logger_arch.png + XML_FILES = \ $(BOOK_FILES) $(XML_CHAPTER_FILES) \ $(XML_PART_FILES) $(XML_REF3_FILES) $(XML_REF4_FILES)\ @@ -111,7 +121,7 @@ SPECS_FLAGS = -I../../include # ---------------------------------------------------- # Targets # ---------------------------------------------------- -$(HTMLDIR)/%.gif: %.gif +$(HTMLDIR)/%: % $(INSTALL_DATA) $< $@ docs: man pdf html @@ -120,11 +130,12 @@ $(TOP_PDF_FILE): $(XML_FILES) pdf: $(TOP_PDF_FILE) -html: gifs $(HTML_REF_MAN_FILE) +html: images $(HTML_REF_MAN_FILE) man: $(MAN3_FILES) $(MAN4_FILES) $(MAN6_FILES) -gifs: $(GIF_FILES:%=$(HTMLDIR)/%) +images: $(IMAGE_FILES:%=$(HTMLDIR)/%) + debug opt: clean clean_docs: diff --git a/lib/kernel/doc/src/book.xml b/lib/kernel/doc/src/book.xml index 81a87d126d..0b69b547e7 100644 --- a/lib/kernel/doc/src/book.xml +++ b/lib/kernel/doc/src/book.xml @@ -34,6 +34,9 @@ <preamble> <contents level="2"></contents> </preamble> + <parts lift="yes"> + <xi:include href="part.xml"/> + </parts> <applications> <xi:include href="ref_man.xml"/> </applications> diff --git a/lib/kernel/doc/src/introduction_chapter.xml b/lib/kernel/doc/src/introduction_chapter.xml new file mode 100644 index 0000000000..6e6990ddda --- /dev/null +++ b/lib/kernel/doc/src/introduction_chapter.xml @@ -0,0 +1,64 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE chapter SYSTEM "chapter.dtd"> + +<chapter> + <header> + <copyright> + <year>2017</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
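Before moving on to the documentation changes, here is a minimal sketch (not part of this commit) of how application code might use the macros and ?LOCATION metadata added above. The module name, messages and metadata keys are invented for illustration.

%% Hypothetical module using the macros from kernel/include/logger.hrl.
-module(my_app_worker).
-export([do_work/1]).

-include_lib("kernel/include/logger.hrl").

do_work(Item) ->
    ?LOG_INFO("starting work on ~p", [Item]),                 % Format + Args
    case handle(Item) of
        {ok, Result} ->
            ?LOG_DEBUG(#{event => work_done, item => Item}),  % report map
            {ok, Result};
        {error, Reason} ->
            %% Format + Args + extra metadata; logger merges it with ?LOCATION
            ?LOG_ERROR("work on ~p failed: ~p", [Item, Reason], #{item => Item}),
            {error, Reason}
    end.

handle(Item) when is_list(Item) -> {ok, length(Item)};
handle(_)                       -> {error, badarg}.

Since ?DO_LOG wraps the call in a case on logger:allow(Level, ?MODULE), the log data is only assembled when the level is actually enabled.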
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + </legalnotice> + + <title>Introduction</title> + <prepared></prepared> + <responsible></responsible> + <docno></docno> + <approved></approved> + <checked></checked> + <date></date> + <rev></rev> + <file>introduction.xml</file> + </header> + + <section> + <title>Scope</title> + <p>The Kernel application has all the code necessary to run + the Erlang runtime system: file servers, code servers, + and so on.</p> + <p>The Kernel application is the first application started. It is + mandatory in the sense that the minimal system based on + Erlang/OTP consists of Kernel and STDLIB. Kernel + contains the following functional areas:</p> + <list type="bulleted"> + <item>Start, stop, supervision, configuration, and distribution of applications</item> + <item>Code loading</item> + <item>Logging</item> + <item>Error logging</item> + <item>Global name service</item> + <item>Supervision of Erlang/OTP</item> + <item>Communication with sockets</item> + <item>Operating system interface</item> + </list> + </section> + + <section> + <title>Prerequisites</title> + <p>It is assumed that the reader is familiar with the Erlang programming + language.</p> + </section> +</chapter> + + diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml new file mode 100644 index 0000000000..66e6e5c689 --- /dev/null +++ b/lib/kernel/doc/src/logger.xml @@ -0,0 +1,478 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE erlref SYSTEM "erlref.dtd"> + +<erlref> + <header> + <copyright> + <year>2017</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + </legalnotice> + + <title>logger</title> + <prepared></prepared> + <responsible></responsible> + <docno></docno> + <approved></approved> + <checked></checked> + <date></date> + <rev>A</rev> + <file>logger.xml</file> + </header> + <module>logger</module> + <modulesummary>API module for the logger application.</modulesummary> + + <description> + + </description> + + <datatypes> + <datatype> + <name name="level"/> + <desc> + <p>The severity level for the message to be logged.</p> + </desc> + </datatype> + <datatype> + <name name="log"/> + <desc> + <p></p> + </desc> + </datatype> + <datatype> + <name name="report"/> + <desc> + <p></p> + </desc> + </datatype> + <datatype> + <name name="msg_fun"/> + <desc> + <p></p> + </desc> + </datatype> + <datatype> + <name name="metadata"/> + <desc> + <p>Metadata associated with the message to be logged.</p> + </desc> + </datatype> + <datatype> + <name name="config"/> + <desc> + <p></p> + </desc> + </datatype> + <datatype> + <name name="handler_id"/> + <desc> + <p></p> + </desc> + </datatype> + <datatype> + <name name="filter_id"/> + <desc> + <p></p> + </desc> + </datatype> + <datatype> + <name name="filter"/> + <desc> + <p></p> + </desc> + </datatype> + <datatype> + <name name="filter_return"/> + <desc> + <p></p> + </desc> + </datatype> + </datatypes> + + <section> + <title>Macros</title> + <p>The following macros are defined:</p> + + <list> + <item><c>?LOG_EMERGENCY(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_EMERGENCY(FunOrFormat,Args[,Metadata])</c></item> + <item><c>?LOG_ALERT(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_ALERT(FunOrFormat,Args[,Metadata])</c></item> + <item><c>?LOG_CRITICAL(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_CRITICAL(FunOrFormat,Args[,Metadata])</c></item> + <item><c>?LOG_ERROR(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_ERROR(FunOrFormat,Args[,Metadata])</c></item> + <item><c>?LOG_WARNING(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_WARNING(FunOrFormat,Args[,Metadata])</c></item> + <item><c>?LOG_NOTICE(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_NOTICE(FunOrFormat,Args[,Metadata])</c></item> + <item><c>?LOG_INFO(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_INFO(FunOrFormat,Args[,Metadata])</c></item> + <item><c>?LOG_DEBUG(StringOrReport[,Metadata])</c></item> + <item><c>?LOG_DEBUG(FunOrFormat,Args[,Metadata])</c></item> + </list> + + <p>All macros expand to a call to logger, where <c>Level</c> is + taken from the macro name, and the following metadata is added, + or merged with the given <c>Metadata</c>:</p> + + <code> +#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY}, + file=>?FILE, + line=>?LINE} + </code> + + <p>The call is wrapped in a case statement and will be evaluated + only if <c>Level</c> is equal to or below the configured log + level.</p> + </section> + + <funcs> + <func> + <name>emergency(StringOrReport[,Metadata])</name> + <name>emergency(Format,Args[,Metadata])</name> + <name>emergency(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>emergency</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(emergency,...)</c></seealso>.</p> + </desc> + </func> + + <func> + <name>alert(StringOrReport[,Metadata])</name> + <name>alert(Format,Args[,Metadata])</name> + <name>alert(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>alert</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(alert,...)</c></seealso>.</p> + </desc> + </func> + + <func> + 
<name>critical(StringOrReport[,Metadata])</name> + <name>critical(Format,Args[,Metadata])</name> + <name>critical(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>critical</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(critical,...)</c></seealso>.</p> + </desc> + </func> + + <func> + <name>error(StringOrReport[,Metadata])</name> + <name>error(Format,Args[,Metadata])</name> + <name>error(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>error</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(error,...)</c></seealso>.</p> + </desc> + </func> + + <func> + <name>warning(StringOrReport[,Metadata])</name> + <name>warning(Format,Args[,Metadata])</name> + <name>warning(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>warning</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(warning,...)</c></seealso>.</p> + </desc> + </func> + + <func> + <name>notice(StringOrReport[,Metadata])</name> + <name>notice(Format,Args[,Metadata])</name> + <name>notice(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>notice</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(notice,...)</c></seealso>.</p> + </desc> + </func> + + <func> + <name>info(StringOrReport[,Metadata])</name> + <name>info(Format,Args[,Metadata])</name> + <name>info(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>info</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(info,...)</c></seealso>.</p> + </desc> + </func> + + <func> + <name>debug(StringOrReport[,Metadata])</name> + <name>debug(Format,Args[,Metadata])</name> + <name>debug(Fun,FunArgs[,Metadata])</name> + <fsummary>Logs the given message as level <c>debug</c>.</fsummary> + <desc> + <p>Equivalent to + <seealso marker="#log-2"><c>log(debug,...)</c></seealso>.</p> + </desc> + </func> + + <func> + <name name="log" arity="2"/> + <name name="log" arity="3" clause_i="1"/> + <name name="log" arity="3" clause_i="2"/> + <name name="log" arity="3" clause_i="3"/> + <name name="log" arity="4" clause_i="1"/> + <name name="log" arity="4" clause_i="2"/> + <fsummary>Logs the given message.</fsummary> + <type variable="Level"/> + <type variable="StringOrReport" name_i="1"/> + <type variable="Format" name_i="3"/> + <type variable="Args" name_i="3"/> + <type variable="Fun" name_i="4"/> + <type variable="FunArgs" name_i="4"/> + <type variable="Metadata"/> + <desc> + <p>Log the given message.</p> + </desc> + </func> + + <func> + <name name="get_logger_config" arity="0"/> + <fsummary>Lookup the current configuration for logger.</fsummary> + <desc> + <p>Lookup the current configuration for logger.</p> + </desc> + </func> + + <func> + <name name="get_handler_config" arity="1"/> + <fsummary>Lookup the current configuration for the given handler.</fsummary> + <desc> + <p>Lookup the current configuration for the given handler.</p> + </desc> + </func> + + <func> + <name name="i" arity="0"/> + <fsummary>Get information about all logger configurations</fsummary> + <desc> + <p>Same as <seealso marker="#i/1"><c>logger:i(term)</c></seealso></p> + </desc> + </func> + + <func> + <name name="i" arity="1" clause_i="1"/> + <name name="i" arity="1" clause_i="2"/> + <name name="i" arity="1" clause_i="3"/> + <fsummary>Get information about all logger configurations</fsummary> + <desc> + <p>The <c>logger:i/1</c> function can be used to get all + current 
logger configuration. The way that the information + is returned depends on the <c><anno>Action</anno></c></p> + <taglist> + <tag>string</tag> + <item>Return the pretty printed current logger configuration + as iodata.</item> + <tag>term</tag> + <item>Return the current logger configuration as a term. The + format of this term may change inbetween releases. For a + stable format use <seealso marker="#get_handler_config/1"> + <c>logger:get_handler_config/1</c></seealso> + and <seealso marker="#get_logger_config/0"> + <c>logger:get_logger_config/0</c></seealso>. + The same as calling <c>logger:i()</c>.</item> + <tag>print</tag> + <item>Pretty print all the current logger configuration to + standard out. Example: + <code><![CDATA[1> logger:i(). +Current logger configuration: + Level: info + FilterDefault: log + Filters: + Handlers: + Id: logger_std_h + Module: logger_std_h + Level: info + Formatter: + Module: logger_formatter + Config: #{template => [{logger_formatter,header},"\n",msg,"\n"], + legacy_header => true} + Filter Default: stop + Filters: + Id: stop_progress + Fun: fun logger_filters:progress/2 + Config: stop + Id: remote_gl + Fun: fun logger_filters:remote_gl/2 + Config: stop + Id: domain + Fun: fun logger_filters:domain/2 + Config: {log,prefix_of,[beam,erlang,otp,sasl]} + Id: no_domain + Fun: fun logger_filters:domain/2 + Config: {log,no_domain,[]} + Handler Config: + logger_std_h: #{type => standard_io} + Level set per module: + Module: my_module + Level: debug]]></code> + </item> + </taglist> + </desc> + </func> + + <func> + <name name="add_logger_filter" arity="2"/> + <fsummary>Add a filter to the logger.</fsummary> + <desc> + <p>Add a filter to the logger.</p> + </desc> + </func> + + <func> + <name name="add_handler_filter" arity="3"/> + <fsummary>Add a filter to the specified handler.</fsummary> + <desc> + <p>Add a filter to the specified handler.</p> + </desc> + </func> + + <func> + <name name="remove_logger_filter" arity="1"/> + <fsummary>Remove a filter from the logger.</fsummary> + <desc> + <p>Remove the filter with the specified identity from the logger.</p> + </desc> + </func> + + <func> + <name name="remove_handler_filter" arity="2"/> + <fsummary>Remove a filter from the specified handler.</fsummary> + <desc> + <p>Remove the filter with the specified identity from the given handler.</p> + </desc> + </func> + + <func> + <name name="add_handler" arity="3"/> + <fsummary>Add a handler with the given configuration.</fsummary> + <desc> + <p>Add a handler with the given configuration.</p> + </desc> + </func> + + <func> + <name name="remove_handler" arity="1"/> + <fsummary>Remove the handler with the specified identity.</fsummary> + <desc> + <p>Remove the handler with the specified identity.</p> + </desc> + </func> + + <func> + <name name="set_module_level" arity="2"/> + <fsummary>Set the log level for the specified module.</fsummary> + <desc> + <p>Set the log level for the specified module.</p> + <p>To change the logging level globally, use + <seealso marker="#set_logger_config/2"><c>logger:set_logger_config(level, Level)</c></seealso>. + </p> + </desc> + </func> + + <func> + <name name="reset_module_level" arity="1"/> + <fsummary>Remove a module specific log setting.</fsummary> + <desc> + <p>Remove a module specific log setting. 
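As a hedged shell sketch of how the module-level functions above interact with the global level (the module name my_module is invented; return values follow the chapter examples further down):

%% Raise verbosity for one module only, then fall back to the global level.
logger:set_logger_config(level, info),      % global level stays at info
logger:set_module_level(my_module, debug),  % per-module override
%% ... debug events from my_module are now allowed ...
logger:reset_module_level(my_module).       % global level applies again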
After this, the + global log level is used for the specified module.</p> + </desc> + </func> + + <func> + <name name="set_logger_config" arity="1"/> + <name name="set_logger_config" arity="2"/> + <fsummary>Add or update configuration data for the logger.</fsummary> + <desc> + <p>Add or update configuration data for the logger.</p> + </desc> + </func> + + <func> + <name name="set_handler_config" arity="2"/> + <name name="set_handler_config" arity="3"/> + <fsummary>Add or update configuration data for the specified + handler.</fsummary> + <desc> + <p>Add or update configuration data for the specified + handler.</p> + </desc> + </func> + + <func> + <name name="compare_levels" arity="2"/> + <fsummary>Compare the severity of two log levels.</fsummary> + <desc> + <p>Compare the severity of two log levels. Returns <c>gt</c> + if <c>Level1</c> is more severe than + <c>Level2</c>, <c>lt</c> if <c>Level1</c> is less severe, + and <c>eq</c> if the levels are equal.</p> + </desc> + </func> + + <func> + <name name="set_process_metadata" arity="1"/> + <fsummary>Set metadata to use when logging from current process.</fsummary> + <desc> + <p>Set metadata which <c>logger</c> automatically inserts it + in all log events produced on the current + process. Subsequent calls will overwrite previous data set + by this function.</p> + <p>When logging, location data produced by the log macros, + and/or metadata given as argument to the log call (API + function or macro), will be merged with the process + metadata. If the same keys occur, values from the metadata + argument to the log call will overwrite values in the + process metadata, which in turn will overwrite values from + the location data.</p> + </desc> + </func> + + <func> + <name name="get_process_metadata" arity="0"/> + <fsummary>Retrieve data set with set_process_metadata/1.</fsummary> + <desc> + <p>Retrieve data set + with <seealso marker="#set_process_metadata-1"> + <c>set_process_metadata/1</c></seealso>.</p> + </desc> + </func> + + <func> + <name name="unset_process_metadata" arity="0"/> + <fsummary>Delete data set with set_process_metadata/1.</fsummary> + <desc> + <p>Delete data set + with <seealso marker="#set_process_metadata-1"> + <c>set_process_metadata/1</c></seealso>.</p> + </desc> + </func> + + </funcs> + +</erlref> + + diff --git a/lib/kernel/doc/src/logger_arch.png b/lib/kernel/doc/src/logger_arch.png Binary files differnew file mode 100644 index 0000000000..727609a6ef --- /dev/null +++ b/lib/kernel/doc/src/logger_arch.png diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml new file mode 100644 index 0000000000..2a325453da --- /dev/null +++ b/lib/kernel/doc/src/logger_chapter.xml @@ -0,0 +1,641 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE chapter SYSTEM "chapter.dtd"> + +<chapter> + <header> + <copyright> + <year>2017</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + </legalnotice> + + <title>Logging</title> + <prepared></prepared> + <docno></docno> + <date></date> + <rev></rev> + <file>logger_chapter.xml</file> + </header> + + <section> + <title>Overview</title> + <p>Erlang/OTP provides a standard API for logging. The backend of + this API can be used as is, or it can be customized to suite + specific needs.</p> + <p>It consists of two parts - the <em>logger</em> part and the + <em>handler</em> part. The logger will forward log events to one + or more handler(s).</p> + + <image file="logger_arch.png"> + <icaption>Conceptual overview</icaption> + </image> + + <p><em>Filters</em> can be added to the logger and to each + handler. The filters decide if an event is to be forwarded or + not, and they can also modify all parts of the log event.</p> + + <p>A <em>formatter</em> can be set for each handler. The formatter + does the final formatting of the log event, including the log + message itself, and possibly a timestamp, header and other + metadata.</p> + + <p>In accordance with the Syslog protocol, RFC-5424, eight + severity levels can be specified:</p> + + <table align="left"> + <row> + <cell><strong>Level</strong></cell> + <cell align="center"><strong>Integer</strong></cell> + <cell><strong>Description</strong></cell> + </row> + <row> + <cell>emergency</cell> + <cell align="center">0</cell> + <cell>system is unusable</cell> + </row> + <row> + <cell>alert</cell> + <cell align="center">1</cell> + <cell>action must be taken immediately</cell> + </row> + <row> + <cell>critical</cell> + <cell align="center">2</cell> + <cell>critical contidions</cell> + </row> + <row> + <cell>error</cell> + <cell align="center">3</cell> + <cell>error conditions</cell> + </row> + <row> + <cell>warning</cell> + <cell align="center">4</cell> + <cell>warning conditions</cell> + </row> + <row> + <cell>notice</cell> + <cell align="center">5</cell> + <cell>normal but significant conditions</cell> + </row> + <row> + <cell>info</cell> + <cell align="center">6</cell> + <cell>informational messages</cell> + </row> + <row> + <cell>debug</cell> + <cell align="center">7</cell> + <cell>debug-level messages</cell> + </row> + <tcaption>Severity levels</tcaption> + </table> + + <p>A log event is allowed by Logger if the integer value of + its <c>Level</c> is less than or equal to the currently + configured log level. The log level can be configured globally, + or to allow more verbose logging from a specific part of the + system, per module.</p> + + <section> + <title>Customizable parts</title> + + <taglist> + <tag><marker id="Handler"/>Handler</tag> + <item> + <p>A handler is defined as a module exporting the following + function:</p> + + <code>log(Log, Config) -> ok</code> + + <p>A handler is called by the logger backend after filtering on + logger level and on handler level for the handler which is + about to be called. The function call is done on the client + process, and it is up to the handler implementation if other + processes are to be involved or not.</p> + + <p>Multiple instances of the same handler can be + added. Configuration is per instance.</p> + + </item> + + <tag><marker id="Filter"/>Filter</tag> + <item> + <p>Filters can be set on the logger or on a handler. Logger + filters are applied first, and if passed, the handler filters + for each handler are applied. 
The handler plugin is only + called if all handler filters for the handler in question also + pass.</p> + + <p>A filter is specified as:</p> + + <code>{fun((Log,Extra) -> Log | stop | ignore), Extra}</code> + + <p>The configuration parameter <c>filter_default</c> + specifies the behavior if all filters return <c>ignore</c>. + <c>filter_default</c> is by default set to <c>log</c>.</p> + + <p>The <c>Extra</c> parameter may contain any data that the + filter needs.</p> + </item> + + <tag><marker id="Formatter"/>Formatter</tag> + <item> + <p>A formatter is defined as a module exporting the following + function:</p> + + <code>format(Log,Extra) -> string()</code> + + <p>The formatter plugin is called by each handler, and the + returned string can be printed to the handler's destination + (stdout, file, ...).</p> + </item> + + </taglist> + </section> + + <section> + <title>Built-in handlers</title> + + <taglist> + <tag><c>logger_std_h</c></tag> + <item> + <p>This is the default handler used by OTP. Multiple instances + can be started, and each instance will write log events to a + given destination, console or file. Filters can be used for + selecting which event to send to which handler instance.</p> + </item> + + <tag><c>logger_disk_log_h</c></tag> + <item> + <p>This handler behaves much like logger_std_h, except it uses + <seealso marker="disk_log"><c>disk_log</c></seealso> as its + destination.</p> + </item> + + <tag><marker id="ErrorLoggerManager"/><c>error_logger</c></tag> + <item> + <p>This handler is to be used for backwards compatibility + only. It is not started by default, but will be automatically + started the first time an event handler is added + with <seealso marker="error_logger#add_report_handler-1"> + <c>error_logger:add_report_handler/1,2</c></seealso>.</p> + + <p>No built-in event handlers exist.</p> + </item> + </taglist> + </section> + + <section> + <title>Built-in filters</title> + + <taglist> + <tag><c>logger_filters:domain/2</c></tag> + <item> + <p>This filter provides a way of filtering log events based on a + <c>domain</c> field <c>Metadata</c>. See + <seealso marker="logger_filters#domain-2"> + <c>logger_filters:domain/2</c></seealso></p> + </item> + + <tag><c>logger_filters:level/2</c></tag> + <item> + <p>This filter provides a way of filtering log events based + on the log level. See <seealso marker="logger_filters#domain-2"> + <c>logger_filters:domain/2</c></seealso></p> + </item> + + <tag><c>logger_filters:progress/2</c></tag> + <item> + <p>This filter matches all progress reports + from <c>supervisor</c> and <c>application_controller</c>. + See <seealso marker="logger_filters#progress/2"> + <c>logger_filters:progress/2</c></seealso></p> + </item> + + <tag><c>logger_filters:remote_gl/2</c></tag> + <item> + <p>This filter matches all events originating from a process + that has its group leader on a remote node. + See <seealso marker="logger_filters#remote_gl/2"> + <c>logger_filters:remote_gl/2</c></seealso></p> + </item> + </taglist> + </section> + + <section> + <title>Default formatter</title> + + <p>The default formatter is <c>logger_formatter</c>. 
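Returning to the filter contract shown earlier in this section, here is a hedged, hand-written filter that mirrors what the built-in logger_filters:level/2 does: it stops events less severe than a threshold passed in Extra. The filter id min_level and attaching it to the default logger_std_h handler instance are illustrative choices only.

%% Stop events that are less severe than the Threshold given as Extra.
MinLevel = fun(#{level := Level} = Log, Threshold) ->
                   case logger:compare_levels(Level, Threshold) of
                       lt -> stop;   % less severe than Threshold: drop the event
                       _  -> Log     % gt or eq: pass the event on unchanged
                   end
           end,
logger:add_handler_filter(logger_std_h, min_level, {MinLevel, warning}).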
+ See <seealso marker="logger_formatter#format-2"> + <c>logger_formatter:format/2</c></seealso>.</p> + </section> + </section> + + <section> + <title>Configuration</title> + + <section> + <title>Application environment variables</title> + <p>See <seealso marker="kernel_app#configuration">Kernel(6)</seealso> for + information about the application environment variables that can + be used for configuring logger.</p> + </section> + + <section> + <title>Logger configuration</title> + + <taglist> + <tag><c>level</c></tag> + <item> + <p>Specifies the severity level to log.</p> + </item> + <tag><c>filters</c></tag> + <item> + <p>Logger filters are added or removed with + <seealso marker="logger#add_logger_filter-2"> + <c>logger:add_logger_filter/2</c></seealso> and + <seealso marker="logger#remove_logger_filter-1"> + <c>logger:remove_logger_filter/1</c></seealso>, + respectively.</p> + <p>See <seealso marker="#Filter">Filter</seealso> for more + information.</p> + <p>By default, no filters exist.</p> + </item> + <tag><c>filter_default = log | stop</c></tag> + <item> + <p>Specifies what to do with an event if all filters + return <c>ignore</c>.</p> + <p>Default is <c>log</c>.</p> + </item> + <tag><c>handlers</c></tag> + <item> + <p>Handlers are added or removed with + <seealso marker="logger#add_handler-3"> + <c>logger:add_handler/3</c></seealso> and + <seealso marker="logger#remove_handler-1"> + <c>logger:remove_handler/1</c></seealso>, + respectively.</p> + <p>See <seealso marker="#Handler">Handler</seealso> for more + information.</p> + </item> + </taglist> + </section> + + <section> + <title>Handler configuration</title> + <taglist> + <tag><c>level</c></tag> + <item> + <p>Specifies the severity level to log.</p> + </item> + <tag><c>filters</c></tag> + <item> + <p>Handler filters can be specified when adding the handler, + or added or removed later with + <seealso marker="logger#add_handler_filter-3"> + <c>logger:add_handler_filter/3</c></seealso> and + <seealso marker="logger#remove_handler_filter-2"> + <c>logger:remove_handler_filter/2</c></seealso>, + respectively.</p> + <p>See <seealso marker="#Filter">Filter</seealso> for more + information.</p> + <p>By default, no filters exist.</p> + </item> + <tag><c>filter_default = log | stop</c></tag> + <item> + <p>Specifies what to do with an event if all filters + return <c>ignore</c>.</p> + <p>Default is <c>log</c>.</p> + </item> + <tag><c>depth = pos_integer() | unlimited</c></tag> + <item> + <p>Specifies if the depth of terms in the log events shall + be limited by using control characters <c>~P</c> + and <c>~W</c> instead of <c>~p</c> and <c>~w</c>, + respectively. See + <seealso marker="stdlib:io#format-1"><c>io:format</c></seealso>.</p> + </item> + <tag><c>max_size = pos_integer() | unlimited</c></tag> + <item> + <p>Specifies if the size of a log event shall be limited by + truncating the formatted string.</p> + </item> + <tag><c>formatter = {Module::module(),Extra::term()}</c></tag> + <item> + <p>See <seealso marker="#Formatter">Formatter</seealso> for more + information.</p> + <p>The default module is <seealso marker="logger_formatter"> + <c>logger_formatter</c></seealso>, and <c>Extra</c> is + it's configuration map.</p> + </item> + </taglist> + + <p>Note that <c>level</c> and <c>filters</c> are obeyed by + Logger itself before forwarding the log events to each + handler, while <c>depth</c>, <c>max_size</c> + and <c>formatter</c> are left to the handler + implementation. 
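The handler configuration keys above can all be given when the handler is added. A hedged sketch (handler id, file name and limit values are invented; the logger_std_h-specific type option follows the chapter example further down):

%% Add a logger_std_h instance writing info and more severe events to a
%% file, with deep terms and large events truncated by the handler.
logger:add_handler(file_handler, logger_std_h,
                   #{level => info,
                     filter_default => log,
                     filters => [],
                     depth => 30,
                     max_size => 1024,
                     formatter => {logger_formatter, #{single_line => true}},
                     logger_std_h => #{type => {file, "./info.log"}}}).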
All Logger's built-in handlers do apply these + configuration parameters before printing.</p> + </section> + + </section> + + <section> + <marker id="compatibility"/> + <title>Backwards compatibility with error_logger</title> + <p>Logger provides backwards compatibility with the old + <c>error_logger</c> in the following ways:</p> + + <taglist> + <tag>Legacy event handlers</tag> + <item> + <p>To use event handlers written for <c>error_logger</c>, just + add your event handler with</p> + <code> +error_logger:add_report_handler/1,2. + </code> + <p>This will automatically start the <c>error_logger</c> + event manager, and add <c>error_logger</c> as a + handler to <c>logger</c>, with configuration</p> +<code> +#{level=>info, + filter_default=>log, + filters=>[]}. +</code> + <p>Note that this handler will ignore events that do not + originate from the old <c>error_logger</c> API, or from + within OTP. This means that if your code uses the logger API + for logging, then your log events will be discarded by this + handler.</p> + <p>Also note that <c>error_logger</c> is not overload + protected.</p> + </item> + <tag>Logger API</tag> + <item> + <p>The old <c>error_logger</c> API still exists, but should + only be used by legacy code. It will be removed in a later + release.</p> + </item> + <tag>Output format</tag> + <item> + <p>To get log events on the same format as produced + by <c>error_logger_tty_h</c> and <c>error_logger_file_h</c>, + use the default formatter, <c>logger_formatter</c>, with + configuration parameter <c>legacy_header=>true</c>. This is + also the default.</p> + </item> + <tag>Default format of log events from OTP</tag> + <item> + <p>By default, all log events originating from within OTP, + except the former so called "SASL reports", look the same as + before.</p> + </item> + <tag>SASL reports</tag> + <item> + <p>By SASL reports we mean supervisor reports, crash reports + and progress reports.</p> + <p>In earlier releases, these reports were only logged when + the SASL application was running, and they were printed + trough specific event handlers + named <c>sasl_report_tty_h</c> + and <c>sasl_report_file_h</c>.</p> + <p>The destination of these log events were configured by + environment variables for the SASL application.</p> + <p>Due to the specific event handlers, the output format + slightly differed from other log events.</p> + <p>As of OTP-21, the concept of SASL reports is removed, + meaning that the default behavior is as follows:</p> + <list> + <item>Supervisor reports, crash reports and progress reports + are no longer connected to the SASL application.</item> + <item>Supervisor reports and crash reports are logged by + default.</item> + <item>Progress reports are not logged by default, but can be + enabled with the kernel environment + variable <c>logger_log_progress</c>.</item> + <item>The output format is the same for all log + events.</item> + </list> + <p>If the old behavior is preferred, the kernel environment + variable <c>logger_sasl_compatible</c> can be set + to <c>true</c>. 
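A hedged sys.config sketch for the two Kernel variables mentioned above; that they take boolean values is an assumption here, see the Kernel application documentation for the authoritative description:

%% Hypothetical sys.config fragment.
[{kernel,
  [{logger_log_progress, true},         %% also log progress reports
   {logger_sasl_compatible, false}]}].  %% keep the new, non-SASL behaviour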
The old SASL environment variables can then + be used as before, and the SASL reports will only be printed + if the SASL application is running - through a second log + handler named <c>sasl_h</c>.</p> + <p>All SASL reports have a metadata + field <c>domain=>[beam,erlang,otp,sasl]</c>, which can be + used, for example, by filters to to stop or allow the + events.</p> + </item> + </taglist> + </section> + + + <section> + <title>Error handling</title> + <p>Log data is expected to be either a format string and + arguments, a string (unicode:chardata), or a report (map or + key-value list) which can be converted to a format string and + arguments by the handler. A default report callback should be + included in the log event's metadata, which can be used for + converting the report to a format string and arguments. The + handler might also do a custom conversion if the default format + is not desired.</p> + <p><c>logger</c> does, to a certain extent, check its input data + before forwarding a log event to the handlers, but it does not + evaluate conversion funs or check the validity of format strings + and arguments. This means that any filter or handler must be + careful when formatting the data of a log event, making sure + that it does not crash due to bad input data or faulty + callbacks.</p> + <p>If a filter or handler still crashes, logger will remove the + filter or handler in question from the configuration, and then + print a short error message on the console. A debug event + containing the crash reason and other details is also issued, + and can be seen if a handler is installed which logs on debug + level.</p> + </section> + + <section> + <title>Example: add a handler to log debug events to file</title> + <p>When starting an erlang node, the default behavior is that all + log events with level info and above are logged to the + console. In order to also log debug events, you can either + change the global log level to <c>debug</c> or add a separate + handler to take care of this. In this example we will add a new + handler which prints the debug events to a separate file.</p> + <p>First, we add an instance of logger_std_h with + type <c>{file,File}</c>, and we set the handler's level + to <c>debug</c>:</p> + <pre> +1> <input>Config = #{level=>debug,logger_std_h=>#{type=>{file,"./debug.log"}}}.</input> +#{logger_std_h => #{type => {file,"./debug.log"}}, + level => debug} +2> <input>logger:add_handler(debug_handler,logger_std_h,Config).</input> +ok</pre> + <p>By default, the handler receives all events, so we need to add a filter + to stop all non-debug events:</p> + <pre> +3> <input>Fun = fun(#{level:=debug}=Log,_) -> Log; (_,_) -> stop end.</input> +#Fun<erl_eval.12.98642416> +4> <input>logger:add_handler_filter(debug_handler,allow_debug,{Fun,[]}).</input> +ok</pre> + <p>And finally, we need to make sure that the logger itself allows + debug events. 
This can either be done by setting the global + logger level:</p> + <pre> +5> <input>logger:set_logger_config(level,debug).</input> +ok</pre> + <p>Or by allowing debug events from one or a few modules only:</p> + <pre> +6> <input>logger:set_module_level(mymodule,debug).</input> +ok</pre> + + </section> + + <section> + <title>Example: implement a handler</title> + <p>The only requirement that a handler MUST fulfill is to export + the following function:</p> + <code>log(logger:log(),logger:config()) ->ok</code> + <p>It may also implement the following callbacks:</p> + <code> +adding_handler(logger:handler_id(),logger:config()) -> {ok,logger:config()} | {error,term()} +removing_handler(logger:handler_id()) -> ok +changing_config(logger:handler_id(),logger:config(),logger:config()) -> {ok,logger:config()} | {error,term()} + </code> + <p>When logger:add_handler(Id,Module,Config) is called, logger + will first call Module:adding_handler(Id,Config), and if it + returns {ok,NewConfig} the NewConfig is written to the + configuration database. After this, the handler may receive log + events as calls to Module:log/2.</p> + <p>A handler can be removed by calling + logger:remove_handler(Id). logger will call + Module:removing_handler(Id), and then remove the handler's + configuration from the configuration database.</p> + <p>When logger:set_handler_config is called, logger calls + Module:changing_config(Id,OldConfig,NewConfig). If this function + returns ok, the NewConfig is written to the configuration + database.</p> + + <p>A simple handler which prints to the console could be + implemented as follows:</p> + <code> +-module(myhandler). +-export([log/2]). + +log(#{msg:={report,R}},_) -> + io:format("~p~n",[R]); +log(#{msg:={string,S}},_) -> + io:put_chars(S); +log(#{msg:={F,A}},_) -> + io:format(F,A). + </code> + + <p>A simple handler which prints to file could be implemented like + this:</p> + <code> +-module(myhandler). +-export([adding_handler/2, removing_handler/1, log/2]). +-export([init/1, handle_call/3, handle_cast/2, terminate/2]). + +adding_handler(Id,Config) -> + {ok,Fd} = file:open(File,[append,{encoding,utf8}]), + {ok,Config#{myhandler_fd=>Fd}}. + +removing_handler(Id,#{myhandler_fd:=Fd}) -> + _ = file:close(Fd), + ok. + +log(#{msg:={report,R}},#{myhandler_fd:=Fd}) -> + io:format(Fd,"~p~n",[R]); +log(#{msg:={string,S}},#{myhandler_fd:=Fd}) -> + io:put_chars(Fd,S); +log(#{msg:={F,A}},#{myhandler_fd:=Fd}) -> + io:format(Fd,F,A). + </code> + + <p>Note that none of the above handlers have any overload + protection, and all log events are printed directly from the + client process. Neither do the handlers use the formatter or + in any way add time or other metadata to the printed events.</p> + + <p>For examples of overload protection, please refer to the + implementation + of <seealso marker="logger_std_h"><c>logger_std_h</c></seealso> + and <!--<seealso marker="logger_disk_log_h"--><c>logger_disk_log_h</c> + <!--/seealso-->.</p> + + <p>Below is a simpler example of a handler which logs through one + single process, and uses the default formatter to gain a common + look of the log events.</p> + <p>It also uses the metadata field <c>report_cb</c>, if it exists, + to print reports in the way the event issuer suggests. The + formatter will normally do this, but if the handler either has + an own default (as in this example) or if the + given <c>report_cb</c> should not be used at all, then the + handler must take care of this itself.</p> + <code> +-module(myhandler). 
+-export([adding_handler/2, removing_handler/1, log/2]). +-export([init/1, handle_call/3, handle_cast/2, terminate/2]). + +adding_handler(Id,Config) -> + {ok,Pid} = gen_server:start(?MODULE,Config), + {ok,Config#{myhandler_pid=>Pid}}. + +removing_handler(Id,#{myhandler_pid:=Pid}) -> + gen_server:stop(Pid). + +log(Log,#{myhandler_pid:=Pid} = Config) -> + gen_server:cast(Pid,{log,Log,Config}). + +init(#{myhandler_file:=File}) -> + {ok,Fd} = file:open(File,[append,{encoding,utf8}]), + {ok,#{file=>File,fd=>Fd}}. + +handle_call(_,_,State) -> + {reply,{error,bad_request},State}. + +handle_cast({log,Log,Config},#{fd:=Fd} = State) -> + do_log(Fd,Log,Config), + {noreply,State}. + +terminate(Reason,#{fd:=Fd}) -> + _ = file:close(Fd), + ok. + +do_log(Fd,#{msg:={report,R}} = Log, Config) -> + Fun = maps:get(report_cb,Config,fun my_report_cb/1, + {F,A} = Fun(R), + do_log(Fd,Log#{msg=>{F,A},Config); +do_log(Fd,Log,#{formatter:={FModule,FConfig}}) -> + String = FModule:format(Log,FConfig), + io:put_chars(Fd,String). + +my_report_cb(R) -> + {"~p",[R]}. + </code> + </section> + + + <section> + <title>See Also</title> + <p><seealso marker="error_logger"><c>error_logger(3)</c></seealso>, + <seealso marker="sasl:sasl_app"><c>SASL(6)</c></seealso></p> + </section> +</chapter> diff --git a/lib/kernel/doc/src/logger_filters.xml b/lib/kernel/doc/src/logger_filters.xml new file mode 100644 index 0000000000..d742391e35 --- /dev/null +++ b/lib/kernel/doc/src/logger_filters.xml @@ -0,0 +1,191 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE erlref SYSTEM "erlref.dtd"> + +<erlref> + <header> + <copyright> + <year>2018</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + </legalnotice> + + <title>logger_filters</title> + <prepared></prepared> + <responsible></responsible> + <docno></docno> + <approved></approved> + <checked></checked> + <date></date> + <rev>A</rev> + <file>logger_filters.xml</file> + </header> + <module>logger_filters</module> + <modulesummary>Filters to use with logger.</modulesummary> + + <description> + <p>Filters to use with logger. All functions exported from this + module can be used as logger or handler + filters. 
See <seealso marker="logger#add_logger_filter-2"> + <c>logger:add_logger_filter/2</c></seealso> + and <seealso marker="logger#add_handler_filter-3"> + <c>logger:add_handler_filter/3</c></seealso> + for more information about how filters are added.</p> + </description> + + <funcs> + <func> + <name name="domain" arity="2"/> + <fsummary>Filter log events based on the domain field in metadata.</fsummary> + <desc> + <p>This filter provides a way of filtering log events based on a + <c>domain</c> field <c>Metadata</c>.</p> + + <p>The <c><anno>Extra</anno></c> parameter is specified when + adding the filter + via <seealso marker="logger#add_logger_filter-2"> + <c>logger:add_logger_filter/2</c></seealso> + or <seealso marker="logger#add_handler_filter-3"> + <c>logger:add_handler_filter/3</c></seealso>.</p> + + <p>The filter compares the value of the <c>domain</c> field + in the log event's metadata (<c>Domain</c>) + to <c><anno>MatchDomain</anno></c> as follows:</p> + + <taglist> + <tag><c><anno>Compare</anno> = starts_with</c></tag> + <item><p>The filter matches if <c>MatchDomain</c> is a prefix + of <c>Domain</c>.</p></item> + <tag><c><anno>Compare</anno> = prefix_of</c></tag> + <item><p>The filter matches if <c>Domain</c> is a prefix + of <c>MatchDomain</c>.</p></item> + <tag><c><anno>Compare</anno> = equals</c></tag> + <item><p>The filter matches if <c>Domain</c> is equal + to <c>MatchDomain</c>.</p></item> + <tag><c><anno>Compare</anno> = no_domain</c></tag> + <item><p>The filter matches if there is no domain field in + metadata. In this case <c><anno>MatchDomain</anno></c> shall + be <c>[]</c>.</p></item> + </taglist> + + <p>If the filter matches and <c><anno>Action</anno> = + log</c>, the log event is allowed. If the filter matches + and <c><anno>Action</anno> = stop</c>, the log event is + stopped.</p> + + <p>If the filter does not match, it returns <c>ignore</c>, + meaning that other filters, or the value of the + configuration parameter <c>filter_default</c>, will decide + if the event is allowed or not.</p> + + <p>Log events that do not contain any domain field, will + only match when <c><anno>Compare</anno> = no_domain</c>.</p> + + <p>Example: stop all events with + domain <c>[beam,erlang,otp,sasl|_]</c></p> + + <code> +logger:set_handler_config(h1,filter_default,log). % this is the default +Filter = {fun logger_filters:domain/2,{stop,starts_with,[beam,erlang,otp,sasl]}}. +logger:add_handler_filter(h1,no_sasl,Filter). +ok</code> + </desc> + </func> + + <func> + <name name="level" arity="2"/> + <fsummary>Filter log events based on the log level.</fsummary> + <desc> + <p>This filter provides a way of filtering log events based + on the log level. It matches log events by comparing the + log level with a predefined <c>MatchLevel</c></p> + + <p>The <c><anno>Extra</anno></c> parameter is specified when + adding the filter + via <seealso marker="logger#add_logger_filter-2"> + <c>logger:add_logger_filter/2</c></seealso> + or <seealso marker="logger#add_handler_filter-3"> + <c>logger:add_handler_filter/3</c></seealso>.</p> + + <p>The filter compares the value of the event's log level + (<c>Level</c>) to <c><anno>MatchLevel</anno></c> by + calling <seealso marker="logger#compare_levels-2"> + <c>logger:compare_levels(Level,MatchLevel) -> CmpRet</c></seealso>. 
It + matches the event if:</p> + + <list> + <item><c>CmpRet = eq</c> and <c><anno>Operator</anno> = + eq | lteq | gteq</c></item> + <item><c>CmpRet = lt</c> and <c><anno>Operator</anno> = + lt | lteq | neq</c></item> + <item><c>CmpRet = gt</c> and <c><anno>Operator</anno> = + gt | gteq | neq</c></item> + </list> + + <p>If the filter matches and <c><anno>Action</anno> = + log</c>, the log event is allowed. If the filter matches + and <c><anno>Action</anno> = stop</c>, the log event is + stopped.</p> + + <p>If the filter does not match, it returns <c>ignore</c>, + meaning that other filters, or the value of the + configuration parameter <c>filter_default</c>, will decide + if the event is allowed or not.</p> + + <p>Example: only allow debug level log events</p> + + <code> +logger:set_handler_config(h1,filter_default,stop). +Filter = {fun logger_filters:level/2,{log,eq,debug}}. +logger:add_handler_filter(h1,debug_only,Filter). +ok</code> + </desc> + </func> + + <func> + <name name="progress" arity="2"/> + <fsummary>Filter progress reports from supervisor and application_controller.</fsummary> + <desc> + <p>This filter matches all progress reports + from <c>supervisor</c> and <c>application_controller</c>.</p> + + <p>If <c><anno>Extra</anno> = log</c>, the progress reports + are allowed. If <c><anno>Extra</anno> = stop</c>, the + progress reports are stopped.</p> + + <p>The filter returns <c>ignore</c> for all other log events.</p> + </desc> + </func> + + <func> + <name name="remote_gl" arity="2"/> + <fsummary>Filter events with group leader on remote node.</fsummary> + <desc> + <p>This filter matches all events originating from a process + that has its group leader on a remote node.</p> + + <p>If <c><anno>Extra</anno> = log</c>, the matching events + are allowed. If <c><anno>Extra</anno> = stop</c>, the + matching events are stopped.</p> + + <p>The filter returns <c>ignore</c> for all other log events.</p> + </desc> + </func> + + </funcs> + +</erlref> + + diff --git a/lib/kernel/doc/src/logger_formatter.xml b/lib/kernel/doc/src/logger_formatter.xml new file mode 100644 index 0000000000..213a592e47 --- /dev/null +++ b/lib/kernel/doc/src/logger_formatter.xml @@ -0,0 +1,149 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE erlref SYSTEM "erlref.dtd"> + +<erlref> + <header> + <copyright> + <year>2017</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+ + </legalnotice> + + <title>logger_formatter</title> + <prepared></prepared> + <responsible></responsible> + <docno></docno> + <approved></approved> + <checked></checked> + <date></date> + <rev>A</rev> + <file>logger_formatter.xml</file> + </header> + <module>logger_formatter</module> + <modulesummary>Default formatter for the Logger application.</modulesummary> + + <description> + <p>Default formatter for the Logger application.</p> + </description> + + <datatypes> + <datatype> + <name name="template"/> + <desc> + </desc> + </datatype> + </datatypes> + + <funcs> + <func> + <name name="format" arity="2"/> + <fsummary>Formats the given message.</fsummary> + <desc> + <p>Formats the given message.</p> + <p>The template is a list of atoms, tuples and strings. Atoms + can be <c>level</c> or <c>msg</c>, which are placeholders + for the severity level and the log message, + repectively. Tuples are interpreted as placeholders for + metadata. Each element in the tuple must be an atom which + matches a key in the nested metadata map, e.g. the + tuple <c>{key1,key2}</c> will be replaced by the value of + the key2 field in this nested map (the value vill be + converted to a string):</p> + +<code> +#{key1=>#{key2=>my_value, + ...}, + ...}</code> + + + <p> Strings are printed literally.</p> + + <p><c>depth</c> is a positive integer representing the maximum + depth to which terms shall be printed by this + formatter. Format strings passed to this formatter are + rewritten. The format controls ~p and ~w are replaced with + ~P and ~W, respectively, and the value is used as the depth + parameter. For details, see + <seealso marker="stdlib:io#format-2">io:format/2</seealso> + in STDLIB.</p> + + <p><c>max_size</c> is a positive integer representing the + maximum size a string returned from this formatter can + have. If the formatted string is longer, it will be + truncated.</p> + + <p><c>utc</c> is a boolean. If set to true, all dates are + displayed in Universal Coordinated Time. Default + is <c>false</c>.</p> + + <p><c>report_cb</c> must be a function with arity 1, + returning <c>{Format,Args}</c>. This function will replace + any <c>report_cb</c> found in metadata.</p> + + <p>If <c>single_line=true</c>, all newlines in the message are + replaced with <c>", "</c>, and whitespaces following directly + after newlines are removed. Note that newlines added by the + formatter template are not replaced.</p> + + <p>If <c>legacy_header=true</c> a header field is added to + logger_formatter's part of <c>Metadata</c>. The value of + this field is a string similar to the header created by the + old <c>error_logger</c> event handlers. It can be included + in the log event by adding the + tuple <c>{logger_formatter,header}</c> to the template.</p> + + <p>The default template when <c>legacy_header=true</c> is</p> + + <code>[{logger_formatter,header},"\n",msg,"\n"]</code> + + <p>which will cause log entries like this:</p> + + <code>=ERROR REPORT==== 29-Dec-2017::13:30:51.245123 === + process: <0.74.0> + exit_reason: "Something went wrong"</code> + + <p>Note that all eight levels might occur here, not + only <c>ERROR</c>, <c>WARNING</c> or <c>INFO</c>. 
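Putting the formatter options above together, a hedged sketch of switching the default handler to single-line, UTC output; the handler id logger_std_h is the default instance shown in the logger:i() example, and the template is the documented single-line default:

logger:set_handler_config(logger_std_h, formatter,
                          {logger_formatter,
                           #{single_line => true,
                             utc => true,
                             template => [time, " ", level, ": ", msg, "\n"]}}).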
And also + that micro seconds are added at the end of the + timestamp.</p> + + <p>The default template when <c>single_line=true</c> is</p> + + <code>[time," ",level,": ",msg,"\n"]</code> + + <p>which will cause log entries like this:</p> + + <code>2017-12-29 13:31:49.640317 error: process: <0.74.0>, exit_reason: "Something went wrong"</code> + + <p>The default template when both <c>legacy_header</c> and + <c>single_line</c> are set to false is:</p> + + <code>[time," ",level,":\n",msg,"\n"]</code> + + <p>which will cause log entries like this:</p> + + <code>2017-12-29 13:32:25.191925 error: + process: <0.74.0> + exit_reason: "Something went wrong"</code> + + </desc> + </func> + + </funcs> + +</erlref> + + diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml new file mode 100644 index 0000000000..2a368b16eb --- /dev/null +++ b/lib/kernel/doc/src/logger_std_h.xml @@ -0,0 +1,81 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE erlref SYSTEM "erlref.dtd"> + +<erlref> + <header> + <copyright> + <year>2017</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + </legalnotice> + + <title>logger_std_h</title> + <prepared></prepared> + <responsible></responsible> + <docno></docno> + <approved></approved> + <checked></checked> + <date></date> + <rev>A</rev> + <file>logger_std_h.xml</file> + </header> + <module>logger_std_h</module> + <modulesummary>Default handler for the Logger application.</modulesummary> + + <description> + <p>This is the default handler for the Logger + application. Multiple instances of this handler can be added to + logger, and each instance will print logs to <c>standard_io</c>, + <c>standard_error</c> or to a file.</p> + + <p>To add a new instance, + use <seealso marker="logger#add_handler-3"><c>logger:add_handler/3</c> + </seealso>.</p> + + <p>The handler configuration may contain the following keys, + associated with values as described:</p> + <taglist> + <tag><c>filters</c></tag> + <item> + <p>A list of <c>{Id,{Fun,Args}}</c>, each representing a filter + that may selct or modify log events to forward to this + handler.</p></item> + <tag><c>filter_default</c></tag> + <item> + <p>The atom <c>log</c> or <c>stop</c>, specifying what to + do with a log event if all filters + return <c>ignore</c>.</p></item> + <tag><c>formatter</c></tag> + <item> + <p><c>{Module,Extra}</c>, + where <c>Module:format(Log,Extra)</c> will be called by + the handler to produce the string that will be printed to + the handler's destination.</p></item> + <tag><c>level</c></tag> + <item> + <p>The level of log events that <c>logger</c> shall forward to + this handler. 
Log events of the specified level, or more severe
+ levels, are forwarded.</p></item>
+ </taglist>
+
+ </description>
+
+<!-- <funcs>
+ </funcs> -->
+
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/part.xml b/lib/kernel/doc/src/part.xml
new file mode 100644
index 0000000000..68eb4530e2
--- /dev/null
+++ b/lib/kernel/doc/src/part.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE part SYSTEM "part.dtd">
+
+<part xmlns:xi="http://www.w3.org/2001/XInclude">
+ <header>
+ <copyright>
+ <year>1996</year><year>2017</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Logger User's Guide</title>
+ <prepared>OTP Team</prepared>
+ <docno></docno>
+ <date>2017-12-01</date>
+ <rev>0.1</rev>
+ <file>part.xml</file>
+ </header>
+ <description>
+ <p>The Kernel application provides Logger, the API and framework
+ for logging in Erlang/OTP. This guide describes how to use and
+ configure Logger and its handlers.</p>
+ </description>
+ <xi:include href="introduction_chapter.xml"/>
+ <xi:include href="logger_chapter.xml"/>
+</part>
+
diff --git a/lib/kernel/doc/src/ref_man.xml b/lib/kernel/doc/src/ref_man.xml
index 5cd77e0f6f..7f4d5b384f 100644
--- a/lib/kernel/doc/src/ref_man.xml
+++ b/lib/kernel/doc/src/ref_man.xml
@@ -52,6 +52,10 @@
 <xi:include href="inet.xml"/>
 <xi:include href="inet_res.xml"/>
 <xi:include href="init_stub.xml"/>
+ <xi:include href="logger.xml"/>
+ <xi:include href="logger_filters.xml"/>
+ <xi:include href="logger_formatter.xml"/>
+ <xi:include href="logger_std_h.xml"/>
 <xi:include href="net_adm.xml"/>
 <xi:include href="net_kernel.xml"/>
 <xi:include href="os.xml"/>
diff --git a/lib/kernel/doc/src/specs.xml b/lib/kernel/doc/src/specs.xml
index 29d52f23bb..a67e26ef16 100644
--- a/lib/kernel/doc/src/specs.xml
+++ b/lib/kernel/doc/src/specs.xml
@@ -20,6 +20,10 @@
 <xi:include href="../specs/specs_inet.xml"/>
 <xi:include href="../specs/specs_inet_res.xml"/>
 <xi:include href="../specs/specs_init_stub.xml"/>
+ <xi:include href="../specs/specs_logger.xml"/>
+ <xi:include href="../specs/specs_logger_filters.xml"/>
+ <xi:include href="../specs/specs_logger_formatter.xml"/>
+ <xi:include href="../specs/specs_logger_std_h.xml"/>
 <xi:include href="../specs/specs_net_adm.xml"/>
 <xi:include href="../specs/specs_net_kernel.xml"/>
 <xi:include href="../specs/specs_os.xml"/>
diff --git a/lib/kernel/include/logger.hrl b/lib/kernel/include/logger.hrl
new file mode 100644
index 0000000000..2143ccd297
--- /dev/null
+++ b/lib/kernel/include/logger.hrl
@@ -0,0 +1,49 @@
+-ifndef(LOGGER_HRL).
+-define(LOGGER_HRL,true).
+-define(LOG_EMERGENCY(A),?DO_LOG(emergency,[A])).
+-define(LOG_EMERGENCY(A,B),?DO_LOG(emergency,[A,B])).
+-define(LOG_EMERGENCY(A,B,C),?DO_LOG(emergency,[A,B,C])).
+
+-define(LOG_ALERT(A),?DO_LOG(alert,[A])).
+-define(LOG_ALERT(A,B),?DO_LOG(alert,[A,B])).
+-define(LOG_ALERT(A,B,C),?DO_LOG(alert,[A,B,C])).
+
+-define(LOG_CRITICAL(A),?DO_LOG(critical,[A])).
+-define(LOG_CRITICAL(A,B),?DO_LOG(critical,[A,B])).
+-define(LOG_CRITICAL(A,B,C),?DO_LOG(critical,[A,B,C])). + +-define(LOG_ERROR(A),?DO_LOG(error,[A])). +-define(LOG_ERROR(A,B),?DO_LOG(error,[A,B])). +-define(LOG_ERROR(A,B,C),?DO_LOG(error,[A,B,C])). + +-define(LOG_WARNING(A),?DO_LOG(warning,[A])). +-define(LOG_WARNING(A,B),?DO_LOG(warning,[A,B])). +-define(LOG_WARNING(A,B,C),?DO_LOG(warning,[A,B,C])). + +-define(LOG_NOTICE(A),?DO_LOG(notice,[A])). +-define(LOG_NOTICE(A,B),?DO_LOG(notice,[A,B])). +-define(LOG_NOTICE(A,B,C),?DO_LOG(notice,[A,B,C])). + +-define(LOG_INFO(A),?DO_LOG(info,[A])). +-define(LOG_INFO(A,B),?DO_LOG(info,[A,B])). +-define(LOG_INFO(A,B,C),?DO_LOG(info,[A,B,C])). + +-define(LOG_DEBUG(A),?DO_LOG(debug,[A])). +-define(LOG_DEBUG(A,B),?DO_LOG(debug,[A,B])). +-define(LOG_DEBUG(A,B,C),?DO_LOG(debug,[A,B,C])). + +-define(LOCATION,#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY}, + line=>?LINE, + file=>?FILE}). + +%%%----------------------------------------------------------------- +%%% Internal, i.e. not intended for direct use in code - use above +%%% macros instead! +-define(DO_LOG(Level,Args), + case logger:allow(Level,?MODULE) of + true -> + apply(logger,macro_log,[?LOCATION,Level|Args]); + false -> + ok + end). +-endif. diff --git a/lib/kernel/src/Makefile b/lib/kernel/src/Makefile index 0bc9f121a0..702845512c 100644 --- a/lib/kernel/src/Makefile +++ b/lib/kernel/src/Makefile @@ -109,6 +109,17 @@ MODULES = \ kernel_refc \ local_udp \ local_tcp \ + logger \ + logger_backend \ + logger_config \ + logger_std_h \ + logger_disk_log_h \ + logger_h_common \ + logger_filters \ + logger_formatter \ + logger_server \ + logger_simple \ + logger_sup \ net \ net_adm \ net_kernel \ @@ -132,13 +143,14 @@ MODULES = \ HRL_FILES= ../include/file.hrl ../include/inet.hrl ../include/inet_sctp.hrl \ ../include/dist.hrl ../include/dist_util.hrl \ - ../include/net_address.hrl + ../include/net_address.hrl ../include/logger.hrl INTERNAL_HRL_FILES= application_master.hrl disk_log.hrl \ erl_epmd.hrl hipe_ext_format.hrl \ inet_dns.hrl inet_res.hrl \ inet_boot.hrl inet_config.hrl inet_int.hrl \ - inet_dns_record_adts.hrl + inet_dns_record_adts.hrl \ + logger_internal.hrl logger_h_common.hrl ERL_FILES= $(MODULES:%=%.erl) @@ -223,7 +235,7 @@ release_docs_spec: # Include dependencies -- list below added by Kostis Sagonas -$(EBIN)/application_controller.beam: application_master.hrl +$(EBIN)/application_controller.beam: application_master.hrl ../include/logger.hrl $(EBIN)/application_master.beam: application_master.hrl $(EBIN)/auth.beam: ../include/file.hrl $(EBIN)/code.beam: ../include/file.hrl @@ -234,6 +246,7 @@ $(EBIN)/disk_log_server.beam: disk_log.hrl $(EBIN)/dist_util.beam: ../include/dist_util.hrl ../include/dist.hrl $(EBIN)/erl_boot_server.beam: inet_boot.hrl $(EBIN)/erl_epmd.beam: inet_int.hrl erl_epmd.hrl +$(EBIN)/error_logger.beam: logger_internal.hrl ../include/logger.hrl $(EBIN)/file.beam: ../include/file.hrl file_int.hrl $(EBIN)/file_io_server.beam: ../include/file.hrl file_int.hrl $(EBIN)/gen_tcp.beam: inet_int.hrl @@ -259,6 +272,16 @@ $(EBIN)/inet_udp.beam: inet_int.hrl $(EBIN)/inet_sctp.beam: inet_int.hrl ../include/inet_sctp.hrl $(EBIN)/local_udp.beam: inet_int.hrl $(EBIN)/local_tcp.beam: inet_int.hrl +$(EBIN)/logger.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/logger_backend.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/logger_config.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/logger_disk_log_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl 
+$(EBIN)/logger_filters.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/logger_formatter.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/logger_server.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/logger_simple.beam: logger_internal.hrl ../include/logger.hrl +$(EBIN)/logger_std_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl +$(EBIN)/logger_h_common.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl $(EBIN)/net_kernel.beam: ../include/net_address.hrl $(EBIN)/os.beam: ../include/file.hrl $(EBIN)/ram_file.beam: ../include/file.hrl diff --git a/lib/kernel/src/kernel.app.src b/lib/kernel/src/kernel.app.src index 82a3571da9..afffcd156e 100644 --- a/lib/kernel/src/kernel.app.src +++ b/lib/kernel/src/kernel.app.src @@ -60,6 +60,17 @@ kernel_refc, local_tcp, local_udp, + logger, + logger_backend, + logger_config, + logger_disk_log_h, + logger_filters, + logger_formatter, + logger_h_common, + logger_server, + logger_simple, + logger_std_h, + logger_sup, net, net_adm, net_kernel, @@ -117,6 +128,8 @@ kernel_config, kernel_refc, kernel_sup, + logger, + logger_sup, net_kernel, net_sup, rex, @@ -127,7 +140,7 @@ inet_db, pg2]}, {applications, []}, - {env, [{error_logger, tty}]}, + {env, []}, {mod, {kernel, []}}, {runtime_dependencies, ["erts-10.0", "stdlib-3.5", "sasl-3.0"]} ] diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl new file mode 100644 index 0000000000..7606912da4 --- /dev/null +++ b/lib/kernel/src/logger.erl @@ -0,0 +1,799 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger). + +%% Log interface +-export([emergency/1,emergency/2,emergency/3, + alert/1,alert/2,alert/3, + critical/1,critical/2,critical/3, + error/1,error/2,error/3, + warning/1,warning/2,warning/3, + notice/1,notice/2,notice/3, + info/1,info/2,info/3, + debug/1,debug/2,debug/3]). +-export([log/2,log/3,log/4]). + +%% Called by macro +-export([allow/2,macro_log/3,macro_log/4,macro_log/5,add_default_metadata/1]). + +%% Configuration +-export([add_handler/3, remove_handler/1, + add_logger_filter/2, add_handler_filter/3, + remove_logger_filter/1, remove_handler_filter/2, + set_module_level/2, reset_module_level/1, + set_logger_config/1, set_logger_config/2, + set_handler_config/2, set_handler_config/3, + get_logger_config/0, get_handler_config/1]). + +%% Misc +-export([compare_levels/2]). +-export([set_process_metadata/1, unset_process_metadata/0, + get_process_metadata/0]). +-export([i/0, i/1]). +-export([setup_standard_handler/0, replace_simple_handler/3]). +-export([limit_term/1, get_format_depth/0, get_max_size/0, get_utc_config/0]). + +%% Basic report formatting +-export([format_report/1, format_otp_report/1]). + +-export([internal_log/2,filter_stacktrace/2]). + +-include("logger_internal.hrl"). +-include("logger.hrl"). 
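%% -----------------------------------------------------------------
%% [Editor's illustration, not part of this patch] A minimal sketch
%% of a user module calling the macro API from the new
%% kernel/include/logger.hrl added above. The module name and message
%% contents are invented; each ?LOG_* macro expands to a
%% logger:allow/2 check followed by a call to logger:macro_log/3..5,
%% as defined by ?DO_LOG in that header.
%%
%%   -module(my_app_worker).
%%   -include_lib("kernel/include/logger.hrl").
%%   -export([handle/1]).
%%
%%   handle(Req) ->
%%       ?LOG_DEBUG("got request: ~p", [Req]),
%%       ?LOG_ERROR(#{what => not_implemented, request => Req}),
%%       {error, not_implemented}.
%% -----------------------------------------------------------------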
+ +%%%----------------------------------------------------------------- +%%% Types +-type log() :: #{level=>level(), + msg=>{io:format(),[term()]} | + {report,report()} | + {string,unicode:chardata()}, + meta=>metadata()}. +-type level() :: emergency | alert | critical | error | + warning | notice | info | debug. +-type report() :: map() | [{atom(),term()}]. +-type msg_fun() :: fun((term()) -> {io:format(),[term()]} | + report() | + unicode:chardata()). +-type metadata() :: map(). + +-type handler_id() :: atom(). +-type filter_id() :: atom(). +-type filter() :: {fun((log(),term()) -> filter_return()),term()}. +-type filter_return() :: stop | ignore | log(). +-type config() :: map(). + +-export_type([log/0,level/0,report/0,msg_fun/0,metadata/0,config/0,handler_id/0, + filter_id/0,filter/0,filter_return/0]). + +%%%----------------------------------------------------------------- +%%% API +emergency(X) -> + log(emergency,X). +emergency(X,Y) -> + log(emergency,X,Y). +emergency(X,Y,Z) -> + log(emergency,X,Y,Z). + +alert(X) -> + log(alert,X). +alert(X,Y) -> + log(alert,X,Y). +alert(X,Y,Z) -> + log(alert,X,Y,Z). + +critical(X) -> + log(critical,X). +critical(X,Y) -> + log(critical,X,Y). +critical(X,Y,Z) -> + log(critical,X,Y,Z). + +error(X) -> + log(error,X). +error(X,Y) -> + log(error,X,Y). +error(X,Y,Z) -> + log(error,X,Y,Z). + +warning(X) -> + log(warning,X). +warning(X,Y) -> + log(warning,X,Y). +warning(X,Y,Z) -> + log(warning,X,Y,Z). + +notice(X) -> + log(notice,X). +notice(X,Y) -> + log(notice,X,Y). +notice(X,Y,Z) -> + log(notice,X,Y,Z). + +info(X) -> + log(info,X). +info(X,Y) -> + log(info,X,Y). +info(X,Y,Z) -> + log(info,X,Y,Z). + +debug(X) -> + log(debug,X). +debug(X,Y) -> + log(debug,X,Y). +debug(X,Y,Z) -> + log(debug,X,Y,Z). + +-spec log(Level,StringOrReport) -> ok when + Level :: level(), + StringOrReport :: unicode:chardata() | report(). +log(Level, StringOrReport) -> + do_log(Level,StringOrReport,#{}). + +-spec log(Level,StringOrReport,Metadata) -> ok when + Level :: level(), + StringOrReport :: unicode:chardata() | report(), + Metadata :: metadata(); + (Level,Format,Args) -> ok when + Level :: level(), + Format :: io:format(), + Args ::[term()]; + (Level,Fun,FunArgs) -> ok when + Level :: level(), + Fun :: msg_fun(), + FunArgs :: term(). +log(Level, StringOrReport, Metadata) + when is_map(Metadata), not is_function(StringOrReport) -> + do_log(Level,StringOrReport,Metadata); +log(Level, FunOrFormat, Args) -> + do_log(Level,{FunOrFormat,Args},#{}). + +-spec log(Level,Format, Args, Metadata) -> ok when + Level :: level(), + Format :: io:format(), + Args :: [term()], + Metadata :: metadata(); + (Level,Fun,FunArgs,Metadata) -> ok when + Level :: level(), + Fun :: msg_fun(), + FunArgs :: term(), + Metadata :: metadata(). +log(Level, FunOrFormat, Args, Metadata) -> + do_log(Level,{FunOrFormat,Args},Metadata). + +-spec allow(Level,Module) -> boolean() when + Level :: level(), + Module :: module(). +allow(Level,Module) when ?IS_LEVEL(Level), is_atom(Module) -> + logger_config:allow(?LOGGER_TABLE,Level,Module). + + +-spec macro_log(Location,Level,StringOrReport) -> ok when + Location :: map(), + Level :: level(), + StringOrReport :: unicode:chardata() | report(). +macro_log(Location,Level,StringOrReport) -> + log_allowed(Location,Level,StringOrReport,#{}). 
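%% -----------------------------------------------------------------
%% [Editor's illustration, not part of this patch] The log/2,3,4
%% specs above accept a string, a report map, a format string with
%% arguments, or a fun producing one of these. A hypothetical helper
%% showing equivalent call forms (message contents are invented):
editor_example_log_forms() ->
    Peer = node(),
    ok = logger:log(error, "connection lost"),
    ok = logger:log(error, #{what => connection_lost, peer => Peer}),
    ok = logger:log(error, "lost connection to ~p", [Peer]),
    ok = logger:log(error, fun(P) -> {"lost connection to ~p", [P]} end, Peer),
    ok = logger:log(error, "connection lost", #{domain => [my_app]}).
%% -----------------------------------------------------------------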
+ +-spec macro_log(Location,Level,StringOrReport,Meta) -> ok when + Location :: map(), + Level :: level(), + StringOrReport :: unicode:chardata() | report(), + Meta :: metadata(); + (Location,Level,Format,Args) -> ok when + Location :: map(), + Level :: level(), + Format :: io:format(), + Args ::[term()]; + (Location,Level,Fun,FunArgs) -> ok when + Location :: map(), + Level :: level(), + Fun :: msg_fun(), + FunArgs :: term(). +macro_log(Location,Level,StringOrReport,Meta) + when is_map(Meta), not is_function(StringOrReport) -> + log_allowed(Location,Level,StringOrReport,Meta); +macro_log(Location,Level,FunOrFormat,Args) -> + log_allowed(Location,Level,{FunOrFormat,Args},#{}). + +-spec macro_log(Location,Level,Format,Args,Meta) -> ok when + Location :: map(), + Level :: level(), + Format :: io:format(), + Args ::[term()], + Meta :: metadata(); + (Location,Level,Fun,FunArgs,Meta) -> ok when + Location :: map(), + Level :: level(), + Fun :: msg_fun(), + FunArgs :: term(), + Meta :: metadata(). +macro_log(Location,Level,FunOrFormat,Args,Meta) -> + log_allowed(Location,Level,{FunOrFormat,Args},Meta). + +-spec format_otp_report(Report) -> FormatArgs when + Report :: report(), + FormatArgs :: {io:format(),[term()]}. +format_otp_report(#{label:=_,report:=Report}) -> + format_report(Report); +format_otp_report(Report) -> + format_report(Report). + +-spec format_report(Report) -> FormatArgs when + Report :: report(), + FormatArgs :: {io:format(),[term()]}. +format_report(Report) when is_map(Report) -> + format_report(maps:to_list(Report)); +format_report(Report) when is_list(Report) -> + case lists:flatten(Report) of + [] -> + {"~tp",[[]]}; + FlatList -> + case string_p1(FlatList) of + true -> + {"~ts",[FlatList]}; + false -> + format_term_list(Report,[],[]) + end + end; +format_report(Report) -> + {"~tp",[Report]}. + +format_term_list([{Tag,Data}|T],Format,Args) -> + PorS = case string_p(Data) of + true -> "s"; + false -> "p" + end, + format_term_list(T,[" ~tp: ~t"++PorS|Format],[Data,Tag|Args]); +format_term_list([Data|T],Format,Args) -> + format_term_list(T,[" ~tp"|Format],[Data|Args]); +format_term_list([],Format,Args) -> + {lists:flatten(lists:join($\n,lists:reverse(Format))),lists:reverse(Args)}. + +string_p(List) when is_list(List) -> + string_p1(lists:flatten(List)); +string_p(_) -> + false. + +string_p1([]) -> + false; +string_p1(FlatList) -> + io_lib:printable_unicode_list(FlatList). + +internal_log(Level,Term) when is_atom(Level) -> + erlang:display_string("Logger - "++ atom_to_list(Level) ++ ": "), + erlang:display(Term). + +%%%----------------------------------------------------------------- +%%% Configuration +-spec add_logger_filter(FilterId,Filter) -> ok | {error,term()} when + FilterId :: filter_id(), + Filter :: filter(). +add_logger_filter(FilterId,Filter) -> + logger_server:add_filter(logger,{FilterId,Filter}). + +-spec add_handler_filter(HandlerId,FilterId,Filter) -> ok | {error,term()} when + HandlerId :: handler_id(), + FilterId :: filter_id(), + Filter :: filter(). +add_handler_filter(HandlerId,FilterId,Filter) -> + logger_server:add_filter(HandlerId,{FilterId,Filter}). + + +-spec remove_logger_filter(FilterId) -> ok | {error,term()} when + FilterId :: filter_id(). +remove_logger_filter(FilterId) -> + logger_server:remove_filter(logger,FilterId). + +-spec remove_handler_filter(HandlerId,FilterId) -> ok | {error,term()} when + HandlerId :: handler_id(), + FilterId :: filter_id(). +remove_handler_filter(HandlerId,FilterId) -> + logger_server:remove_filter(HandlerId,FilterId). 
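%% -----------------------------------------------------------------
%% [Editor's illustration, not part of this patch] A sketch of how
%% the filter API above might be used together with the domain filter
%% from logger_filters (added later in this patch). The filter id
%% stop_sasl_domain is invented for the example.
editor_example_filters() ->
    ok = logger:add_logger_filter(
           stop_sasl_domain,
           {fun logger_filters:domain/2,
            {stop, prefix_of, [beam,erlang,otp,sasl]}}),
    ok = logger:remove_logger_filter(stop_sasl_domain).
%% -----------------------------------------------------------------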
+ +-spec add_handler(HandlerId,Module,Config) -> ok | {error,term()} when + HandlerId :: handler_id(), + Module :: module(), + Config :: config(). +add_handler(HandlerId,Module,Config) -> + logger_server:add_handler(HandlerId,Module,Config). + +-spec remove_handler(HandlerId) -> ok | {error,term()} when + HandlerId :: handler_id(). +remove_handler(HandlerId) -> + logger_server:remove_handler(HandlerId). + +-spec set_logger_config(Key,Value) -> ok | {error,term()} when + Key :: atom(), + Value :: term(). +set_logger_config(Key,Value) -> + logger_server:set_config(logger,Key,Value). + +-spec set_logger_config(Config) -> ok | {error,term()} when + Config :: config(). +set_logger_config(Config) -> + logger_server:set_config(logger,Config). + +-spec set_handler_config(HandlerId,Key,Value) -> ok | {error,term()} when + HandlerId :: handler_id(), + Key :: atom(), + Value :: term(). +set_handler_config(HandlerId,Key,Value) -> + logger_server:set_config(HandlerId,Key,Value). + +-spec set_handler_config(HandlerId,Config) -> ok | {error,term()} when + HandlerId :: handler_id(), + Config :: config(). +set_handler_config(HandlerId,Config) -> + logger_server:set_config(HandlerId,Config). + +-spec get_logger_config() -> {ok,Config} when + Config :: config(). +get_logger_config() -> + logger_config:get(?LOGGER_TABLE,logger). + +-spec get_handler_config(HandlerId) -> {ok,{Module,Config}} | {error,term()} when + HandlerId :: handler_id(), + Module :: module(), + Config :: config(). +get_handler_config(HandlerId) -> + logger_config:get(?LOGGER_TABLE,HandlerId). + +-spec set_module_level(Module,Level) -> ok | {error,term()} when + Module :: module(), + Level :: level(). +set_module_level(Module,Level) -> + logger_server:set_module_level(Module,Level). + +-spec reset_module_level(Module) -> ok | {error,term()} when + Module :: module(). +reset_module_level(Module) -> + logger_server:reset_module_level(Module). + +%%%----------------------------------------------------------------- +%%% Misc +-spec compare_levels(Level1,Level2) -> eq | gt | lt when + Level1 :: level(), + Level2 :: level(). +compare_levels(Level,Level) when ?IS_LEVEL(Level) -> + eq; +compare_levels(Level1,Level2) when ?IS_LEVEL(Level1), ?IS_LEVEL(Level2) -> + Int1 = logger_config:level_to_int(Level1), + Int2 = logger_config:level_to_int(Level2), + if Int1 < Int2 -> gt; + true -> lt + end; +compare_levels(Level1,Level2) -> + erlang:error(badarg,[Level1,Level2]). + +-spec set_process_metadata(Meta) -> ok when + Meta :: metadata(). +set_process_metadata(Meta) when is_map(Meta) -> + _ = put(?LOGGER_META_KEY,Meta), + ok; +set_process_metadata(Meta) -> + erlang:error(badarg,[Meta]). + +-spec get_process_metadata() -> Meta | undefined when + Meta :: metadata(). +get_process_metadata() -> + get(?LOGGER_META_KEY). + +-spec unset_process_metadata() -> ok. +unset_process_metadata() -> + _ = erase(?LOGGER_META_KEY), + ok. + +-spec i() -> #{logger=>config(), + handlers=>[{handler_id(),module(),config()}], + module_levels=>[{module(),level()}]}. +i() -> + i(term). + +-spec i(term) -> #{logger=>config(), + handlers=>[{handler_id(),module(),config()}], + module_levels=>[{module(),level()}]}; + (print) -> ok; + (string) -> iolist(). 
+i(_Action = print) -> + io:put_chars(i(string)); +i(_Action = string) -> + #{logger := #{level := Level, handlers := Handlers, + filters := Filters, filter_default := FilterDefault}, + handlers := HandlerConfigs, + module_levels := Modules} = i(term), + [io_lib:format("Current logger configuration:~n", []), + io_lib:format(" Level: ~p~n",[Level]), + io_lib:format(" Filter Default: ~p~n", [FilterDefault]), + io_lib:format(" Filters: ~n", []), + print_filters(4, Filters), + io_lib:format(" Handlers: ~n", []), + print_handlers([C || {Id, _, _} = C <- HandlerConfigs, + lists:member(Id, Handlers)]), + io_lib:format(" Level set per module: ~n", []), + print_module_levels(Modules) + ]; +i(_Action = term) -> + {Logger, Handlers, Modules} = logger_config:get(tid()), + #{logger=>Logger, + handlers=>Handlers, + module_levels=>Modules}. + +print_filters(Indent, {Id, {Fun, Config}}) -> + io_lib:format("~sId: ~p~n" + "~s Fun: ~p~n" + "~s Config: ~p~n",[Indent, Id, Indent, Fun, Indent, Config]); +print_filters(Indent, Filters) -> + IndentStr = io_lib:format("~.*s",[Indent, ""]), + lists:map(fun(Filter) ->print_filters(IndentStr, Filter) end, Filters). + + +print_handlers({Id,Module, + #{level := Level, + filters := Filters, filter_default := FilterDefault, + formatter := {FormatterModule,FormatterConfig}} = Config}) -> + MyKeys = [filter_default, filters, formatter, level, id], + UnhandledConfig = maps:filter(fun(Key, _) -> + not lists:member(Key, MyKeys) + end, Config), + Unhandled = lists:map(fun({Key, Value}) -> + io_lib:format(" ~p: ~p~n",[Key, Value]) + end, maps:to_list(UnhandledConfig)), + io_lib:format(" Id: ~p~n" + " Module: ~p~n" + " Level: ~p~n" + " Formatter:~n" + " Module: ~p~n" + " Config: ~p~n" + " Filter Default: ~p~n" + " Filters:~n~s" + " Handler Config:~n" + "~s" + "",[Id, Module, Level, FormatterModule, FormatterConfig, + FilterDefault, print_filters(8, Filters), Unhandled]); +print_handlers(Handlers) -> + lists:map(fun print_handlers/1, Handlers). + +print_module_levels({Module,Level}) -> + io_lib:format(" Module: ~p~n" + " Level: ~p~n", + [Module,Level]); +print_module_levels(ModuleLevels) -> + lists:map(fun print_module_levels/1, ModuleLevels). + +-spec setup_standard_handler() -> ok | {error,term()}. +setup_standard_handler() -> + case get_logger_type() of + {ok,silent} -> + Level = get_logger_level(), + ok = set_logger_config(level,Level), + remove_handler(logger_simple); + {ok,Type} -> + Level = get_logger_level(), + ok = set_logger_config(level,Level), + Filters = get_logger_filters(), + setup_standard_handler(Type,#{level=>Level, + filter_default=>stop, + filters=>Filters}); + Error -> + Error + end. + +-spec setup_standard_handler(Type,Config) -> ok | {error,term()} when + Type :: tty | standard_io | standard_error | {file,File} | + {file,File,Modes} | {disk_log,LogOpts} | false, + File :: file:filename(), + Modes :: [term()], % [file:mode()], or more specific? + Config :: config(), + LogOpts :: map(). +setup_standard_handler(false,#{level:=Level,filters:=Filters}) -> + case set_handler_config(logger_simple,level,Level) of + ok -> + set_handler_config(logger_simple,filters,Filters); + Error -> + Error + end; +setup_standard_handler(Type,Config) -> + {Module,TypeConfig} = get_type_config(Type), + replace_simple_handler(?STANDARD_HANDLER, + Module, + maps:merge(Config,TypeConfig)). + +-spec replace_simple_handler(Id,Module,Config) -> ok | {error,term()} when + Id :: handler_id(), + Module :: module(), + Config :: config(). 
+replace_simple_handler(Id,Module,Config) -> + _ = code:ensure_loaded(Module), + DoBuffer = erlang:function_exported(Module,swap_buffer,2), + case add_handler(Id,Module,Config#{wait_for_buffer=>DoBuffer}) of + ok -> + if DoBuffer -> + {ok,Buffered} = logger_simple:get_buffer(), + _ = remove_handler(logger_simple), + Module:swap_buffer(?STANDARD_HANDLER,Buffered); + true -> + _ = remove_handler(logger_simple), + ok + end, + ok; + Error -> + Error + end. + +get_logger_type() -> + Type0 = + case application:get_env(kernel, logger_dest) of + undefined -> + application:get_env(kernel, error_logger); + T -> + T + end, + case Type0 of + {ok, tty} -> + {ok, tty}; + {ok, {file, File}} when is_list(File) -> + {ok, {file, File}}; + {ok, {file, File, Modes}} when is_list(File), is_list(Modes) -> + {ok, {file, File, Modes}}; + {ok, {disk_log, File}} when is_list(File) -> + {ok, {disk_log, get_disk_log_config(File)}}; + {ok, false} -> + {ok, false}; + {ok, silent} -> + {ok, silent}; + undefined -> + {ok, tty}; % default value + {ok, Bad} -> + {error,{bad_config, {kernel, {logger_dest, Bad}}}} + end. + +get_disk_log_config(File) -> + Config1 = + case application:get_env(kernel,logger_disk_log_maxfiles) of + undefined -> #{}; + {ok,MF} -> #{max_no_files=>MF} + end, + Config2 = + case application:get_env(kernel,logger_disk_log_maxbytes) of + undefined -> Config1; + {ok,MB} -> Config1#{max_no_bytes=>MB} + end, + Config3 = + case application:get_env(kernel,logger_disk_log_type) of + undefined -> Config2; + {ok,T} -> Config1#{type=>T} + end, + Config3#{file=>File}. + +get_logger_level() -> + case application:get_env(kernel,logger_level) of + undefined -> info; + {ok,Level} when ?IS_LEVEL(Level) -> Level + end. + +get_logger_filters() -> + case application:get_env(kernel, logger_sasl_compatible, false) of + true -> + ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp]); + false -> + Extra = + case application:get_env(kernel, logger_log_progress, false) of + true -> + []; + false -> + [{stop_progress, + {fun logger_filters:progress/2,stop}}] + end, + Extra ++ ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp,sasl]) + end. + +get_type_config({disk_log,LogOpts}) -> + {logger_disk_log_h,#{disk_log_opts=>LogOpts}}; +get_type_config(tty) -> + %% This is only for backwards compatibility with error_logger and + %% old kernel and sasl environment variables + get_type_config(standard_io); +get_type_config(Type) when Type==standard_io; + Type==standard_error; + element(1,Type)==file -> + {logger_std_h,#{logger_std_h=>#{type=>Type}}}; +get_type_config(Type) -> + {error,{illegal_logger_type,Type}}. + +%%%----------------------------------------------------------------- +-spec limit_term(term()) -> term(). + +limit_term(Term) -> + case get_format_depth() of + unlimited -> Term; + D -> io_lib:limit_term(Term, D) + end. + +-spec get_format_depth() -> 'unlimited' | pos_integer(). + +get_format_depth() -> + Depth = + case application:get_env(kernel, logger_format_depth) of + {ok, D} when is_integer(D) -> + D; + undefined -> + case application:get_env(kernel, error_logger_format_depth) of + {ok, D} when is_integer(D) -> + D; + undefined -> + unlimited + end + end, + max(10, Depth). + +-spec get_max_size() -> 'unlimited' | pos_integer(). + +get_max_size() -> + case application:get_env(kernel, logger_max_size) of + {ok, Size} when is_integer(Size) -> + max(50, Size); + undefined -> + unlimited + end. + +-spec get_utc_config() -> boolean(). 
+ +get_utc_config() -> + %% Kernel's logger_utc configuration overrides SASL utc_log, which + %% in turn overrides stdlib config - in order to have uniform + %% timestamps in log messages + case application:get_env(kernel, logger_utc) of + {ok, Val} -> Val; + undefined -> + case application:get_env(sasl, utc_log) of + {ok, Val} -> Val; + undefined -> + case application:get_env(stdlib, utc_log) of + {ok, Val} -> Val; + undefined -> false + end + end + end. + +%%%----------------------------------------------------------------- +%%% Internal +do_log(warning,Msg,Meta) -> + do_log_1(error_logger:warning_map(),Msg,Meta); +do_log(Level,Msg,Meta) -> + do_log_1(Level,Msg,Meta). + +do_log_1(Level,Msg,#{mfa:={Module,_,_}}=Meta) -> + case logger_config:allow(?LOGGER_TABLE,Level,Module) of + true -> + log_allowed(#{},Level,Msg,Meta); + false -> + ok + end; +do_log_1(Level,Msg,Meta) -> + case logger_config:allow(?LOGGER_TABLE,Level) of + true -> + log_allowed(#{},Level,Msg,Meta); + false -> + ok + end. + +-spec log_allowed(Location,Level,Msg,Meta) -> ok when + Location :: map(), + Level :: level(), + Msg :: {msg_fun(),term()} | + {io:format(),[term()]} | + report() | + unicode:chardata(), + Meta :: metadata(). +log_allowed(Location,Level,{Fun,FunArgs},Meta) when is_function(Fun,1) -> + try Fun(FunArgs) of + Msg={Format,Args} when is_list(Format), is_list(Args) -> + log_allowed(Location,Level,Msg,Meta); + Report when ?IS_REPORT(Report) -> + log_allowed(Location,Level,Report,Meta); + String when ?IS_STRING(String) -> + log_allowed(Location,Level,String,Meta); + Other -> + log_allowed(Location,Level, + {"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{Fun,FunArgs},Other]}, + Meta) + catch C:R -> + log_allowed(Location,Level, + {"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{Fun,FunArgs},{C,R}]}, + Meta) + end; +log_allowed(Location,Level,Msg,Meta0) when is_map(Meta0) -> + %% Metadata priorities are: + %% Location (added in API macros) - will be overwritten by process + %% metadata (set by set_process_metadata/1), which in turn will be + %% overwritten by the metadata given as argument in the log call + %% (function or macro). + Meta = add_default_metadata( + maps:merge(Location,maps:merge(proc_meta(),Meta0))), + case node(maps:get(gl,Meta)) of + Node when Node=/=node() -> + log_remote(Node,Level,Msg,Meta), + do_log_allowed(Level,Msg,Meta); + _ -> + do_log_allowed(Level,Msg,Meta) + end. + +do_log_allowed(Level,{Format,Args}=Msg,Meta) + when ?IS_LEVEL(Level), + is_list(Format), + is_list(Args), + is_map(Meta) -> + logger_backend:log_allowed(#{level=>Level,msg=>Msg,meta=>Meta},tid()); +do_log_allowed(Level,Report,Meta) + when ?IS_LEVEL(Level), + ?IS_REPORT(Report), + is_map(Meta) -> + logger_backend:log_allowed(#{level=>Level,msg=>{report,Report},meta=>Meta}, + tid()); +do_log_allowed(Level,String,Meta) + when ?IS_LEVEL(Level), + ?IS_STRING(String), + is_map(Meta) -> + logger_backend:log_allowed(#{level=>Level,msg=>{string,String},meta=>Meta}, + tid()). +tid() -> + ets:whereis(?LOGGER_TABLE). + +log_remote(Node,Level,{Format,Args},Meta) -> + log_remote(Node,{log,Level,Format,Args,Meta}); +log_remote(Node,Level,Msg,Meta) -> + log_remote(Node,{log,Level,Msg,Meta}). + +log_remote(Node,Request) -> + {logger,Node} ! Request, + ok. + +add_default_metadata(Meta) -> + add_default_metadata([pid,gl,time],Meta). 
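%% -----------------------------------------------------------------
%% [Editor's illustration, not part of this patch] The merge order in
%% log_allowed/4 above means that metadata given in the call overrides
%% process metadata, which in turn overrides the macro's ?LOCATION
%% data; pid, gl and time are then filled in by add_default_metadata.
%% A sketch (key names invented):
editor_example_metadata() ->
    ok = logger:set_process_metadata(#{domain => [my_app], tag => from_process}),
    %% The resulting event metadata has tag => from_call (the call wins),
    %% domain => [my_app] (from the process), plus mfa/line/file from
    %% ?LOCATION and defaulted pid, gl and time.
    ?LOG_INFO("hello", #{tag => from_call}),
    logger:unset_process_metadata().
%% -----------------------------------------------------------------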
+ +add_default_metadata([Key|Keys],Meta) -> + case maps:is_key(Key,Meta) of + true -> + add_default_metadata(Keys,Meta); + false -> + add_default_metadata(Keys,Meta#{Key=>default(Key)}) + end; +add_default_metadata([],Meta) -> + Meta. + +proc_meta() -> + case get_process_metadata() of + ProcMeta when is_map(ProcMeta) -> ProcMeta; + _ -> #{} + end. + +default(pid) -> self(); +default(gl) -> group_leader(); +default(time) -> erlang:monotonic_time(microsecond). + +%% Remove everything upto and including this module from the stacktrace +filter_stacktrace(Module,[{Module,_,_,_}|_]) -> + []; +filter_stacktrace(Module,[H|T]) -> + [H|filter_stacktrace(Module,T)]; +filter_stacktrace(_,[]) -> + []. diff --git a/lib/kernel/src/logger_backend.erl b/lib/kernel/src/logger_backend.erl new file mode 100644 index 0000000000..d9f5aa6faf --- /dev/null +++ b/lib/kernel/src/logger_backend.erl @@ -0,0 +1,133 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_backend). + +-export([log_allowed/2]). + +-include("logger_internal.hrl"). + +-define(OWN_KEYS,[level,filters,filter_default,handlers]). + +%%%----------------------------------------------------------------- +%%% The default logger backend +log_allowed(Log, Tid) -> + {ok,Config} = logger_config:get(Tid,logger), + Filters = maps:get(filters,Config,[]), + case apply_filters(logger,Log,Filters,Config) of + stop -> + ok; + Log1 -> + Handlers = maps:get(handlers,Config,[]), + call_handlers(Log1,Handlers,Tid) + end, + ok. + +call_handlers(#{level:=Level}=Log,[Id|Handlers],Tid) -> + case logger_config:get(Tid,Id,Level) of + {ok,{Module,Config}} -> + Filters = maps:get(filters,Config,[]), + case apply_filters(Id,Log,Filters,Config) of + stop -> + ok; + Log1 -> + Config1 = maps:without(?OWN_KEYS,Config), + try Module:log(Log1,Config1) + catch C:R:S -> + case logger:remove_handler(Id) of + ok -> + logger:internal_log( + error,{removed_failing_handler,Id}), + ?LOG_INTERNAL( + debug, + [{logger,removed_failing_handler}, + {handler,{Id,Module}}, + {log,Log1}, + {config,Config1}, + {reason,{C,R,filter_stacktrace(S)}}]); + {error,{not_found,_}} -> + %% Probably already removed by other client + %% Don't report again + ok; + {error,Reason} -> + ?LOG_INTERNAL( + debug, + [{logger,remove_handler_failed}, + {reason,Reason}]) + end + end + end; + _ -> + ok + end, + call_handlers(Log,Handlers,Tid); +call_handlers(_Log,[],_Tid) -> + ok. + +apply_filters(Owner,Log,Filters,Config) -> + case do_apply_filters(Owner,Log,Filters,ignore) of + stop -> + stop; + ignore -> + case maps:get(filter_default,Config) of + log -> + Log; + stop -> + stop + end; + Log1 -> + Log1 + end. 
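%% -----------------------------------------------------------------
%% [Editor's illustration, not part of this patch] call_handlers/3
%% above ends up calling Module:log(Log,Config) for every handler id
%% attached to the logger configuration. A minimal sketch of such a
%% handler callback module (the module name is invented; it assumes a
%% handler only needs to export log/2, and leaves out overload
%% protection entirely):
%%
%%   -module(my_console_h).
%%   -export([log/2]).
%%
%%   log(Log, #{formatter := {FModule, FConfig}}) ->
%%       io:put_chars(FModule:format(Log, FConfig)).
%%
%% It could be installed with, e.g.:
%%
%%   logger:add_handler(my_console, my_console_h,
%%                      #{level => info,
%%                        filter_default => log,
%%                        filters => [],
%%                        formatter => {logger_formatter, #{}}}).
%% -----------------------------------------------------------------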
+ +do_apply_filters(Owner,Log,[{_Id,{FilterFun,FilterArgs}}=Filter|Filters],State) -> + try FilterFun(Log,FilterArgs) of + stop -> + stop; + ignore -> + do_apply_filters(Owner,Log,Filters,State); + Log1=#{level:=Level,msg:=Msg,meta:=Meta} + when is_atom(Level), ?IS_MSG(Msg), is_map(Meta) -> + do_apply_filters(Owner,Log1,Filters,log); + Bad -> + handle_filter_failed(Filter,Owner,Log,{bad_return_value,Bad}) + catch C:R:S -> + handle_filter_failed(Filter,Owner,Log,{C,R,filter_stacktrace(S)}) + end; +do_apply_filters(_Owner,_Log,[],ignore) -> + ignore; +do_apply_filters(_Owner,Log,[],log) -> + Log. + +handle_filter_failed({Id,_}=Filter,Owner,Log,Reason) -> + case logger_server:remove_filter(Owner,Id) of + ok -> + logger:internal_log(error,{removed_failing_filter,Id}), + ?LOG_INTERNAL(debug, + [{logger,removed_failing_filter}, + {filter,Filter}, + {owner,Owner}, + {log,Log}, + {reason,Reason}]); + _ -> + ok + end, + ignore. + +filter_stacktrace(Stacktrace) -> + logger:filter_stacktrace(?MODULE,Stacktrace). diff --git a/lib/kernel/src/logger_config.erl b/lib/kernel/src/logger_config.erl new file mode 100644 index 0000000000..799aea9617 --- /dev/null +++ b/lib/kernel/src/logger_config.erl @@ -0,0 +1,151 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_config). + +-export([new/1,delete/2, + exist/2, + allow/2,allow/3, + get/2, get/3, get/1, + create/3, create/4, set/3, + set_module_level/3,reset_module_level/2, + cache_module_level/2, + level_to_int/1]). + +-include("logger_internal.hrl"). + +new(Name) -> + _ = ets:new(Name,[set,protected,named_table]), + ets:whereis(Name). + +delete(Tid,Id) -> + ets:delete(Tid,table_key(Id)). + +allow(Tid,Level,Module) -> + LevelInt = level_to_int(Level), + case ets:lookup(Tid,Module) of + [{Module,{ModLevel,cached}}] when is_integer(ModLevel), + LevelInt =< ModLevel -> + true; + [{Module,ModLevel}] when is_integer(ModLevel), + LevelInt =< ModLevel -> + true; + [] -> + logger_server:cache_module_level(Module), + allow(Tid,Level); + _ -> + false + end. + +allow(Tid,Level) -> + GlobalLevelInt = ets:lookup_element(Tid,?LOGGER_KEY,2), + level_to_int(Level) =< GlobalLevelInt. + +exist(Tid,What) -> + ets:member(Tid,table_key(What)). + +get(Tid,What) -> + case ets:lookup(Tid,table_key(What)) of + [{_,_,Config}] -> + {ok,Config}; + [{_,_,Config,Module}] -> + {ok,{Module,Config}}; + [] -> + {error,{not_found,What}} + end. + +get(Tid,What,Level) -> + MS = [{{table_key(What),'$1','$2'}, % logger config + [{'>=','$1',level_to_int(Level)}], + ['$2']}, + {{table_key(What),'$1','$2','$3'}, % handler config + [{'>=','$1',level_to_int(Level)}], + [{{'$3','$2'}}]}], + case ets:select(Tid,MS) of + [] -> error; + [Data] -> {ok,Data} + end. + +create(Tid,What,Module,Config) -> + LevelInt = level_to_int(maps:get(level,Config)), + ets:insert(Tid,{table_key(What),LevelInt,Config,Module}). 
+create(Tid,What,Config) -> + LevelInt = level_to_int(maps:get(level,Config)), + ets:insert(Tid,{table_key(What),LevelInt,Config}). + +set(Tid,What,Config) -> + LevelInt = level_to_int(maps:get(level,Config)), + %% Should do this only if the level has actually changed. Possibly + %% overwrite instead of delete? + case What of + logger -> + _ = ets:select_delete(Tid,[{{'_',{'$1',cached}}, + [{'=/=','$1',LevelInt}], + [true]}]), + ok; + _ -> + ok + end, + ets:update_element(Tid,table_key(What),[{2,LevelInt},{3,Config}]), + ok. + +set_module_level(Tid,Module,Level) -> + ets:insert(Tid,{Module,level_to_int(Level)}), + ok. + +reset_module_level(Tid,Module) -> + ets:delete(Tid,Module), % should possibley overwrite instead of delete? + ok. + +cache_module_level(Tid,Module) -> + GlobalLevelInt = ets:lookup_element(Tid,?LOGGER_KEY,2), + ets:insert_new(Tid,{Module,{GlobalLevelInt,cached}}), + ok. + +get(Tid) -> + {ok,Logger} = get(Tid,logger), + HMS = [{{table_key('$1'),'_','$2','$3'},[],[{{'$1','$3','$2'}}]}], + Handlers = ets:select(Tid,HMS), + MMS = [{{'$1','$2'},[{is_atom,'$1'},{is_integer,'$2'}],[{{'$1','$2'}}]}], + Modules = ets:select(Tid,MMS), + {Logger,Handlers,[{M,int_to_level(L)} || {M,L} <- Modules]}. + +level_to_int(emergency) -> ?EMERGENCY; +level_to_int(alert) -> ?ALERT; +level_to_int(critical) -> ?CRITICAL; +level_to_int(error) -> ?ERROR; +level_to_int(warning) -> ?WARNING; +level_to_int(notice) -> ?NOTICE; +level_to_int(info) -> ?INFO; +level_to_int(debug) -> ?DEBUG. + +int_to_level(?EMERGENCY) -> emergency; +int_to_level(?ALERT) -> alert; +int_to_level(?CRITICAL) -> critical; +int_to_level(?ERROR) -> error; +int_to_level(?WARNING) -> warning; +int_to_level(?NOTICE) -> notice; +int_to_level(?INFO) -> info; +int_to_level(?DEBUG) -> debug. + +%%%----------------------------------------------------------------- +%%% Internal + +table_key(logger) -> ?LOGGER_KEY; +table_key(HandlerId) -> {?HANDLER_KEY,HandlerId}. diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl new file mode 100644 index 0000000000..3b71f936d8 --- /dev/null +++ b/lib/kernel/src/logger_disk_log_h.erl @@ -0,0 +1,694 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_disk_log_h). + +-behaviour(gen_server). + +-include("logger.hrl"). +-include("logger_internal.hrl"). +-include("logger_h_common.hrl"). + +%%% API +-export([start_link/3, info/1, disk_log_sync/1, reset/1]). + +%% gen_server callbacks +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +%% logger callbacks +-export([log/2, + adding_handler/2, removing_handler/1, + changing_config/3, swap_buffer/2]). 
+ +%%%=================================================================== +%%% API +%%%=================================================================== + +%%%----------------------------------------------------------------- +%%% Start a disk_log handler process and link to caller. +%%% This function is called by the kernel supervisor when this +%%% handler process gets added (as a result of calling add/3). +-spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when + Name :: atom(), + Config :: logger:config(), + HandlerState :: map(), + Pid :: pid(), + Reason :: term(). + +start_link(Name, Config, HandlerState) -> + proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]). + +%%%----------------------------------------------------------------- +%%% +-spec disk_log_sync(Name) -> ok | {error,Reason} when + Name :: atom(), + Reason :: handler_busy | {badarg,term()}. + +disk_log_sync(Name) when is_atom(Name) -> + try + gen_server:call(Name, disk_log_sync, ?DEFAULT_CALL_TIMEOUT) + catch + _:{timeout,_} -> {error,handler_busy} + end; +disk_log_sync(Name) -> + {error,{badarg,{disk_log_sync,[Name]}}}. + +%%%----------------------------------------------------------------- +%%% +-spec info(Name) -> Info | {error,Reason} when + Name :: atom(), + Info :: term(), + Reason :: handler_busy | {badarg,term()}. + +info(Name) when is_atom(Name) -> + try + gen_server:call(Name, info, ?DEFAULT_CALL_TIMEOUT) + catch + _:{timeout,_} -> {error,handler_busy} + end; +info(Name) -> + {error,{badarg,{info,[Name]}}}. + +%%%----------------------------------------------------------------- +%%% +-spec reset(Name) -> ok | {error,Reason} when + Name :: atom(), + Reason :: handler_busy | {badarg,term()}. + +reset(Name) when is_atom(Name) -> + try + gen_server:call(Name, reset, ?DEFAULT_CALL_TIMEOUT) + catch + _:{timeout,_} -> {error,handler_busy} + end; +reset(Name) -> + {error,{badarg,{reset,[Name]}}}. + + +%%%=================================================================== +%%% logger callbacks +%%%=================================================================== + +%%%----------------------------------------------------------------- +%%% Handler being added +adding_handler(Name, Config) -> + case check_config(adding, Name, Config) of + {ok, Config1} -> + %% create initial handler state by merging defaults with config + HConfig = maps:get(?MODULE, Config1, #{}), + HState = maps:merge(get_init_state(), HConfig), + case logger_h_common:overload_levels_ok(HState) of + true -> + case start(Name, Config1, HState) of + ok -> + %% Make sure wait_for_buffer is not stored, so we + %% won't hang and wait for buffer on a restart + {ok, maps:remove(wait_for_buffer,Config1)}; + Error -> + Error + end; + false -> + #{toggle_sync_qlen := TSQL, + drop_new_reqs_qlen := DNRQL, + flush_reqs_qlen := FRQL} = HState, + {error,{invalid_levels,{TSQL,DNRQL,FRQL}}} + end; + Error -> + Error + end. + +%%%----------------------------------------------------------------- +%%% Updating handler config +changing_config(Name, + OldConfig=#{id:=Id, disk_log_opts:=DLOpts}, + NewConfig=#{id:=Id, disk_log_opts:=DLOpts}) -> + case check_config(changing, Name, NewConfig) of + Result = {ok,NewConfig1} -> + try gen_server:call(Name, {change_config,OldConfig,NewConfig1}, + ?DEFAULT_CALL_TIMEOUT) of + ok -> Result; + HError -> HError + catch + _:{timeout,_} -> {error,handler_busy} + end; + Error -> + Error + end; +changing_config(_Name, OldConfig, NewConfig) -> + {error,{illegal_config_change,OldConfig,NewConfig}}. 
+ +check_config(adding, Name, Config0) -> + %% Merge in defaults on top level + Config = maps:merge(#{id => Name}, Config0), + %% Merge in defaults on handler level + LogOpts0 = maps:get(disk_log_opts, Config, #{}), + LogOpts = merge_default_logopts(Name, LogOpts0), + case check_log_opts(maps:to_list(LogOpts)) of + ok -> + MyConfig = maps:get(?MODULE, Config, #{}), + case check_my_config(maps:to_list(MyConfig)) of + ok -> + {ok,Config#{disk_log_opts=>LogOpts, + ?MODULE=>MyConfig}}; + Error -> + Error + end; + Error -> + Error + end; +check_config(changing, _Name, Config) -> + MyConfig = maps:get(?MODULE, Config, #{}), + case check_my_config(maps:to_list(MyConfig)) of + ok -> {ok,Config}; + Error -> Error + end. + +merge_default_logopts(Name, LogOpts) -> + Type = maps:get(type, LogOpts, wrap), + {DefaultNoFiles,DefaultNoBytes} = + case Type of + halt -> {undefined,infinity}; + _wrap -> {10,1048576} + end, + {ok,Dir} = file:get_cwd(), + Default = #{file => filename:join(Dir,Name), + max_no_files => DefaultNoFiles, + max_no_bytes => DefaultNoBytes, + type => Type}, + maps:merge(Default,LogOpts). + +check_log_opts([{file,File}|Opts]) when is_list(File) -> + check_log_opts(Opts); +check_log_opts([{max_no_files,undefined}|Opts]) -> + check_log_opts(Opts); +check_log_opts([{max_no_files,N}|Opts]) when is_integer(N), N>0 -> + check_log_opts(Opts); +check_log_opts([{max_no_bytes,infinity}|Opts]) -> + check_log_opts(Opts); +check_log_opts([{max_no_bytes,N}|Opts]) when is_integer(N), N>0 -> + check_log_opts(Opts); +check_log_opts([{type,Type}|Opts]) when Type==wrap; Type==halt -> + check_log_opts(Opts); +check_log_opts([Invalid|_]) -> + {error,{invalid_config,disk_log_opt,Invalid}}; +check_log_opts([]) -> + ok. + +check_my_config([Other | Config]) -> + case logger_h_common:check_common_config(Other) of + valid -> + check_my_config(Config); + invalid -> + {error,{invalid_config,?MODULE,Other}} + end; +check_my_config([]) -> + ok. + +%%%----------------------------------------------------------------- +%%% Handler being removed +removing_handler(Name) -> + stop(Name). + +%%%----------------------------------------------------------------- +%%% Get buffer when swapping from simple handler +swap_buffer(Name,Buffer) -> + case whereis(Name) of + undefined -> + ok; + _ -> + Name ! {buffer,Buffer} + end. + +%%%----------------------------------------------------------------- +%%% Log a string or report +-spec log(Log, Config) -> ok | dropped when + Log :: logger:log(), + Config :: logger:config(). + +log(Log,Config=#{id:=Name}) -> + %% if the handler has crashed, we must drop this request + %% and hope the handler restarts so we can try again + true = is_pid(whereis(Name)), + Bin = logger_h_common:log_to_binary(Log,Config), + logger_h_common:call_cast_or_drop(Name, Bin). 
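%% -----------------------------------------------------------------
%% [Editor's illustration, not part of this patch] A sketch of adding
%% this handler with explicit disk_log options; anything left out of
%% disk_log_opts falls back to the defaults merged in by
%% merge_default_logopts/2 above (a wrap log named after the handler
%% id, 10 files of at most 1048576 bytes each). The handler id and
%% file name are invented.
editor_example_add_disk_log_handler() ->
    logger:add_handler(my_disk_log, logger_disk_log_h,
                       #{level => info,
                         filter_default => log,
                         filters => [],
                         formatter => {logger_formatter, #{}},
                         disk_log_opts => #{file => "log/my_app.log",
                                            type => wrap,
                                            max_no_files => 5,
                                            max_no_bytes => 1048576}}).
%% -----------------------------------------------------------------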
+ + +%%%=================================================================== +%%% gen_server callbacks +%%%=================================================================== + +init([Name, Config = #{disk_log_opts := LogOpts}, + State = #{dl_sync_int := DLSyncInt}]) -> + register(Name, self()), + process_flag(trap_exit, true), + process_flag(message_queue_data, off_heap), + + ?init_test_hooks(), + ?start_observation(Name), + + case open_disk_log(Name, LogOpts) of + ok -> + catch ets:new(Name, [public, named_table]), + ?set_mode(Name, async), + proc_lib:init_ack({ok,self()}), + T0 = ?timestamp(), + State1 = + ?merge_with_stats(State#{id => Name, + mode => async, + dl_sync => DLSyncInt, + log_opts => LogOpts, + last_qlen => 0, + last_log_ts => T0, + burst_win_ts => T0, + burst_msg_count => 0, + prev_log_result => ok, + prev_sync_result => ok, + prev_disk_log_info => undefined}), + gen_server:cast(self(), {repeated_disk_log_sync,T0}), + enter_loop(Config, State1); + Error -> + logger_h_common:error_notify({open_disk_log,Name,Error}), + proc_lib:init_ack(Error) + end. + +enter_loop(#{wait_for_buffer:=true}=Config,State) -> + State1 = + receive + {buffer,Buffer} -> + lists:foldl( + fun(Log,S) -> + Bin = logger_h_common:log_to_binary(Log,Config), + {_,S1} = do_log(Bin,cast,S), + S1 + end, + State, + Buffer) + end, + gen_server:enter_loop(?MODULE,[],State1); +enter_loop(_Config,State) -> + gen_server:enter_loop(?MODULE,[],State). + +%% This is the synchronous log request. +handle_call({log, Bin}, _From, State) -> + {Result,State1} = do_log(Bin, call, State), + %% Result == ok | dropped + {reply, Result, State1}; + +handle_call(disk_log_sync, _From, State = #{id := Name}) -> + State1 = #{prev_sync_result := Result} = disk_log_sync(Name, State), + {reply, Result, State1}; + +handle_call({change_config,_OldConfig,NewConfig}, _From, + State = #{filesync_repeat_interval := FSyncInt0, + last_log_ts := LastLogTS}) -> + HConfig = maps:get(?MODULE, NewConfig, #{}), + State1 = #{toggle_sync_qlen := TSQL, + drop_new_reqs_qlen := DNRQL, + flush_reqs_qlen := FRQL} = maps:merge(State, HConfig), + case logger_h_common:overload_levels_ok(State1) of + true -> + _ = + case maps:get(filesync_repeat_interval, HConfig, undefined) of + undefined -> + ok; + no_repeat -> + _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, + State, + undefined)); + FSyncInt0 -> + ok; + _FSyncInt1 -> + _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, + State, + undefined)), + _ = gen_server:cast(self(), {repeated_disk_log_sync, + LastLogTS}) + end, + {reply, ok, State1}; + false -> + {reply, {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}, State} + end; + +handle_call(info, _From, State) -> + {reply, State, State}; + +handle_call(reset, _From, State) -> + State1 = ?merge_with_stats(State), + {reply, ok, State1#{last_qlen => 0, + last_log_ts => ?timestamp(), + prev_log_result => ok, + prev_sync_result => ok, + prev_disk_log_info => undefined}}; + +handle_call(stop, _From, State) -> + {stop, {shutdown,stopped}, ok, State}. + + +%% This is the asynchronous log request. +handle_cast({log, Bin}, State) -> + {_,State1} = do_log(Bin, cast, State), + {noreply, State1}; + +%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this +%% clause gets called repeatedly by the handler. In order to +%% guarantee that a filesync *always* happens after the last log +%% request, the repeat operation must be active! 
+handle_cast({repeated_disk_log_sync,LastLogTS0}, + State = #{id := Name, + filesync_repeat_interval := FSyncInt, + last_log_ts := LastLogTS1}) -> + State1 = + if is_integer(FSyncInt) -> + %% only do filesync if something has been + %% written since last time we checked + NewState = if LastLogTS1 == LastLogTS0 -> + State; + true -> + disk_log_sync(Name, State) + end, + {ok,TRef} = + timer:apply_after(FSyncInt, gen_server,cast, + [self(), + {repeated_disk_log_sync,LastLogTS1}]), + NewState#{rep_sync_tref => TRef}; + true -> + State + end, + {noreply,State1}. + +%% The disk log owner must handle status messages from disk_log. +handle_info({disk_log, _Node, _Log, {wrap,_NoLostItems}}, State) -> + {noreply, State}; +handle_info({disk_log, _Node, Log, Info = {truncated,_NoLostItems}}, + State = #{id := Name, prev_disk_log_info := PrevInfo}) -> + error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}), + {noreply, State#{prev_disk_log_info => Info}}; +handle_info({disk_log, _Node, Log, Info = {blocked_log,_Items}}, + State = #{id := Name, prev_disk_log_info := PrevInfo}) -> + error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}), + {noreply, State#{prev_disk_log_info => Info}}; +handle_info({disk_log, _Node, Log, full}, + State = #{id := Name, prev_disk_log_info := PrevInfo}) -> + error_notify_new(full, PrevInfo, {disk_log,Name,Log,full}), + {noreply, State#{prev_disk_log_info => full}}; +handle_info({disk_log, _Node, Log, Info = {error_status,_Status}}, + State = #{id := Name, prev_disk_log_info := PrevInfo}) -> + error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}), + {noreply, State#{prev_disk_log_info => Info}}; + +handle_info({'EXIT',_Pid,_Why}, State = #{id := _Name}) -> + {noreply, State}; + +handle_info(_, State) -> + {noreply, State}. + +terminate(Reason, State = #{id := Name}) -> + _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State, + undefined)), + _ = close_disk_log(Name, normal), + logger_h_common:stop_or_restart(Name, Reason, State). + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. + +%%%----------------------------------------------------------------- +%%% Internal functions + +%%%----------------------------------------------------------------- +%%% +get_init_state() -> + #{toggle_sync_qlen => ?TOGGLE_SYNC_QLEN, + drop_new_reqs_qlen => ?DROP_NEW_REQS_QLEN, + flush_reqs_qlen => ?FLUSH_REQS_QLEN, + enable_burst_limit => ?ENABLE_BURST_LIMIT, + burst_limit_size => ?BURST_LIMIT_SIZE, + burst_window_time => ?BURST_WINDOW_TIME, + enable_kill_overloaded => ?ENABLE_KILL_OVERLOADED, + handler_overloaded_qlen => ?HANDLER_OVERLOADED_QLEN, + handler_overloaded_mem => ?HANDLER_OVERLOADED_MEM, + handler_restart_after => ?HANDLER_RESTART_AFTER, + dl_sync_int => ?CONTROLLER_SYNC_INTERVAL, + filesync_ok_qlen => ?FILESYNC_OK_QLEN, + filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}. + +%%%----------------------------------------------------------------- +%%% Add a disk_log handler to the logger. +%%% This starts a dedicated handler process which should always +%%% exist if the handler is registered with logger (and should not +%%% exist if the handler is not registered). +%%% +%%% Config is the logger:config() map containing a sub map with any of +%%% the following associations: +%%% +%%% Config = #{disk_log_opts => #{file => file:filename(), +%%% max_no_bytes => integer(), +%%% max_no_files => integer(), +%%% type => wrap | halt}}. +%%% +%%% This map will be merged with the logger configuration data for +%%% the disk_log LogName. 
If type == halt, then max_no_files is +%%% ignored. +%%% +%%% Handler specific config should be provided with a sub map associated +%%% with a key named the same as this module, e.g: +%%% +%%% Config = #{logger_disk_log_h => #{toggle_sync_qlen => 50} +%%% +%%% The disk_log handler process is linked to logger_sup, which is +%%% part of the kernel application's supervision tree. +start(Name, Config, HandlerState) -> + LoggerDLH = + #{id => Name, + start => {?MODULE, start_link, [Name,Config,HandlerState]}, + restart => temporary, + shutdown => 2000, + type => worker, + modules => [?MODULE]}, + case supervisor:start_child(logger_sup, LoggerDLH) of + {ok,_} -> + ok; + Error -> + Error + end. + +%%%----------------------------------------------------------------- +%%% Stop and remove the handler. +stop(Name) -> + case whereis(Name) of + undefined -> + ok; + _ -> + %% We don't want to do supervisor:terminate_child here + %% since we need to distinguish this explicit stop from a + %% system termination in order to avoid circular attempts + %% at removing the handler (implying deadlocks and + %% timeouts). + _ = gen_server:call(Name,stop), + _ = supervisor:delete_child(logger_sup, Name), + ok + end. + +%%%----------------------------------------------------------------- +%%% Logging and overload control. +-define(update_dl_sync(C, Interval), + if C == 0 -> Interval; + true -> C-1 end). + +%% check for overload between every request (and set Mode to async, +%% sync or drop accordingly), but never flush the whole mailbox +%% before LogWindowSize requests have been handled +do_log(Bin, CallOrCast, State = #{id:=Name, mode := _Mode0}) -> + T1 = ?timestamp(), + + %% check if the handler is getting overloaded, or if it's + %% recovering from overload (the check must be done for each + %% request to react quickly to large bursts of requests and + %% to ensure that the handler can never end up in drop mode + %% with an empty mailbox, which would stop operation) + {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State), + + %% kill the handler if it can't keep up with the load + logger_h_common:kill_if_choked(Name, QLen, Mem, State), + + if Mode1 == flush -> + flush(Name, QLen, T1, State1); + true -> + write(Name, Mode1, T1, Bin, CallOrCast, State1) + end. + +%% this function is called by do_log/3 after an overload check +%% has been performed, where QLen > FlushQLen +flush(Name, _QLen0, T1, State=#{last_log_ts := _T0}) -> + %% flush messages in the mailbox (a limited number in + %% order to not cause long delays) + _NewFlushed = logger_h_common:flush_log_requests(?FLUSH_MAX_N), + + %% because of the receive loop when flushing messages, the + %% handler will be scheduled out often and the mailbox could + %% grow very large, so we'd better check the queue again here + {_,_QLen1} = process_info(self(), message_queue_len), + ?observe(Name,{max_qlen,_QLen1}), + + %% Add 1 for the current log request + ?observe(Name,{flushed,_NewFlushed+1}), + + State1 = ?update_max_time(?diff_time(T1,_T0),State), + {dropped,?update_other(flushed,FLUSHED,_NewFlushed, + State1#{mode => ?set_mode(Name,async), + last_qlen => 0, + last_log_ts => T1})}. 
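%% -----------------------------------------------------------------
%% [Editor's illustration, not part of this patch] The overload
%% thresholds consulted by do_log/3 come from get_init_state/0,
%% overridden by a sub-map keyed on this module in the handler config
%% (see the comment above start/3). A sketch of raising them; the
%% values are invented, and combinations the handler considers
%% inconsistent are rejected with {error,{invalid_levels,...}} as
%% shown in adding_handler/2.
editor_example_tuned_handler() ->
    logger:add_handler(my_disk_log, logger_disk_log_h,
                       #{disk_log_opts => #{file => "log/my_app.log"},
                         logger_disk_log_h =>
                             #{toggle_sync_qlen => 50,
                               drop_new_reqs_qlen => 500,
                               flush_reqs_qlen => 2000}}).
%% -----------------------------------------------------------------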
+ +%% this function is called to write to disk_log +write(Name, Mode, T1, Bin, _CallOrCast, + State = #{dl_sync := DLSync, + dl_sync_int := DLSyncInt, + last_qlen := LastQLen, + last_log_ts := T0}) -> + %% check if we need to limit the number of writes + %% during a burst of log requests + {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State), + + %% only send a synhrounous request to the disk_log process + %% every DLSyncInt time, to give the handler time between + %% writes so it can keep up with incoming messages + {Status,LastQLen1,State1} = + if DoWrite, DLSync == 0 -> + ?observe(Name,{_CallOrCast,1}), + NewState = disk_log_write(Name, Bin, State), + {ok, element(2,process_info(self(),message_queue_len)), + NewState}; + DoWrite -> + ?observe(Name,{_CallOrCast,1}), + NewState = disk_log_write(Name, Bin, State), + {ok, LastQLen, NewState}; + not DoWrite -> + ?observe(Name,{flushed,1}), + {dropped, LastQLen, State} + end, + + %% Check if the time since the previous log request is long enough - + %% and the queue length small enough - to assume the mailbox has + %% been emptied, and if so, do filesync operation and reset mode to + %% async. Note that this is the best we can do to detect an idle + %% handler without setting a timer after each log call/cast. If the + %% time between two consecutive log requests is fast and no new + %% request comes in after the last one, idle state won't be detected! + Time = ?diff_time(T1,T0), + {Mode1,BurstMsgCount1,State2} = + if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso + (Time > ?IDLE_DETECT_TIME_USEC) -> + {?change_mode(Name,Mode,async), 0, disk_log_sync(Name,State1)}; + true -> + {Mode, BurstMsgCount,State1} + end, + + State3 = + ?update_calls_or_casts(_CallOrCast,1,State2), + State4 = + ?update_max_time(Time, + State3#{mode => Mode1, + last_qlen := LastQLen1, + last_log_ts => T1, + burst_win_ts => BurstWinT, + burst_msg_count => BurstMsgCount1, + dl_sync => ?update_dl_sync(DLSync,DLSyncInt)}), + {Status,State4}. + + +open_disk_log(Name, LogOpts) -> + #{file := File, + max_no_bytes := MaxNoBytes, + max_no_files := MaxNoFiles, + type := Type} = LogOpts, + case filelib:ensure_dir(File) of + ok -> + Size = + if Type == halt -> MaxNoBytes; + Type == wrap -> {MaxNoBytes,MaxNoFiles} + end, + Opts = [{name, Name}, + {file, File}, + {size, Size}, + {type, Type}, + {linkto, self()}, + {repair, false}, + {format, external}, + {notify, true}, + {quiet, true}, + {mode, read_write}], + case disk_log:open(Opts) of + {ok,Name} -> + ok; + Error = {error,_Reason} -> + Error + end; + Error -> + Error + end. + +close_disk_log(Name, _) -> + _ = ?disk_log_sync(Name), + _ = disk_log:lclose(Name), + ok. + +disk_log_write(Name, Bin, State) -> + Result = + case ?disk_log_blog(Name, Bin) of + ok -> + ok; + LogError -> + _ = case maps:get(prev_log_result, State) of + LogError -> + %% don't report same error twice + ok; + _ -> + LogOpts = maps:get(log_opts, State), + logger_h_common:error_notify({Name,log, + LogOpts, + LogError}) + end, + LogError + end, + State#{prev_log_result => Result}. + +disk_log_sync(Name, State) -> + Result = + case ?disk_log_sync(Name) of + ok -> + ok; + SyncError -> + _ = case maps:get(prev_sync_result, State) of + SyncError -> + %% don't report same error twice + ok; + _ -> + LogOpts = maps:get(log_opts, State), + logger_h_common:error_notify({Name,sync, + LogOpts, + SyncError}) + end, + SyncError + end, + State#{prev_sync_result => Result}. 
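For reference, the disk_log calls hidden behind the ?disk_log_blog and ?disk_log_sync macros above are the ordinary disk_log API. A stand-alone sketch of the same open/write/sync sequence; the log name, file location and size limits are made up.

    -module(disk_log_sketch).
    -export([run/1]).

    %% Open a wrap log, append one binary with blog/2, force it to disk
    %% with sync/1, then close the log.
    run(Dir) ->
        File = filename:join(Dir, "sketch.log"),
        ok = filelib:ensure_dir(File),
        {ok, Log} = disk_log:open([{name, sketch_log},
                                   {file, File},
                                   {type, wrap},
                                   {size, {1048576, 5}},   %% 1 MB x 5 files
                                   {format, external}]),
        ok = disk_log:blog(Log, <<"hello disk_log\n">>),
        ok = disk_log:sync(Log),
        disk_log:close(Log).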
+ +error_notify_new(Info,Info, _Term) -> + ok; +error_notify_new(_Info0,_Info1, Term) -> + logger_h_common:error_notify(Term). diff --git a/lib/kernel/src/logger_filters.erl b/lib/kernel/src/logger_filters.erl new file mode 100644 index 0000000000..85928f0fd6 --- /dev/null +++ b/lib/kernel/src/logger_filters.erl @@ -0,0 +1,123 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_filters). + +-export([domain/2, + level/2, + progress/2, + remote_gl/2]). + +-include("logger_internal.hrl"). +-define(IS_ACTION(A), (A==log orelse A==stop)). + +-spec domain(Log,Extra) -> logger:filter_return() when + Log :: logger:log(), + Extra :: {Action,Compare,MatchDomain}, + Action :: log | stop, + Compare :: prefix_of | starts_with | equals | no_domain, + MatchDomain :: list(atom()). +domain(#{meta:=Meta}=Log,{Action,Compare,MatchDomain}) + when ?IS_ACTION(Action) andalso + (Compare==prefix_of orelse + Compare==starts_with orelse + Compare==equals orelse + Compare==no_domain) andalso + is_list(MatchDomain) -> + filter_domain(Compare,Meta,MatchDomain,on_match(Action,Log)); +domain(Log,Extra) -> + erlang:error(badarg,[Log,Extra]). + +-spec level(Log,Extra) -> logger:filter_return() when + Log :: logger:log(), + Extra :: {Action,Operator,MatchLevel}, + Action :: log | stop, + Operator :: neq | eq | lt | gt | lteq | gteq, + MatchLevel :: logger:level(). +level(#{level:=L1}=Log,{Action,Op,L2}) + when ?IS_ACTION(Action) andalso + (Op==neq orelse + Op==eq orelse + Op==lt orelse + Op==gt orelse + Op==lteq orelse + Op==gteq) andalso + ?IS_LEVEL(L2) -> + filter_level(Op,L1,L2,on_match(Action,Log)); +level(Log,Extra) -> + erlang:error(badarg,[Log,Extra]). + +-spec progress(Log,Extra) -> logger:filter_return() when + Log :: logger:log(), + Extra :: log | stop. +progress(Log,Action) when ?IS_ACTION(Action) -> + filter_progress(Log,on_match(Action,Log)); +progress(Log,Action) -> + erlang:error(badarg,[Log,Action]). + +-spec remote_gl(Log,Extra) -> logger:filter_return() when + Log :: logger:log(), + Extra :: log | stop. +remote_gl(Log,Action) when ?IS_ACTION(Action) -> + filter_remote_gl(Log,on_match(Action,Log)); +remote_gl(Log,Action) -> + erlang:error(badarg,[Log,Action]). + +%%%----------------------------------------------------------------- +%%% Internal +filter_domain(prefix_of,#{domain:=Domain},MatchDomain,OnMatch) -> + is_prefix(Domain,MatchDomain,OnMatch); +filter_domain(starts_with,#{domain:=Domain},MatchDomain,OnMatch) -> + is_prefix(MatchDomain,Domain,OnMatch); +filter_domain(equals,#{domain:=Domain},Domain,OnMatch) -> + OnMatch; +filter_domain(Action,Meta,_,OnMatch) -> + case maps:is_key(domain,Meta) of + false when Action==no_domain -> OnMatch; + _ -> ignore + end. + +is_prefix(D1,D2,OnMatch) when is_list(D1), is_list(D2) -> + case lists:prefix(D1,D2) of + true -> OnMatch; + false -> ignore + end; +is_prefix(_,_,_) -> + ignore. 
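The domain filter is easiest to follow by calling it directly on a hand-built event, the way logger applies it in a filter chain. Note the asymmetry: prefix_of checks that the event's domain is a prefix of the configured match domain, while starts_with checks the opposite direction. The event maps below are invented and contain only the keys the filter inspects.

    -module(domain_filter_sketch).
    -export([demo/0]).

    demo() ->
        Extra = {log, prefix_of, [beam, erlang, otp]},
        Match = #{level => info, msg => {"~p", [hi]},
                  meta => #{domain => [beam, erlang]}},
        NoMatch = #{level => info, msg => {"~p", [hi]},
                    meta => #{domain => [beam, erlang, otp, sasl]}},
        %% [beam,erlang] is a prefix of [beam,erlang,otp] -> event returned
        Match = logger_filters:domain(Match, Extra),
        %% [beam,erlang,otp,sasl] is not a prefix of the match domain -> ignore
        ignore = logger_filters:domain(NoMatch, Extra),
        ok.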
+ +filter_level(Op,L1,L2,OnMatch) -> + case logger:compare_levels(L1,L2) of + eq when Op==eq; Op==lteq; Op==gteq -> OnMatch; + lt when Op==lt; Op==lteq; Op==neq -> OnMatch; + gt when Op==gt; Op==gteq; Op==neq -> OnMatch; + _ -> ignore + end. + +filter_progress(#{msg:={report,#{label:={_,progress}}}},OnMatch) -> + OnMatch; +filter_progress(_,_) -> + ignore. + +filter_remote_gl(#{meta:=#{gl:=GL}},OnMatch) when node(GL)=/=node() -> + OnMatch; +filter_remote_gl(_,_) -> + ignore. + +on_match(log,Log) -> Log; +on_match(stop,_) -> stop. diff --git a/lib/kernel/src/logger_formatter.erl b/lib/kernel/src/logger_formatter.erl new file mode 100644 index 0000000000..91283ab299 --- /dev/null +++ b/lib/kernel/src/logger_formatter.erl @@ -0,0 +1,255 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_formatter). + +-export([format/2]). + +-include("logger_internal.hrl"). + +%%%----------------------------------------------------------------- +%%% Types +-type template() :: [atom()|tuple()|string()]. + +%%%----------------------------------------------------------------- +%%% API +-spec format(Log,Config) -> String when + Log :: logger:log(), + Config :: #{single_line=>boolean(), + legacy_header=>boolean(), + report_cb=>fun((logger:report()) -> {io:format(),[term()]}), + max_size=>pos_integer() | unlimited, + depth=>pos_integer() | unlimited, + template=>template(), + utc=>boolean()}, + String :: string(). +format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0) + when is_map(Config0) -> + Config = add_default_config(Config0), + Meta1 = maybe_add_legacy_header(Level,Meta,Config), + MsgStr0 = format_msg(Msg0,Meta1,Config), + MsgStr = + case maps:get(single_line,Config) of + true -> + %% Trim leading and trailing whitespaces, and replace + %% newlines with ", " + re:replace(string:trim(MsgStr0),",?\r?\n\s*",", ", + [{return,list},global]); + _false2 -> + MsgStr0 + end, + String = do_format(Level,MsgStr,Meta1,maps:get(template,Config),Config), + truncate(String,maps:get(max_size,Config)). + +do_format(Level,Msg,Data,[level|Format],Config) -> + [to_string(level,Level,Config)|do_format(Level,Msg,Data,Format,Config)]; +do_format(Level,Msg,Data,[msg|Format],Config) -> + [Msg|do_format(Level,Msg,Data,Format,Config)]; +do_format(Level,Msg,Data,[Key|Format],Config) when is_atom(Key); is_tuple(Key) -> + Value = value(Key,Data), + [to_string(Key,Value,Config)|do_format(Level,Msg,Data,Format,Config)]; +do_format(Level,Msg,Data,[Str|Format],Config) -> + [Str|do_format(Level,Msg,Data,Format,Config)]; +do_format(_Level,_Msg,_Data,[],_Config) -> + []. + +value(Key,Meta) when is_atom(Key), is_map(Meta) -> + maps:get(Key,Meta,""); +value(Key,_) when is_atom(Key) -> + ""; +value(Keys,Meta) when is_tuple(Keys) -> + value(tuple_to_list(Keys),Meta); +value([Key|Keys],Meta) -> + value(Keys,value(Key,Meta)); +value([],Value) -> + Value. 
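do_format/5 walks the template left to right: level and msg are substituted directly, other atoms (or tuples of atoms, for nested keys) are looked up in the metadata, and plain strings are copied through unchanged. The sketch below calls logger_formatter:format/2 with a fully explicit configuration so that no logger defaults need to be consulted; the event map and template are invented.

    -module(formatter_sketch).
    -export([demo/0]).

    demo() ->
        Log = #{level => info,
                msg => {"~w users connected", [3]},
                meta => #{time => erlang:monotonic_time(microsecond),
                          mfa => {formatter_sketch, demo, 0}}},
        Config = #{template => [time, " ", level, " ", mfa, ": ", msg, "\n"],
                   legacy_header => false,
                   single_line => false,
                   utc => true,
                   max_size => unlimited,
                   depth => unlimited},
        %% 'time' and 'mfa' are picked from the metadata; the plain strings
        %% in the template are copied through as-is.
        io:format("~ts", [logger_formatter:format(Log, Config)]).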
+ +to_string(time,Time,Config) -> + format_time(Time,Config); +to_string(mfa,MFA,_Config) -> + format_mfa(MFA); +to_string(_,Value,_Config) -> + to_string(Value). + +to_string(X) when is_atom(X) -> + atom_to_list(X); +to_string(X) when is_integer(X) -> + integer_to_list(X); +to_string(X) when is_pid(X) -> + pid_to_list(X); +to_string(X) when is_reference(X) -> + ref_to_list(X); +to_string(X) when is_list(X) -> + case io_lib:printable_unicode_list(lists:flatten(X)) of + true -> X; + _ -> io_lib:format("~tp",[X]) + end; +to_string(X) -> + io_lib:format("~tp",[X]). + +format_msg({string,Chardata},Meta,Config) -> + try unicode:characters_to_list(Chardata) + catch _:_ -> format_msg({"INVALID STRING: ~tp",[Chardata]},Meta,Config) + end; +format_msg({report,_}=Msg,Meta,#{report_cb:=Fun}=Config) when is_function(Fun,1) -> + format_msg(Msg,Meta#{report_cb=>Fun},maps:remove(report_cb,Config)); +format_msg({report,Report},#{report_cb:=Fun}=Meta,Config) when is_function(Fun,1) -> + try Fun(Report) of + {Format,Args} when is_list(Format), is_list(Args) -> + format_msg({Format,Args},maps:remove(report_cb,Meta),Config); + Other -> + format_msg({"REPORT_CB ERROR: ~tp; Returned: ~tp", + [Report,Other]},Meta,Config) + catch C:R -> + format_msg({"REPORT_CB CRASH: ~tp; Reason: ~tp", + [Report,{C,R}]},Meta,Config) + end; +format_msg({report,Report},Meta,Config) -> + format_msg({report,Report}, + Meta#{report_cb=>fun logger:format_report/1}, + Config); +format_msg(Msg,_Meta,Config) -> + limit_depth(Msg, maps:get(depth,Config)). + +limit_depth(Msg,false) -> + Depth = logger:get_format_depth(), + limit_depth(Msg,Depth); +limit_depth({Format,Args},unlimited) -> + try io_lib:format(Format,Args) + catch _:_ -> + io_lib:format("FORMAT ERROR: ~tp - ~tp",[Format,Args]) + end; +limit_depth({Format0,Args},Depth) -> + try + Format1 = io_lib:scan_format(Format0, Args), + Format = limit_format(Format1, Depth), + io_lib:build_text(Format) + catch _:_ -> + limit_depth({"FORMAT ERROR: ~tp - ~tp",[Format0,Args]},Depth) + end. + +limit_format([#{control_char:=C0}=M0|T], Depth) when C0 =:= $p; + C0 =:= $w -> + C = C0 - ($a - $A), %To uppercase. + #{args:=Args} = M0, + M = M0#{control_char:=C,args:=Args++[Depth]}, + [M|limit_format(T, Depth)]; +limit_format([H|T], Depth) -> + [H|limit_format(T, Depth)]; +limit_format([], _) -> + []. + +truncate(String,unlimited) -> + String; +truncate(String,false) -> + Size = logger:get_max_size(), + truncate(String,Size); +truncate(String,Size) -> + Length = string:length(String), + if Length>Size -> + string:slice(String,0,Size-3)++"..."; + true -> + String + end. + +format_time(Timestamp,Config) when is_integer(Timestamp) -> + {Date,Time,Micro} = timestamp_to_datetimemicro(Timestamp,Config), + format_time(Date,Time,Micro); +format_time(Other,_Config) -> + %% E.g. a string + to_string(Other). + +format_time({Y,M,D},{H,Min,S},Micro) -> + io_lib:format("~4w-~2..0w-~2..0w ~2w:~2..0w:~2..0w.~6..0w", + [Y,M,D,H,Min,S,Micro]). + +%% Assuming this is monotonic time in microseconds +timestamp_to_datetimemicro(Timestamp,Config) when is_integer(Timestamp) -> + SysTime = Timestamp + erlang:time_offset(microsecond), + Micro = SysTime rem 1000000, + Sec = SysTime div 1000000, + UniversalTime = erlang:posixtime_to_universaltime(Sec), + {Date,Time} = + case Config of + #{utc:=true} -> UniversalTime; + _ -> erlang:universaltime_to_localtime(UniversalTime) + end, + {Date,Time,Micro}. 
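The depth limitation in limit_format/2 builds on io_lib:scan_format/2: every ~p or ~w directive is rewritten into its depth-limited ~P or ~W counterpart by upper-casing the control character and appending the depth as an extra argument. The same trick in isolation, with an arbitrary format string and depth:

    -module(depth_limit_sketch).
    -export([demo/0]).

    demo() ->
        Depth = 3,
        Scanned = io_lib:scan_format("state: ~p", [lists:seq(1, 100)]),
        %% Literal characters pass through; format-spec maps for ~p/~w get
        %% their control character upper-cased and the depth appended.
        Limited = [case Spec of
                       #{control_char := C, args := Args}
                         when C =:= $p; C =:= $w ->
                           Spec#{control_char := C - ($a - $A),
                                 args := Args ++ [Depth]};
                       _ ->
                           Spec
                   end || Spec <- Scanned],
        %% Prints a shortened form such as "state: [1,2|...]" rather than
        %% the full 100-element list.
        io:format("~ts~n", [io_lib:build_text(Limited)]).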
+ +format_mfa({M,F,A}) when is_atom(M), is_atom(F), is_integer(A) -> + atom_to_list(M)++":"++atom_to_list(F)++"/"++integer_to_list(A); +format_mfa({M,F,A}) when is_atom(M), is_atom(F), is_list(A) -> + format_mfa({M,F,length(A)}); +format_mfa(MFA) -> + to_string(MFA). + +maybe_add_legacy_header(Level, + #{time:=Timestamp}=Meta, + #{legacy_header:=true}=Config) -> + #{title:=Title}=MyMeta = add_legacy_title(Level,maps:get(?MODULE,Meta,#{})), + {{Y,Mo,D},{H,Mi,S},Micro} = timestamp_to_datetimemicro(Timestamp,Config), + Header = io_lib:format("=~ts==== ~w-~s-~4w::~2..0w:~2..0w:~2..0w.~6..0w ~s===", + [Title,D,month(Mo),Y,H,Mi,S,Micro,utcstr(Config)]), + Meta#{?MODULE=>MyMeta#{header=>Header}}; +maybe_add_legacy_header(_,Meta,_) -> + Meta. + +add_legacy_title(_Level,#{title:=_}=MyMeta) -> + MyMeta; +add_legacy_title(Level,MyMeta) -> + Title = string:uppercase(atom_to_list(Level)) ++ " REPORT", + MyMeta#{title=>Title}. + +month(1) -> "Jan"; +month(2) -> "Feb"; +month(3) -> "Mar"; +month(4) -> "Apr"; +month(5) -> "May"; +month(6) -> "Jun"; +month(7) -> "Jul"; +month(8) -> "Aug"; +month(9) -> "Sep"; +month(10) -> "Oct"; +month(11) -> "Nov"; +month(12) -> "Dec". + +utcstr(#{utc:=true}) -> "UTC "; +utcstr(_) -> "". + +add_default_config(#{utc:=_}=Config) -> + Default = + #{legacy_header=>false, + single_line=>false, + max_size=>false, + depth=>false}, + add_default_template(maps:merge(Default,Config)); +add_default_config(Config) -> + add_default_config(Config#{utc=>logger:get_utc_config()}). + +add_default_template(#{template:=_}=Config) -> + Config; +add_default_template(Config) -> + Config#{template=>default_template(Config)}. + +default_template(#{legacy_header:=true}) -> + ?DEFAULT_FORMAT_TEMPLATE_HEADER; +default_template(#{single_line:=true}) -> + ?DEFAULT_FORMAT_TEMPLATE_SINGLE; +default_template(_) -> + ?DEFAULT_FORMAT_TEMPLATE. diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl new file mode 100644 index 0000000000..7a5082fb84 --- /dev/null +++ b/lib/kernel/src/logger_h_common.erl @@ -0,0 +1,305 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_h_common). + +-include("logger_h_common.hrl"). +-include("logger_internal.hrl"). + +-export([log_to_binary/2, + check_common_config/1, + call_cast_or_drop/2, + check_load/1, + limit_burst/1, + kill_if_choked/4, + flush_log_requests/0, + flush_log_requests/1, + handler_exit/2, + cancel_timer/1, + stop_or_restart/3, + overload_levels_ok/1, + error_notify/1, + info_notify/1]). + +%%%----------------------------------------------------------------- +%%% Covert log data on any form to binary +-spec log_to_binary(Log,Config) -> LogString when + Log :: logger:log(), + Config :: logger:config(), + LogString :: binary(). 
+log_to_binary(#{msg:={report,_},meta:=#{report_cb:=_}}=Log,Config) -> + do_log_to_binary(Log,Config); +log_to_binary(#{msg:={report,_},meta:=Meta}=Log,Config) -> + DefaultReportCb = fun logger:format_otp_report/1, + do_log_to_binary(Log#{meta=>Meta#{report_cb=>DefaultReportCb}},Config); +log_to_binary(Log,Config) -> + do_log_to_binary(Log,Config). + +do_log_to_binary(Log,Config) -> + {Formatter,FormatterConfig} = maps:get(formatter,Config), + String = try_format(Log,Formatter,FormatterConfig), + try unicode:characters_to_binary(String) + catch _:_ -> + ?LOG_INTERNAL(debug,[{formatter_error,Formatter}, + {config,FormatterConfig}, + {log,Log}, + {bad_return_value,String}]), + <<"FORMATTER ERROR: bad_return_value">> + end. + +try_format(Log,Formatter,FormatterConfig) -> + try Formatter:format(Log,FormatterConfig) + catch + C:R:S -> + ?LOG_INTERNAL(debug,[{formatter_crashed,Formatter}, + {config,FormatterConfig}, + {log,Log}, + {reason, + {C,R,logger:filter_stacktrace(?MODULE,S)}}]), + case {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG} of + {Formatter,FormatterConfig} -> + "DEFAULT FORMATTER CRASHED"; + {DefaultFormatter,DefaultConfig} -> + try_format(Log#{msg=>{"FORMATTER CRASH: ~tp", + [maps:get(msg,Log)]}}, + DefaultFormatter,DefaultConfig) + end + end. + +%%%----------------------------------------------------------------- +%%% Check that the configuration term is valid +check_common_config({toggle_sync_qlen,N}) when is_integer(N) -> + valid; +check_common_config({drop_new_reqs_qlen,N}) when is_integer(N) -> + valid; +check_common_config({flush_reqs_qlen,N}) when is_integer(N) -> + valid; +check_common_config({enable_burst_limit,Bool}) when Bool == true; + Bool == false -> + valid; +check_common_config({burst_limit_size,N}) when is_integer(N) -> + valid; +check_common_config({burst_window_time,N}) when is_integer(N) -> + valid; +check_common_config({enable_kill_overloaded,Bool}) when Bool == true; + Bool == false -> + valid; +check_common_config({handler_overloaded_qlen,N}) when is_integer(N) -> + valid; +check_common_config({handler_overloaded_mem,N}) when is_integer(N) -> + valid; +check_common_config({handler_restart_after,NorA}) when is_integer(NorA); + NorA == never -> + valid; +check_common_config({filesync_repeat_interval,NorA}) when is_integer(NorA); + NorA == no_repeat -> + valid; +check_common_config(_) -> + invalid. + + +%%%----------------------------------------------------------------- +%%% Overload Protection +call_cast_or_drop(Name, Bin) -> + %% If the handler process is getting overloaded, the log request + %% will be synchronous instead of asynchronous (slows down the + %% logging tempo of a process doing lots of logging. If the + %% handler is choked, drop mode is set and no request will be sent. + try ?get_mode(Name) of + async -> + gen_server:cast(Name, {log,Bin}); + sync -> + try gen_server:call(Name, {log,Bin}, ?DEFAULT_CALL_TIMEOUT) of + %% if return value from call == dropped, the + %% message has been flushed by handler and should + %% therefore not be counted as dropped in stats + ok -> ok; + dropped -> ok + catch + _:{timeout,_} -> + %%! --- Tue Mar 6 23:10:48 2018 --- peppe was here! + erlang:display('CLIENT_TIMEOUT'), + %% exit('CLIENT_TIMEOUT'), + + ?observe(Name,{dropped,1}) + end; + drop -> ?observe(Name,{dropped,1}) + catch + %% if the ETS table doesn't exist (maybe because of a + %% handler restart), we can only drop the request + _:_ -> ?observe(Name,{dropped,1}) + end, + ok. + +handler_exit(_Name, Reason) -> + exit(Reason). 
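call_cast_or_drop/2 makes its decision on the client side by reading the handler's current mode from a public ETS table named after the handler (see the ?get_mode macro in logger_h_common.hrl). A simplified sketch of that dispatch; it omits the special case where the handler process itself is the caller, and the table and handler process are assumed to exist.

    -module(mode_dispatch_sketch).
    -export([send/2]).

    send(Name, Bin) ->
        Mode = try ets:lookup(Name, mode) of
                   [{mode, M}] -> M;
                   _ -> async
               catch
                   _:_ -> drop   %% table gone, e.g. during a handler restart
               end,
        case Mode of
            async -> gen_server:cast(Name, {log, Bin});
            sync  -> _ = (catch gen_server:call(Name, {log, Bin}, 5000)), ok;
            drop  -> ok
        end.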
+ +check_load(State = #{id:=Name, mode := Mode, + toggle_sync_qlen := ToggleSyncQLen, + drop_new_reqs_qlen := DropNewQLen, + flush_reqs_qlen := FlushQLen}) -> + {_,Mem} = process_info(self(), memory), + ?observe(Name,{max_mem,Mem}), + %% make sure the handler process doesn't get scheduled + %% out between the message_queue_len check below and the + %% action that follows (flush or write). + {_,QLen} = process_info(self(), message_queue_len), + ?observe(Name,{max_qlen,QLen}), + + {Mode1,_NewDrops,_NewFlushes} = + if + QLen >= FlushQLen -> + {flush, 0,1}; + QLen >= DropNewQLen -> + %% Note that drop mode will force log requests to + %% be dropped on the client side (never sent get to + %% the handler). + IncDrops = if Mode == drop -> 0; true -> 1 end, + {?change_mode(Name, Mode, drop), IncDrops,0}; + QLen >= ToggleSyncQLen -> + {?change_mode(Name, Mode, sync), 0,0}; + true -> + {?change_mode(Name, Mode, async), 0,0} + end, + State1 = ?update_other(drops,DROPS,_NewDrops,State), + {Mode1, QLen, Mem, + ?update_other(flushes,FLUSHES,_NewFlushes, + State1#{last_qlen => QLen})}. + +limit_burst(#{enable_burst_limit := false}) -> + {true,0,0}; +limit_burst(#{burst_win_ts := BurstWinT0, + burst_msg_count := BurstMsgCount, + burst_window_time := BurstWinTime, + burst_limit_size := BurstLimitSz}) -> + if (BurstMsgCount >= BurstLimitSz) -> + %% the limit for allowed messages has been reached + BurstWinT1 = ?timestamp(), + case ?diff_time(BurstWinT1,BurstWinT0) of + BurstCheckTime when BurstCheckTime < (BurstWinTime*1000) -> + %% we're still within the burst time frame + {false,BurstWinT0,BurstMsgCount}; + _BurstCheckTime -> + %% burst time frame passed, reset counters + {true,BurstWinT1,0} + end; + true -> + %% the limit for allowed messages not yet reached + {true,BurstWinT0,BurstMsgCount+1} + end. + +kill_if_choked(Name, QLen, Mem, + #{enable_kill_overloaded := KillIfOL, + handler_overloaded_qlen := HOLQLen, + handler_overloaded_mem := HOLMem}) -> + if KillIfOL andalso + ((QLen > HOLQLen) orelse (Mem > HOLMem)) -> + handler_exit(Name, {shutdown,{overloaded,Name,QLen,Mem}}); + true -> + ok + end. + +flush_log_requests() -> + flush_log_requests(-1). + +flush_log_requests(Limit) -> + process_flag(priority, high), + Flushed = flush_log_requests(0, Limit), + process_flag(priority, normal), + Flushed. + +flush_log_requests(Limit, Limit) -> + Limit; +flush_log_requests(N, Limit) -> + %% flush log requests but leave other requests, such as + %% file/disk_log_sync, info and change_config, so that these + %% have a chance to be processed even under heavy load + receive + {'$gen_cast',{log,_}} -> + flush_log_requests(N+1, Limit); + {'$gen_call',{Pid,MRef},{log,_}} -> + Pid ! {MRef, dropped}, + flush_log_requests(N+1, Limit) + after + 0 -> N + end. + +cancel_timer(TRef) when is_atom(TRef) -> ok; +cancel_timer(TRef) -> timer:cancel(TRef). + + +stop_or_restart(Name, {shutdown,Reason={overloaded,_Name,_QLen,_Mem}}, + #{handler_restart_after := RestartAfter}) -> + %% If we're terminating because of an overload situation (see + %% logger_h_common:kill_if_choked/4), we need to remove the handler + %% and set a restart timer. A separate process must perform this + %% in order to avoid deadlock. 
+ HandlerPid = self(), + RemoveAndRestart = + fun() -> + MRef = erlang:monitor(process, HandlerPid), + receive + {'DOWN',MRef,_,_,_} -> + ok + after 30000 -> + error_notify(Reason), + exit(HandlerPid, kill) + end, + case logger:get_handler_config(Name) of + {ok,{HMod,HConfig}} when is_integer(RestartAfter) -> + _ = logger:remove_handler(Name), + _ = timer:apply_after(RestartAfter, logger, add_handler, + [Name,HMod,HConfig]); + {ok,_} -> + _ = logger:remove_handler(Name); + {error,CfgReason} when is_integer(RestartAfter) -> + error_notify({Name,restart_impossible,CfgReason}); + {error,_} -> + ok + end + end, + spawn(RemoveAndRestart), + ok; + +stop_or_restart(Name, shutdown, _State) -> + %% Probably terminated by supervisor. Remove the handler to avoid + %% error printouts due to failing handler. + _ = case logger:get_handler_config(Name) of + {ok,_} -> + %% Spawning to avoid deadlock + spawn(logger,remove_handler,[Name]); + _ -> + ok + end, + ok; + +stop_or_restart(_Name, _Reason, _State) -> + ok. + +overload_levels_ok(HandlerConfig) -> + TSQL = maps:get(toggle_sync_qlen, HandlerConfig, ?TOGGLE_SYNC_QLEN), + DNRQL = maps:get(drop_new_reqs_qlen, HandlerConfig, ?DROP_NEW_REQS_QLEN), + FRQL = maps:get(flush_reqs_qlen, HandlerConfig, ?FLUSH_REQS_QLEN), + (TSQL < DNRQL) andalso (DNRQL < FRQL). + +error_notify(Term) -> + ?internal_log(error, Term). + +info_notify(Term) -> + ?internal_log(info, Term). diff --git a/lib/kernel/src/logger_h_common.hrl b/lib/kernel/src/logger_h_common.hrl new file mode 100644 index 0000000000..f110bbaf97 --- /dev/null +++ b/lib/kernel/src/logger_h_common.hrl @@ -0,0 +1,262 @@ + +%%%----------------------------------------------------------------- +%%% Overload protection configuration + +%%! *** NOTE *** +%%! It's important that: +%%! TOGGLE_SYNC_QLEN < DROP_NEW_REQS_QLEN < FLUSH_REQS_QLEN +%%! and that DROP_NEW_REQS_QLEN >= 2. +%%! Otherwise the handler could end up in drop mode with no new +%%! log requests to process. This would cause all future requests +%%! to be dropped (no switch to async mode would ever take place). + +%% This specifies the message_queue_len value where the log +%% requests switch from asynchronous casts to synchronous calls. +-define(TOGGLE_SYNC_QLEN, 10). +%% Above this message_queue_len, log requests will be dropped, +%% i.e. no log requests get sent to the handler process. +-define(DROP_NEW_REQS_QLEN, 200). +%% Above this message_queue_len, the handler process will flush +%% its mailbox and only leave this number of messages in it. +-define(FLUSH_REQS_QLEN, 1000). + +%% Never flush more than this number of messages in one go, +%% or the handler will be unresponsive for seconds (keep this +%% number as large as possible or the mailbox could grow large). +-define(FLUSH_MAX_N, 5000). + +%% BURST_LIMIT is the max number of log requests allowed to be +%% written within a BURST_WINDOW_TIME time frame. +-define(ENABLE_BURST_LIMIT, true). +-define(BURST_LIMIT_SIZE, 500). +-define(BURST_WINDOW_TIME, 1000). + +%% This enables/disables the feature to automatically get the +%% handler terminated if it gets too loaded (and can't keep up). +-define(ENABLE_KILL_OVERLOADED, false). +%% If the message_queue_len goes above this size even after +%% flushing has been performed, the handler is terminated. +-define(HANDLER_OVERLOADED_QLEN, 20000). +%% If the memory usage exceeds this level +-define(HANDLER_OVERLOADED_MEM, 3000000). + +%% This is the default time that the handler will wait before +%% restarting and accepting new requests. 
The value 'never' +%% disables restarts. +-define(HANDLER_RESTART_AFTER, 5000). +%%-define(HANDLER_RESTART_AFTER, never). + +%% The handler sends asynchronous write requests to the process +%% controlling the i/o device, but every once in this interval +%% will the write request be synchronous, so that the i/o device +%% process doesn't get overloaded. This gives the handler time +%% to keep up with its mailbox in overload situations, even if +%% the i/o is slow. +-define(CONTROLLER_SYNC_INTERVAL, 20). +%% The handler will not perform a file sync operation if the +%% mailbox size is greater than this number. This is to ensure +%% the handler process doesn't get overloaded while waiting for +%% an expensive file sync operation to finish. +-define(FILESYNC_OK_QLEN, 2). +%% Do a file/disk_log sync operation every integer() millisec +%% (if necessary) or set to 'no_repeat' to only do file sync when +%% the handler is idle. Note that file sync is not guaranteed to +%% happen automatically if this operation is disabled. +-define(FILESYNC_REPEAT_INTERVAL, 5000). +%%-define(FILESYNC_REPEAT_INTERVAL, no_repeat). + +%% This is the time after last message received that we think/hope +%% that the handler has an empty mailbox (no new log request has +%% come in). +-define(IDLE_DETECT_TIME_MSEC, 100). +-define(IDLE_DETECT_TIME_USEC, 100000). + +%% Default disk log option values +-define(DISK_LOG_TYPE, wrap). +-define(DISK_LOG_MAX_NO_FILES, 10). +-define(DISK_LOG_MAX_NO_BYTES, 1048576). + +%%%----------------------------------------------------------------- +%%% Overload protection macros + +-define(timestamp(), erlang:monotonic_time(microsecond)). + +-define(get_mode(HandlerName), + case ets:lookup(HandlerName, mode) of + [{mode,sync}] -> + case whereis(HandlerName)==self() of + true -> async; + _ -> sync + end; + [{mode,M}] -> M; + _ -> async + end). + +-define(set_mode(HandlerName, M), + begin ets:insert(HandlerName, {mode,M}), M end). + +-define(change_mode(HandlerName, M0, M1), + if M0 == M1 -> + M0; + true -> + ets:insert(HandlerName, {mode,M1}), + M1 + end). + +-define(min(X1, X2), + if X2 == undefined -> X1; + X2 < X1 -> X2; + true -> X1 + end). + +-define(max(X1, X2), + if + X2 == undefined -> X1; + X2 > X1 -> X2; + true -> X1 + end). + +-define(diff_time(OS_T1, OS_T0), OS_T1-OS_T0). + +%%%----------------------------------------------------------------- +%%% The test hook macros make it possible to observe and manipulate +%%% internal handler functionality. When enabled, these macros will +%%% slow down execution and therefore should not be include in code +%%% to be officially released. + +%% -define(TEST_HOOKS, true). +-ifdef(TEST_HOOKS). + -define(TEST_HOOKS_TAB, logger_h_test_hooks). + + -define(init_test_hooks(), + _ = case ets:whereis(?TEST_HOOKS_TAB) of + undefined -> ets:new(?TEST_HOOKS_TAB, [named_table,public]); + _ -> ok + end, + ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}), + ets:insert(?TEST_HOOKS_TAB, {file_write,ok}), + ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}), + ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}), + ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})). + + -define(set_internal_log(MOD_FUNC), + ets:insert(?TEST_HOOKS_TAB, {internal_log,MOD_FUNC})). + + -define(set_result(OPERATION, RESULT), + ets:insert(?TEST_HOOKS_TAB, {OPERATION,RESULT})). 
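The three queue-length thresholds defined at the top of this header drive the mode transitions that ?change_mode records in the ETS table. Reduced to a plain function over the default values (10/200/1000), the decision looks roughly like the sketch below; check_load/1 in logger_h_common.erl additionally tracks drop and flush counters.

    -module(qlen_mode_sketch).
    -export([mode_for/1]).

    %% Defaults: ?TOGGLE_SYNC_QLEN=10, ?DROP_NEW_REQS_QLEN=200,
    %% ?FLUSH_REQS_QLEN=1000. Shorter queues allow cheaper modes.
    mode_for(QLen) when QLen >= 1000 -> flush;
    mode_for(QLen) when QLen >= 200  -> drop;
    mode_for(QLen) when QLen >= 10   -> sync;
    mode_for(_QLen)                  -> async.

For example, mode_for(15) returns sync while mode_for(5) returns async, matching the required ordering TOGGLE_SYNC_QLEN < DROP_NEW_REQS_QLEN < FLUSH_REQS_QLEN.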
+ + -define(set_defaults(), + ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}), + ets:insert(?TEST_HOOKS_TAB, {file_write,ok}), + ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}), + ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}), + ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})). + + -define(internal_log(TYPE, TERM), + try ets:lookup(?TEST_HOOKS_TAB, internal_log) of + [{_,{LMOD,LFUNC}}] -> apply(LMOD, LFUNC, [TYPE,TERM]); + _ -> logger:internal_log(TYPE, TERM) + catch _:_ -> logger:internal_log(TYPE, TERM) end). + + -define(file_write(DEVICE, DATA), + try ets:lookup(?TEST_HOOKS_TAB, file_write) of + [{_,ok}] -> file:write(DEVICE, DATA); + [{_,ERROR}] -> ERROR + catch _:_ -> file:write(DEVICE, DATA) end). + + -define(file_datasync(DEVICE), + try ets:lookup(?TEST_HOOKS_TAB, file_datasync) of + [{_,ok}] -> file:datasync(DEVICE); + [{_,ERROR}] -> ERROR + catch _:_ -> file:datasync(DEVICE) end). + + -define(disk_log_blog(LOG, DATA), + try ets:lookup(?TEST_HOOKS_TAB, disk_log_blog) of + [{_,ok}] -> disk_log:blog(LOG, DATA); + [{_,ERROR}] -> ERROR + catch _:_ -> disk_log:blog(LOG, DATA) end). + + -define(disk_log_sync(LOG), + try ets:lookup(?TEST_HOOKS_TAB, disk_log_sync) of + [{_,ok}] -> disk_log:sync(LOG); + [{_,ERROR}] -> ERROR + catch _:_ -> disk_log:sync(LOG) end). + + -define(DEFAULT_CALL_TIMEOUT, 5000). + +-else. % DEFAULTS! + -define(TEST_HOOKS_TAB, undefined). + -define(init_test_hooks(), ok). + -define(set_internal_log(_MOD_FUNC), ok). + -define(set_result(_OPERATION, _RESULT), ok). + -define(set_defaults(), ok). + -define(internal_log(TYPE, TERM), logger:internal_log(TYPE, TERM)). + -define(file_write(DEVICE, DATA), file:write(DEVICE, DATA)). + -define(file_datasync(DEVICE), file:datasync(DEVICE)). + -define(disk_log_blog(LOG, DATA), disk_log:blog(LOG, DATA)). + -define(disk_log_sync(LOG), disk_log:sync(LOG)). + -define(DEFAULT_CALL_TIMEOUT, ?DEFAULT_LOGGER_CALL_TIMEOUT-2000). +-endif. + +%%%----------------------------------------------------------------- +%%% These macros enable statistics counters in the state of the +%%% handler which is useful for analysing the overload protection +%%% behaviour. These counters should not be included in code to be +%%% officially released (as some counters will grow very large +%%% over time). + +%%-define(SAVE_STATS, true). +-ifdef(SAVE_STATS). + -define(merge_with_stats(STATE), + STATE#{flushes => 0, flushed => 0, drops => 0, + casts => 0, calls => 0, + max_qlen => 0, max_time => 0}). + + -define(update_max_qlen(QLEN, STATE), + begin #{max_qlen := QLEN0} = STATE, + STATE#{max_qlen => ?max(QLEN0,QLEN)} end). + + -define(update_calls_or_casts(CALL_OR_CAST, INC, STATE), + case CALL_OR_CAST of + cast -> + #{casts := CASTS0} = STATE, + STATE#{casts => CASTS0+INC}; + call -> + #{calls := CALLS0} = STATE, + STATE#{calls => CALLS0+INC} + end). + + -define(update_max_time(TIME, STATE), + begin #{max_time := TIME0} = STATE, + STATE#{max_time => ?max(TIME0,TIME)} end). + + -define(update_other(OTHER, VAR, INCVAL, STATE), + begin #{OTHER := VAR} = STATE, + STATE#{OTHER => VAR+INCVAL} end). + +-else. % DEFAULT! + -define(merge_with_stats(STATE), STATE). + -define(update_max_qlen(_QLEN, STATE), STATE). + -define(update_calls_or_casts(_CALL_OR_CAST, _INC, STATE), STATE). + -define(update_max_time(_TIME, STATE), STATE). + -define(update_other(_OTHER, _VAR, _INCVAL, STATE), STATE). +-endif. 
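The statistics macros above rely on the VAR argument becoming a fresh variable in the expansion: the current counter value is bound out of the state map and the incremented value is written back. A compile-and-run sketch of ?update_other/4 in isolation; the counter names are arbitrary.

    -module(stats_macro_sketch).
    -export([demo/0]).

    %% Same definition as in logger_h_common.hrl, repeated here so the
    %% module is self-contained.
    -define(update_other(OTHER, VAR, INCVAL, STATE),
            begin #{OTHER := VAR} = STATE,
                  STATE#{OTHER => VAR + INCVAL} end).

    demo() ->
        State0 = #{flushes => 0, drops => 0},
        %% Expands to: #{flushes := F} = State0, State0#{flushes => F + 1}
        State1 = ?update_other(flushes, F, 1, State0),
        #{flushes := 1} = State1,
        State2 = ?update_other(drops, D, 5, State1),
        #{drops := 5, flushes := 1} = State2,
        ok.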
+ +%%%----------------------------------------------------------------- +%%% These macros enable callbacks that make it possible to analyse +%%% the overload protection behaviour from outside the handler +%%% process (including dropped requests on the client side). +%%% An external callback module (?OBSERVER_MOD) is required which +%%% is not part of the kernel application. For this reason, these +%%% callbacks should not be included in code to be officially released. + +%%-define(OBSERVER_MOD, logger_test). +-ifdef(OBSERVER_MOD). + -define(start_observation(NAME), ?OBSERVER:start_observation(NAME)). + -define(observe(NAME,EVENT), ?OBSERVER:observe(NAME,EVENT)). + +-else. % DEFAULT! + -define(start_observation(_NAME), ok). + -define(observe(_NAME,_EVENT), ok). +-endif. +%%! <--- diff --git a/lib/kernel/src/logger_internal.hrl b/lib/kernel/src/logger_internal.hrl new file mode 100644 index 0000000000..8fcaba8de6 --- /dev/null +++ b/lib/kernel/src/logger_internal.hrl @@ -0,0 +1,96 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-include_lib("kernel/include/logger.hrl"). +-define(LOGGER_TABLE,logger). +-define(LOGGER_KEY,'$logger_config$'). +-define(HANDLER_KEY,'$handler_config$'). +-define(LOGGER_META_KEY,'$logger_metadata$'). +-define(STANDARD_HANDLER, logger_std_h). +-define(DEFAULT_LOGGER_CALL_TIMEOUT, 10000). +-define(DEFAULT_HANDLER_FILTERS, + ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp])). +-define(DEFAULT_HANDLER_FILTERS(Domain), + [{remote_gl,{fun logger_filters:remote_gl/2,stop}}, + {domain,{fun logger_filters:domain/2,{log,prefix_of,Domain}}}, + {no_domain,{fun logger_filters:domain/2,{log,no_domain,[]}}}]). +-define(DEFAULT_FORMATTER,logger_formatter). +-define(DEFAULT_FORMAT_CONFIG,#{legacy_header=>true, + template=>?DEFAULT_FORMAT_TEMPLATE_HEADER}). +-define(DEFAULT_FORMAT_TEMPLATE_HEADER, + [{logger_formatter,header},"\n",msg,"\n"]). +-define(DEFAULT_FORMAT_TEMPLATE_SINGLE, + [time," ",level,": ",msg,"\n"]). +-define(DEFAULT_FORMAT_TEMPLATE, + [time," ",level,":\n",msg,"\n"]). + +-define(LOG_INTERNAL(Level,Report), + case logger:allow(Level,?MODULE) of + true -> + _ = spawn(logger,macro_log,[?LOCATION,Level,Report, + logger:add_default_metadata(#{})]), + ok; + false -> + ok + end). + +%%%----------------------------------------------------------------- +%%% Levels +%%% Using same as syslog +-define(LEVELS,[emergency, + alert, + critical, + error, + warning, + notice, + info, + debug]). +-define(EMERGENCY,0). +-define(ALERT,1). +-define(CRITICAL,2). +-define(ERROR,3). +-define(WARNING,4). +-define(NOTICE,5). +-define(INFO,6). +-define(DEBUG,7). + +-define(IS_LEVEL(L), + (L=:=emergency orelse + L=:=alert orelse + L=:=critical orelse + L=:=error orelse + L=:=warning orelse + L=:=notice orelse + L=:=info orelse + L=:=debug)). 
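The level atoms are ordered syslog-style, with a lower integer meaning a more severe level. filter_level/4 in logger_filters.erl relies on a comparison over this ordering; the sketch below restates it as plain functions. The gt/lt convention shown (gt when the first level is the more severe one) is my reading of how logger:compare_levels/2 is used there, so treat it as an assumption.

    -module(level_sketch).
    -export([severity/1, compare/2]).

    %% Same numbering as the ?EMERGENCY..?DEBUG defines above.
    severity(emergency) -> 0;
    severity(alert)     -> 1;
    severity(critical)  -> 2;
    severity(error)     -> 3;
    severity(warning)   -> 4;
    severity(notice)    -> 5;
    severity(info)      -> 6;
    severity(debug)     -> 7.

    compare(L1, L2) ->
        case {severity(L1), severity(L2)} of
            {S, S} -> eq;
            {S1, S2} when S1 < S2 -> gt;   %% L1 is more severe than L2
            _ -> lt
        end.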
+ +-define(IS_MSG(Msg), + ((is_tuple(Msg) andalso tuple_size(Msg)==2) + andalso + (is_list(element(1,Msg)) andalso is_list(element(2,Msg))) + orelse + (element(1,Msg)==report andalso ?IS_REPORT(element(2,Msg))) + orelse + (element(1,Msg)==string andalso ?IS_STRING(element(2,Msg))))). + +-define(IS_REPORT(Report), + (is_map(Report) orelse (is_list(Report) andalso is_tuple(hd(Report))))). + +-define(IS_STRING(String), + (is_list(String) orelse is_binary(String))). diff --git a/lib/kernel/src/logger_server.erl b/lib/kernel/src/logger_server.erl new file mode 100644 index 0000000000..6ef3b8582a --- /dev/null +++ b/lib/kernel/src/logger_server.erl @@ -0,0 +1,440 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_server). + +-behaviour(gen_server). + +%% API +-export([start_link/0, + add_handler/3, remove_handler/1, + add_filter/2, remove_filter/2, + set_module_level/2, reset_module_level/1, + cache_module_level/1, + set_config/2, set_config/3]). + +%% gen_server callbacks +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2]). + +-include("logger_internal.hrl"). + +-define(SERVER, logger). + +-record(state, {tid}). + +%%%=================================================================== +%%% API +%%%=================================================================== + +start_link() -> + gen_server:start_link({local, ?SERVER}, ?MODULE, [], []). + +add_handler(Id,Module,Config0) -> + case sanity_check(logger,handlers,[Id]) of + ok -> + try check_mod(Module) of + ok -> + case sanity_check(Id,Config0) of + ok -> + Default = default_config(Id), + Config = maps:merge(Default,Config0), + call({add_handler,Id,Module,Config}); + Error -> + Error + end + catch throw:Error -> + {error,Error} + end; + Error -> + Error + end. + +remove_handler(HandlerId) -> + call({remove_handler,HandlerId}). + +add_filter(Owner,Filter) -> + case sanity_check(Owner,filters,[Filter]) of + ok -> call({add_filter,Owner,Filter}); + Error -> Error + end. + +remove_filter(Owner,FilterId) -> + call({remove_filter,Owner,FilterId}). + +set_module_level(Module,Level) when is_atom(Module) -> + case sanity_check(logger,level,Level) of + ok -> call({set_module_level,Module,Level}); + Error -> Error + end; +set_module_level(Module,_) -> + {error,{not_a_module,Module}}. + +reset_module_level(Module) when is_atom(Module) -> + call({reset_module_level,Module}); +reset_module_level(Module) -> + {error,{not_a_module,Module}}. + +cache_module_level(Module) -> + gen_server:cast(?SERVER,{cache_module_level,Module}). + + +set_config(Owner,Key,Value) -> + case sanity_check(Owner,Key,Value) of + ok -> call({update_config,Owner,#{Key=>Value}}); + Error -> Error + end. + +set_config(Owner,Config0) -> + case sanity_check(Owner,Config0) of + ok -> + Config = maps:merge(default_config(Owner),Config0), + call({set_config,Owner,Config}); + Error -> + Error + end. 
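?IS_MSG in logger_internal.hrl admits three message shapes: a format/args pair, a report (a map or a list of tagged tuples), and a ready-made string. The sketch below restates the macro as a function and checks one invented example of each shape.

    -module(msg_forms_sketch).
    -export([demo/0]).

    demo() ->
        Msgs = [{"~w users", [3]},                       %% format + args
                {report, #{users => 3, node => node()}}, %% report as a map
                {report, [{users, 3}, {node, node()}]},  %% report as a list
                {string, <<"3 users">>}],                %% ready-made string
        [true = is_msg(M) || M <- Msgs],
        ok.

    %% Function version of ?IS_MSG / ?IS_REPORT / ?IS_STRING.
    is_msg({Format, Args}) when is_list(Format), is_list(Args) -> true;
    is_msg({report, Report}) ->
        is_map(Report) orelse (is_list(Report) andalso is_tuple(hd(Report)));
    is_msg({string, String}) -> is_list(String) orelse is_binary(String);
    is_msg(_) -> false.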
+ +%%%=================================================================== +%%% gen_server callbacks +%%%=================================================================== + +init([]) -> + process_flag(trap_exit, true), + Tid = logger_config:new(?LOGGER_TABLE), + LoggerConfig = maps:merge(default_config(logger), + #{handlers=>[logger_simple]}), + logger_config:create(Tid,logger,LoggerConfig), + SimpleConfig0 = maps:merge(default_config(logger_simple), + #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS, + logger_simple=>#{buffer=>true}}), + %% If this fails, then the node should crash + {ok,SimpleConfig} = + logger_simple:adding_handler(logger_simple,SimpleConfig0), + logger_config:create(Tid,logger_simple,logger_simple,SimpleConfig), + {ok, #state{tid=Tid}}. + +handle_call({add_handler,Id,Module,HConfig}, _From, #state{tid=Tid}=State) -> + Reply = + case logger_config:exist(Tid,Id) of + true -> + {error,{already_exist,Id}}; + false -> + %% inform the handler + case call_h(Module,adding_handler,[Id,HConfig],{ok,HConfig}) of + {ok,HConfig1} -> + logger_config:create(Tid,Id,Module,HConfig1), + {ok,Config} = do_get_config(Tid,logger), + Handlers = maps:get(handlers,Config,[]), + do_set_config(Tid,logger, + Config#{handlers=>[Id|Handlers]}), + ok; + {error,HReason} -> + {error,{handler_not_added,HReason}} + end + end, + {reply,Reply,State}; +handle_call({remove_handler,HandlerId}, _From, #state{tid=Tid}=State) -> + Reply = + case logger_config:get(Tid,HandlerId) of + {ok,{Module,_}} -> + {ok,Config} = do_get_config(Tid,logger), + Handlers0 = maps:get(handlers,Config,[]), + Handlers = lists:delete(HandlerId,Handlers0), + %% inform the handler + _ = call_h(Module,removing_handler,[HandlerId],ok), + do_set_config(Tid,logger,Config#{handlers=>Handlers}), + logger_config:delete(Tid,HandlerId), + ok; + _ -> + {error,{not_found,HandlerId}} + end, + {reply,Reply,State}; +handle_call({add_filter,Id,Filter}, _From,#state{tid=Tid}=State) -> + Reply = do_add_filter(Tid,Id,Filter), + {reply,Reply,State}; +handle_call({remove_filter,Id,FilterId}, _From, #state{tid=Tid}=State) -> + Reply = do_remove_filter(Tid,Id,FilterId), + {reply,Reply,State}; +handle_call({update_config,Id,NewConfig}, _From, #state{tid=Tid}=State) -> + Reply = + case logger_config:get(Tid,Id) of + {ok,{Module,OldConfig}} -> + Config = maps:merge(OldConfig,NewConfig), + case call_h(Module,changing_config,[Id,OldConfig,Config], + {ok,Config}) of + {ok,Config1} -> + do_set_config(Tid,Id,Config1); + Error -> + Error + end; + {ok,OldConfig} -> + Config = maps:merge(OldConfig,NewConfig), + do_set_config(Tid,Id,Config); + Error -> + Error + end, + {reply,Reply,State}; +handle_call({set_config,logger,Config}, _From, #state{tid=Tid}=State) -> + Reply = do_set_config(Tid,logger,Config), + {reply,Reply,State}; +handle_call({set_config,HandlerId,Config}, _From, #state{tid=Tid}=State) -> + Reply = + case logger_config:get(Tid,HandlerId) of + {ok,{Module,OldConfig}} -> + case call_h(Module,changing_config,[HandlerId,OldConfig,Config], + {ok,Config}) of + {ok,Config1} -> + do_set_config(Tid,HandlerId,Config1); + Error -> + Error + end; + _ -> + {error,{not_found,HandlerId}} + end, + {reply,Reply,State}; +handle_call({set_module_level,Module,Level}, _From, #state{tid=Tid}=State) -> + Reply = logger_config:set_module_level(Tid,Module,Level), + {reply,Reply,State}; +handle_call({reset_module_level,Module}, _From, #state{tid=Tid}=State) -> + Reply = logger_config:reset_module_level(Tid,Module), + {reply,Reply,State}. 
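The update_config clause above merges the new configuration onto the old one with maps:merge/2, which is shallow: a nested handler-specific map in the new configuration replaces the old nested map wholesale rather than being merged into it. A small sketch of that consequence, with invented key names:

    -module(config_merge_sketch).
    -export([demo/0]).

    demo() ->
        Old = #{level => info,
                filters => [],
                my_h => #{toggle_sync_qlen => 10, flush_reqs_qlen => 1000}},
        New = #{level => error,
                my_h => #{toggle_sync_qlen => 50}},
        Merged = maps:merge(Old, New),
        #{level := error,
          filters := [],
          my_h := #{toggle_sync_qlen := 50}} = Merged,
        %% The shallow merge dropped flush_reqs_qlen from the nested map.
        false = maps:is_key(flush_reqs_qlen, maps:get(my_h, Merged)),
        ok.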
+ +handle_cast({cache_module_level,Module}, #state{tid=Tid}=State) -> + logger_config:cache_module_level(Tid,Module), + {noreply, State}. + +%% Interface for those who can't call the API - e.g. the emulator, or +%% places related to code loading. +%% +%% This can also be log events from remote nodes which are sent from +%% logger.erl when the group leader of the client process is on a +%% same node as the client process itself. +handle_info({log,Level,Format,Args,Meta}, State) -> + logger:log(Level,Format,Args,Meta), + {noreply, State}; +handle_info({log,Level,Report,Meta}, State) -> + logger:log(Level,Report,Meta), + {noreply, State}; +handle_info({Ref,_Reply},State) when is_reference(Ref) -> + %% Assuming this is a timed-out gen_server reply - ignoring + {noreply, State}. + +terminate(_Reason, _State) -> + ok. + +%%%=================================================================== +%%% Internal functions +%%%=================================================================== +call(Request) -> + case whereis(?SERVER) of + Pid when Pid==self() -> + {error,{attempting_syncronous_call_to_self,Request}}; + _ -> + gen_server:call(?SERVER,Request,?DEFAULT_LOGGER_CALL_TIMEOUT) + end. + +do_add_filter(Tid,Id,{FId,_} = Filter) -> + case do_get_config(Tid,Id) of + {ok,Config} -> + Filters = maps:get(filters,Config,[]), + case lists:keymember(FId,1,Filters) of + true -> + {error,{already_exist,FId}}; + false -> + do_set_config(Tid,Id,Config#{filters=>[Filter|Filters]}) + end; + Error -> + Error + end. + +do_remove_filter(Tid,Id,FilterId) -> + case do_get_config(Tid,Id) of + {ok,Config} -> + Filters0 = maps:get(filters,Config,[]), + case lists:keytake(FilterId,1,Filters0) of + {value,_,Filters} -> + do_set_config(Tid,Id,Config#{filters=>Filters}); + false -> + {error,{not_found,FilterId}} + end; + Error -> + Error + end. + +do_get_config(Tid,Id) -> + case logger_config:get(Tid,Id) of + {ok,{_,Config}} -> + {ok,Config}; + {ok,Config} -> + {ok,Config}; + Error -> + Error + end. + +do_set_config(Tid,Id,Config) -> + logger_config:set(Tid,Id,Config), + ok. + +default_config(logger) -> + #{level=>info, + filters=>[], + filter_default=>log, + handlers=>[]}; +default_config(_) -> + #{level=>info, + filters=>[], + filter_default=>log, + formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}. + +sanity_check(Owner,Key,Value) -> + sanity_check_1(Owner,[{Key,Value}]). + +sanity_check(HandlerId,Config) when is_map(Config) -> + sanity_check_1(HandlerId,maps:to_list(Config)); +sanity_check(_,Config) -> + {error,{invalid_handler_config,Config}}. + +sanity_check_1(Owner,Config) when is_list(Config) -> + try + Type = get_type(Owner), + check_config(Type,Config) + catch throw:Error -> {error,Error} + end. + +get_type(logger) -> + logger; +get_type(Id) -> + check_id(Id), + handler. 
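Filters are stored as {FilterId, {Fun, Extra}} tuples in the list under the filters key, which is why do_add_filter/3 uses lists:keymember/3 to detect duplicates and do_remove_filter/3 uses lists:keytake/3 to remove by id. A sketch of that list handling with two filters from logger_filters; the filter ids are arbitrary.

    -module(filter_list_sketch).
    -export([demo/0]).

    demo() ->
        F1 = {domain, {fun logger_filters:domain/2, {log, no_domain, []}}},
        F2 = {stop_progress, {fun logger_filters:progress/2, stop}},
        Filters0 = [F1],
        false = lists:keymember(stop_progress, 1, Filters0),
        Filters1 = [F2 | Filters0],
        {value, F2, Filters2} = lists:keytake(stop_progress, 1, Filters1),
        Filters2 = Filters0,
        ok.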
+ +check_config(Owner,[{level,Level}|Config]) -> + check_level(Level), + check_config(Owner,Config); +check_config(logger,[{handlers,Handlers}|Config]) -> + check_handlers(Handlers), + check_config(logger,Config); +check_config(Owner,[{filters,Filters}|Config]) -> + check_filters(Filters), + check_config(Owner,Config); +check_config(Owner,[{filter_default,FD}|Config]) -> + check_filter_default(FD), + check_config(Owner,Config); +check_config(handler,[{formatter,Formatter}|Config]) -> + check_formatter(Formatter), + check_config(handler,Config); +check_config(logger,[C|_]) -> + throw({invalid_logger_config,C}); +check_config(handler,[{_,_}|Config]) -> + %% Arbitrary config elements are allowed for handlers + check_config(handler,Config); +check_config(_,[]) -> + ok. + +check_id(Id) when is_atom(Id) -> + ok; +check_id(Id) -> + throw({invalid_id,Id}). + +check_mod(Mod) when is_atom(Mod) -> + ok; +check_mod(Mod) -> + throw({invalid_module,Mod}). + +check_level({LevelInt,cached}) when LevelInt>=?EMERGENCY, LevelInt=<?DEBUG -> + ok; +check_level(Level) -> + case lists:member(Level,?LEVELS) of + true -> + ok; + false -> + throw({invalid_level,Level}) + end. + +check_handlers([Id|Handlers]) -> + check_id(Id), + check_handlers(Handlers); +check_handlers([]) -> + ok; +check_handlers(Handlers) -> + throw({invalid_handlers,Handlers}). + +check_filters([{Id,{Fun,_Args}}|Filters]) when is_atom(Id), is_function(Fun,2) -> + check_filters(Filters); +check_filters([Filter|_]) -> + throw({invalid_filter,Filter}); +check_filters([]) -> + ok; +check_filters(Filters) -> + throw({invalid_filters,Filters}). + +check_filter_default(FD) when FD==stop; FD==log -> + ok; +check_filter_default(FD) -> + throw({invalid_filter_default,FD}). + +check_formatter({logger_formatter,Config}) when is_map(Config) -> + check_logger_formatter_config(maps:to_list(Config)); +check_formatter({logger_formatter,Config}) -> + throw({invalid_formatter_config,Config}); +check_formatter({Mod,_}) -> + %% no knowledge of other formatters + check_mod(Mod); +check_formatter(Formatter) -> + throw({invalid_formatter,Formatter}). + + +check_logger_formatter_config([{template,T}|Config]) when is_list(T) -> + case lists:all(fun(X) when is_atom(X) -> true; + (X) when is_tuple(X), is_atom(element(1,X)) -> true; + (X) when is_list(X) -> io_lib:printable_unicode_list(X); + (_) -> false + end, + T) of + true -> + check_logger_formatter_config(Config); + false -> + throw({invalid_formatter_template,T}) + end; +check_logger_formatter_config([{legacy_header,LH}|Config]) when is_boolean(LH) -> + check_logger_formatter_config(Config); +check_logger_formatter_config([{single_line,SL}|Config]) when is_boolean(SL) -> + check_logger_formatter_config(Config); +check_logger_formatter_config([{utc,Utc}|Config]) when is_boolean(Utc) -> + check_logger_formatter_config(Config); +check_logger_formatter_config([C|_]) -> + throw({invalid_formatter_config,C}); +check_logger_formatter_config([]) -> + ok. + +call_h(Module, Function, Args, DefRet) -> + %% Not calling code:ensure_loaded + erlang:function_exported here, + %% since in some rare terminal cases, the code_server might not + %% exist and we'll get a deadlock in removing the handler. + try apply(Module, Function, Args) + catch + C:R:S -> + case {C,R,S} of + {error,undef,[{Module,Function,Args,_}|_]} -> + DefRet; + _ -> + {error,{callback_crashed, + {C,R,logger:filter_stacktrace(?MODULE,S)}}} + end + end. 
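call_h/4 treats the handler callbacks adding_handler/2, removing_handler/1 and changing_config/3 as optional: an undef is silently replaced by the supplied default return value. Below is a minimal handler module shaped after that contract which prints every event with the formatter found in its configuration. It is a sketch, not one of the handlers in this patch.

    -module(my_console_handler).
    -export([adding_handler/2, removing_handler/1, log/2]).

    %% Accept the configuration as-is when the handler is added.
    adding_handler(_Id, Config) ->
        {ok, Config}.

    removing_handler(_Id) ->
        ok.

    %% Called for every accepted log event; format and print it.
    log(Log, Config) ->
        {FMod, FConf} = maps:get(formatter, Config),
        io:put_chars(FMod:format(Log, FConf)).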
diff --git a/lib/kernel/src/logger_simple.erl b/lib/kernel/src/logger_simple.erl new file mode 100644 index 0000000000..61387ac7c9 --- /dev/null +++ b/lib/kernel/src/logger_simple.erl @@ -0,0 +1,238 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_simple). + +-export([adding_handler/2, removing_handler/1, log/2]). +-export([get_buffer/0]). + +%% This module implements a simple handler for logger. It is the +%% default used during system start. + +%%%----------------------------------------------------------------- +%%% API +get_buffer() -> + case whereis(?MODULE) of + undefined -> + {error,noproc}; + Pid -> + Ref = erlang:monitor(process,Pid), + Pid ! {get_buffer,self()}, + receive + {buffer,Buffer} -> + erlang:demonitor(Ref,[flush]), + {ok,Buffer}; + {'DOWN',Ref,process,Pid,Reason} -> + {error,Reason} + end + end. + +%%%----------------------------------------------------------------- +%%% Logger callback + +adding_handler(?MODULE,Config) -> + Me = self(), + case whereis(?MODULE) of + undefined -> + {Pid,Ref} = spawn_opt(fun() -> init(Me,Config) end, + [link,monitor,{message_queue_data,off_heap}]), + receive + {'DOWN',Ref,process,Pid,Reason} -> + {error,Reason}; + {Pid,started} -> + erlang:demonitor(Ref), + {ok,Config} + end; + _ -> + {error,{handler_process_name_already_exists,?MODULE}} + end. + +removing_handler(?MODULE) -> + case whereis(?MODULE) of + undefined -> + ok; + Pid -> + Ref = erlang:monitor(process,Pid), + unlink(Pid), + Pid ! stop, + receive {'DOWN',Ref,process,Pid,_} -> + ok + end + end. + +log(#{meta:=#{error_logger:=#{tag:=info_report,type:=Type}}},_Config) + when Type=/=std_info -> + %% Skip info reports that are not 'std_info' (ref simple logger in + %% error_logger) + ok; +log(#{msg:=_,meta:=#{time:=_}}=Log,_Config) -> + _ = case whereis(?MODULE) of + undefined -> + %% Is the node on the way down? Real emergency? + %% Log directly from client just to get it out + do_log( + #{level=>error, + msg=>{report,{error,simple_handler_process_dead}}, + meta=>#{time=>erlang:monotonic_time(microsecond)}}), + do_log(Log); + _ -> + ?MODULE ! {log,Log} + end, + ok; +log(_,_) -> + %% Unexpected log. + %% We don't want to crash the simple logger, so ignore this. + ok. + +%%%----------------------------------------------------------------- +%%% Process +init(Starter,Config) -> + register(?MODULE,self()), + Starter ! {self(),started}, + BufferSize = + case Config of + #{?MODULE:=#{buffer:=true}} -> + 10; + _ -> + infinity + end, + loop(#{buffer_size=>BufferSize,dropped=>0,buffer=>[]},infinity). + +loop(Buffer,Timeout) -> + receive + stop -> + ok; + {get_buffer,From} -> + loop(Buffer#{send_to=>From},0); + {log,#{msg:=_,meta:=#{time:=_}}=Log} -> + do_log(Log), + loop(update_buffer(Buffer,Log),Timeout); + _ -> + %% Unexpected message - flush it! 
+ loop(Buffer,Timeout) + after Timeout -> + #{dropped:=D,buffer:=B,send_to:=Pid} = Buffer, + LogList = lists:reverse(B) ++ drop_msg(D), + Pid ! {buffer,LogList}, + loop(Buffer#{buffer_size=>infinity, + dropped=>0, + buffer=>[], + send_to=>false}, + infinity) + end. + +update_buffer(#{buffer_size:=infinity}=Buffer,_Log) -> + Buffer; +update_buffer(#{buffer_size:=0,dropped:=D}=Buffer,_Log) -> + Buffer#{dropped=>D+1}; +update_buffer(#{buffer_size:=S,buffer:=B}=Buffer,Log) -> + Buffer#{buffer_size=>S-1,buffer=>[Log|B]}. + +drop_msg(0) -> + []; +drop_msg(N) -> + [#{level=>info, + msg=>{"Simple handler buffer full, dropped ~w messages",[N]}, + meta=>#{time=>erlang:monotonic_time(microsecond)}}]. + +%%%----------------------------------------------------------------- +%%% Internal + +%% Can't do io_lib:format + +do_log(#{msg:={report,Report}, + meta:=#{time:=T,error_logger:=#{type:=Type}}}) -> + display_date(T), + display_report(Type,Report); +do_log(#{msg:=Msg,meta:=#{time:=T}}) -> + %% Maybe buffer event - if next handler shall be able to + %% re-display it + display_date(T), + display(Msg). + +display_date(Timestamp0) when is_integer(Timestamp0) -> + Timestamp = Timestamp0 + erlang:time_offset(microsecond), + Micro = Timestamp rem 1000000, + Sec = Timestamp div 1000000, + {{Y,Mo,D},{H,Mi,S}} = erlang:universaltime_to_localtime( + erlang:posixtime_to_universaltime(Sec)), + erlang:display_string( + integer_to_list(Y) ++ "-" ++ + pad(Mo,2) ++ "-" ++ + pad(D,2) ++ " " ++ + pad(H,2) ++ ":" ++ + pad(Mi,2) ++ ":" ++ + pad(S,2) ++ "." ++ + pad(Micro,6) ++ " "). + +pad(Int,Size) when is_integer(Int) -> + pad(integer_to_list(Int),Size); +pad(Str,Size) when length(Str)==Size -> + Str; +pad(Str,Size) -> + pad([$0|Str],Size). + +display({string,Chardata}) -> + try unicode:characters_to_list(Chardata) of + String -> erlang:display_string(String), erlang:display_string("\n") + catch _:_ -> erlang:display(Chardata) + end; +display({report,Report}) when is_map(Report) -> + display_report(maps:to_list(Report)); +display({report,Report}) -> + display_report(Report); +display({F, A}) when is_list(F), is_list(A) -> + erlang:display_string(F ++ "\n"), + [begin + erlang:display_string("\t"), + erlang:display(Arg) + end || Arg <- A], + ok. + +display_report(Atom, A) when is_atom(Atom) -> + %% The widest atom seems to be 'supervisor_report' at 17. + ColumnWidth = 20, + AtomString = atom_to_list(Atom), + AtomLength = length(AtomString), + Padding = lists:duplicate(ColumnWidth - AtomLength, $\s), + erlang:display_string(AtomString ++ Padding), + display_report(A); +display_report(F, A) -> + erlang:display({F, A}). + +display_report([A, []]) -> + %% Special case for crash reports when process has no links + display_report(A); +display_report(A = [_|_]) -> + case lists:all(fun({Key,_Value}) -> is_atom(Key); (_) -> false end, A) of + true -> + erlang:display_string("\n"), + lists:foreach( + fun({Key, Value}) -> + erlang:display_string( + " " ++ + atom_to_list(Key) ++ + ": "), + erlang:display(Value) + end, A); + false -> + erlang:display(A) + end; +display_report(A) -> + erlang:display(A). diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl new file mode 100644 index 0000000000..cbc9db372c --- /dev/null +++ b/lib/kernel/src/logger_std_h.erl @@ -0,0 +1,799 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_std_h). + +-behaviour(gen_server). + +-include("logger.hrl"). +-include("logger_internal.hrl"). +-include("logger_h_common.hrl"). + +-include_lib("kernel/include/file.hrl"). + +%% API +-export([start_link/3, info/1, filesync/1, reset/1]). + +%% gen_server and proc_lib callbacks +-export([init/1, handle_call/3, handle_cast/2, handle_info/2, + terminate/2, code_change/3]). + +%% logger callbacks +-export([log/2, adding_handler/2, removing_handler/1, + changing_config/3, swap_buffer/2]). + +%%%=================================================================== +%%% API +%%%=================================================================== + +%%%----------------------------------------------------------------- +%%% Start a standard handler process and link to caller. +%%% This function is called by the kernel supervisor when this +%%% handler process gets added +-spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when + Name :: atom(), + Config :: logger:config(), + HandlerState :: map(), + Pid :: pid(), + Reason :: term(). + +start_link(Name, Config, HandlerState) -> + proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]). + +%%%----------------------------------------------------------------- +%%% +-spec filesync(Name) -> ok | {error,Reason} when + Name :: atom(), + Reason :: handler_busy | {badarg,term()}. + +filesync(Name) when is_atom(Name) -> + try + gen_server:call(Name, filesync, ?DEFAULT_CALL_TIMEOUT) + catch + _:{timeout,_} -> {error,handler_busy} + end; +filesync(Name) -> + {error,{badarg,{filesync,[Name]}}}. + +%%%----------------------------------------------------------------- +%%% +-spec info(Name) -> Info | {error,Reason} when + Name :: atom(), + Info :: term(), + Reason :: handler_busy | {badarg,term()}. + +info(Name) when is_atom(Name) -> + try + gen_server:call(Name, info, ?DEFAULT_CALL_TIMEOUT) + catch + _:{timeout,_} -> {error,handler_busy} + end; +info(Name) -> + {error,{badarg,{info,[Name]}}}. + +%%%----------------------------------------------------------------- +%%% +-spec reset(Name) -> ok | {error,Reason} when + Name :: atom(), + Reason :: handler_busy | {badarg,term()}. + +reset(Name) when is_atom(Name) -> + try + gen_server:call(Name, reset, ?DEFAULT_CALL_TIMEOUT) + catch + _:{timeout,_} -> {error,handler_busy} + end; +reset(Name) -> + {error,{badarg,{reset,[Name]}}}. 
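%% Illustrative sketch (not part of this patch): calling the handler API
%% defined above from a shell, assuming a handler process registered under
%% the name logger_std_h.
ok = logger_std_h:filesync(logger_std_h),
Info = logger_std_h:info(logger_std_h),
ok = logger_std_h:reset(logger_std_h).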
+ + +%%%=================================================================== +%%% logger callbacks +%%%=================================================================== + +%%%----------------------------------------------------------------- +%%% Handler being added +adding_handler(Name, Config) -> + case check_config(adding, Name, Config) of + {ok, Config1} -> + %% create initial handler state by merging defaults with config + HConfig = maps:get(?MODULE, Config1, #{}), + HState = maps:merge(get_init_state(), HConfig), + case logger_h_common:overload_levels_ok(HState) of + true -> + case start(Name, Config1, HState) of + ok -> + %% Make sure wait_for_buffer is not stored, so we + %% won't hang and wait for buffer on a restart + {ok, maps:remove(wait_for_buffer,Config1)}; + Error -> + Error + end; + false -> + #{toggle_sync_qlen := TSQL, + drop_new_reqs_qlen := DNRQL, + flush_reqs_qlen := FRQL} = HState, + {error,{invalid_levels,{TSQL,DNRQL,FRQL}}} + end; + Error -> + Error + end. + +%%%----------------------------------------------------------------- +%%% Updating handler config +changing_config(Name, + OldConfig=#{id:=Id, ?MODULE:=#{type:=Type}}, + NewConfig=#{id:=Id}) -> + MyConfig = maps:get(?MODULE, NewConfig, #{}), + case maps:get(type, MyConfig, Type) of + Type -> + MyConfig1 = MyConfig#{type=>Type}, + changing_config1(Name, OldConfig, + NewConfig#{?MODULE=>MyConfig1}); + _ -> + {error,{illegal_config_change,OldConfig,NewConfig}} + end; +changing_config(_Name, OldConfig, NewConfig) -> + {error,{illegal_config_change,OldConfig,NewConfig}}. + +changing_config1(Name, OldConfig, NewConfig) -> + case check_config(changing, Name, NewConfig) of + Result = {ok,NewConfig1} -> + try gen_server:call(Name, {change_config,OldConfig,NewConfig1}, + ?DEFAULT_CALL_TIMEOUT) of + ok -> Result; + HError -> HError + catch + _:{timeout,_} -> {error,handler_busy} + end; + Error -> + Error + end. + +check_config(adding, Name, Config0) -> + %% Merge in defaults on top level + Config = maps:merge(#{id => Name}, Config0), + %% Merge in defaults on handler level + MyConfig0 = maps:get(?MODULE, Config, #{}), + MyConfig = maps:merge(#{type => standard_io}, + MyConfig0), + case check_my_config(maps:to_list(MyConfig)) of + ok -> + {ok,Config#{?MODULE=>MyConfig}}; + Error -> + Error + end; +check_config(changing, _Name, Config) -> + MyConfig = maps:get(?MODULE, Config, #{}), + case check_my_config(maps:to_list(MyConfig)) of + ok -> {ok,Config}; + Error -> Error + end. + +check_my_config([{type,Type} | Config]) when Type == standard_io; + Type == standard_error -> + check_my_config(Config); +check_my_config([{type,{file,File}} | Config]) when is_list(File) -> + check_my_config(Config); +check_my_config([{type,{file,File,Modes}} | Config]) when is_list(File), + is_list(Modes) -> + check_my_config(Config); +check_my_config([Other | Config]) -> + case logger_h_common:check_common_config(Other) of + valid -> + check_my_config(Config); + invalid -> + {error,{invalid_config,?MODULE,Other}} + end; +check_my_config([]) -> + ok. + + +%%%----------------------------------------------------------------- +%%% Handler being removed +removing_handler(Name) -> + stop(Name). + +%%%----------------------------------------------------------------- +%%% Get buffer when swapping from simple handler +swap_buffer(Name,Buffer) -> + case whereis(Name) of + undefined -> + ok; + _ -> + Name ! {buffer,Buffer} + end. 
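%% Illustrative sketch (not part of this patch): handler-specific settings
%% go under the logger_std_h key and are validated by check_config/3 above.
%% The handler id my_file_h and the file name are placeholders; the modes
%% shown are the defaults used when only {file,File} is given.
ok = logger:add_handler(my_file_h, logger_std_h,
                        #{logger_std_h =>
                              #{type => {file,"./my_file_h.log",
                                         [raw,append,delayed_write]}}}).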
+ +%%%----------------------------------------------------------------- +%%% Log a string or report +-spec log(Log, Config) -> ok | dropped when + Log :: logger:log(), + Config :: logger:config(). + +log(Log,Config=#{id:=Name}) -> + %% if the handler has crashed, we must drop this request + %% and hope the handler restarts so we can try again + true = is_pid(whereis(Name)), + Bin = logger_h_common:log_to_binary(Log,Config), + logger_h_common:call_cast_or_drop(Name, Bin). + +%%%=================================================================== +%%% gen_server callbacks +%%%=================================================================== + +init([Name, Config, + State0 = #{type := Type, file_ctrl_sync_int := FileCtrlSyncInt}]) -> + register(Name, self()), + process_flag(trap_exit, true), + process_flag(message_queue_data, off_heap), + + ?init_test_hooks(), + ?start_observation(Name), + + case do_init(Name, Type) of + {ok,InitState} -> + catch ets:new(Name, [public, named_table]), + ?set_mode(Name, async), + State = maps:merge(State0, InitState), + T0 = ?timestamp(), + State1 = + ?merge_with_stats(State#{mode => async, + file_ctrl_sync => FileCtrlSyncInt, + last_qlen => 0, + last_log_ts => T0, + burst_win_ts => T0, + burst_msg_count => 0}), + proc_lib:init_ack({ok,self()}), + gen_server:cast(self(), {repeated_filesync,T0}), + enter_loop(Config, State1); + Error -> + logger_h_common:error_notify({init_handler,Name,Error}), + proc_lib:init_ack(Error) + end. + +do_init(Name, Std) when Std=:=standard_io; Std=:=standard_error -> + case open_log_file(Name, Std) of + {ok,FileCtrlPid} -> + {ok,#{id=>Name,type=>Std,file_ctrl_pid=>FileCtrlPid}}; + Error -> + Error + end; +do_init(Name, FileInfo) when is_tuple(FileInfo) -> + case open_log_file(Name, FileInfo) of + {ok,FileCtrlPid} -> + {ok,#{id=>Name,type=>FileInfo,file_ctrl_pid=>FileCtrlPid}}; + Error -> + Error + end. + +enter_loop(#{wait_for_buffer:=true}=Config,State) -> + State1 = + receive + {buffer,Buffer} -> + lists:foldl( + fun(Log,S) -> + Bin = logger_h_common:log_to_binary(Log,Config), + {_,S1} = do_log(Bin,cast,S), + S1 + end, + State, + Buffer) + end, + gen_server:enter_loop(?MODULE,[],State1); +enter_loop(_Config,State) -> + gen_server:enter_loop(?MODULE,[],State). + +%% This is the synchronous log request. 
+handle_call({log, Bin}, _From, State) -> + {Result,State1} = do_log(Bin, call, State), + %% Result == ok | dropped + {reply,Result, State1}; + +handle_call(filesync, _From, State = #{type := Type, + file_ctrl_pid := FileCtrlPid}) -> + if is_atom(Type) -> + {reply, ok, State}; + true -> + {reply, file_ctrl_filesync_sync(FileCtrlPid), State} + end; + +handle_call({change_config,_OldConfig,NewConfig}, _From, + State = #{filesync_repeat_interval := FSyncInt0, + last_log_ts := LastLogTS}) -> + HConfig = maps:get(?MODULE, NewConfig, #{}), + State1 = maps:merge(State, HConfig), + case logger_h_common:overload_levels_ok(State1) of + true -> + _ = + case maps:get(filesync_repeat_interval, HConfig, undefined) of + undefined -> + ok; + no_repeat -> + _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, + State, + undefined)); + FSyncInt0 -> + ok; + _FSyncInt1 -> + _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, + State, + undefined)), + gen_server:cast(self(), {repeated_filesync, + LastLogTS}) + end, + {reply, ok, State1}; + false -> + #{toggle_sync_qlen := TSQL, + drop_new_reqs_qlen := DNRQL, + flush_reqs_qlen := FRQL} = State1, + {reply, {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}, State} + end; + +handle_call(info, _From, State) -> + {reply, State, State}; + +handle_call(reset, _From, State) -> + State1 = ?merge_with_stats(State), + {reply, ok, State1#{last_qlen => 0, + last_log_ts => ?timestamp()}}; + +handle_call(stop, _From, State) -> + {stop, {shutdown,stopped}, ok, State}. + +%% This is the asynchronous log request. +handle_cast({log, Bin}, State) -> + {_,State1} = do_log(Bin, cast, State), + {noreply, State1}; + +%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this +%% clause gets called repeatedly by the handler. In order to +%% guarantee that a filesync *always* happens after the last log +%% request, the repeat operation must be active! +handle_cast({repeated_filesync,LastLogTS0}, + State = #{type := Type, + file_ctrl_pid := FileCtrlPid, + filesync_repeat_interval := FSyncInt, + last_log_ts := LastLogTS1}) -> + State1 = + if not is_atom(Type), is_integer(FSyncInt) -> + %% only do filesync if something has been + %% written since last time we checked + if LastLogTS1 == LastLogTS0 -> + ok; + true -> + file_ctrl_filesync_async(FileCtrlPid) + end, + {ok,TRef} = + timer:apply_after(FSyncInt, gen_server,cast, + [self(),{repeated_filesync,LastLogTS1}]), + State#{rep_sync_tref => TRef}; + true -> + State + end, + {noreply,State1}. + +handle_info({'EXIT',Pid,Why}, State = #{id := Name, type := FileInfo}) -> + case maps:get(file_ctrl_pid, State, undefined) of + Pid -> + %% file error, terminate handler + logger_h_common:handler_exit(Name, + {error,{write_failed,FileInfo,Why}}); + _Other -> + %% ignore EXIT + ok + end, + {noreply, State}; + +handle_info(_Info, State) -> + {noreply, State}. + +terminate(Reason, State = #{id:=Name, file_ctrl_pid:=FWPid, + type:=_FileInfo}) -> + _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State, + undefined)), + case is_process_alive(FWPid) of + true -> + unlink(FWPid), + _ = file_ctrl_stop(FWPid), + MRef = erlang:monitor(process, FWPid), + receive + {'DOWN',MRef,_,_,_} -> + ok + after + ?DEFAULT_CALL_TIMEOUT -> + exit(FWPid, kill) + end; + false -> + ok + end, + logger_h_common:stop_or_restart(Name, Reason, State). + +code_change(_OldVsn, State, _Extra) -> + {ok, State}. 
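%% Illustrative sketch (not part of this patch): updating the repeated
%% filesync interval at runtime takes the change_config path above. The
%% handler id my_std_h and the 5000 ms value are example placeholders;
%% no_repeat would instead cancel the repeated filesync.
ok = logger:set_handler_config(my_std_h, logger_std_h,
                               #{filesync_repeat_interval => 5000}).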
+ +%%%=================================================================== +%%% Internal functions +%%%=================================================================== + +%%%----------------------------------------------------------------- +%%% +get_init_state() -> + #{toggle_sync_qlen => ?TOGGLE_SYNC_QLEN, + drop_new_reqs_qlen => ?DROP_NEW_REQS_QLEN, + flush_reqs_qlen => ?FLUSH_REQS_QLEN, + enable_burst_limit => ?ENABLE_BURST_LIMIT, + burst_limit_size => ?BURST_LIMIT_SIZE, + burst_window_time => ?BURST_WINDOW_TIME, + enable_kill_overloaded => ?ENABLE_KILL_OVERLOADED, + handler_overloaded_qlen => ?HANDLER_OVERLOADED_QLEN, + handler_overloaded_mem => ?HANDLER_OVERLOADED_MEM, + handler_restart_after => ?HANDLER_RESTART_AFTER, + file_ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL, + filesync_ok_qlen => ?FILESYNC_OK_QLEN, + filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}. + +%%%----------------------------------------------------------------- +%%% Add a standard handler to the logger. +%%% This starts a dedicated handler process which should always +%%% exist if the handler is registered with logger (and should not +%%% exist if the handler is not registered). +%%% +%%% Handler specific config should be provided with a sub map associated +%%% with a key named the same as this module, e.g: +%%% +%%% Config = #{logger_std_h => #{toggle_sync_qlen => 50} +%%% +%%% The standard handler process is linked to logger_sup, which is +%%% part of the kernel application's supervision tree. +start(Name, Config, HandlerState) -> + LoggerStdH = + #{id => Name, + start => {?MODULE, start_link, [Name,Config,HandlerState]}, + restart => temporary, + shutdown => 2000, + type => worker, + modules => [?MODULE]}, + case supervisor:start_child(logger_sup, LoggerStdH) of + {ok,_Pid} -> + ok; + Error -> + Error + end. + +%%%----------------------------------------------------------------- +%%% Stop and remove the handler. +stop(Name) -> + case whereis(Name) of + undefined -> + ok; + _ -> + %% We don't want to do supervisor:terminate_child here + %% since we need to distinguish this explicit stop from a + %% system termination in order to avoid circular attempts + %% at removing the handler (implying deadlocks and + %% timeouts). + _ = gen_server:call(Name,stop), + _ = supervisor:delete_child(logger_sup, Name), + ok + end. + +%%%----------------------------------------------------------------- +%%% Logging and overload control. +-define(update_file_ctrl_sync(C, Interval), + if C == 0 -> Interval; + true -> C-1 end). + +%% check for overload between every request (and set Mode to async, +%% sync or drop accordingly), but never flush the whole mailbox +%% before LogWindowSize requests have been handled +do_log(Bin, CallOrCast, State = #{id:=Name}) -> + T1 = ?timestamp(), + + %% check if the handler is getting overloaded, or if it's + %% recovering from overload (the check must be done for each + %% request to react quickly to large bursts of requests and + %% to ensure that the handler can never end up in drop mode + %% with an empty mailbox, which would stop operation) + {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State), + + %% kill the handler if it can't keep up with the load + logger_h_common:kill_if_choked(Name, QLen, Mem, State), + + if Mode1 == flush -> + flush(Name, QLen, T1, State1); + true -> + write(Name, Mode1, T1, Bin, CallOrCast, State1) + end. 
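%% Illustrative sketch (not part of this patch): the overload-protection
%% defaults from get_init_state/0 can be overridden per handler when it is
%% added. The values below are examples only; adding_handler/2 returns
%% {error,{invalid_levels,...}} when logger_h_common:overload_levels_ok/1
%% rejects the combination (presumably requiring
%% toggle_sync_qlen =< drop_new_reqs_qlen =< flush_reqs_qlen).
ok = logger:add_handler(my_std_h, logger_std_h,
                        #{logger_std_h => #{toggle_sync_qlen => 20,
                                            drop_new_reqs_qlen => 200,
                                            flush_reqs_qlen => 1000}}).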
+ +%% this clause is called by do_log/3 after an overload check +%% has been performed, where QLen > FlushQLen +flush(Name, _QLen0, T1, State=#{last_log_ts := _T0}) -> + %% flush messages in the mailbox (a limited number in + %% order to not cause long delays) + _NewFlushed = logger_h_common:flush_log_requests(?FLUSH_MAX_N), + + %% because of the receive loop when flushing messages, the + %% handler will be scheduled out often and the mailbox could + %% grow very large, so we'd better check the queue again here + {_,_QLen1} = process_info(self(), message_queue_len), + ?observe(Name,{max_qlen,_QLen1}), + + %% Add 1 for the current log request + ?observe(Name,{flushed,_NewFlushed+1}), + + State1 = ?update_max_time(?diff_time(T1,_T0),State), + {dropped,?update_other(flushed,FLUSHED,_NewFlushed, + State1#{mode => ?set_mode(Name,async), + last_qlen => 0, + last_log_ts => T1})}. + +%% this clause is called to write to file +write(Name, Mode, T1, Bin, _CallOrCast, + State = #{file_ctrl_pid := FileCtrlPid, + file_ctrl_sync := FileCtrlSync, + last_qlen := LastQLen, + last_log_ts := T0, + file_ctrl_sync_int := FileCtrlSyncInt}) -> + %% check if we need to limit the number of writes + %% during a burst of log requests + {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State), + + %% only send a synhrounous request to the file controller process + %% every FileCtrlSyncInt time, to give the handler time between + %% file writes so it can keep up with incoming messages + {Result,LastQLen1} = + if DoWrite, FileCtrlSync == 0 -> + ?observe(Name,{_CallOrCast,1}), + file_write_sync(FileCtrlPid, Bin, false), + {ok,element(2, process_info(self(), message_queue_len))}; + DoWrite -> + ?observe(Name,{_CallOrCast,1}), + file_write_async(FileCtrlPid, Bin), + {ok,LastQLen}; + not DoWrite -> + ?observe(Name,{flushed,1}), + {dropped,LastQLen} + end, + + %% Check if the time since the previous log request is long enough - + %% and the queue length small enough - to assume the mailbox has + %% been emptied, and if so, do filesync operation and reset mode to + %% async. Note that this is the best we can do to detect an idle + %% handler without setting a timer after each log call/cast. If the + %% time between two consecutive log requests is fast and no new + %% request comes in after the last one, idle state won't be detected! + Time = ?diff_time(T1,T0), + {Mode1,BurstMsgCount1} = + if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso + (Time > ?IDLE_DETECT_TIME_USEC) -> + %% do filesync if necessary + case maps:get(type, State) of + Std when is_atom(Std) -> + ok; + _File -> + file_ctrl_filesync_async(FileCtrlPid) + end, + {?change_mode(Name, Mode, async),0}; + true -> + {Mode,BurstMsgCount} + end, + State1 = + ?update_calls_or_casts(_CallOrCast,1,State), + State2 = + ?update_max_time(Time, + State1#{mode => Mode1, + last_qlen := LastQLen1, + last_log_ts => T1, + burst_win_ts => BurstWinT, + burst_msg_count => BurstMsgCount1, + file_ctrl_sync => + ?update_file_ctrl_sync(FileCtrlSync, + FileCtrlSyncInt)}), + {Result,State2}. + +open_log_file(HandlerName, FileInfo) -> + case file_ctrl_start(HandlerName, FileInfo) of + OK = {ok,_FileCtrlPid} -> OK; + Error -> Error + end. 
+ +do_open_log_file({file,File}) -> + do_open_log_file({file,File,[raw,append,delayed_write]}); + +do_open_log_file({file,File,[]}) -> + do_open_log_file({file,File,[raw,append,delayed_write]}); + +do_open_log_file({file,File,Modes}) -> + try + case filelib:ensure_dir(File) of + ok -> + file:open(File, Modes); + Error -> + Error + end + catch + _:Reason -> {error,Reason} + end. + +close_log_file(Std) when Std == standard_io; Std == standard_error -> + ok; +close_log_file(Fd) -> + _ = file:datasync(Fd), + _ = file:close(Fd). + +%%%----------------------------------------------------------------- +%%% File control process + +file_ctrl_start(HandlerName, FileInfo) -> + Starter = self(), + FileCtrlPid = + spawn_link(fun() -> + file_ctrl_init(HandlerName, FileInfo, Starter) + end), + receive + {FileCtrlPid,ok} -> + {ok,FileCtrlPid}; + {FileCtrlPid,Error} -> + Error + after + ?DEFAULT_CALL_TIMEOUT -> + {error,file_ctrl_process_not_started} + end. + +file_ctrl_stop(Pid) -> + Pid ! stop. + +file_write_async(Pid, Bin) -> + Pid ! {log,Bin}, + ok. + +file_write_sync(Pid, Bin, FileSync) -> + case file_ctrl_call(Pid, {log,self(),Bin,FileSync}) of + {error,Reason} -> + {error,{write_failed,Bin,Reason}}; + Result -> + Result + end. + +file_ctrl_filesync_async(Pid) -> + Pid ! filesync, + ok. + +file_ctrl_filesync_sync(Pid) -> + file_ctrl_call(Pid, {filesync,self()}). + +file_ctrl_call(Pid, Msg) -> + MRef = monitor(process, Pid), + Pid ! {Msg,MRef}, + receive + {MRef,Result} -> + demonitor(MRef, [flush]), + Result; + {'DOWN',MRef,_Type,_Object,Reason} -> + {error,Reason} + after + ?DEFAULT_CALL_TIMEOUT -> + {error,{no_response,Pid}} + end. + +file_ctrl_init(HandlerName, FileInfo, Starter) when is_tuple(FileInfo) -> + process_flag(message_queue_data, off_heap), + FileName = element(2, FileInfo), + case do_open_log_file(FileInfo) of + {ok,Fd} -> + Starter ! {self(),ok}, + file_ctrl_loop(Fd, file, FileName, false, ok, ok, HandlerName); + {error,Reason} -> + Starter ! {self(),{error,{open_failed,FileName,Reason}}} + end; +file_ctrl_init(HandlerName, StdDev, Starter) -> + Starter ! {self(),ok}, + file_ctrl_loop(StdDev, standard_io, StdDev, false, ok, ok, HandlerName). + +file_ctrl_loop(Fd, Type, DevName, Synced, + PrevWriteResult, PrevSyncResult, HandlerName) -> + receive + %% asynchronous request + {log,Bin} -> + Result = if Type == file -> + write_to_dev(Fd, Bin, DevName, + PrevWriteResult, HandlerName); + true -> + io:put_chars(Fd, Bin) + end, + file_ctrl_loop(Fd, Type, DevName, false, + Result, PrevSyncResult, HandlerName); + + %% synchronous request + {{log,From,Bin,FileSync},MRef} -> + if Type == file -> + %% check that file hasn't been deleted + CheckFile = + fun() -> {ok,_} = file:read_file_info(DevName) end, + spawn_link(CheckFile), + WResult = write_to_dev(Fd, Bin, DevName, + PrevWriteResult, HandlerName), + {Synced1,SResult} = + if not FileSync -> + {false,PrevSyncResult}; + true -> + case sync_dev(Fd, DevName, + PrevSyncResult, HandlerName) of + ok -> {true,ok}; + Error -> {false,Error} + end + end, + From ! {MRef,ok}, + file_ctrl_loop(Fd, Type, DevName, Synced1, + WResult, SResult, HandlerName); + true -> + _ = io:put_chars(Fd, Bin), + From ! 
{MRef,ok}, + file_ctrl_loop(Fd, Type, DevName, false, + ok, PrevSyncResult, HandlerName) + end; + + filesync when not Synced -> + Result = sync_dev(Fd, DevName, PrevSyncResult, HandlerName), + file_ctrl_loop(Fd, Type, DevName, true, + PrevWriteResult, Result, HandlerName); + + filesync -> + file_ctrl_loop(Fd, Type, DevName, true, + PrevWriteResult, PrevSyncResult, HandlerName); + + {{filesync,From},MRef} -> + Result = if not Synced -> + sync_dev(Fd, DevName, PrevSyncResult, HandlerName); + true -> + ok + end, + From ! {MRef,ok}, + file_ctrl_loop(Fd, Type, DevName, true, + PrevWriteResult, Result, HandlerName); + + stop -> + _ = close_log_file(Fd), + stopped + end. + +write_to_dev(Fd, Bin, FileName, PrevWriteResult, HandlerName) -> + case ?file_write(Fd, Bin) of + ok -> + ok; + PrevWriteResult -> + %% don't report same error twice + PrevWriteResult; + Error -> + logger_h_common:error_notify({HandlerName,write,FileName,Error}), + Error + end. + +sync_dev(Fd, DevName, PrevSyncResult, HandlerName) -> + case ?file_datasync(Fd) of + ok -> + ok; + PrevSyncResult -> + %% don't report same error twice + PrevSyncResult; + Error -> + logger_h_common:error_notify({HandlerName,filesync,DevName,Error}), + Error + end. + diff --git a/lib/kernel/src/logger_sup.erl b/lib/kernel/src/logger_sup.erl new file mode 100644 index 0000000000..4e4de94d5c --- /dev/null +++ b/lib/kernel/src/logger_sup.erl @@ -0,0 +1,53 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2017. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_sup). + +-behaviour(supervisor). + +%% API +-export([start_link/0]). + +%% Supervisor callbacks +-export([init/1]). + +-define(SERVER, ?MODULE). + +%%%=================================================================== +%%% API functions +%%%=================================================================== + +start_link() -> + supervisor:start_link({local, ?SERVER}, ?MODULE, []). + +%%%=================================================================== +%%% Supervisor callbacks +%%%=================================================================== + +init([]) -> + + SupFlags = #{strategy => one_for_one, + intensity => 1, + period => 5}, + + {ok, {SupFlags, []}}. 
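%% Illustrative sketch (not part of this patch): handler processes are
%% attached to this supervisor dynamically rather than listed as static
%% children; logger_std_h:start/3 builds a child spec roughly like the one
%% below (my_handler, Config and HState are placeholders).
start_handler_sketch(Config, HState) ->
    {ok,_Pid} =
        supervisor:start_child(
          logger_sup,
          #{id => my_handler,
            start => {logger_std_h, start_link, [my_handler, Config, HState]},
            restart => temporary,
            shutdown => 2000,
            type => worker,
            modules => [logger_std_h]}).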
+ +%%%=================================================================== +%%% Internal functions +%%%=================================================================== diff --git a/lib/kernel/test/Makefile b/lib/kernel/test/Makefile index 03b6355056..8599a3d814 100644 --- a/lib/kernel/test/Makefile +++ b/lib/kernel/test/Makefile @@ -70,6 +70,15 @@ MODULES= \ interactive_shell_SUITE \ init_SUITE \ kernel_config_SUITE \ + logger_SUITE \ + logger_bench_SUITE \ + logger_disk_log_h_SUITE \ + logger_env_var_SUITE \ + logger_filters_SUITE \ + logger_formatter_SUITE \ + logger_legacy_SUITE \ + logger_simple_SUITE \ + logger_std_h_SUITE \ os_SUITE \ pg2_SUITE \ seq_trace_SUITE \ @@ -102,7 +111,7 @@ TARGET_FILES= $(MODULES:%=$(EBIN)/%.$(EMULATOR)) INSTALL_PROGS= $(TARGET_FILES) EMAKEFILE=Emakefile -COVERFILE=kernel.cover +COVERFILE=kernel.cover logger.cover # ---------------------------------------------------- # Release directory specification @@ -149,7 +158,8 @@ release_tests_spec: make_emakefile $(INSTALL_DIR) "$(RELSYSDIR)" $(INSTALL_DATA) $(ERL_FILES) "$(RELSYSDIR)" $(INSTALL_DATA) $(APP_FILES) "$(RELSYSDIR)" - $(INSTALL_DATA) kernel.spec kernel_smoke.spec kernel_bench.spec \ + $(INSTALL_DATA) \ + kernel.spec kernel_smoke.spec kernel_bench.spec logger.spec \ $(EMAKEFILE) $(COVERFILE) "$(RELSYSDIR)" chmod -R u+w "$(RELSYSDIR)" @tar cf - *_SUITE_data | (cd "$(RELSYSDIR)"; tar xf -) diff --git a/lib/kernel/test/kernel.spec b/lib/kernel/test/kernel.spec index 62afc9f97b..86d2155828 100644 --- a/lib/kernel/test/kernel.spec +++ b/lib/kernel/test/kernel.spec @@ -2,3 +2,4 @@ {config, "../test_server/ts.unix.config"}. {suites,"../kernel_test", all}. +{skip_suites,"../kernel_test",[logger_bench_SUITE],"Not ready"}. diff --git a/lib/kernel/test/logger.cover b/lib/kernel/test/logger.cover new file mode 100644 index 0000000000..b30bcfe920 --- /dev/null +++ b/lib/kernel/test/logger.cover @@ -0,0 +1,14 @@ +%% -*- erlang -*- +{incl_mods,[error_logger, + logger, + logger_backend, + logger_config, + logger_disk_log_h, + logger_h_common, + logger_filters, + logger_formatter, + logger_server, + logger_simple, + logger_std_h, + logger_sup]}. + diff --git a/lib/kernel/test/logger.spec b/lib/kernel/test/logger.spec new file mode 100644 index 0000000000..cd76a754a4 --- /dev/null +++ b/lib/kernel/test/logger.spec @@ -0,0 +1,11 @@ +%% -*-erlang-*- +{suites,"../kernel_test", [error_logger_SUITE, + error_logger_warn_SUITE, + logger_SUITE, + logger_disk_log_h_SUITE, + logger_env_var_SUITE, + logger_filters_SUITE, + logger_formatter_SUITE, + logger_legacy_SUITE, + logger_simple_SUITE, + logger_std_h_SUITE]}. diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl new file mode 100644 index 0000000000..0edce3e34c --- /dev/null +++ b/lib/kernel/test/logger_SUITE.erl @@ -0,0 +1,828 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_SUITE). + +-compile(export_all). 
+ +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}). +-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]). + +-define(MY_LOC(N),#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY}, + file=>?FILE, line=>?LINE-N}). + +-define(TRY(X), my_try(fun() -> X end)). + + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + case logger:get_handler_config(logger_std_h) of + {ok,StdH} -> + ok = logger:remove_handler(logger_std_h), + [{logger_std_h,StdH}|Config]; + _ -> + Config + end. + +end_per_suite(Config) -> + case ?config(logger_std_h,Config) of + {HMod,HConfig} -> + ok = logger:add_handler(logger_std_h,HMod,HConfig); + _ -> + ok + end. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + {ok,LC} = logger:get_logger_config(), + [{logger_config,LC}|Config]. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [start_stop, + add_remove_handler, + multiple_handlers, + add_remove_filter, + change_config, + set_formatter, + log_all_levels_api, + macros, + set_level, + set_level_module, + cache_level_module, + format_report, + filter_failed, + handler_failed, + config_sanity_check, + log_failed, + emulator, + via_logger_process, + other_node, + compare_levels, + process_metadata]. + +start_stop(_Config) -> + S = whereis(logger), + true = is_pid(S), + ok. + +add_remove_handler(_Config) -> + register(callback_receiver,self()), + {ok,#{handlers:=Hs0}} = logger:get_logger_config(), + {error,{not_found,h1}} = logger:get_handler_config(h1), + ok = logger:add_handler(h1,?MODULE,#{}), + [add] = test_server:messages_get(), + {ok,#{handlers:=Hs}} = logger:get_logger_config(), + [h1|Hs0] = Hs, + {ok,{?MODULE,#{level:=info,filters:=[],filter_default:=log}}} = % defaults + logger:get_handler_config(h1), + ok = logger:set_handler_config(h1,filter_default,stop), + [changing_config] = test_server:messages_get(), + ?LOG_INFO("hello",[]), + ok = check_no_log(), + ok = logger:set_handler_config(h1,filter_default,log), + [changing_config] = test_server:messages_get(), + {ok,{?MODULE,#{filter_default:=log}}} = logger:get_handler_config(h1), + ?LOG_INFO("hello",[]), + ok = check_logged(info,"hello",[],?MY_LOC(1)), + ok = logger:remove_handler(h1), + [remove] = test_server:messages_get(), + {ok,#{handlers:=Hs0}} = logger:get_logger_config(), + {error,{not_found,h1}} = logger:get_handler_config(h1), + {error,{not_found,h1}} = logger:remove_handler(h1), + logger:info("hello",[]), + ok = check_no_log(), + ok. + +add_remove_handler(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +multiple_handlers(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), + ok = logger:add_handler(h2,?MODULE,#{level=>error,filter_default=>log}), + ?LOG_ERROR("hello",[]), + ok = check_logged(error,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + ?LOG_INFO("hello",[]), + ok = check_logged(info,"hello",[],?MY_LOC(1)), + ok = check_no_log(), + ok. + +multiple_handlers(cleanup,_Config) -> + logger:remove_handler(h1), + logger:remove_handler(h2), + ok. 
+ +add_remove_filter(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), + LF = {fun(Log,_) -> Log#{level=>error} end, []}, + ok = logger:add_logger_filter(lf,LF), + {error,{already_exist,lf}} = logger:add_logger_filter(lf,LF), + {error,{already_exist,lf}} = logger:add_logger_filter(lf,{fun(Log,_) -> + Log + end, []}), + ?LOG_INFO("hello",[]), + ok = check_logged(error,"hello",[],?MY_LOC(1)), + ok = check_no_log(), + + ok = logger:add_handler(h2,?MODULE,#{level=>info,filter_default=>log}), + HF = {fun(#{level:=error}=Log,_) -> + Log#{level=>mylevel}; + (_,_) -> + ignore + end, + []}, + ok = logger:add_handler_filter(h1,hf,HF), + {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,HF), + {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,{fun(Log,_) -> + Log + end, []}), + ?LOG_INFO("hello",[]), + ok = check_logged(mylevel,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + + ok = logger:remove_logger_filter(lf), + {error,{not_found,lf}} = logger:remove_logger_filter(lf), + + ?LOG_INFO("hello",[]), + ok = check_logged(info,"hello",[],?MY_LOC(1)), + ok = check_logged(info,"hello",[],?MY_LOC(2)), + + ?LOG_ERROR("hello",[]), + ok = check_logged(mylevel,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + + ok = logger:remove_handler_filter(h1,hf), + {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf), + ?LOG_INFO("hello",[]), + ok = check_logged(info,"hello",[],?MY_LOC(1)), + ok = check_logged(info,"hello",[],?MY_LOC(2)), + + ?LOG_ERROR("hello",[]), + ok = check_logged(error,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + ok. + +add_remove_filter(cleanup,_Config) -> + logger:remove_logger_filter(lf), + logger:remove_handler(h1), + logger:remove_handler(h2), + ok. + +change_config(_Config) -> + %% Overwrite handler config - check that defaults are added + ok = logger:add_handler(h1,?MODULE,#{level=>debug,custom=>custom}), + {ok,{?MODULE,#{level:=debug,filter_default:=log,custom:=custom}}} = + logger:get_handler_config(h1), + register(callback_receiver,self()), + ok = logger:set_handler_config(h1,#{filter_default=>stop}), + [changing_config] = test_server:messages_get(), + {ok,{?MODULE,#{level:=info,filter_default:=stop}=C2}} = + logger:get_handler_config(h1), + false = maps:is_key(custom,C2), + {error,fail} = logger:set_handler_config(h1,#{fail=>true}), + {error,{attempting_syncronous_call_to_self,_}} = + logger:set_handler_config( + h1,#{call=>fun() -> logger:set_module_level(?MODULE,debug) end}), + {ok,{?MODULE,C2}} = logger:get_handler_config(h1), + + %% Change one key only + {error,fail} = logger:set_handler_config(h1,fail,true), + ok = logger:set_handler_config(h1,custom,custom), + [changing_config] = test_server:messages_get(), + {ok,{?MODULE,#{custom:=custom}=C3}} = logger:get_handler_config(h1), + C2 = maps:remove(custom,C3), + + %% Overwrite logger config - check that defaults are added + {ok,LConfig} = logger:get_logger_config(), + ok = logger:set_logger_config(#{filter_default=>stop}), + {ok,#{level:=info,filters:=[],handlers:=[],filter_default:=stop}=LC1} = + logger:get_logger_config(), + 4 = maps:size(LC1), + + %% Change one key only + ok = logger:set_logger_config(handlers,[h1]), + {ok,#{level:=info,filters:=[],handlers:=[h1],filter_default:=stop}} = + logger:get_logger_config(), + + %% Cleanup + ok = logger:set_logger_config(LConfig), + [] = test_server:messages_get(), + + ok. 
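%% Illustrative sketch (not part of this patch): the logger-level config API
%% exercised by change_config/1 above. Setting a single key keeps the rest
%% of the configuration, while set_logger_config/1 replaces it wholesale
%% (with defaults filled in), which is why the test saves and restores the
%% full config.
{ok,LC} = logger:get_logger_config(),
ok = logger:set_logger_config(level, debug),
ok = logger:set_logger_config(LC).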
+ +change_config(cleanup,Config) -> + logger:remove_handler(h1), + LC = ?config(logger_config,Config), + logger:set_logger_config(LC), + ok. + +set_formatter(_Config) -> + {error,{not_found,h1}}=logger:set_handler_config(h1,formatter,{?MODULE,[]}), + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), + ok = logger:set_handler_config(h1,formatter,{?MODULE,[]}), + logger:info("hello",[]), + receive + {_Log,#{formatter:={?MODULE,[]}}} -> + ok + after 500 -> + ct:fail({timeout,no_log,process_info(self(),messages)}) + end, + ok. + +set_formatter(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +log_all_levels_api(_Config) -> + ok = logger:set_logger_config(level,debug), + ok = logger:add_handler(h1,?MODULE,#{level=>debug,filter_default=>log}), + test_api(emergency), + test_api(alert), + test_api(critical), + test_api(error), + test_api(warning), + test_api(notice), + test_api(info), + test_api(debug), + test_log_function(emergency), + ok. + +log_all_levels_api(cleanup,_Config) -> + logger:remove_handler(h1), + logger:set_logger_config(level,info), + ok. + +macros(_Config) -> + ok = logger:set_module_level(?MODULE,debug), + ok = logger:add_handler(h1,?MODULE,#{level=>debug,filter_default=>log}), + test_macros(emergency), + ok. + +macros(cleanup,_Config) -> + logger:remove_handler(h1), + logger:reset_module_level(?MODULE), + ok. + +set_level(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>debug,filter_default=>log}), + logger:debug(?map_rep), + ok = check_no_log(), + logger:info(M1=?map_rep), + ok = check_logged(info,M1,#{}), + ok = logger:set_logger_config(level,debug), + logger:debug(M2=?map_rep), + ok = check_logged(debug,M2,#{}), + ok. + +set_level(cleanup,_Config) -> + logger:remove_handler(h1), + logger:set_logger_config(level,info), + ok. + +set_level_module(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), + {error,{invalid_level,bad}} = logger:set_module_level(?MODULE,bad), + {error,{not_a_module,{bad}}} = logger:set_module_level({bad},warning), + ok = logger:set_module_level(?MODULE,warning), + logger:info(?map_rep,?MY_LOC(0)), + ok = check_no_log(), + logger:warning(M1=?map_rep,?MY_LOC(0)), + ok = check_logged(warning,M1,?MY_LOC(1)), + ok = logger:set_module_level(?MODULE,info), + logger:info(M2=?map_rep,?MY_LOC(0)), + ok = check_logged(info,M2,?MY_LOC(1)), + + {error,{not_a_module,{bad}}} = logger:reset_module_level({bad}), + ok = logger:reset_module_level(?MODULE), + + ok. + +set_level_module(cleanup,_Config) -> + logger:remove_handler(h1), + logger:reset_module_level(?MODULE), + ok. + +cache_level_module(_Config) -> + ok = logger:reset_module_level(?MODULE), + [] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config? + ?LOG_INFO(?map_rep), + %% Caching is done asynchronously, so wait a bit for the update + timer:sleep(100), + [_] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config? + ok = logger:reset_module_level(?MODULE), + [] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config? + ok. + +cache_level_module(cleanup,_Config) -> + logger:reset_module_level(?MODULE), + ok. 
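%% Illustrative sketch (not part of this patch): the per-module level API
%% exercised by set_level_module/1 and cache_level_module/1 above; my_mod
%% is a placeholder module name.
ok = logger:set_module_level(my_mod, warning),
ok = logger:reset_module_level(my_mod).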
+ +format_report(_Config) -> + {"~ts",["string"]} = logger:format_report("string"), + {"~tp",[term]} = logger:format_report(term), + {"~tp",[[]]} = logger:format_report([]), + {" ~tp: ~tp",[key,value]} = logger:format_report([{key,value}]), + KeyVals = [{key1,value1},{key2,"value2"},{key3,[]}], + KeyValRes = + {" ~tp: ~tp\n ~tp: ~ts\n ~tp: ~tp", + [key1,value1,key2,"value2",key3,[]]} = + logger:format_report(KeyVals), + KeyValRes = logger:format_report(maps:from_list(KeyVals)), + KeyValRes = logger:format_otp_report(#{label=>{?MODULE,test},report=>KeyVals}), + {" ~tp: ~tp\n ~tp: ~tp", + [label,{?MODULE,test},report,KeyVals]} = + logger:format_report(#{label=>{?MODULE,test},report=>KeyVals}), + + {" ~tp: ~tp\n ~tp",[key1,value1,term]} = + logger:format_report([{key1,value1},term]), + + {" ~tp: ~tp\n ~tp",[key1,value1,[]]} = + logger:format_report([{key1,value1},[]]), + + {"~tp",[[]]} = logger:format_report([[],[],[]]), + + ok. + +filter_failed(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), + + %% Logger filters + {error,{invalid_filter,_}} = + logger:add_logger_filter(lf,{fun(_) -> ok end,args}), + ok = logger:add_logger_filter(lf,{fun(_,_) -> a=b end,args}), + {ok,#{filters:=[_]}} = logger:get_logger_config(), + ok = logger:info(M1=?map_rep), + ok = check_logged(info,M1,#{}), + {error,{not_found,lf}} = logger:remove_logger_filter(lf), + + ok = logger:add_logger_filter(lf,{fun(_,_) -> faulty_return end,args}), + {ok,#{filters:=[_]}} = logger:get_logger_config(), + ok = logger:info(M2=?map_rep), + ok = check_logged(info,M2,#{}), + {error,{not_found,lf}} = logger:remove_logger_filter(lf), + + %% Handler filters + {error,{not_found,h0}} = + logger:add_handler_filter(h0,hf,{fun(_,_) -> ignore end,args}), + {error,{not_found,h0}} = logger:remove_handler_filter(h0,hf), + {error,{invalid_filter,_}} = + logger:add_handler_filter(h1,hf,{fun(_) -> ok end,args}), + ok = logger:add_handler_filter(h1,hf,{fun(_,_) -> a=b end,args}), + {ok,{?MODULE,#{filters:=[_]}}} = logger:get_handler_config(h1), + ok = logger:info(M3=?map_rep), + ok = check_logged(info,M3,#{}), + {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf), + + ok = logger:add_handler_filter(h1,hf,{fun(_,_) -> faulty_return end,args}), + {ok,{?MODULE,#{filters:=[_]}}} = logger:get_handler_config(h1), + ok = logger:info(M4=?map_rep), + ok = check_logged(info,M4,#{}), + {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf), + + ok. + +filter_failed(cleanup,_Config) -> + logger:remove_handler(h1), + ok. 
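%% Illustrative sketch (not part of this patch): a filter is a {Fun/2,Args}
%% pair as exercised by add_remove_filter/1 and filter_failed/1 above; the
%% fun may return a (possibly modified) log event or 'ignore' to pass the
%% decision on to the next filter.
UpLevel = {fun(#{level:=info}=Log,_Args) -> Log#{level=>notice};
              (_Log,_Args) -> ignore
           end, []},
ok = logger:add_logger_filter(up_level, UpLevel),
ok = logger:remove_logger_filter(up_level).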
+ +handler_failed(_Config) -> + {error,{invalid_id,1}} = logger:add_handler(1,?MODULE,#{}), + {error,{invalid_module,"nomodule"}} = logger:add_handler(h1,"nomodule",#{}), + {error,{invalid_handler_config,bad}} = logger:add_handler(h1,?MODULE,bad), + {error,{invalid_filters,false}} = + logger:add_handler(h1,?MODULE,#{filters=>false}), + {error,{invalid_filter_default,true}} = + logger:add_handler(h1,?MODULE,#{filter_default=>true}), + {error,{invalid_formatter,[]}} = + logger:add_handler(h1,?MODULE,#{formatter=>[]}), + ok = logger:add_handler(h1,nomodule,#{filter_default=>log}), + logger:info(?map_rep), + check_no_log(), + #{logger:=#{handlers:=Ids1}, + handlers:=H1} = logger:i(), + false = lists:member(h1,Ids1), + false = lists:keymember(h1,1,H1), + {error,{not_found,h1}} = logger:remove_handler(h1), + + ok = logger:add_handler(h2,?MODULE,#{filter_default=>log,crash=>true}), + {error,{already_exist,h2}} = logger:add_handler(h2,othermodule,#{}), + + logger:info(?map_rep), + check_no_log(), + #{logger:=#{handlers:=Ids2}, + handlers:=H2} = logger:i(), + false = lists:member(h2,Ids2), + false = lists:keymember(h2,1,H2), + {error,{not_found,h2}} = logger:remove_handler(h2), + + ok. + +handler_failed(cleanup,_Config) -> + logger:remove_handler(h1), + logger:remove_handler(h2), + ok. + +config_sanity_check(_Config) -> + %% Logger config + {error,{invalid_filter_default,bad}} = + logger:set_logger_config(filter_default,bad), + {error,{invalid_level,bad}} = logger:set_logger_config(level,bad), + {error,{invalid_handlers,bad}} = logger:set_logger_config(handlers,bad), + {error,{invalid_id,{bad,bad}}} = + logger:set_logger_config(handlers,[{bad,bad}]), + {error,{invalid_id,"bad"}} = logger:set_logger_config(handlers,["bad"]), + {error,{invalid_filters,bad}} = logger:set_logger_config(filters,bad), + {error,{invalid_filter,bad}} = logger:set_logger_config(filters,[bad]), + {error,{invalid_filter,{_,_}}} = + logger:set_logger_config(filters,[{id,bad}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_logger_config(filters,[{id,{bad,args}}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_logger_config(filters,[{id,{fun() -> ok end,args}}]), + {error,{invalid_logger_config,{bad,bad}}} = + logger:set_logger_config(bad,bad), + + %% Handler config + {error,{not_found,h1}} = logger:set_handler_config(h1,a,b), + ok = logger:add_handler(h1,?MODULE,#{}), + {error,{invalid_filter_default,bad}} = + logger:set_handler_config(h1,filter_default,bad), + {error,{invalid_level,bad}} = logger:set_handler_config(h1,level,bad), + {error,{invalid_filters,bad}} = logger:set_handler_config(h1,filters,bad), + {error,{invalid_filter,bad}} = logger:set_handler_config(h1,filters,[bad]), + {error,{invalid_filter,{_,_}}} = + logger:set_handler_config(h1,filters,[{id,bad}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_handler_config(h1,filters,[{id,{bad,args}}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_handler_config(h1,filters,[{id,{fun() -> ok end,args}}]), + {error,{invalid_formatter,bad}} = + logger:set_handler_config(h1,formatter,bad), + {error,{invalid_module,{bad}}} = + logger:set_handler_config(h1,formatter,{{bad},cfg}), + {error,{invalid_formatter_config,bad}} = + logger:set_handler_config(h1,formatter,{logger_formatter,bad}), + {error,{invalid_formatter_config,{bad,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter,#{bad=>bad}}), + {error,{invalid_formatter_config,{template,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{template=>bad}}), + 
{error,{invalid_formatter_template,[1]}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{template=>[1]}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{template=>[]}}), + {error,{invalid_formatter_config,{single_line,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{single_line=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{single_line=>true}}), + {error,{invalid_formatter_config,{legacy_header,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{legacy_header=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{legacy_header=>true}}), + ok = logger:set_handler_config(h1,custom,custom), + ok. + +config_sanity_check(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +log_failed(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), + {error,function_clause} = ?TRY(logger:log(bad,?map_rep)), + {error,function_clause} = ?TRY(logger:log(info,?map_rep,bad)), + {error,function_clause} = ?TRY(logger:log(info,fun() -> ?map_rep end,bad)), + {error,function_clause} = ?TRY(logger:log(info,fun() -> ?map_rep end,bad,#{})), + {error,function_clause} = ?TRY(logger:log(info,bad,bad,bad)), + {error,function_clause} = ?TRY(logger:log(info,bad,bad,#{})), + check_no_log(), + ok = logger:log(info,M1=?str,#{}), + check_logged(info,M1,#{}), + ok = logger:log(info,M2=?map_rep,#{}), + check_logged(info,M2,#{}), + ok = logger:log(info,M3=?keyval_rep,#{}), + check_logged(info,M3,#{}), + + %% Should we check report input more thoroughly? + ok = logger:log(info,M4=?keyval_rep++[other,stuff,in,list],#{}), + check_logged(info,M4,#{}), + + %% This might break a handler since it is assumed to be a format + %% string and args, so it depends how the handler protects itself + %% against something like io_lib:format("ok","ok") + ok = logger:log(info,"ok","ok",#{}), + check_logged(info,"ok","ok",#{}), + + ok. + +log_failed(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +emulator(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log, + tc_proc=>self()}), + Msg = "Error in process ~p on node ~p with exit value:~n~p~n", + Error = {badmatch,4}, + Stack = [{module, function, 2, []}], + Pid = spawn(?MODULE, generate_error, [Error, Stack]), + check_logged(error, Msg, [Pid, node(), {Error, Stack}], + #{gl=>group_leader(), + error_logger=>#{tag=>error,emulator=>true}}), + ok. + +emulator(cleanup, _Config) -> + logger:remove_handler(h1), + ok. + +generate_error(Error, Stack) -> + erlang:raise(error, Error, Stack). + +via_logger_process(Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log, + tc_proc=>self()}), + + %% Explicitly send a message to the logger process + %% This is used by code_server, erl_prim_loader, init, prim_file, ... + Msg = ?str, + logger ! {log,error,Msg,[],#{}}, + check_logged(error, Msg, [], #{}), + + case os:type() of + {win32,_} -> + %% Skip this part on windows - cant change file mode" + ok; + _ -> + %% This should trigger the same thing from erl_prim_loader + Dir = filename:join(?config(priv_dir,Config),"dummydir"), + ok = file:make_dir(Dir), + ok = file:change_mode(Dir,8#0222), + error = erl_prim_loader:list_dir(Dir), + check_logged(error, + #{report=>"File operation error: eacces. Target: " ++ + Dir ++". Function: list_dir. "}, + #{pid=>self(), + gl=>group_leader(), + error_logger=>#{tag=>error_report, + type=>std_error}}), + ok + end. 
+ +via_logger_process(cleanup, Config) -> + Dir = filename:join(?config(priv_dir,Config),"dummydir"), + _ = file:change_mode(Dir,8#0664), + _ = file:del_dir(Dir), + logger:remove_handler(h1), + ok. + +other_node(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log, + tc_proc=>self()}), + {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]), + rpc:call(Node,logger,error,[Msg=?str,#{}]), + check_logged(error,Msg,#{}), + ok. + +other_node(cleanup,_Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes], + logger:remove_handler(h1), + ok. + +compare_levels(_Config) -> + Levels = [emergency,alert,critical,error,warning,notice,info,debug], + ok = compare(Levels), + {error,badarg} = ?TRY(logger:compare_levels(bad,bad)), + {error,badarg} = ?TRY(logger:compare_levels({bad},info)), + {error,badarg} = ?TRY(logger:compare_levels(info,"bad")), + ok. + +compare([L|Rest]) -> + eq = logger:compare_levels(L,L), + [gt = logger:compare_levels(L,L1) || L1 <- Rest], + [lt = logger:compare_levels(L1,L) || L1 <- Rest], + compare(Rest); +compare([]) -> + ok. + +process_metadata(_Config) -> + undefined = logger:get_process_metadata(), + {error,badarg} = ?TRY(logger:set_process_metadata(bad)), + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), + Time = erlang:monotonic_time(microsecond), + ProcMeta = #{time=>Time,line=>0,custom=>proc}, + ok = logger:set_process_metadata(ProcMeta), + S1 = ?str, + ?LOG_INFO(S1,#{custom=>macro}), + check_logged(info,S1,#{time=>Time,line=>0,custom=>macro}), + + Time2 = erlang:monotonic_time(microsecond), + S2 = ?str, + ?LOG_INFO(S2,#{time=>Time2,line=>1,custom=>macro}), + check_logged(info,S2,#{time=>Time2,line=>1,custom=>macro}), + + logger:info(S3=?str,#{custom=>func}), + check_logged(info,S3,#{time=>Time,line=>0,custom=>func}), + + ProcMeta = logger:get_process_metadata(), + ok = logger:unset_process_metadata(), + undefined = logger:get_process_metadata(), + + ok. + +process_metadata(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +%%%----------------------------------------------------------------- +%%% Internal +check_logged(Level,Format,Args,Meta) -> + do_check_logged(Level,{Format,Args},Meta). + +check_logged(Level,Msg,Meta) when ?IS_REPORT(Msg) -> + do_check_logged(Level,{report,Msg},Meta); +check_logged(Level,Msg,Meta) when ?IS_STRING(Msg) -> + do_check_logged(Level,{string,Msg},Meta). + +do_check_logged(Level,Msg0,Meta0) -> + receive + {#{level:=Level,msg:=Msg,meta:=Meta},_} -> + check_msg(Msg0,Msg), + check_maps(Meta0,Meta,meta) + after 500 -> + ct:fail({timeout,no_log,process_info(self(),messages)}) + end. + +check_no_log() -> + receive + X -> ct:fail({got_unexpected_log,X}) + after 500 -> + ok + end. + +check_msg(Msg,Msg) -> + ok; +check_msg({report,Expected},{report,Got}) when is_map(Expected), is_map(Got) -> + check_maps(Expected,Got,msg); +check_msg(Expected,Got) -> + ct:fail({unexpected,msg,Expected,Got}). + +check_maps(Expected,Got,What) -> + case maps:merge(Got,Expected) of + Got -> + ok; + _ -> + ct:fail({unexpected,What,Expected,Got}) + end. + +%% Handler +adding_handler(_Id,Config) -> + maybe_send(add), + {ok,Config}. +removing_handler(_Id) -> + maybe_send(remove), + ok. +changing_config(_Id,_Old,#{call:=Fun}) -> + Fun(); +changing_config(_Id,_Old,#{fail:=true}) -> + {error,fail}; +changing_config(_Id,_Old,Config) -> + maybe_send(changing_config), + {ok,Config}. + +maybe_send(Msg) -> + case whereis(callback_receiver) of + undefined -> ok; + Pid -> Pid ! Msg + end. 
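%% Illustrative sketch (not part of this patch): the process metadata API
%% exercised by process_metadata/1 above; metadata set this way is merged
%% into subsequent log events from the calling process unless the event
%% itself overrides the same keys.
ok = logger:set_process_metadata(#{custom => my_tag}),
logger:info("hello", []),
ok = logger:unset_process_metadata().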
+ +log(_Log,#{crash:=true}) -> + a=b; +log(Log,Config) -> + TcProc = maps:get(tc_proc,Config,self()), + TcProc ! {Log,Config}, + ok. + +test_api(Level) -> + logger:Level(#{Level=>rep}), + ok = check_logged(Level,#{Level=>rep},#{}), + logger:Level(#{Level=>rep},#{my=>meta}), + ok = check_logged(Level,#{Level=>rep},#{my=>meta}), + logger:Level("~w: ~w",[Level,fa]), + ok = check_logged(Level,"~w: ~w",[Level,fa],#{}), + logger:Level("~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + logger:Level(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,x, + #{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}), + logger:Level(fun(x) -> #{Level=>fun_to_r,meta=>true} end,x, + #{my=>meta}), + ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}), + logger:Level(fun(x) -> <<"fun_to_s">> end,x,#{}), + ok = check_logged(Level,<<"fun_to_s">>,#{}), + logger:Level(F1=fun(x) -> {fun_to_bad} end,x,#{}), + ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{F1,x},{fun_to_bad}],#{}), + logger:Level(F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}), + ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{F2,x},{error,fun_that_crashes}],#{}), + ok. + +test_log_function(Level) -> + logger:log(Level,#{Level=>rep}), + ok = check_logged(Level,#{Level=>rep},#{}), + logger:log(Level,#{Level=>rep},#{my=>meta}), + ok = check_logged(Level,#{Level=>rep},#{my=>meta}), + logger:log(Level,"~w: ~w",[Level,fa]), + ok = check_logged(Level,"~w: ~w",[Level,fa],#{}), + logger:log(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + logger:log(Level,fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end, + x, #{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}), + logger:log(Level,fun(x) -> #{Level=>fun_to_r,meta=>true} end, + x, #{my=>meta}), + ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}), + logger:log(Level,fun(x) -> <<"fun_to_s">> end,x,#{}), + ok = check_logged(Level,<<"fun_to_s">>,#{}), + logger:log(Level,F1=fun(x) -> {fun_to_bad} end,x,#{}), + ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{F1,x},{fun_to_bad}],#{}), + logger:log(Level,F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}), + ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{F2,x},{error,fun_that_crashes}],#{}), + ok. 
+ +test_macros(emergency=Level) -> + ?LOG_EMERGENCY(#{Level=>rep}), + ok = check_logged(Level,#{Level=>rep},?MY_LOC(1)), + ?LOG_EMERGENCY(#{Level=>rep},#{my=>meta}), + ok = check_logged(Level,#{Level=>rep},(?MY_LOC(1))#{my=>meta}), + ?LOG_EMERGENCY("~w: ~w",[Level,fa]), + ok = check_logged(Level,"~w: ~w",[Level,fa],?MY_LOC(1)), + ?LOG_EMERGENCY("~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],(?MY_LOC(1))#{my=>meta}), + ?LOG_EMERGENCY(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end, + x, #{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta], + (?MY_LOC(3))#{my=>meta}), + ?LOG_EMERGENCY(fun(x) -> #{Level=>fun_to_r,meta=>true} end, x, #{my=>meta}), + ok = check_logged(Level,#{Level=>fun_to_r,meta=>true}, + (?MY_LOC(2))#{my=>meta}), + ?LOG_EMERGENCY(fun(x) -> <<"fun_to_s">> end,x,#{}), + ok = check_logged(Level,<<"fun_to_s">>,?MY_LOC(1)), + F1=fun(x) -> {fun_to_bad} end, + ?LOG_EMERGENCY(F1,x,#{}), + ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{F1,x},{fun_to_bad}],#{}), + F2=fun(x) -> erlang:error(fun_that_crashes) end, + ?LOG_EMERGENCY(F2,x,#{}), + ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{F2,x},{error,fun_that_crashes}],#{}), + ok. + +%%%----------------------------------------------------------------- +%%% Called by macro ?TRY(X) +my_try(Fun) -> + try Fun() catch C:R -> {C,R} end. diff --git a/lib/kernel/test/logger_bench_SUITE.erl b/lib/kernel/test/logger_bench_SUITE.erl new file mode 100644 index 0000000000..d47122ea9d --- /dev/null +++ b/lib/kernel/test/logger_bench_SUITE.erl @@ -0,0 +1,500 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_bench_SUITE). + +-compile(export_all). + +%%%----------------------------------------------------------------- +%%% To include lager tests, add paths to lager and goldrush +%%% (goldrush is a dependency inside the lager repo) +%%% +%%% To print data to .csv files, add the following to a config file: +%%% {print_csv,[{console_handler,[{path,"/some/dir/"}]}]}. +%%% +%%%----------------------------------------------------------------- + +-include_lib("common_test/include/ct.hrl"). +-include_lib("common_test/include/ct_event.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +-define(msg,lists:flatten(string:pad("Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE), + 80,trailing,$*))). +-define(meta,#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY}, + pid=>self()}). + +-define(NO_COMPARE,[profile]). + +-define(TIMES,100000). + +suite() -> + [{timetrap,{seconds,120}}]. + +init_per_suite(Config) -> + DataDir = ?config(data_dir,Config), + have_lager() andalso make(DataDir), + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(Group, Config) -> + H = remove_all_handlers(), + do_init_per_group(Group), + [{handlers,H}|Config]. 
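%% Illustrative sketch (not part of this patch): remove_all_handlers/0 and
%% add_all_handlers/1 used in init_per_group/end_per_group above are not
%% included in this chunk; given the logger:i/0 shape seen in logger_SUITE
%% (a handlers list of tuples keyed on the handler id), a minimal removal
%% helper could look roughly like this.
remove_all_handlers_sketch() ->
    #{handlers:=Hs} = logger:i(),
    [ok = logger:remove_handler(element(1,H)) || H <- Hs],
    Hs.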
+ +do_init_per_group(minimal_handler) -> + ok = logger:add_handler(?MODULE,?MODULE,#{level=>error,filter_default=>log}); +do_init_per_group(console_handler) -> + ok = logger:add_handler(?MODULE,logger_std_h, + #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS, + logger_std_h=>#{type=>standard_io, + toggle_sync_qlen => ?TIMES+1, + drop_new_reqs_qlen => ?TIMES+2, + flush_reqs_qlen => ?TIMES+3, + enable_burst_limit => false}}), + have_lager() andalso lager_helper:start(), + ok. + +end_per_group(Group, Config) -> + case ?config(saved_config,Config) of + {_,[{bench,Bench}]} -> + print_compare_chart(Group,Bench); + _ -> + ok + end, + add_all_handlers(?config(handlers,Config)), + do_end_per_group(Group). + +do_end_per_group(minimal_handler) -> + ok = logger:remove_handler(?MODULE); +do_end_per_group(console_handler) -> + ok = logger:remove_handler(?MODULE), + have_lager() andalso lager_helper:stop(), + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + wait_for_handlers(), + ok. + +wait_for_handlers() -> + wait_for_handler(?MODULE), + wait_for_handler(lager_event). + +wait_for_handler(Handler) -> + case whereis(Handler) of + undefined -> + io:format("~p: noproc1",[Handler]), + ok; + Pid -> + case process_info(Pid,message_queue_len) of + {_,0} -> + io:format("~p: queue=~p",[Handler,0]), + ok; + {_,Q} -> + io:format("~p: queue=~p",[Handler,Q]), + timer:sleep(2000), + wait_for_handler(Handler); + undefined -> + io:format("~p: noproc2",[Handler]), + ok + end + end. + +groups() -> + [{minimal_handler,[],[log, + log_drop, + log_drop_by_handler, + macro, + macro_drop, + macro_drop_by_handler, + error_logger, + error_logger_drop, + error_logger_drop_by_handler + ]}, + {console_handler,[],[%profile, + log, + log_drop, + log_drop_by_handler, + %% log_handler_complete, + macro, + macro_drop, + macro_drop_by_handler, + %% macro_handler_complete, + error_logger, + error_logger_drop, + error_logger_drop_by_handler%% , + %% error_logger_handler_complete + ] ++ lager_cases()} + ]. + +lager_cases() -> + case have_lager() of + true -> + [lager_log, + lager_log_drop, + lager_log_drop_by_handler, + %% lager_log_handler_complete, + lager_parsetrans, + lager_parsetrans_drop, + lager_parsetrans_drop_by_handler%% , + %% lager_parsetrans_handler_complete + ]; + false -> + [] + end. + + +all() -> + [{group,minimal_handler}, + {group,console_handler} + ]. + +log(Config) -> + Times = ?TIMES, + run_benchmark(Config,?FUNCTION_NAME,fun do_log_func/2, [error,?msg], Times). + +log_drop(Config) -> + Times = ?TIMES*100, + ok = logger:set_logger_config(level,error), + run_benchmark(Config,?FUNCTION_NAME,fun do_log_func/2, [info,?msg], Times). + +log_drop(cleanup,_Config) -> + ok = logger:set_logger_config(level,info). + +log_drop_by_handler(Config) -> + Times = ?TIMES, + %% just ensure correct levels + ok = logger:set_logger_config(level,info), + ok = logger:set_handler_config(?MODULE,level,error), + run_benchmark(Config,?FUNCTION_NAME,fun do_log_func/2, [info,?msg], Times). + +log_handler_complete(Config) -> + ok = logger:set_handler_config(?MODULE,formatter, + {?MODULE,?DEFAULT_FORMAT_CONFIG}), + handler_complete(Config, ?FUNCTION_NAME, fun do_log_func/2, [error,?msg]). + +log_handler_complete(cleanup,_Config) -> + ok=logger:set_handler_config(?MODULE,formatter, + {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}). 
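+
+%% Note on the console_handler settings above: the qlen thresholds are
+%% set just above ?TIMES so that a full benchmark burst never triggers
+%% the handler's overload protection, i.e. every request is measured on
+%% the normal asynchronous path. A handler that keeps the protection at
+%% its defaults would simply omit the logger_std_h map, roughly:
+%%
+%%   ok = logger:add_handler(bench_h, logger_std_h,
+%%                           #{filter_default=>stop,
+%%                             filters=>?DEFAULT_HANDLER_FILTERS}).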
+ +macro(Config) -> + Times = ?TIMES, + run_benchmark(Config,?FUNCTION_NAME,fun do_log_macro/2,[error,?msg], Times). + +macro_drop(Config) -> + Times = ?TIMES*100, + ok = logger:set_logger_config(level,error), + run_benchmark(Config,?FUNCTION_NAME,fun do_log_macro/2,[info,?msg], Times). + +macro_drop(cleanup,_Config) -> + ok = logger:set_logger_config(level,info). + +macro_drop_by_handler(Config) -> + Times = ?TIMES, + %% just ensure correct levels + ok = logger:set_logger_config(level,info), + ok = logger:set_handler_config(?MODULE,level,error), + run_benchmark(Config,?FUNCTION_NAME,fun do_log_macro/2, [info,?msg], Times). + +macro_handler_complete(Config) -> + ok = logger:set_handler_config(?MODULE,formatter, + {?MODULE,?DEFAULT_FORMAT_CONFIG}), + handler_complete(Config, ?FUNCTION_NAME, fun do_log_macro/2, [error,?msg]). + +macro_handler_complete(cleanup,_Config) -> + ok=logger:set_handler_config(?MODULE,formatter, + {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}). + +error_logger(Config) -> + Times = ?TIMES, + run_benchmark(Config,?FUNCTION_NAME,fun do_error_logger/2, [error,?msg], Times). + +error_logger_drop(Config) -> + Times = ?TIMES*100, + ok = logger:set_logger_config(level,error), + run_benchmark(Config,?FUNCTION_NAME,fun do_error_logger/2, [info,?msg], Times). + +error_logger_drop(cleanup,_Config) -> + ok = logger:set_logger_config(level,info). + +error_logger_drop_by_handler(Config) -> + Times = ?TIMES, + %% just ensure correct levels + ok = logger:set_logger_config(level,info), + ok = logger:set_handler_config(?MODULE,level,error), + run_benchmark(Config,?FUNCTION_NAME,fun do_log_func/2, [info,?msg], Times). + +error_logger_handler_complete(Config) -> + ok = logger:set_handler_config(?MODULE,formatter, + {?MODULE,?DEFAULT_FORMAT_CONFIG}), + handler_complete(Config, ?FUNCTION_NAME, fun do_error_logger/2, [error,?msg]). + +error_logger_handler_complete(cleanup,_Config) -> + ok=logger:set_handler_config(?MODULE,formatter, + {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}). + +lager_log(Config) -> + Times = ?TIMES, + run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_func/2, [error,?msg], Times). + +lager_log_drop(Config) -> + Times = ?TIMES*100, + run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_func/2, [info,?msg], Times). + +lager_log_drop_by_handler(Config) -> + %% This concept does not exist, so doing same as lager_log_drop/1 + Times = ?TIMES, + run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_func/2, [info,?msg], Times). + +lager_log_handler_complete(Config) -> + handler_complete(Config, ?FUNCTION_NAME, fun lager_helper:do_func/2, [error,?msg]). + +lager_parsetrans(Config) -> + Times = ?TIMES, + run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_parsetrans/2, [error,?msg], Times). + +lager_parsetrans_drop(Config) -> + Times = ?TIMES*100, + run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_parsetrans/2, [info,?msg], Times). + +lager_parsetrans_drop_by_handler(Config) -> + %% This concept does not exist, so doing same as lager_parsetrans_drop/1 + Times = ?TIMES, + run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_parsetrans/2, [info,?msg], Times). + +lager_parsetrans_handler_complete(Config) -> + handler_complete(Config, ?FUNCTION_NAME, fun lager_helper:do_parsetrans/2, [error,?msg]). 
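+
+%% The three variants measured per API above are:
+%%   *_log             - the event is accepted and reaches the handler
+%%   *_drop            - the event is rejected early by the global level
+%%                       (cheap, hence ?TIMES*100 iterations)
+%%   *_drop_by_handler - the event passes the global level but is
+%%                       rejected by the handler level, i.e.
+%%
+%%   ok = logger:set_logger_config(level,info),
+%%   ok = logger:set_handler_config(?MODULE,level,error),
+%%   logger:info("rejected by the handler, not by logger", []).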
+ + +profile(Config) -> + Times = ?TIMES, + %% fprof:apply(fun repeated_apply/3,[fun lager_helper:do_func/2,[error,?msg],Times]), + fprof:apply(fun repeated_apply/3,[fun do_log_func/2,[error,?msg],Times]), + ok = fprof:profile(), + ok = fprof:analyse(dest,"../fprof.analyse"), + ok. + +%%%----------------------------------------------------------------- +%%% Internal +%% Handler +log(_Log,_Config) -> + ok. + +format(Log=#{meta:=#{pid:=Pid}},Config) when is_pid(Pid) -> + String = ?DEFAULT_FORMATTER:format(Log,Config), + Pid ! done, + String; +format(Log=#{meta:=#{pid:=PidStr}},Config) when is_list(PidStr) -> + String = ?DEFAULT_FORMATTER:format(Log,Config), + list_to_pid(PidStr) ! done, + String. + +handler_complete(Config, TC, Fun, Args) -> + Times = ?TIMES, + Start = os:perf_counter(microsecond), + repeated_apply(Fun, Args, Times), + MSecs = wait_for_done(Start,Times), + calc_and_report(Config,TC,MSecs,Times). + +wait_for_done(Start,0) -> + os:perf_counter(microsecond) - Start; +wait_for_done(Start,N) -> + receive + done -> + wait_for_done(Start,N-1) + after 20000 -> + ct:fail("missing " ++ integer_to_list(N) ++ " replys") + end. + +%%%----------------------------------------------------------------- +%%% Benchmark stuff +run_benchmark(Config,Tag,Fun,Args,Times) -> + _ = erlang:apply(Fun, Args), % apply once to ensure level is cached + MSecs = measure_repeated_op(Fun, Args, Times), + %% fprof:profile(), + %% fprof:analyse(dest,"../"++atom_to_list(Tag)++".prof"), + calc_and_report(Config,Tag,MSecs,Times). + +measure_repeated_op(Fun, Args, Times) -> + Start = os:perf_counter(microsecond), + %% fprof:apply(fun repeated_apply/3, [Fun, Args, Times]), + repeated_apply(Fun, Args, Times), + os:perf_counter(microsecond) - Start. + +repeated_apply(_F, _Args, Times) when Times =< 0 -> + ok; +repeated_apply(F, Args, Times) -> + erlang:apply(F, Args), + repeated_apply(F, Args, Times - 1). + +calc_and_report(Config,Tag,MSecs,Times) -> + IOPS = trunc(Times * (1000000 / MSecs)), + ct_event:notify(#event{ name = benchmark_data, data = [{value,IOPS}] }), + ct:print("~p:~n~p IOPS, ~p us", [Tag, IOPS, MSecs]), + ct:comment("~p IOPS, ~p us", [IOPS, MSecs]), + Bench = case ?config(saved_config,Config) of + {_,[{bench,B}]} -> B; + undefined -> [] + end, + {save_config,[{bench,[{Tag,IOPS,MSecs}|Bench]}]}. + +remove_all_handlers() -> + #{handlers:=Hs} = logger:i(), + [logger:remove_handler(Id) || {Id,_,_} <- Hs], + Hs. + +add_all_handlers(Hs) -> + [logger:add_handler(Id,Mod,Config) || {Id,Mod,Config} <- Hs], + ok. + +%%%----------------------------------------------------------------- +%%% Call logger in different ways +do_log_func(Level,Msg) -> + logger:Level(Msg,[],?meta). + +do_log_macro(error,Msg) -> + ?LOG_ERROR(Msg,[]); +do_log_macro(info,Msg) -> + ?LOG_INFO(Msg,[]); +do_log_macro(debug,Msg) -> + ?LOG_DEBUG(Msg,[]). + +do_error_logger(error,Msg) -> + error_logger:error_msg(Msg,[]); +do_error_logger(info,Msg) -> + error_logger:info_msg(Msg,[]). + +%%%----------------------------------------------------------------- +%%% +print_compare_chart(Group,Bench) -> + io:format("~-20s~12s~12s~12s~12s", + ["Microseconds:","Log","Drop","HDrop","Complete"]), + io:format(user,"~-20s~12s~12s~12s~12s~n", + ["Microseconds:","Log","Drop","HDrop","Complete"]), + {Log,Drop,HDrop,Comp} = sort_bench(Bench,[],[],[],[]), + print_compare_chart(Log,Drop,HDrop,Comp), + io:format(user,"~n",[]), + maybe_print_csv_files(Group, + [{log,Log},{drop,Drop},{hdrop,HDrop},{comp,Comp}]). 
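+
+%% Worked example for calc_and_report/4 above: note that MSecs actually
+%% holds microseconds (os:perf_counter(microsecond) deltas). With
+%% Times = 100000 iterations finishing in 250000 us:
+%%   IOPS = trunc(100000 * (1000000 / 250000)) = 400000
+%% i.e. 400000 logged operations per second.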
+ +print_compare_chart([{What,LIOPS,LMSecs}|Log], + [{What,DIOPS,DMSecs}|Drop], + [{What,HIOPS,HMSecs}|HDrop], + [{What,CIOPS,CMSecs}|Comp]) -> + io:format("~-20w~12w~12w~12w~12w",[What,LMSecs,DMSecs,HMSecs,CMSecs]), + io:format(user,"~-20w~12w~12w~12w~12w~n",[What,LMSecs,DMSecs,HMSecs,CMSecs]), + print_compare_chart(Log,Drop,HDrop,Comp); +print_compare_chart([{What,LIOPS,LMSecs}|Log], + [{What,DIOPS,DMSecs}|Drop], + [{What,HIOPS,HMSecs}|HDrop], + []=Comp) -> + io:format("~-20w~12w~12w~12w",[What,LMSecs,DMSecs,HMSecs]), + io:format(user,"~-20w~12w~12w~12w~n",[What,LMSecs,DMSecs,HMSecs]), + print_compare_chart(Log,Drop,HDrop,Comp); +print_compare_chart([],[],[],[]) -> + ok; +print_compare_chart(Log,Drop,HDrop,Comp) -> + ct:fail({Log,Drop,HDrop,Comp}). + +sort_bench([{TC,IOPS,MSecs}|Bench],Log,Drop,HDrop,Comp) -> + case lists:member(TC,?NO_COMPARE) of + true -> + sort_bench(Bench,Log,Drop,HDrop,Comp); + false -> + TCStr = atom_to_list(TC), + {What,Type} = + case re:run(TCStr,"(.*)_(drop.*)", + [{capture,all_but_first,list}]) of + {match,[WhatStr,TypeStr]} -> + {list_to_atom(WhatStr),list_to_atom(TypeStr)}; + nomatch -> + case re:run(TCStr,"(.*)_(handler_complete.*)", + [{capture,all_but_first,list}]) of + {match,[WhatStr,TypeStr]} -> + {list_to_atom(WhatStr),list_to_atom(TypeStr)}; + nomatch -> + {TC,log} + end + end, + case Type of + log -> + sort_bench(Bench,[{What,IOPS,MSecs}|Log],Drop,HDrop,Comp); + drop -> + sort_bench(Bench,Log,[{What,IOPS,MSecs}|Drop],HDrop,Comp); + drop_by_handler -> + sort_bench(Bench,Log,Drop,[{What,IOPS,MSecs}|HDrop],Comp); + handler_complete -> + sort_bench(Bench,Log,Drop,HDrop,[{What,IOPS,MSecs}|Comp]) + end + end; +sort_bench([],Log,Drop,HDrop,Comp) -> + {lists:keysort(1,Log), + lists:keysort(1,Drop), + lists:keysort(1,HDrop), + lists:keysort(1,Comp)}. + +maybe_print_csv_files(Group,Data) -> + case ct:get_config({print_csv,Group}) of + undefined -> + ok; + Cfg -> + Path = proplists:get_value(path,Cfg,".."), + Files = [begin + File = filename:join(Path,F)++".csv", + case filelib:is_regular(File) of + true -> + {ok,Fd} = file:open(File,[append]), + Fd; + false -> + {ok,Fd} = file:open(File,[write]), + ok = file:write(Fd, + "error_logger,lager_log," + "lager_parsetrans,logger_log," + "logger_macro\n"), + Fd + end + end || {F,_} <- Data], + [print_csv_file(F,D) || {F,D} <- lists:zip(Files,Data)], + [file:close(Fd) || Fd <- Files], + ok + end. + +print_csv_file(Fd,{_,Data}) -> + AllIOPS = [integer_to_list(IOPS) || {_,IOPS,_} <- Data], + ok = file:write(Fd,lists:join(",",AllIOPS)++"\n"). + +have_lager() -> + code:ensure_loaded(lager) == {module,lager}. + +make(Dir) -> + {ok,Cwd} = file:get_cwd(), + ok = file:set_cwd(Dir), + up_to_date = make:all([load]), + ok = file:set_cwd(Cwd), + code:add_path(Dir). diff --git a/lib/kernel/test/logger_bench_SUITE_data/Emakefile b/lib/kernel/test/logger_bench_SUITE_data/Emakefile new file mode 100644 index 0000000000..85c82bdaab --- /dev/null +++ b/lib/kernel/test/logger_bench_SUITE_data/Emakefile @@ -0,0 +1 @@ +{['lager_helper'],[{outdir,"."},debug_info,{i,"/home/uabshan/Work/git/otp/lib/kernel/src"},{i,"/home/uabshan/Work/git/otp/lib/kernel/include"}]}. diff --git a/lib/kernel/test/logger_bench_SUITE_data/lager_helper.erl b/lib/kernel/test/logger_bench_SUITE_data/lager_helper.erl new file mode 100644 index 0000000000..296ced4276 --- /dev/null +++ b/lib/kernel/test/logger_bench_SUITE_data/lager_helper.erl @@ -0,0 +1,73 @@ +-module(lager_helper). + +-compile(export_all). +-compile({parse_transform,lager_transform}). 
+ +-include_lib("kernel/src/logger_internal.hrl"). + +start() -> + application:load(lager), + application:set_env(lager, error_logger_redirect, false), + application:set_env(lager, async_threshold, 100010), + application:set_env(lager, async_threshold_window, 100), + application:set_env(lager,handlers,[{?MODULE,[{level,error}]}]), + lager:start(). + +stop() -> + application:stop(lager). + +do_func(Level,Msg) -> + lager:log(Level,[{pid,self()}],Msg,[]). + +do_parsetrans(error,Msg) -> + lager:error(Msg,[]); +do_parsetrans(info,Msg) -> + lager:info(Msg,[]). + +%%%----------------------------------------------------------------- +%%% Dummy handler for lager +-record(state, {level :: {'mask', integer()}, + formatter :: atom(), + format_config :: any()}). +init(Opts) -> + Level = proplists:get_value(level,Opts,info), + Formatter = proplists:get_value(formatter,Opts,logger_bench_SUITE), + FormatConfig = proplists:get_value(format_config,Opts,?DEFAULT_FORMAT_CONFIG), + {ok,#state{level=lager_util:config_to_mask(Level), + formatter=Formatter, + format_config=FormatConfig}}. + +handle_call(get_loglevel, #state{level=Level} = State) -> + {ok, Level, State}; +handle_call({set_loglevel, Level}, State) -> + try lager_util:config_to_mask(Level) of + Levels -> + {ok, ok, State#state{level=Levels}} + catch + _:_ -> + {ok, {error, bad_log_level}, State} + end; +handle_call(_Request, State) -> + {ok, ok, State}. + +handle_event({log, Message}, + #state{level=L,formatter=Formatter,format_config=FormatConfig} = State) -> + case lager_util:is_loggable(Message, L, ?MODULE) of + true -> + Metadata = + case maps:from_list(lager_msg:metadata(Message)) of + Meta = #{pid:=Pid} when is_pid(Pid) -> + Meta; + Meta = #{pid:=PidStr} when is_list(PidStr) -> + Meta + end, + Log = #{level=>lager_msg:severity(Message), + msg=>{report,lager_msg:message(Message)}, + meta=>Metadata}, + io:put_chars(user, Formatter:format(Log,FormatConfig)), + {ok, State}; + false -> + {ok, State} + end; +handle_event(_Event, State) -> + {ok, State}. diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl new file mode 100644 index 0000000000..c7c6137380 --- /dev/null +++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl @@ -0,0 +1,1417 @@ +-module(logger_disk_log_h_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). +-include_lib("kernel/src/logger_h_common.hrl"). +-include_lib("stdlib/include/ms_transform.hrl"). +-include_lib("kernel/include/file.hrl"). + +-define(check_no_log, [] = test_server:messages_get()). + +-define(check(Expected), + receive {log,Expected} -> + [] = test_server:messages_get() + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {got,test_server:messages_get()}}) + end). + +-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(bin(Msg), list_to_binary(Msg++"\n")). +-define(log_no(File,N), lists:concat([File,".",N])). +-define(domain,#{domain=>[?MODULE]}). + +-define(SYNC_REP_INT, if is_atom(?FILESYNC_REPEAT_INTERVAL) -> 5500; + true -> ?FILESYNC_REPEAT_INTERVAL + 500 + end). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + timer:start(), % to avoid progress report + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. 
+ +init_per_testcase(TestHooksCase, Config) when + TestHooksCase == write_failure; + TestHooksCase == sync_failure -> + if ?TEST_HOOKS_TAB == undefined -> + {skip,"Define the TEST_HOOKS macro to run this test"}; + true -> + ct:print("********** ~w **********", [TestHooksCase]), + Config + end; +init_per_testcase(TestCase, Config) -> + ct:print("********** ~w **********", [TestCase]), + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [start_stop_handler, + create_log, + open_existing_log, + disk_log_opts, + default_formatter, + logging, + errors, + formatter_fail, + config_fail, + bad_input, + info_and_reset, + reconfig, + disk_log_sync, + disk_log_full, + disk_log_wrap, + disk_log_events, + write_failure, + sync_failure, + op_switch_to_sync, + op_switch_to_drop, + op_switch_to_flush, + limit_burst_disabled, + limit_burst_enabled_one, + limit_burst_enabled_period, + kill_disabled, + qlen_kill_new, + %% qlen_kill_std, + mem_kill_new, + %% mem_kill_std, + restart_after, + handler_requests_under_load + ]. + +start_stop_handler(_Config) -> + ok = logger:add_handler(?MODULE, logger_disk_log_h, #{}), + {error,{already_exist,?MODULE}} = + logger:add_handler(?MODULE, logger_disk_log_h, #{}), + true = is_pid(whereis(?MODULE)), + ok = logger:remove_handler(?MODULE), + timer:sleep(500), + undefined = whereis(?MODULE). +start_stop_handler(cleanup, _Config) -> + logger:remove_handler(?MODULE). + +create_log(Config) -> + PrivDir = ?config(priv_dir,Config), + %% test new handler + Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_A"])), + LogFile1 = filename:join(PrivDir, Name1), + ok = start_and_add(Name1, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile1}), + logger:info("hello", ?domain), + logger_disk_log_h:disk_log_sync(Name1), + ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]), + try_read_file(?log_no(LogFile1,1), {ok,<<"hello\n">>}, 5000), + + %% test second handler + Name2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_B"])), + DLName = lists:concat([?FUNCTION_NAME,"_B_log"]), + LogFile2 = filename:join(PrivDir, DLName), + ok = start_and_add(Name2, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile2}), + logger:info("dummy", ?domain), + logger_disk_log_h:disk_log_sync(Name2), + ct:pal("Checking contents of ~p", [?log_no(LogFile2,1)]), + try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000), + + remove_and_stop(Name1), + remove_and_stop(Name2), + try_read_file(?log_no(LogFile1,1), {ok,<<"hello\ndummy\n">>}, 1), + try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000), + ok. 
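+
+%% For reference, start_and_add/3 (defined later in this module) boils
+%% down to a plain logger:add_handler/3 call with the disk log options
+%% under the disk_log_opts key, e.g. (file path is a placeholder):
+%%
+%%   ok = logger:add_handler(my_disk_log, logger_disk_log_h,
+%%                           #{filter_default=>log,
+%%                             disk_log_opts=>#{file=>"/tmp/my_log"}}).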
+ +open_existing_log(Config) -> + PrivDir = ?config(priv_dir,Config), + %% test new handler + HName = ?FUNCTION_NAME, + DLName = lists:concat([?FUNCTION_NAME,"_log"]), + LogFile1 = filename:join(PrivDir, DLName), + ok = start_and_add(HName, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile1}), + logger:info("one", ?domain), + logger_disk_log_h:disk_log_sync(HName), + ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\n">>}, 5000), + logger:info("two", ?domain), + ok = remove_and_stop(HName), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\n">>}, 5000), + + logger:info("two and a half", ?domain), + + ok = start_and_add(HName, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile1}), + logger:info("three", ?domain), + logger_disk_log_h:disk_log_sync(HName), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000), + remove_and_stop(HName), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000). + +disk_log_opts(Config) -> + Get = fun(Key, PL) -> proplists:get_value(Key, PL) end, + PrivDir = ?config(priv_dir,Config), + WName = list_to_atom(lists:concat([?FUNCTION_NAME,"_W"])), + WFile = lists:concat([?FUNCTION_NAME,"_W_log"]), + Size = length("12345"), + ConfigW = #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter => {?MODULE,no_nl}}, + WFileFull = filename:join(PrivDir, WFile), + DLOptsW = #{file => WFileFull, + type => wrap, + max_no_bytes => Size, + max_no_files => 2}, + ok = start_and_add(WName, ConfigW, DLOptsW), + WInfo1 = disk_log:info(WName), + ct:log("Fullname = ~s", [WFileFull]), + {WFileFull,wrap,{Size,2},1} = {Get(file,WInfo1),Get(type,WInfo1), + Get(size,WInfo1),Get(current_file,WInfo1)}, + logger:info("123", ?domain), + logger_disk_log_h:disk_log_sync(WName), + timer:sleep(500), + 1 = Get(current_file, disk_log:info(WName)), + + logger:info("45", ?domain), + logger_disk_log_h:disk_log_sync(WName), + timer:sleep(500), + 1 = Get(current_file, disk_log:info(WName)), + + logger:info("6", ?domain), + logger_disk_log_h:disk_log_sync(WName), + timer:sleep(500), + 2 = Get(current_file, disk_log:info(WName)), + + logger:info("7890", ?domain), + logger_disk_log_h:disk_log_sync(WName), + timer:sleep(500), + 2 = Get(current_file, disk_log:info(WName)), + + HName1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H1"])), + HFile1 = lists:concat([?FUNCTION_NAME,"_H1_log"]), + ConfigH = #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter => {?MODULE,no_nl}}, + HFile1Full = filename:join(PrivDir, HFile1), + DLOptsH1 = #{file => HFile1Full, + type => halt}, + ok = start_and_add(HName1, ConfigH, DLOptsH1), + HInfo1 = disk_log:info(HName1), + ct:log("Fullname = ~s", [HFile1Full]), + {HFile1Full,halt,infinity} = {Get(file,HInfo1),Get(type,HInfo1), + Get(size,HInfo1)}, + logger:info("12345", ?domain), + logger_disk_log_h:disk_log_sync(HName1), + timer:sleep(500), + 1 = Get(no_written_items, disk_log:info(HName1)), + + HName2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H2"])), + HFile2 = lists:concat([?FUNCTION_NAME,"_H2_log"]), + HFile2Full = filename:join(PrivDir, HFile2), + DLOptsH2 = DLOptsH1#{file => HFile2Full, + max_no_bytes => 1000}, + ok = start_and_add(HName2, ConfigH, DLOptsH2), + HInfo3 = disk_log:info(HName2), + ct:log("Fullname = ~s", [HFile2Full]), + {HFile2Full,halt,1000} = 
{Get(file,HInfo3),Get(type,HInfo3), + Get(size,HInfo3)}, + + remove_and_stop(WName), + remove_and_stop(HName1), + remove_and_stop(HName2), + ok. + +default_formatter(Config) -> + PrivDir = ?config(priv_dir,Config), + LogFile = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)), + HConfig = #{disk_log_opts => #{file=>LogFile}, + filter_default=>log}, + ct:pal("Log: ~p", [LogFile]), + ok = logger:add_handler(?MODULE, logger_disk_log_h, HConfig), + ok = logger:set_handler_config(?MODULE,formatter, + {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}), + LogName = lists:concat([LogFile, ".1"]), + logger:info("dummy"), + wait_until_written(LogName), + {ok,Bin} = file:read_file(LogName), + match = re:run(Bin, "=INFO REPORT====.*\ndummy", [{capture,none}]), + ok. +default_formatter(cleanup, _Config) -> + logger:remove_handler(?MODULE). + +logging(Config) -> + PrivDir = ?config(priv_dir,Config), + %% test new handler + Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), + LogFile = filename:join(PrivDir, Name), + ok = start_and_add(Name, #{filter_default=>log, + formatter=>{?MODULE,self()}}, + #{file => LogFile}), + MsgFormatter = fun(Term) -> {io_lib:format("Term:~p",[Term]),[]} end, + logger:info([{x,y}], #{report_cb => MsgFormatter}), + logger:info([{x,y}], #{}), + ct:pal("Checking contents of ~p", [?log_no(LogFile,1)]), + try_read_file(?log_no(LogFile,1), {ok,<<"Term:[{x,y}]\n x: y\n">>}, 5000). + +logging(cleanup, _Config) -> + Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), + remove_and_stop(Name). + +errors(Config) -> + PrivDir = ?config(priv_dir,Config), + Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), + LogFile1 = filename:join(PrivDir,Name1), + HConfig = #{disk_log_opts=>#{file=>LogFile1}, + filter_default=>log, + formatter=>{?MODULE,self()}}, + ok = logger:add_handler(Name1, logger_disk_log_h, HConfig), + {error,{already_exist,Name1}} = + logger:add_handler(Name1, logger_disk_log_h, #{}), + + %%! TODO: + %%! Check how bad log_opts are handled! + + {error,{illegal_config_change,_,_}} = + logger:set_handler_config(Name1, + disk_log_opts, + #{file=>LogFile1, + type=>halt}), + {error,{illegal_config_change,_,_}} = + logger:set_handler_config(Name1,id,new), + + ok = logger:remove_handler(Name1), + {error,{not_found,Name1}} = logger:remove_handler(Name1), + ok. + +errors(cleanup, _Config) -> + Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), + _ = logger:remove_handler(Name1). + +formatter_fail(Config) -> + PrivDir = ?config(priv_dir,Config), + Name = ?FUNCTION_NAME, + LogFile = filename:join(PrivDir,Name), + ct:pal("Log = ~p", [LogFile]), + HConfig = #{disk_log_opts => #{file=>LogFile}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}, + %% no formatter! 
+ logger:add_handler(Name, logger_disk_log_h, HConfig), + Pid = whereis(Name), + true = is_pid(Pid), + {ok,#{handlers:=H}} = logger:get_logger_config(), + true = lists:member(Name,H), + + %% Formatter is added automatically + {ok,{_,#{formatter:={logger_formatter,_}}}} = + logger:get_handler_config(Name), + logger:info(M1=?msg,?domain), + Got1 = try_match_file(?log_no(LogFile,1),"=INFO REPORT====.*\n"++M1,5000), + + ok = logger:set_handler_config(Name,formatter,{nonexistingmodule,#{}}), + logger:info(M2=?msg,?domain), + Got2 = try_match_file(?log_no(LogFile,1), + Got1++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M2, + 5000), + + ok = logger:set_handler_config(Name,formatter,{?MODULE,crash}), + logger:info(M3=?msg,?domain), + Got3 = try_match_file(?log_no(LogFile,1), + Got2++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M3, + 5000), + + ok = logger:set_handler_config(Name,formatter,{?MODULE,bad_return}), + logger:info(?msg,?domain), + try_match_file(?log_no(LogFile,1), + Got3++"FORMATTER ERROR: bad_return_value", + 5000), + + %% Check that handler is still alive and was never dead + Pid = whereis(Name), + {ok,#{handlers:=H}} = logger:get_logger_config(), + ok. + +formatter_fail(cleanup,_Config) -> + _ = logger:remove_handler(?FUNCTION_NAME), + ok. + +config_fail(_Config) -> + {error,{handler_not_added,{invalid_config,logger_disk_log_h,{bad,bad}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{logger_disk_log_h => #{bad => bad}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + {error,{handler_not_added,{invalid_levels,{42,42,_}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{logger_disk_log_h => #{toggle_sync_qlen=>42, + drop_new_reqs_qlen=>42}}), + + ok = logger:add_handler(?MODULE,logger_disk_log_h, + #{filter_default=>log, + formatter=>{?MODULE,self()}}), + %% can't change the disk log options for a log already in use + {error,{illegal_config_change,_,_}} = + logger:set_handler_config(?MODULE,disk_log_opts, + #{max_no_files=>2}), + %% can't change name of an existing handler + {error,{illegal_config_change,_,_}} = + logger:set_handler_config(?MODULE,id,bad), + %% incorrect values of OP params + {error,{invalid_levels,_}} = + logger:set_handler_config(?MODULE,logger_disk_log_h, + #{toggle_sync_qlen=>100, + flush_reqs_qlen=>99}), + %% invalid name of config parameter + {error,{invalid_config,logger_disk_log_h,{filesync_rep_int,2000}}} = + logger:set_handler_config(?MODULE, logger_disk_log_h, + #{filesync_rep_int => 2000}), + ok. +config_fail(cleanup,_Config) -> + logger:remove_handler(?MODULE). + +bad_input(_Config) -> + {error,{badarg,{disk_log_sync,["BadType"]}}} = + logger_disk_log_h:disk_log_sync("BadType"), + {error,{badarg,{info,["BadType"]}}} = logger_disk_log_h:info("BadType"), + {error,{badarg,{reset,["BadType"]}}} = logger_disk_log_h:reset("BadType"). + +info_and_reset(_Config) -> + ok = logger:add_handler(?MODULE,logger_disk_log_h, + #{filter_default=>log, + formatter=>{?MODULE,self()}}), + #{id := ?MODULE} = logger_disk_log_h:info(?MODULE), + ok = logger_disk_log_h:reset(?MODULE). +info_and_reset(cleanup,_Config) -> + logger:remove_handler(?MODULE). 
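+
+%% As errors/1 and config_fail/1 show, disk_log_opts and id are
+%% write-once: changing them on a running handler returns
+%% {error,{illegal_config_change,_,_}}. The reconfig/1 case below
+%% therefore removes and re-adds the handler to get new disk log
+%% options, roughly:
+%%
+%%   ok = logger:remove_handler(my_h),
+%%   ok = logger:add_handler(my_h, logger_disk_log_h,
+%%                           #{disk_log_opts=>#{type=>halt,file=>File}}).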
+ +reconfig(Config) -> + Dir = ?config(priv_dir,Config), + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + #{id := ?MODULE, + toggle_sync_qlen := ?TOGGLE_SYNC_QLEN, + drop_new_reqs_qlen := ?DROP_NEW_REQS_QLEN, + flush_reqs_qlen := ?FLUSH_REQS_QLEN, + enable_burst_limit := ?ENABLE_BURST_LIMIT, + burst_limit_size := ?BURST_LIMIT_SIZE, + burst_window_time := ?BURST_WINDOW_TIME, + enable_kill_overloaded := ?ENABLE_KILL_OVERLOADED, + handler_overloaded_qlen := ?HANDLER_OVERLOADED_QLEN, + handler_overloaded_mem := ?HANDLER_OVERLOADED_MEM, + handler_restart_after := ?HANDLER_RESTART_AFTER, + filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL, + log_opts := #{type := ?DISK_LOG_TYPE, + max_no_files := ?DISK_LOG_MAX_NO_FILES, + max_no_bytes := ?DISK_LOG_MAX_NO_BYTES, + file := _DiskLogFile}} = + logger_disk_log_h:info(?MODULE), + + ok = logger:set_handler_config(?MODULE, logger_disk_log_h, + #{toggle_sync_qlen => 1, + drop_new_reqs_qlen => 2, + flush_reqs_qlen => 3, + enable_burst_limit => false, + burst_limit_size => 10, + burst_window_time => 10, + enable_kill_overloaded => true, + handler_overloaded_qlen => 100000, + handler_overloaded_mem => 10000000, + handler_restart_after => never, + filesync_repeat_interval => no_repeat}), + #{id := ?MODULE, + toggle_sync_qlen := 1, + drop_new_reqs_qlen := 2, + flush_reqs_qlen := 3, + enable_burst_limit := false, + burst_limit_size := 10, + burst_window_time := 10, + enable_kill_overloaded := true, + handler_overloaded_qlen := 100000, + handler_overloaded_mem := 10000000, + handler_restart_after := never, + filesync_repeat_interval := no_repeat} = + logger_disk_log_h:info(?MODULE), + + ok = logger:remove_handler(?MODULE), + + File = filename:join(Dir, "logfile"), + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}, + disk_log_opts=> + #{type => halt, + max_no_files => 1, + max_no_bytes => 1024, + file => File}}), + #{log_opts := #{type := halt, + max_no_files := 1, + max_no_bytes := 1024, + file := File}} = + logger_disk_log_h:info(?MODULE), + ok. + +reconfig(cleanup, _Config) -> + logger:remove_handler(?MODULE). 
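+
+%% filesync_repeat_interval controls how often the handler forces a
+%% disk_log sync. Setting it to no_repeat (as disk_log_sync/1 below
+%% verifies) disables the periodic sync; the handler then syncs only
+%% when it goes idle or when asked explicitly:
+%%
+%%   ok = logger:set_handler_config(my_h, logger_disk_log_h,
+%%                                  #{filesync_repeat_interval => no_repeat}),
+%%   logger_disk_log_h:disk_log_sync(my_h).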
+ +disk_log_sync(Config) -> + Dir = ?config(priv_dir,Config), + File = filename:join(Dir, ?FUNCTION_NAME), + Log = lists:concat([File,".1"]), + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{disk_log_opts => #{file => File}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,nl}}), + + start_tracer([{?MODULE,format,2}, + {disk_log,blog,2}, + {disk_log,sync,1}], + [{formatter,"first"}, + {disk_log,blog}, + {disk_log,sync}]), + + logger:info("first", ?domain), + %% wait for automatic disk_log_sync + check_tracer(?FILESYNC_REPEAT_INTERVAL*2), + + start_tracer([{?MODULE,format,2}, + {disk_log,blog,2}, + {disk_log,sync,1}], + [{formatter,"second"}, + {formatter,"third"}, + {disk_log,blog}, + {disk_log,blog}, + {disk_log,sync}]), + %% two log requests in fast succession will make the handler skip + %% an automatic disk log sync + logger:info("second", ?domain), + logger:info("third", ?domain), + %% do explicit disk_log_sync + logger_disk_log_h:disk_log_sync(?MODULE), + check_tracer(100), + + %% check that if there's no repeated disk_log_sync active, + %% a disk_log_sync is still performed when handler goes idle + logger:set_handler_config(?MODULE, logger_disk_log_h, + #{filesync_repeat_interval => no_repeat}), + no_repeat = maps:get(filesync_repeat_interval, + logger_disk_log_h:info(?MODULE)), + + start_tracer([{?MODULE,format,2}, + {disk_log,blog,2}, + {disk_log,sync,1}], + [{formatter,"fourth"}, + {disk_log,blog}, + {formatter,"fifth"}, + {disk_log,blog}, + {disk_log,sync}]), + + logger:info("fourth", ?domain), + timer:sleep(?IDLE_DETECT_TIME_MSEC*2), + logger:info("fifth", ?domain), + %% wait for automatic disk_log_sync + check_tracer(?IDLE_DETECT_TIME_MSEC*2), + + try_read_file(Log, {ok,<<"first\nsecond\nthird\nfourth\nfifth\n">>}, 1000), + + %% switch repeated disk_log_sync on and verify that the looping works + SyncInt = 1000, + WaitT = 4500, + OneSync = {logger_disk_log_h,handle_cast,repeated_disk_log_sync}, + %% receive 1 initial repeated_disk_log_sync, then 1 per sec + start_tracer([{logger_disk_log_h,handle_cast,2}], + [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]), + + logger:set_handler_config(?MODULE, logger_disk_log_h, + #{filesync_repeat_interval => SyncInt}), + SyncInt = maps:get(filesync_repeat_interval, + logger_disk_log_h:info(?MODULE)), + timer:sleep(WaitT), + logger:set_handler_config(?MODULE, logger_disk_log_h, + #{filesync_repeat_interval => no_repeat}), + check_tracer(100), + ok. +disk_log_sync(cleanup,_Config) -> + logger:remove_handler(?MODULE). + +disk_log_wrap(Config) -> + Get = fun(Key, PL) -> proplists:get_value(Key, PL) end, + Dir = ?config(priv_dir,Config), + File = filename:join(Dir, ?FUNCTION_NAME), + ct:pal("Log = ~p", [File]), + MaxFiles = 3, + MaxBytes = 5, + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}, + disk_log_opts=> + #{type => wrap, + max_no_files => MaxFiles, + max_no_bytes => MaxBytes, + file => File}}), + Info = disk_log:info(?MODULE), + {File,wrap,{MaxBytes,MaxFiles},1} = + {Get(file,Info),Get(type,Info),Get(size,Info),Get(current_file,Info)}, + Tester = self(), + TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) -> + Pid ! 
{trace,Mod,Func,Details}, + Pid + end, + {ok,_} = dbg:tracer(process, {TraceFun, Tester}), + {ok,_} = dbg:p(whereis(?MODULE), [c]), + {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []), + + Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,MaxBytes)], + ct:pal("String = ~p (~w)", [Text, erts_debug:size(Text)]), + %% fill first file + lists:foreach(fun(N) -> + Log = lists:concat([File,".",N]), + logger:info(Text, ?domain), + wait_until_written(Log), + ct:pal("N = ~w", + [N = Get(current_file, + disk_log:info(?MODULE))]) + end, lists:seq(1,MaxFiles)), + + %% wait for trace messages + timer:sleep(1000), + dbg:stop_clear(), + Received = lists:flatmap(fun({trace,_M,handle_info, + [{disk_log,_Node,_Name,What},_]}) -> + [{trace,What}]; + ({log,_}) -> + [] + end, test_server:messages_get()), + ct:pal("Trace =~n~p", [Received]), + Received = [{trace,{wrap,0}} || _ <- lists:seq(1,MaxFiles-1)], + ok. + +disk_log_wrap(cleanup,_Config) -> + logger:remove_handler(?MODULE). + +disk_log_full(Config) -> + Dir = ?config(priv_dir,Config), + File = filename:join(Dir, ?FUNCTION_NAME), + ct:pal("Log = ~p", [File]), + MaxBytes = 50, + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}, + disk_log_opts=> + #{type => halt, + max_no_files => 1, + max_no_bytes => MaxBytes, + file => File}}), + + Tester = self(), + TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) -> + Pid ! {trace,Mod,Func,Details}, + Pid + end, + {ok,_} = dbg:tracer(process, {TraceFun, Tester}), + {ok,_} = dbg:p(whereis(?MODULE), [c]), + {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []), + + NoOfChars = 5, + Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,NoOfChars)], + [logger:info(Text, ?domain) || _ <- lists:seq(1,trunc(MaxBytes/NoOfChars)+1)], + + %% wait for trace messages + timer:sleep(2000), + dbg:stop_clear(), + Received = lists:flatmap(fun({trace,_M,handle_info, + [{disk_log,_Node,_Name,What},_]}) -> + [{trace,What}]; + ({log,_}) -> + [] + end, test_server:messages_get()), + ct:pal("Trace =~n~p", [Received]), + [{trace,full}, + {trace,{error_status,{error,{full,_}}}}] = Received, + ok. +disk_log_full(cleanup, _Config) -> + logger:remove_handler(?MODULE). + +disk_log_events(Config) -> + Node = node(), + Log = ?MODULE, + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + + %% Events copied from disk_log API + Events = + [{disk_log, Node, Log, {wrap, 0}}, + {disk_log, Node, Log, {truncated, 0}}, + {disk_log, Node, Log, {read_only, 42}}, + {disk_log, Node, Log, {blocked_log, 42}}, + {disk_log, Node, Log, {format_external, 42}}, + {disk_log, Node, Log, full}, + {disk_log, Node, Log, {error_status, ok}}], + + Tester = self(), + TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) -> + Pid ! {trace,Mod,Func,Details}, + Pid + end, + {ok,_} = dbg:tracer(process, {TraceFun, Tester}), + {ok,_} = dbg:p(whereis(?MODULE), [c]), + {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []), + + [whereis(?MODULE) ! E || E <- Events], + %% wait for trace messages + timer:sleep(2000), + dbg:stop_clear(), + Received = lists:map(fun({trace,_M,handle_info, + [Got,_]}) -> Got + end, test_server:messages_get()), + ct:pal("Trace =~n~p", [Received]), + NoOfEvents = length(Events), + NoOfEvents = length(Received), + lists:foreach(fun(Event) -> + true = lists:member(Event, Received) + end, Received), + ok. 
+disk_log_events(cleanup, _Config) -> + logger:remove_handler(?MODULE). + +write_failure(Config) -> + Dir = ?config(priv_dir, Config), + File = filename:join(Dir, ?FUNCTION_NAME), + Log = lists:concat([File,".1"]), + ct:pal("Log = ~p", [Log]), + + Node = start_h_on_new_node(Config, ?FUNCTION_NAME, File), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]), + HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]), + ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts, HState)]), + + ok = log_on_remote_node(Node, "Logged1"), + rpc:call(Node, logger_disk_log_h, disk_log_sync, [?STANDARD_HANDLER]), + ?check_no_log, + try_read_file(Log, {ok,<<"Logged1\n">>}, ?SYNC_REP_INT), + + rpc:call(Node, ?MODULE, set_result, [disk_log_blog,{error,no_such_log}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + ?check({error,{?STANDARD_HANDLER,log,LogOpts,{error,no_such_log}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [disk_log_blog, + {error,{full,?STANDARD_HANDLER}}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + ?check({error,{?STANDARD_HANDLER,log,LogOpts, + {error,{full,?STANDARD_HANDLER}}}}), + + rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]), + ok = log_on_remote_node(Node, "Logged2"), + rpc:call(Node, logger_disk_log_h, disk_log_sync, [?STANDARD_HANDLER]), + ?check_no_log, + try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, ?SYNC_REP_INT), + ok. +write_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. + + +sync_failure(Config) -> + Dir = ?config(priv_dir, Config), + FileName = lists:concat([?MODULE,"_",?FUNCTION_NAME]), + File = filename:join(Dir, FileName), + Log = lists:concat([File,".1"]), + + Node = start_h_on_new_node(Config, ?FUNCTION_NAME, File), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]), + HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]), + LogOpts = maps:get(log_opts, HState), + + SyncInt = 500, + ok = rpc:call(Node, logger, set_handler_config, + [?STANDARD_HANDLER, logger_disk_log_h, + #{filesync_repeat_interval => SyncInt}]), + Info = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]), + SyncInt = maps:get(filesync_repeat_interval, Info), + + ok = log_on_remote_node(Node, "Logged1"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [disk_log_sync,{error,no_such_log}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + ?check({error,{?STANDARD_HANDLER,sync,LogOpts,{error,no_such_log}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, + [disk_log_sync,{error,{blocked_log,?STANDARD_HANDLER}}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + ?check({error,{?STANDARD_HANDLER,sync,LogOpts, + {error,{blocked_log,?STANDARD_HANDLER}}}}), + + rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]), + ok = log_on_remote_node(Node, "Logged2"), + ?check_no_log, + ok. 
+sync_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. + +start_h_on_new_node(_Config, Func, File) -> + Pa = filename:dirname(code:which(?MODULE)), + Dest = + case os:type() of + {win32,_} -> + lists:concat([" {disk_log,\\\"",File,"\\\"}"]); + _ -> + lists:concat([" \'{disk_log,\"",File,"\"}\'"]) + end, + Args = lists:concat([" -kernel ",logger_dest,Dest," -pa ",Pa]), + NodeName = lists:concat([?MODULE,"_",Func]), + ct:pal("Starting ~s with ~tp", [NodeName,Args]), + {ok,Node} = test_server:start_node(NodeName, peer, [{args, Args}]), + Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]), + true = is_pid(Pid), + ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, + {?MODULE,nl}]), + Node. + +log_on_remote_node(Node,Msg) -> + _ = spawn_link(Node, + fun() -> erlang:group_leader(whereis(user),self()), + logger:info(Msg) + end), + ok. + +%% functions for test hook macros to be called by rpc +set_internal_log(Mod, Func) -> + ?set_internal_log({Mod,Func}). +set_result(Op, Result) -> + ?set_result(Op, Result). +set_defaults() -> + ?set_defaults(). + +%% internal log function that sends the term to the test case process +internal_log(Type, Term) -> + [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester), + Tester ! {log,{Type,Term}}, + logger:internal_log(Type, Term), + ok. + + +%%%----------------------------------------------------------------- +%%% Overload protection tests + +op_switch_to_sync(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 3, + drop_new_reqs_qlen => 501, + flush_reqs_qlen => 2000, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 500, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + NumOfReqs = count_lines(Log), + ok = file:delete(Log). +op_switch_to_sync(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_drop(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + + NewHConfig = + HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 3, + flush_reqs_qlen => 600, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 500, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages dropped = ~w (~w)", + [NumOfReqs-Logged,NumOfReqs]), + true = (Logged < NumOfReqs), + ok = file:delete(Log). +op_switch_to_drop(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_flush() -> + [{timetrap,{seconds,60}}]. +op_switch_to_flush(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + + %% it's important that both async and sync requests have been queued + %% when the flush happens (verify with coverage of flush_log_requests/2) + + NewHConfig = + HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 99, + flush_reqs_qlen => 100, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 1000, + Procs = 500, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages flushed/dropped = ~w (~w)", + [(NumOfReqs*Procs)-Logged,NumOfReqs*Procs]), + true = (Logged < (NumOfReqs*Procs)), + ok = file:delete(Log). +op_switch_to_flush(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
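+
+%% The three queue-length thresholds exercised above act in increasing
+%% order of severity. With the op_switch_to_drop/1 settings:
+%%   toggle_sync_qlen   => 2   - at   2 queued requests, switch to synchronous mode
+%%   drop_new_reqs_qlen => 3   - at   3 queued requests, drop new requests
+%%   flush_reqs_qlen    => 600 - at 600 queued requests, flush the whole queue
+%% which is why that case asserts Logged < NumOfReqs, while
+%% op_switch_to_sync/1 (drop/flush thresholds above the burst size)
+%% asserts that all 500 requests end up in the log.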
+ + +limit_burst_disabled(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_disk_log_h => DLHConfig#{enable_burst_limit => false, + burst_limit_size => 10, + burst_window_time => 2000, + drop_new_reqs_qlen => 200, + flush_reqs_qlen => 300}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file:delete(Log), + NumOfReqs = Logged. +limit_burst_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +limit_burst_enabled_one(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + NewHConfig = + HConfig#{logger_disk_log_h => DLHConfig#{enable_burst_limit => true, + burst_limit_size => ReqLimit, + burst_window_time => 2000, + drop_new_reqs_qlen => 200, + flush_reqs_qlen => 300}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file:delete(Log), + ReqLimit = Logged. +limit_burst_enabled_one(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +limit_burst_enabled_period(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + BurstTWin = 1000, + NewHConfig = + HConfig#{logger_disk_log_h => DLHConfig#{enable_burst_limit => true, + burst_limit_size => ReqLimit, + burst_window_time => BurstTWin, + drop_new_reqs_qlen => 20000, + flush_reqs_qlen => 20001}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + + Windows = 3, + Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + ok = file:delete(Log), + true = (Logged > (ReqLimit*Windows)) andalso + (Logged < (ReqLimit*(Windows+2))). +limit_burst_enabled_period(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +kill_disabled(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_disk_log_h=>DLHConfig#{enable_kill_overloaded=>false, + handler_overloaded_qlen=>10, + handler_overloaded_mem=>100}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file:delete(Log), + true = is_pid(whereis(?MODULE)), + ok. +kill_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
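+
+%% Burst limit arithmetic behind the assertions above: with
+%% burst_limit_size = 10 and burst_window_time = 2000, only the first 10
+%% of the 100 back-to-back messages in limit_burst_enabled_one/1 are
+%% logged. In limit_burst_enabled_period/1, sending continuously for
+%% 3 windows of 1000 ms each should let through roughly 10 per window,
+%% hence the check 30 < Logged < 50.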
+ +qlen_kill_new(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + Pid0 = whereis(?MODULE), + {_,Mem0} = process_info(Pid0, memory), + RestartAfter = 2000, + NewHConfig = + HConfig#{logger_disk_log_h => + DLHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>10, + handler_overloaded_mem=>Mem0+50000, + handler_restart_after=>RestartAfter}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + MRef = erlang:monitor(process, Pid0), + NumOfReqs = 100, + Procs = 2, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), + %% send_burst({n,NumOfReqs}, seq, {chars,79}, info), + receive + {'DOWN', MRef, _, _, Info} -> + case Info of + {shutdown,{overloaded,?MODULE,QLen,Mem}} -> + ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]); + killed -> + ct:pal("Slow shutdown, handler process was killed!", []) + end, + timer:sleep(RestartAfter + 1000), + true = is_pid(whereis(?MODULE)), + ok + after + 5000 -> + Info = logger_disk_log_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info]), + ct:fail("Handler not dead! It should not have survived this!") + end. +qlen_kill_new(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +mem_kill_new(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + Pid0 = whereis(?MODULE), + {_,Mem0} = process_info(Pid0, memory), + RestartAfter = 2000, + NewHConfig = + HConfig#{logger_disk_log_h => + DLHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>50000, + handler_overloaded_mem=>Mem0+500, + handler_restart_after=>RestartAfter}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + MRef = erlang:monitor(process, Pid0), + NumOfReqs = 100, + Procs = 2, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), + %% send_burst({n,NumOfReqs}, seq, {chars,79}, info), + receive + {'DOWN', MRef, _, _, Info} -> + case Info of + {shutdown,{overloaded,?MODULE,QLen,Mem}} -> + ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]); + killed -> + ct:pal("Slow shutdown, handler process was killed!", []) + end, + timer:sleep(RestartAfter * 2), + true = is_pid(whereis(?MODULE)), + ok + after + 5000 -> + Info = logger_disk_log_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info]), + ct:fail("Handler not dead! It should not have survived this!") + end. +mem_kill_new(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +restart_after(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig1 = + HConfig#{logger_disk_log_h=>DLHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>10, + handler_restart_after=>never}}, + ok = logger:set_handler_config(?MODULE, NewHConfig1), + MRef1 = erlang:monitor(process, whereis(?MODULE)), + %% kill handler + send_burst({n,100}, {spawn,2,0}, {chars,79}, info), + receive + {'DOWN', MRef1, _, _, _Info1} -> + timer:sleep(?HANDLER_RESTART_AFTER + 1000), + undefined = whereis(?MODULE), + ok + after + 5000 -> + ct:fail("Handler not dead! 
It should not have survived this!") + end, + + {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + RestartAfter = 2000, + NewHConfig2 = + HConfig#{logger_disk_log_h=>DLHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>10, + handler_restart_after=>RestartAfter}}, + ok = logger:set_handler_config(?MODULE, NewHConfig2), + Pid0 = whereis(?MODULE), + MRef2 = erlang:monitor(process, Pid0), + %% kill handler + send_burst({n,100}, {spawn,2,0}, {chars,79}, info), + receive + {'DOWN', MRef2, _, _, _Info2} -> + timer:sleep(RestartAfter + 1000), + Pid1 = whereis(?MODULE), + true = is_pid(Pid1), + false = (Pid1 == Pid0), + ok + after + 5000 -> + ct:fail("Handler not dead! It should not have survived this!") + end, + ok. +restart_after(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +%% send handler requests (filesync, info, reset, change_config) +%% during high load to verify that sync, dropping and flushing is +%% handled correctly. +handler_requests_under_load() -> + [{timetrap,{seconds,60}}]. +handler_requests_under_load(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 1000, + flush_reqs_qlen => 2000, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{disk_log_sync,[]}, + {info,[]}, + {reset,[]}, + {change_config,[]}]) + end), + Procs = 100, + Sent = Procs * send_burst({n,5000}, {spawn,Procs,10}, {chars,79}, info), + Pid ! {self(),finish}, + ReqResult = receive {Pid,Result} -> Result end, + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + FindError = fun(Res) -> + [E || E <- Res, + is_tuple(E) andalso (element(1,E) == error)] + end, + Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult], + NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult), + ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]), + ok = file:delete(Log). +handler_requests_under_load(cleanup, Config) -> + ok = stop_handler(?MODULE). + +send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) -> + receive + {From,finish} -> + From ! {self(),Reqs} + after + TO -> + Result = + case Req of + change_config -> + logger:set_handler_config(HName, logger_disk_log_h, + #{enable_kill_overloaded => + false}); + Func -> + logger_disk_log_h:Func(HName) + end, + send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}]) + end. + +%%%----------------------------------------------------------------- +%%% +start_handler(Name, FuncName, Config) -> + Dir = ?config(priv_dir,Config), + File = filename:join(Dir, FuncName), + ct:pal("Logging to ~tp", [File]), + ok = logger:add_handler(Name, + logger_disk_log_h, + #{disk_log_opts=>#{file => File, + max_no_files => 1, + max_no_bytes => 100000000}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([Name]), + formatter=>{?MODULE,op}}), + {ok,{_,HConfig = #{logger_disk_log_h := DLHConfig}}} = + logger:get_handler_config(Name), + {lists:concat([File,".1"]),HConfig,DLHConfig}. + +stop_handler(Name) -> + ok = logger:remove_handler(Name), + ct:pal("Handler ~p stopped!", [Name]). 
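+
+%% send_burst/4 (below) argument summary, as used by the overload tests:
+%%   {n,N} | {t,T}       - send N messages, or keep sending for T ms
+%%   seq | {spawn,Ps,TO} - from this process, or from Ps spawned
+%%                         processes started TO ms apart
+%%   {chars,Sz}          - each message is a random printable string of
+%%                         Sz characters
+%% e.g. send_burst({n,100}, {spawn,2,0}, {chars,79}, info) sends 100
+%% info events from each of 2 concurrent processes.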
+ +send_burst(NorT, Type, {chars,Sz}, Class) -> + Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)], + case NorT of + {n,N} -> + %% process_flag(priority, high), + send_n_burst(N, Type, Text, Class), + %% process_flag(priority, normal), + N; + {t,T} -> + ct:pal("Sending messages sequentially for ~w ms", [T]), + T0 = erlang:monotonic_time(millisecond), + send_t_burst(T0, T, Text, Class, 0) + end. + +send_n_burst(0, _, _Text, _Class) -> + ok; +send_n_burst(N, seq, Text, Class) -> + ok = logger:Class(Text, ?domain), + send_n_burst(N-1, seq, Text, Class); +send_n_burst(N, {spawn,Ps,TO}, Text, Class) -> + ct:pal("~w processes each sending ~w messages", [Ps,N]), + PerProc = fun() -> + send_n_burst(N, seq, Text, Class) + end, + MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end, + monitor(process,spawn_link(PerProc)) end || + _ <- lists:seq(1,Ps)], + lists:foreach(fun(MRef) -> + receive + {'DOWN', MRef, _, _, _} -> + ok + end + end, MRefs), + ct:pal("Message burst sent", []), + ok. + +send_t_burst(T0, T, Text, Class, N) -> + T1 = erlang:monotonic_time(millisecond), + if (T1-T0) > T -> + N; + true -> + ok = logger:Class(Text, ?domain), + send_t_burst(T0, T, Text, Class, N+1) + end. + +%%%----------------------------------------------------------------- +%%% Formatter callback +%%% Using this to send the formatted string back to the test case +%%% process - so it can check for logged events. +format(_,bad_return) -> + bad_return; +format(_,crash) -> + erlang:error(formatter_crashed); +format(#{msg:={report,R},meta:=#{report_cb:=Fun}}=Log,Config) -> + format(Log#{msg=>Fun(R)},Config); +format(#{msg:={string,String0}},no_nl) -> + String = unicode:characters_to_list(String0), + String; +format(#{msg:={string,String0}},nl) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={string,String0}},op) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={report,#{label:={supervisor,progress}}}},op) -> + ""; +format(#{msg:={report,#{label:={gen_server,terminate}}}},op) -> + ""; +format(#{msg:={report,#{label:={proc_lib,crash}}}},op) -> + ""; +format(#{msg:={F,A}},Pid) when is_list(F), is_list(A) -> + String = lists:flatten(io_lib:format(F,A)), + Pid ! {log,String}, + String++"\n"; +format(#{msg:={string,String0}},Pid) -> + String = unicode:characters_to_list(String0), + Pid ! {log,String}, + String++"\n"; +format(Msg,Tag) -> + Error = {unexpected_format,Msg,Tag}, + erlang:display(Error), + exit(Error). + +remove(Handler, LogName) -> + logger_disk_log_h:remove(Handler, LogName), + HState = #{log_names := Logs} = logger_disk_log_h:info(), + false = maps:is_key(LogName, HState), + false = lists:member(LogName, Logs), + false = logger_config:exist(logger, LogName), + {error,no_such_log} = disk_log:info(LogName), + ok. + +start_and_add(Name, Config, LogOpts) -> + ct:pal("Adding handler ~w with: ~p", + [Name,Config#{disk_log_opts=>LogOpts}]), + ok = logger:add_handler(Name, logger_disk_log_h, + Config#{disk_log_opts=>LogOpts}), + Pid = whereis(Name), + true = is_pid(Pid), + Name = proplists:get_value(name, disk_log:info(Name)), + ok. + +remove_and_stop(Handler) -> + ok = logger:remove_handler(Handler), + timer:sleep(500), + undefined = whereis(Handler), + ok. + +try_read_file(FileName, Expected, Time) -> + try_read_file(FileName, Expected, Time, undefined). 
+ +try_read_file(FileName, Expected, Time, _) when Time > 0 -> + case file:read_file(FileName) of + Expected -> + ok; + Error = {error,_Reason} -> + erlang:error(Error); + SomethingElse -> + ct:pal("try_read_file read unexpected: ~p~n", [SomethingElse]), + timer:sleep(500), + try_read_file(FileName, Expected, Time-500, SomethingElse) + end; + +try_read_file(_, _, _, Incorrect) -> + ct:pal("try_read_file got incorrect pattern: ~p~n", [Incorrect]), + erlang:error({error,not_matching_pattern,Incorrect}). + +try_match_file(FileName, Pattern, Time) -> + try_match_file(FileName, Pattern, Time, <<>>). + +try_match_file(FileName, Pattern, Time, _) when Time > 0 -> + case file:read_file(FileName) of + {ok, Bin} -> + case re:run(Bin,Pattern,[{capture,none}]) of + match -> + unicode:characters_to_list(Bin); + _ -> + timer:sleep(100), + try_match_file(FileName, Pattern, Time-100, Bin) + end; + Error -> + erlang:error(Error) + end; +try_match_file(_,Pattern,_,Incorrect) -> + ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n", + [Pattern,Incorrect]), + erlang:error({error,not_matching_pattern,Pattern,Incorrect}). + +count_lines(File) -> + wait_until_written(File), + count_lines1(File). + +wait_until_written(File) -> + wait_until_written(File, -1). + +wait_until_written(File, Sz) -> + timer:sleep(2000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz}} -> + timer:sleep(1000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz1}} -> + ok; + {ok,#file_info{size = Sz2}} -> + wait_until_written(File, Sz2) + end; + {ok,#file_info{size = Sz1}} -> + wait_until_written(File, Sz1) + end. + +count_lines1(File) -> + Counter = fun Cnt(Dev,LC) -> + case file:read_line(Dev) of + eof -> LC; + _ -> Cnt(Dev,LC+1) + end + end, + {_,Dev} = file:open(File, [read]), + Lines = Counter(Dev, 0), + file:close(Dev), + Lines. + +start_tracer(Trace,Expected) -> + Pid = self(), + dbg:tracer(process,{fun tracer/2,{Pid,Expected}}), + dbg:p(whereis(?MODULE),[c]), + dbg:p(Pid,[c]), + tpl(Trace), + ok. + +tpl([{M,F,A}|Trace]) -> + {ok,Match} = dbg:tpl(M,F,A,[]), + case lists:keyfind(matched,1,Match) of + {_,_,1} -> + ok; + _ -> + dbg:stop_clear(), + throw({skip,"Can't trace "++atom_to_list(M)++":"++ + atom_to_list(F)++"/"++integer_to_list(A)}) + end, + tpl(Trace); +tpl([]) -> + ok. + +tracer({trace,_,call,{?MODULE,format,[#{msg:={string,Msg}}|_]}}, {Pid,[{formatter,Msg}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{formatter,Msg}); +tracer({trace,_,call,{logger_disk_log_h,handle_cast,[{Op,_}|_]}}, {Pid,[{Mod,Func,Op}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func,Op}); +tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func}); +tracer({trace,_,call,Call}, {Pid,Expected}) -> + Pid ! {tracer_got_unexpected,Call,Expected}, + {Pid,Expected}. + +maybe_tracer_done(Pid,[],Got) -> + ct:log("Tracer got: ~p~n",[Got]), + Pid ! tracer_done; +maybe_tracer_done(Pid,Expected,Got) -> + ct:log("Tracer got: ~p~n",[Got]), + {Pid,Expected}. + +check_tracer(T) -> + receive + tracer_done -> + dbg:stop_clear(), + ok; + {tracer_got_unexpected,Got,Expected} -> + dbg:stop_clear(), + ct:fail({tracer_got_unexpected,Got,Expected}) + after T -> + ct:fail({timeout,tracer}) + end. diff --git a/lib/kernel/test/logger_env_var_SUITE.erl b/lib/kernel/test/logger_env_var_SUITE.erl new file mode 100644 index 0000000000..c2d3364701 --- /dev/null +++ b/lib/kernel/test/logger_env_var_SUITE.erl @@ -0,0 +1,451 @@ +% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. 
All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_env_var_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +-define(all_vars,[{kernel,logger_dest}, + {kernel,logger_level}, + {kernel,logger_log_progress}, + {kernel,logger_sasl_compatible}, + {kernel,error_logger}]). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + Env = [{App,Key,application:get_env(App,Key)} || {App,Key} <- ?all_vars], + Removed = cleanup(), + [{env,Env},{logger,Removed}|Config]. + +end_per_suite(Config) -> + [application:set_env(App,Key,Val) || + {App,Key,Val} <- ?config(env,Config), + Val =/= undefined], + Hs = ?config(logger,Config), + [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs], + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + cleanup(), + ok. + +groups() -> + []. + +all() -> + [default, + default_sasl_compatible, + dest_tty, + dest_tty_sasl_compatible, + dest_false, + dest_false_progress, + dest_false_sasl_compatible, + dest_silent, + dest_silent_sasl_compatible, + dest_file_old, + dest_file, + dest_disk_log, + %% disk_log_vars, % or test this in logger_disk_log_SUITE? + sasl_compatible_false, + sasl_compatible_false_no_progress, + sasl_compatible, + bad_dest%% , + %% bad_level, + %% bad_sasl_compatibility, + %% bad_progress + ]. + +default(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + undefined, + undefined, % dest + undefined, % level + undefined, % sasl comp (default=false) + undefined), % progress (default=false) + {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), + true = is_pid(whereis(logger_std_h)), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,StdFilters), + true = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(logger_simple,1,Hs), + false = lists:keymember(sasl_h,1,Hs), + false = is_pid(whereis(sasl_h)), + ok. + +default_sasl_compatible(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + undefined, + undefined, % dest + undefined, % level + true, % sasl comp (default=false) + undefined), % progress (default=false) + {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), + true = is_pid(whereis(logger_std_h)), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} = + lists:keyfind(domain,1,StdFilters), + false = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(logger_simple,1,Hs), + true = lists:keymember(sasl_h,1,Hs), + true = is_pid(whereis(sasl_h)), + ok. 
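+
+%% Illustration only, not called by the test cases: outside this suite
+%% the same kernel variables would normally be set before the kernel
+%% application starts, e.g. from sys.config or with application:set_env/3
+%% as below. The log file path is a made-up example; the variable names
+%% and value formats are the ones exercised by setup/7 further down.
+example_env_setup() ->
+    application:set_env(kernel, logger_dest, {file,"./example.log"}),
+    application:set_env(kernel, logger_level, info),
+    application:set_env(kernel, logger_sasl_compatible, false),
+    application:set_env(kernel, logger_log_progress, false).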
+ +dest_tty(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + tty, % dest + undefined, % level + undefined, % sasl comp (default=false) + undefined), % progress (default=false) + {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), + true = is_pid(whereis(logger_std_h)), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,StdFilters), + true = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(logger_simple,1,Hs), + false = lists:keymember(sasl_h,1,Hs), + false = is_pid(whereis(sasl_h)), + ok. + +dest_tty_sasl_compatible(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + tty, % dest + undefined, % level + true, % sasl comp (default=false) + undefined), % progress (default=false) + {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), + true = is_pid(whereis(logger_std_h)), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} = + lists:keyfind(domain,1,StdFilters), + false = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(logger_simple,1,Hs), + true = lists:keymember(sasl_h,1,Hs), + true = is_pid(whereis(sasl_h)), + ok. + +dest_false(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + false, % dest + notice, % level + undefined, % sasl comp (default=false) + undefined), % progress (default=false) + false = lists:keymember(logger_std_h,1,Hs), + {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs), + notice = maps:get(level,SimpleC), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,SimpleFilters), + true = lists:keymember(stop_progress,1,SimpleFilters), + false = lists:keymember(sasl_h,1,Hs), + ok. + +dest_false_progress(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + false, % dest + notice, % level + undefined, % sasl comp (default=false) + true), % progress (default=false) + false = lists:keymember(logger_std_h,1,Hs), + {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs), + notice = maps:get(level,SimpleC), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,SimpleFilters), + false = lists:keymember(stop_progress,1,SimpleFilters), + false = lists:keymember(sasl_h,1,Hs), + ok. + +dest_false_sasl_compatible(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + false, % dest + notice, % level + true, % sasl comp (default=false) + undefined), % progress (default=false) + false = lists:keymember(logger_std_h,1,Hs), + {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs), + notice = maps:get(level,SimpleC), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} = + lists:keyfind(domain,1,SimpleFilters), + false = lists:keymember(stop_progress,1,SimpleFilters), + true = lists:keymember(sasl_h,1,Hs), + true = is_pid(whereis(sasl_h)), + ok. + +dest_silent(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + silent, % dest + undefined, % level + undefined, % sasl comp (default=false) + undefined), % progress (default=false) + false = lists:keymember(logger_std_h,1,Hs), + false = lists:keymember(logger_simple,1,Hs), + false = lists:keymember(sasl_h,1,Hs), + ok. 
+ +dest_silent_sasl_compatible(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + silent, % dest + undefined, % level + true, % sasl comp (default=false) + undefined), % progress (default=false) + false = lists:keymember(logger_std_h,1,Hs), + false = lists:keymember(logger_simple,1,Hs), + true = lists:keymember(sasl_h,1,Hs), + true = is_pid(whereis(sasl_h)), + ok. + + +dest_file_old(Config) -> + {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, + error_logger, + file, % dest + undefined, % level + undefined, % sasl comp (default=false) + undefined), % progress (default=false) + check_log(Log, + file, % dest + 0), % progress in std logger + ok. + + +dest_file(Config) -> + {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + file, % dest + undefined, % level + undefined, % sasl comp (default=false) + undefined), % progress (default=false) + check_log(Log, + file, % dest + 0), % progress in std logger + ok. + + +dest_disk_log(Config) -> + {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + disk_log, % dest + undefined, % level + undefined, % sasl comp (default=false) + undefined), % progress (default=false) + check_log(Log, + disk_log, % dest + 0), % progress in std logger + ok. + + +sasl_compatible_false(Config) -> + {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + file, % dest + undefined, % level + false, % sasl comp + true), % progress + check_log(Log, + file, % dest + 4), % progress in std logger + ok. + +sasl_compatible_false_no_progress(Config) -> + {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + file, % dest + undefined, % level + false, % sasl comp + false), % progress + check_log(Log, + file, % dest + 0), % progress in std logger + ok. + +sasl_compatible(Config) -> + {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + file, % dest + undefined, % level + true, % sasl comp + undefined), % progress + check_log(Log, + file, % dest + 0), % progress in std logger + ok. + +bad_dest(Config) -> + {error,{bad_config,{kernel,{logger_dest,baddest}}}} = + setup(Config,?FUNCTION_NAME, + logger_dest, + baddest, + undefined, + undefined, + undefined). + +bad_level(Config) -> + error = + setup(Config,?FUNCTION_NAME, + logger_dest, + tty, + badlevel, + undefined, + undefined). + +bad_sasl_compatibility(Config) -> + error = + setup(Config,?FUNCTION_NAME, + logger_dest, + tty, + info, + badcomp, + undefined). + +bad_progress(Config) -> + error = + setup(Config,?FUNCTION_NAME, + logger_dest, + tty, + info, + undefined, + badprogress). 
+ +%%%----------------------------------------------------------------- +%%% Internal +setup(Config,Func,DestVar,Dest,Level,SaslComp,Progress) -> + ok = logger:add_handler(logger_simple,logger_simple, + #{filter_default=>log, + logger_simple=>#{buffer=>true}}), + Dir = ?config(priv_dir,Config), + File = lists:concat([?MODULE,"_",Func,".log"]), + Log = filename:join(Dir,File), + case Dest of + undefined -> + ok; + F when F==file; F==disk_log -> + application:set_env(kernel,DestVar,{Dest,Log}); + _ -> + application:set_env(kernel,DestVar,Dest) + end, + case Level of + undefined -> + ok; + _ -> + application:set_env(kernel,logger_level,Level) + end, + case SaslComp of + undefined -> + ok; + _ -> + application:set_env(kernel,logger_sasl_compatible,SaslComp) + end, + case Progress of + undefined -> + ok; + _ -> + application:set_env(kernel,logger_log_progress,Progress) + end, + case logger:setup_standard_handler() of + ok -> + application:start(sasl), + StdH = case Dest of + NoH when NoH==false; NoH==silent -> false; + _ -> true + end, + StdH = is_pid(whereis(?STANDARD_HANDLER)), + SaslH = if SaslComp -> true; + true -> false + end, + SaslH = is_pid(whereis(sasl_h)), + {ok,{Log,maps:get(handlers,logger:i())}}; + Error -> + Error + end. + +check_log(Log,Dest,NumProgress) -> + ok = logger:alert("dummy1"), + ok = logger:debug("dummy1"), + + %% Check that there are progress reports (supervisor and + %% application_controller) and an error report (the call above) in + %% the log. There should not be any info reports yet. + {ok,Bin1} = sync_and_read(Dest,Log), + ct:log("Log content:~n~s",[Bin1]), + match(Bin1,<<"PROGRESS REPORT">>,NumProgress), + match(Bin1,<<"ALERT REPORT">>,1), + match(Bin1,<<"INFO REPORT">>,0), + match(Bin1,<<"DEBUG REPORT">>,0), + + %% Then stop sasl and see that the info report from + %% application_controller is there + ok = application:stop(sasl), + {ok,Bin2} = sync_and_read(Dest,Log), + ct:log("Log content:~n~s",[Bin2]), + match(Bin2,<<"INFO REPORT">>,1), + match(Bin1,<<"DEBUG REPORT">>,0), + ok. + +match(Bin,Pattern,0) -> + nomatch = re:run(Bin,Pattern,[{capture,none}]); +match(Bin,Pattern,N) -> + {match,M} = re:run(Bin,Pattern,[{capture,all},global]), + N = length(M). + +sync_and_read(disk_log,Log) -> + logger_disk_log_h:disk_log_sync(?STANDARD_HANDLER), + file:read_file(Log ++ ".1"); +sync_and_read(file,Log) -> + logger_std_h:filesync(?STANDARD_HANDLER), + file:read_file(Log). + +cleanup() -> + application:stop(sasl), + [application:unset_env(App,Key) || {App,Key} <- ?all_vars], + #{handlers:=Hs0} = logger:i(), + Hs = lists:keydelete(cth_log_redirect,1,Hs0), + [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs], + Hs. diff --git a/lib/kernel/test/logger_filters_SUITE.erl b/lib/kernel/test/logger_filters_SUITE.erl new file mode 100644 index 0000000000..21f14bbc02 --- /dev/null +++ b/lib/kernel/test/logger_filters_SUITE.erl @@ -0,0 +1,214 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_filters_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). + +-define(ndlog, + #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{}}). +-define(dlog(Domain), + #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{domain=>Domain}}). +-define(llog(Level), + #{level=>Level,msg=>{"Line: ~p",[?LINE]},meta=>#{}}). +-define(plog, + #{level=>info, + msg=>{report,#{label=>{?MODULE,progress}}}, + meta=>#{line=>?LINE}}). +-define(rlog(Node), + #{level=>info, + msg=>{"Line: ~p",[?LINE]}, + meta=>#{gl=>rpc:call(Node,erlang,whereis,[user])}}). + +-define(TRY(X), my_try(fun() -> X end)). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [domain, + level, + progress, + remote_gl]. + +domain(_Config) -> + L1 = logger_filters:domain(L1=?dlog([]),{log,prefix_of,[]}), + stop = logger_filters:domain(?dlog([]),{stop,prefix_of,[]}), + L2 = logger_filters:domain(L2=?dlog([]),{log,starts_with,[]}), + stop = logger_filters:domain(?dlog([]),{stop,starts_with,[]}), + L3 = logger_filters:domain(L3=?dlog([]),{log,equals,[]}), + stop = logger_filters:domain(?dlog([]),{stop,equals,[]}), + ignore = logger_filters:domain(?dlog([]),{log,no_domain,[]}), + ignore = logger_filters:domain(?dlog([]),{stop,no_domain,[]}), + + L4 = logger_filters:domain(L4=?dlog([a]),{log,prefix_of,[a,b]}), + stop = logger_filters:domain(?dlog([a]),{stop,prefix_of,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,starts_with,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,starts_with,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,equals,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,equals,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,no_domain,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,no_domain,[a,b]}), + + ignore = logger_filters:domain(?dlog([a,b]),{log,prefix_of,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,prefix_of,[a]}), + L5 = logger_filters:domain(L5=?dlog([a,b]),{log,starts_with,[a]}), + stop = logger_filters:domain(?dlog([a,b]),{stop,starts_with,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{log,equals,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,equals,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{log,no_domain,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,no_domain,[a]}), + + ignore = logger_filters:domain(?ndlog,{log,prefix_of,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,prefix_of,[a]}), + ignore = logger_filters:domain(?ndlog,{log,starts_with,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,starts_with,[a]}), + ignore = logger_filters:domain(?ndlog,{log,equals,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,equals,[a]}), + L6 = logger_filters:domain(L6=?ndlog,{log,no_domain,[a]}), + stop = logger_filters:domain(?ndlog,{stop,no_domain,[a]}), + + L7 = logger_filters:domain(L7=?dlog([a,b,c,d]),{log,prefix_of,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,prefix_of,[a,b,c,d]}), + L8 = 
logger_filters:domain(L8=?dlog([a,b,c,d]),{log,starts_with,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,starts_with,[a,b,c,d]}), + L9 = logger_filters:domain(L9=?dlog([a,b,c,d]),{log,equals,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,equals,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,no_domain,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,no_domain,[a,b,c,d]}), + + %% A domain field in meta which is not a list is allowed by the + %% filter, but it will never match. + ignore = logger_filters:domain(?dlog(dummy),{log,prefix_of,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,prefix_of,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,starts_with,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,starts_with,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,equals,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,equals,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,no_domain,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,no_domain,[a,b,c,d]}), + + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,bad)), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{bad,prefix_of,[]})), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,bad,[]})), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,prefix_of,bad})), + + ok. + +level(_Config) -> + ignore = logger_filters:level(?llog(info),{log,lt,info}), + ignore = logger_filters:level(?llog(info),{stop,lt,info}), + ignore = logger_filters:level(?llog(info),{log,gt,info}), + ignore = logger_filters:level(?llog(info),{stop,gt,info}), + L1 = logger_filters:level(L1=?llog(info),{log,lteq,info}), + stop = logger_filters:level(?llog(info),{stop,lteq,info}), + L2 = logger_filters:level(L2=?llog(info),{log,gteq,info}), + stop = logger_filters:level(?llog(info),{stop,gteq,info}), + L3 = logger_filters:level(L3=?llog(info),{log,eq,info}), + stop = logger_filters:level(?llog(info),{stop,eq,info}), + ignore = logger_filters:level(?llog(info),{log,neq,info}), + ignore = logger_filters:level(?llog(info),{stop,neq,info}), + + ignore = logger_filters:level(?llog(error),{log,lt,info}), + ignore = logger_filters:level(?llog(error),{stop,lt,info}), + L4 = logger_filters:level(L4=?llog(error),{log,gt,info}), + stop = logger_filters:level(?llog(error),{stop,gt,info}), + ignore = logger_filters:level(?llog(error),{log,lteq,info}), + ignore = logger_filters:level(?llog(error),{stop,lteq,info}), + L5 = logger_filters:level(L5=?llog(error),{log,gteq,info}), + stop = logger_filters:level(?llog(error),{stop,gteq,info}), + ignore = logger_filters:level(?llog(error),{log,eq,info}), + ignore = logger_filters:level(?llog(error),{stop,eq,info}), + L6 = logger_filters:level(L6=?llog(error),{log,neq,info}), + stop = logger_filters:level(?llog(error),{stop,neq,info}), + + L7 = logger_filters:level(L7=?llog(info),{log,lt,error}), + stop = logger_filters:level(?llog(info),{stop,lt,error}), + ignore = logger_filters:level(?llog(info),{log,gt,error}), + ignore = logger_filters:level(?llog(info),{stop,gt,error}), + L8 = logger_filters:level(L8=?llog(info),{log,lteq,error}), + stop = logger_filters:level(?llog(info),{stop,lteq,error}), + ignore = logger_filters:level(?llog(info),{log,gteq,error}), + ignore = logger_filters:level(?llog(info),{stop,gteq,error}), + ignore = logger_filters:level(?llog(info),{log,eq,error}), + ignore = logger_filters:level(?llog(info),{stop,eq,error}), 
+ L9 = logger_filters:level(L9=?llog(info),{log,neq,error}), + stop = logger_filters:level(?llog(info),{stop,neq,error}), + + {error,badarg} = ?TRY(logger_filters:level(?llog(info),bad)), + {error,badarg} = ?TRY(logger_filters:level(?llog(info),{bad,eq,info})), + {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,bad,info})), + {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,eq,bad})), + + ok. + +progress(_Config) -> + L1 = logger_filters:progress(L1=?plog,log), + stop = logger_filters:progress(?plog,stop), + ignore = logger_filters:progress(?ndlog,log), + ignore = logger_filters:progress(?ndlog,stop), + + {error,badarg} = ?TRY(logger_filters:progress(?plog,bad)), + + ok. + +remote_gl(_Config) -> + {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]), + L1 = logger_filters:remote_gl(L1=?rlog(Node),log), + stop = logger_filters:remote_gl(?rlog(Node),stop), + ignore = logger_filters:remote_gl(?ndlog,log), + ignore = logger_filters:remote_gl(?ndlog,stop), + + {error,badarg} = ?TRY(logger_filters:remote_gl(?rlog(Node),bad)), + ok. + +remote_gl(cleanup,_Config) -> + [test_server:stop_node(N) || N<-nodes()]. + +%%%----------------------------------------------------------------- +%%% Called by macro ?TRY(X) +my_try(Fun) -> + try Fun() catch C:R -> {C,R} end. diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl new file mode 100644 index 0000000000..44e12d2d2a --- /dev/null +++ b/lib/kernel/test/logger_formatter_SUITE.erl @@ -0,0 +1,501 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_formatter_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). + +-define(TRY(X), my_try(fun() -> X end)). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [default, + legacy_header, + single_line, + template, + format_msg, + report_cb, + max_size, + depth, + format_mfa, + format_time, + level_or_msg_in_meta, + faulty_log, + faulty_config, + faulty_msg]. + +default(_Config) -> + String1 = format(info,{"~p",[term]},#{},#{}), + ct:log(String1), + [_Date,_Time,"info:\nterm\n"] = string:lexemes(String1," "), + + Time = timestamp(), + ExpectedTimestamp = default_time_format(Time), + String2 = format(info,{"~p",[term]},#{time=>Time},#{}), + ct:log(String2), + " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp), + ok. 
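+
+%% Illustration only, not called by any test case: the format/4 helper
+%% under "Internal" below wraps logger_formatter:format/2, which takes a
+%% log event map and a formatter config map. A minimal direct call,
+%% using a template that only prints the message, could look like this
+%% (the template is an example, not the default one):
+example_direct_format() ->
+    Event = #{level => info,
+              msg => {"~p",[example]},
+              meta => #{time => erlang:monotonic_time(microsecond)}},
+    lists:flatten(logger_formatter:format(Event, #{template => [msg,"\n"]})).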
+ +legacy_header(_Config) -> + Time = timestamp(), + String1 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>true}), + ct:log(String1), + "=INFO REPORT==== "++Rest = String1, + [Timestamp,"\nterm\n"] = string:lexemes(Rest," ="), + [D,M,Y,H,Min,S,Micro] = string:lexemes(Timestamp,"-:."), + integer(D,31), + integer(Y,2018,infinity), + integer(H,23), + integer(Min,59), + integer(S,59), + integer(Micro,999999), + true = lists:member(M,["Jan","Feb","Mar","Apr","May","Jun", + "Jul","Aug","Sep","Oct","Nov","Dec"]), + + String2 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>false}), + ct:log(String2), + ExpectedTimestamp = default_time_format(Time), + " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp), + + String3 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>bad}), + ct:log(String3), + String3 = String2, + + String4 = format(info,{"~p",[term]},#{time=>Time}, + #{legacy_header=>true, + single_line=>true}), % <---ignored + ct:log(String4), + String4 = String1, + + String5 = format(info,{"~p",[term]},#{}, % <--- no time + #{legacy_header=>true}), + ct:log(String5), + "=INFO REPORT==== "++_ = String5, + ok. + +single_line(_Config) -> + Time = timestamp(), + ExpectedTimestamp = default_time_format(Time), + String1 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>true}), + ct:log(String1), + " info: term\n" = string:prefix(String1,ExpectedTimestamp), + + String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>false}), + ct:log(String2), + " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp), + + String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>bad}), + ok. + +template(_Config) -> + Time = timestamp(), + + Template1 = [msg], + String1 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template1}), + ct:log(String1), + "term" = String1, + + Template2 = [msg,unknown], + String2 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template2}), + ct:log(String2), + "term" = String2, + + Template3 = ["string"], + String3 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template3}), + ct:log(String3), + "string" = String3, + + Template4 = ["string\nnewline"], + String4 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template4, + single_line=>true}), + ct:log(String4), + "string\nnewline" = String4, + + Template5 = [], + String5 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template5}), + ct:log(String5), + "" = String5, + + Ref6 = erlang:make_ref(), + Meta6 = #{atom=>some_atom, + integer=>632, + list=>[list,"string",4321,#{},{tuple}], + mfa=>{mod,func,0}, + pid=>self(), + ref=>Ref6, + string=>"some string", + time=>Time, + tuple=>{1,atom,"list"}, + nested=>#{subkey=>subvalue}}, + Template6 = lists:join(";",maps:keys(maps:remove(nested,Meta6)) ++ + [{nested,subkey}]), + String6 = format(info,{"~p",[term]},Meta6,#{template=>Template6, + single_line=>true}), + ct:log(String6), + SelfStr = pid_to_list(self()), + RefStr6 = ref_to_list(Ref6), + ListStr = "[list,\"string\",4321,#{},{tuple}]", + ExpectedTime6 = default_time_format(Time), + ["some_atom", + "632", + ListStr, + "mod:func/0", + SelfStr, + RefStr6, + "some string", + ExpectedTime6, + "{1,atom,\"list\"}", + "subvalue"] = string:lexemes(String6,";"), + + Meta7 = #{time=>Time, + nested=>#{key1=>#{subkey1=>value1}, + key2=>value2}}, + Template7 = lists:join(";",[nested, + {nested,key1}, + {nested,key1,subkey1}, + {nested,key2}, + {nested,key2,subkey2}, + {nested,key3}, + {nested,key3,subkey3}]), + String7 = 
format(info,{"~p",[term]},Meta7,#{template=>Template7, + single_line=>true}), + ct:log(String7), + [MultipleKeysStr, + "#{subkey1 => value1}", + "value1", + "value2", + "", + "", + ""] = string:split(String7,";",all), + %% Order of keys is not fixed + case MultipleKeysStr of + "#{key2 => value2,key1 => #{subkey1 => value1}}" -> ok; + "#{key1 => #{subkey1 => value1},key2 => value2}" -> ok; + _ -> ct:fail({full_nested_map_unexpected,MultipleKeysStr}) + end, + ok. + +format_msg(_Config) -> + Template = [msg], + + String1 = format(info,{"~p",[term]},#{},#{template=>Template}), + ct:log(String1), + "term" = String1, + + String2 = format(info,{"list",[term]},#{},#{template=>Template}), + ct:log(String2), + "FORMAT ERROR: \"list\" - [term]" = String2, + + String3 = format(info,{report,term},#{},#{template=>Template}), + ct:log(String3), + "term" = String3, + + String4 = format(info,{report,term}, + #{report_cb=>fun(_)-> {"formatted",[]} end}, + #{template=>Template}), + ct:log(String4), + "formatted" = String4, + + String5 = format(info,{report,term}, + #{report_cb=>fun(_)-> faulty_return end}, + #{template=>Template}), + ct:log(String5), + "REPORT_CB ERROR: term; Returned: faulty_return" = String5, + + String6 = format(info,{report,term}, + #{report_cb=>fun(_)-> erlang:error(fun_crashed) end}, + #{template=>Template}), + ct:log(String6), + "REPORT_CB CRASH: term; Reason: {error,fun_crashed}" = String6, + + %% strings are not formatted + String7 = format(info,{string,"string"}, + #{report_cb=>fun(_)-> {"formatted",[]} end}, + #{template=>Template}), + ct:log(String7), + "string" = String7, + + String8 = format(info,{string,['not',printable,list]}, + #{report_cb=>fun(_)-> {"formatted",[]} end}, + #{template=>Template}), + ct:log(String8), + "INVALID STRING: ['not',printable,list]" = String8, + + String9 = format(info,{string,"string"},#{},#{template=>Template}), + ct:log(String9), + "string" = String9, + + ok. + +report_cb(_Config) -> + Template = [msg], + MetaFun = fun(_) -> {"meta_rcb",[]} end, + ConfigFun = fun(_) -> {"config_rcb",[]} end, + "term" = format(info,{report,term},#{},#{template=>Template}), + "meta_rcb" = + format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template}), + "config_rcb" = + format(info,{report,term},#{},#{template=>Template, + report_cb=>ConfigFun}), + "config_rcb" = + format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template, + report_cb=>ConfigFun}), + ok. + +max_size(_Config) -> + Template = [msg], + "12345678901234567890" = + format(info,{"12345678901234567890",[]},#{},#{template=>Template}), + application:set_env(kernel,logger_max_size,11), + "12345678901234567890" = % min value is 50, so this is not limited + format(info,{"12345678901234567890",[]},#{},#{template=>Template}), + "12345678901234567890123456789012345678901234567..." = % 50 + format(info, + {"123456789012345678901234567890123456789012345678901234567890", + []}, + #{}, + #{template=>Template}), + application:set_env(kernel,logger_max_size,53), + "12345678901234567890123456789012345678901234567890..." = %53 + format(info, + {"123456789012345678901234567890123456789012345678901234567890", + []}, + #{}, + #{template=>Template}), + "1234567..." = + format(info,{"12345678901234567890",[]},#{},#{template=>Template, + max_size=>10}), + "12345678901234567890" = + format(info,{"12345678901234567890",[]},#{},#{template=>Template, + max_size=>unlimited}), + ok. +max_size(cleanup,_Config) -> + application:unset_env(kernel,logger_max_size), + ok. 
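+
+%% Illustration only: as the max_size cases above show, a max_size given
+%% in the formatter config takes precedence over the kernel
+%% logger_max_size variable, and 'unlimited' disables truncation. A
+%% minimal sketch using the same values as above:
+example_max_size_precedence() ->
+    application:set_env(kernel, logger_max_size, 53),
+    %% the config value wins over the env value, so the 20-character
+    %% message is truncated to "1234567...":
+    Result = format(info, {"12345678901234567890",[]}, #{},
+                    #{template=>[msg], max_size=>10}),
+    application:unset_env(kernel, logger_max_size),
+    Result.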
+ +depth(_Config) -> + Template = [msg], + "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template}), + application:set_env(kernel,error_logger_format_depth,11), + "[1,2,3,4,5,6,7,8,9,0|...]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template}), + application:set_env(kernel,logger_format_depth,12), + "[1,2,3,4,5,6,7,8,9,0,1|...]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template}), + "[1,2,3,4,5,6,7,8,9,0,1,2|...]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template, + depth=>13}), + "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template, + depth=>unlimited}), + ok. +depth(cleanup,_Config) -> + application:unset_env(kernel,logger_format_depth), + ok. + +format_mfa(_Config) -> + Template = [mfa], + + Meta1 = #{mfa=>{mod,func,0}}, + String1 = format(info,{"~p",[term]},Meta1,#{template=>Template}), + ct:log(String1), + "mod:func/0" = String1, + + Meta2 = #{mfa=>{mod,func,[]}}, + String2 = format(info,{"~p",[term]},Meta2,#{template=>Template}), + ct:log(String2), + "mod:func/0" = String2, + + Meta3 = #{mfa=>"mod:func/0"}, + String3 = format(info,{"~p",[term]},Meta3,#{template=>Template}), + ct:log(String3), + "mod:func/0" = String3, + + Meta4 = #{mfa=>othermfa}, + String4 = format(info,{"~p",[term]},Meta4,#{template=>Template}), + ct:log(String4), + "othermfa" = String4, + + ok. + +format_time(_Config) -> + Time1 = timestamp(), + ExpectedTimestamp1 = default_time_format(Time1), + String1 = format(info,{"~p",[term]},#{time=>Time1},#{}), + ct:log(String1), + " info:\nterm\n" = string:prefix(String1,ExpectedTimestamp1), + + Time2 = timestamp(), + ExpectedTimestamp2 = default_time_format(Time2,true), + String2 = format(info,{"~p",[term]},#{time=>Time2},#{utc=>true}), + ct:log(String2), + " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp2), + + application:set_env(kernel,logger_utc,true), + Time3 = timestamp(), + ExpectedTimestamp3 = default_time_format(Time3,true), + String3 = format(info,{"~p",[term]},#{time=>Time3},#{}), + ct:log(String3), + " info:\nterm\n" = string:prefix(String3,ExpectedTimestamp3), + + ok. + +format_time(cleanup,_Config) -> + application:unset_env(kernel,logger_utc), + ok. + +level_or_msg_in_meta(_Config) -> + %% The template contains atoms to pick out values from meta, + %% or level/msg to add these from the log event. What if you have + %% a key named 'level' or 'msg' in meta and want to display + %% its value? + %% For now we simply ignore Meta on this and display the + %% actual level and msg from the log event. + + Meta = #{level=>mylevel, + msg=>"metamsg"}, + Template = [level,";",msg], + String = format(info,{"~p",[term]},Meta,#{template=>Template}), + ct:log(String), + "info;term" = String, % so mylevel and "metamsg" are ignored + + ok. + +faulty_log(_Config) -> + %% Unexpected log (should be type logger:log()) - print error + {error, + function_clause, + {logger_formatter,format,[_,_],_}} = + ?TRY(logger_formatter:format(unexp_log,#{})), + ok. + +faulty_config(_Config) -> + {error, + function_clause, + {logger_formatter,format,[_,_],_}} = + ?TRY(logger_formatter:format(#{level=>info, + msg=>{"~p",[term]}, + meta=>#{time=>timestamp()}}, + unexp_config)), + ok. 
+ +faulty_msg(_Config) -> + {error, + function_clause, + {logger_formatter,_,[_,_],_}} = + ?TRY(logger_formatter:format(#{level=>info, + msg=>term, + meta=>#{time=>timestamp()}}, + #{})), + ok. + +%%%----------------------------------------------------------------- +%%% Internal +format(Level,Msg,Meta,Config) -> + format(#{level=>Level,msg=>Msg,meta=>add_time(Meta)},Config). + +format(Log,Config) -> + lists:flatten(logger_formatter:format(Log,Config)). + +default_time_format(Timestamp) -> + default_time_format(Timestamp,false). + +default_time_format(Timestamp0,Utc) when is_integer(Timestamp0) -> + Timestamp=Timestamp0+erlang:time_offset(microsecond), + %% calendar:system_time_to_rfc3339(Time,[{unit,microsecond}]). + Micro = Timestamp rem 1000000, + Sec = Timestamp div 1000000, + UniversalTime = erlang:posixtime_to_universaltime(Sec), + {Date,Time} = + if Utc -> UniversalTime; + true -> erlang:universaltime_to_localtime(UniversalTime) + end, + default_time_format(Date,Time,Micro). + +default_time_format({Y,M,D},{H,Min,S},Micro) -> + lists:flatten( + io_lib:format("~4w-~2..0w-~2..0w ~2w:~2..0w:~2..0w.~6..0w", + [Y,M,D,H,Min,S,Micro])). + +integer(Str) -> + is_integer(list_to_integer(Str)). +integer(Str,Max) -> + integer(Str,0,Max). +integer(Str,Min,Max) -> + Int = list_to_integer(Str), + Int >= Min andalso Int =<Max. + +%%%----------------------------------------------------------------- +%%% Called by macro ?TRY(X) +my_try(Fun) -> + try Fun() catch C:R:S -> {C,R,hd(S)} end. + +timestamp() -> + erlang:monotonic_time(microsecond). + +%% necessary? +add_time(#{time:=_}=Meta) -> + Meta; +add_time(Meta) -> + Meta#{time=>timestamp()}. diff --git a/lib/kernel/test/logger_legacy_SUITE.erl b/lib/kernel/test/logger_legacy_SUITE.erl new file mode 100644 index 0000000000..b59f5f7758 --- /dev/null +++ b/lib/kernel/test/logger_legacy_SUITE.erl @@ -0,0 +1,282 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_legacy_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +%%%----------------------------------------------------------------- +%%% This test suite test that log events from within OTP can be +%%% delivered to legacy error_logger event handlers on the same format +%%% as before 'logger' was introduced. +%%% +%%% Before changing the expected format of any of the log events in +%%% this suite, please make sure that the backwards incompatibility it +%%% introduces is ok. +%%% ----------------------------------------------------------------- + +-define(check(Expected), + receive Expected -> + [] = test_server:messages_get() + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {got,test_server:messages_get()}}) + end). 
+-define(check_no_flush(Expected), + receive Expected -> + ok + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {got,test_server:messages_get()}}) + end). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + logger:add_handler(error_logger,error_logger, + #{level=>info,filter_default=>stop}), + Config. + +end_per_suite(_Config) -> + logger:remove_handler(error_logger), + ok. + +init_per_group(std, Config) -> + ok = logger:set_handler_config( + error_logger,filters, + [{domain,{fun logger_filters:domain/2, + {log,prefix_of,[beam,erlang,otp]}}}]), + Config; +init_per_group(sasl, Config) -> + ok = logger:set_handler_config( + error_logger,filters, + [{domain,{fun logger_filters:domain/2, + {log,prefix_of,[beam,erlang,otp,sasl]}}}]), + + %% cth_log_redirect checks if sasl is started before displaying + %% any sasl reports - so just to see the real sasl reports in tc + %% log: + application:start(sasl), + Config; +init_per_group(_Group, Config) -> + Config. + +end_per_group(sasl, _Config) -> + application:stop(sasl), + ok; +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + error_logger:add_report_handler(?MODULE,{event_handler,self()}), + Config. + +end_per_testcase(Case, Config) -> + %% Using gen_event directly here, instead of + %% error_logger:delete_report_handler. This is to avoid + %% automatically stopping the error_logger process due to removing + %% the last handler. + gen_event:delete_handler(error_logger,?MODULE,[]), + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + [{std,[],[gen_server, + gen_event, + gen_fsm, + gen_statem]}, + {sasl,[],[sasl_reports, + supervisor_handle_info]}]. + +all() -> + [{group,std}, + {group,sasl}]. + +gen_server(_Config) -> + {ok,Pid} = gen_server:start(?MODULE,gen_server,[]), + Msg = fun() -> a=b end, + Pid ! Msg, + ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}), + ok = gen_server:cast(Pid,Msg), + ?check({error,"** Generic server ~tp terminating"++_, + [Pid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}). + +gen_event(_Config) -> + {ok,Pid} = gen_event:start(), + ok = gen_event:add_handler(Pid,?MODULE,gen_event), + Msg = fun() -> a=b end, + Pid ! Msg, + ?check({warning_msg,"** Undefined handle_info in ~tp"++_,[?MODULE,Msg]}), + gen_event:notify(Pid,Msg), + ?check({error,"** gen_event handler ~p crashed."++_, + [?MODULE,Pid,Msg,gen_event,{{badmatch,b},_}]}). + +gen_fsm(_Config) -> + {ok,Pid} = gen_fsm:start(?MODULE,gen_fsm,[]), + Msg = fun() -> a=b end, + Pid ! Msg, + ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}), + gen_fsm:send_all_state_event(Pid,Msg), + ?check({error,"** State machine ~tp terminating"++_, + [Pid,Msg,mystate,gen_fsm,{{badmatch,b},_}]}). + +gen_statem(_Config) -> + {ok,Pid} = gen_statem:start(?MODULE,gen_statem,[]), + Msg = fun() -> a=b end, + Pid ! Msg, + ?check({error,"** State machine ~tp terminating"++_, + [Pid,{info,Msg},{mystate,gen_statem},error,{badmatch,b}|_]}). 
+ +sasl_reports(Config) -> + App = {application,?MODULE,[{description, ""}, + {vsn, "1.0"}, + {modules, [?MODULE]}, + {registered, []}, + {applications, []}, + {mod, {?MODULE, []}}]}, + AppStr = io_lib:format("~p.",[App]), + Dir = ?config(priv_dir,Config), + AppFile = filename:join(Dir,?MODULE_STRING++".app"), + ok = file:write_file(AppFile,AppStr), + true = code:add_patha(Dir), + ok = application:start(?MODULE), + SupName = sup_name(), + Pid = whereis(SupName), + [{ch,ChPid,_,_}] = supervisor:which_children(Pid), + Node = node(), + ?check_no_flush({info_report,progress,[{application,?MODULE}, + {started_at,Node}]}), + ?check({info_report,progress,[{supervisor,{local,SupName}}, + {started,[{pid,ChPid}|_]}]}), + ok = gen_server:cast(ChPid, fun() -> + spawn_link(fun() -> receive x->ok end end) + end), + Msg = fun() -> a=b end, + ok = gen_server:cast(ChPid,Msg), + ?check_no_flush({error,"** Generic server ~tp terminating"++_, + [ChPid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}), + ?check_no_flush({error_report,crash_report, + [[{initial_call,_}, + {pid,ChPid}, + {registered_name,[]}, + {error_info,{error,{badmatch,b},_}}, + {ancestors,_}, + {message_queue_len,_}, + {messages,_}, + {links,[Pid,Neighbour]}, + {dictionary,_}, + {trap_exit,_}, + {status,_}, + {heap_size,_}, + {stack_size,_}, + {reductions,_}], + [{neighbour,[{pid,Neighbour}, + {registered_name,_}, + {initial_call,_}, + {current_function,_}, + {ancestors,_}, + {message_queue_len,_}, + {links,[ChPid]}, + {trap_exit,_}, + {status,_}, + {heap_size,_}, + {stack_size,_}, + {reductions,_}, + {current_stacktrace,_}]}]]}), + ?check_no_flush({error_report,supervisor_report, + [{supervisor,{local,SupName}}, + {errorContext,child_terminated}, + {reason,{{badmatch,b},_}}, + {offender,[{pid,ChPid}|_]}]}), + ?check({info_report,progress,[{supervisor,{local,SupName}}, + {started,_}]}), + + ok = application:stop(?MODULE), + ?check({info_report,std_info,[{application,?MODULE}, + {exited,stopped}, + {type,temporary}]}). + +sasl_reports(cleanup,_Config) -> + application:stop(?MODULE). + +supervisor_handle_info(_Config) -> + {ok,Pid} = supervisor:start_link({local,sup_name()},?MODULE,supervisor), + ?check({info_report,progress,[{supervisor,_},{started,_}]}), + Pid ! msg, + ?check({error,"Supervisor received unexpected message: ~tp~n",[msg]}). + +supervisor_handle_info(cleanup,_Config) -> + Pid = whereis(sup_name()), + unlink(Pid), + exit(Pid,shutdown). + +%%%----------------------------------------------------------------- +%%% Callbacks for error_logger event handler, gen_server, gen_statem, +%%% gen_fsm, gen_event, supervisor and application. +start(_,_) -> + supervisor:start_link({local,sup_name()},?MODULE,supervisor). + +init(supervisor) -> + {ok,{#{},[#{id=>ch,start=>{gen_server,start_link,[?MODULE,gen_server,[]]}}]}}; +init(StateMachine) when StateMachine==gen_statem; StateMachine==gen_fsm -> + {ok,mystate,StateMachine}; +init(State) -> + {ok,State}. + +%% error_logger event handler +handle_event({Tag,_Gl,{_Pid,Type,Report}},{_,Pid}=State) -> + Pid ! {Tag,Type,Report}, + {ok,State}; +%% other gen_event +handle_event(Fun,State) when is_function(Fun) -> + Fun(), + {next_state,State}. + +%% gen_fsm +handle_event(Fun,State,Data) when is_function(Fun) -> + Fun(), + {next_state,State,Data}. + +%% gen_statem +handle_event(info,Fun,State,Data) when is_function(Fun) -> + Fun(), + {next_state,State,Data}. + +%% gen_server +handle_cast(Fun,State) when is_function(Fun) -> + Fun(), + {noreply,State}. 
+ +%% gen_statem +callback_mode() -> + handle_event_function. + +%%%----------------------------------------------------------------- +%%% Internal +sup_name() -> + list_to_atom(?MODULE_STRING++"_sup"). diff --git a/lib/kernel/test/logger_simple_SUITE.erl b/lib/kernel/test/logger_simple_SUITE.erl new file mode 100644 index 0000000000..5d8d32492d --- /dev/null +++ b/lib/kernel/test/logger_simple_SUITE.erl @@ -0,0 +1,247 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_simple_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +-define(check_no_log,[] = test_server:messages_get()). +-define(check(Expected), + receive {log,Expected} -> + [] = test_server:messages_get() + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {expected,Expected}, + {got,test_server:messages_get()}}) + end). + +-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}). +-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + #{handlers:=Hs0} = logger:i(), + Hs = lists:keydelete(cth_log_redirect,1,Hs0), + [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs], + Env = [{App,Key,application:get_env(App,Key)} || + {App,Key} <- [{kernel,logger_dest}, + {kernel,logger_level}]], + [{env,Env},{logger,Hs}|Config]. + +end_per_suite(Config) -> + [application:set_env(App,Key,Val) || {App,Key,Val} <- ?config(env,Config)], + Hs = ?config(logger,Config), + [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs], + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [start_stop, + get_buffer, + replace_file, + replace_disk_log + ]. + +start_stop(_Config) -> + undefined = whereis(logger_simple), + register(logger_simple,self()), + {error,_} = logger:add_handler(logger_simple, + logger_simple, + #{filter_default=>log}), + unregister(logger_simple), + ok = logger:add_handler(logger_simple,logger_simple,#{filter_default=>log}), + Pid = whereis(logger_simple), + true = is_pid(Pid), + ok = logger:remove_handler(logger_simple), + false = is_pid(whereis(logger_simple)), + ok. +start_stop(cleanup,_Config) -> + logger:remove_handler(logger_simple). 
+ +get_buffer(_Config) -> + %% Start simple without buffer + ok = logger:add_handler(logger_simple,logger_simple, + #{filter_default=>log}), + logger:emergency(?str), + logger:alert(?str,[]), + logger:error(?map_rep), + logger:info(?keyval_rep), + {ok,[]} = logger_simple:get_buffer(), % no buffer + ok = logger:remove_handler(logger_simple), + + %% Start with buffer + ok = logger:add_handler(logger_simple,logger_simple, + #{filter_default=>log, + logger_simple=>#{buffer=>true}}), + logger:emergency(M1=?str), + logger:alert(M2=?str,[]), + logger:error(M3=?map_rep), + logger:info(M4=?keyval_rep), + logger:info(M41=?keyval_rep++[not_key_val]), + error_logger:error_report(some_type,M5=?map_rep), + error_logger:warning_report("some_type",M6=?map_rep), + logger:critical(M7=?str,[A7=?keyval_rep]), + logger:notice(M8=["fake",string,"line:",?LINE]), + {ok,Buffered1} = logger_simple:get_buffer(), + [#{level:=emergency,msg:={string,M1}}, + #{level:=alert,msg:={M2,[]}}, + #{level:=error,msg:={report,M3}}, + #{level:=info,msg:={report,M4}}, + #{level:=info,msg:={report,M41}}, + #{level:=error,msg:={report,#{label:={error_logger,error_report}, + report:=M5}}}, + #{level:=warning,msg:={report,#{label:={error_logger,warning_report}, + report:=M6}}}, + #{level:=critical,msg:={M7,[A7]}}, + #{level:=notice,msg:={string,M8}}] = Buffered1, + + %% Keep logging - should not buffer any more + logger:emergency(?str), + logger:alert(?str,[]), + logger:error(?map_rep), + logger:info(?keyval_rep), + {ok,[]} = logger_simple:get_buffer(), + ok = logger:remove_handler(logger_simple), + + %% Fill buffer and drop + ok = logger:add_handler(logger_simple,logger_simple, + #{filter_default=>log, + logger_simple=>#{buffer=>true}}), + logger:emergency(M9=?str), + M10=?str, + [logger:info(M10) || _ <- lists:seq(1,8)], + logger:error(M11=?str), + logger:error(?str), + logger:error(?str), + {ok,Buffered3} = logger_simple:get_buffer(), + 11 = length(Buffered3), + [#{level:=emergency,msg:={string,M9}}, + #{level:=info,msg:={string,M10}}, + #{level:=info,msg:={string,M10}}, + #{level:=info,msg:={string,M10}}, + #{level:=info,msg:={string,M10}}, + #{level:=info,msg:={string,M10}}, + #{level:=info,msg:={string,M10}}, + #{level:=info,msg:={string,M10}}, + #{level:=info,msg:={string,M10}}, + #{level:=error,msg:={string,M11}}, + #{level:=info,msg:={"Simple handler buffer full, dropped ~w messages",[2]}}] + = Buffered3, + ok. +get_buffer(cleanup,_Config) -> + logger:remove_handler(logger_simple). + +replace_file(Config) -> + ok = logger:add_handler(logger_simple,logger_simple, + #{filter_default=>log, + logger_simple=>#{buffer=>true}}), + logger:emergency(M1=?str), + logger:alert(M2=?str,[]), + logger:error(?map_rep), + logger:info(?keyval_rep), + undefined = whereis(?STANDARD_HANDLER), + PrivDir = ?config(priv_dir,Config), + File = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)++".log"), + + application:set_env(kernel,logger_dest,{file,File}), + application:set_env(kernel,logger_level,info), + + ok = logger:setup_standard_handler(), + true = is_pid(whereis(?STANDARD_HANDLER)), + ok = logger_std_h:filesync(?STANDARD_HANDLER), + {ok,Bin} = file:read_file(File), + Lines = [unicode:characters_to_list(L) || + L <- binary:split(Bin,<<"\n">>,[global,trim])], + ["=EMERGENCY REPORT===="++_, + M1, + "=ALERT REPORT===="++_, + M2, + "=ERROR REPORT===="++_, + _, + _, + "=INFO REPORT===="++_, + _, + _] = Lines, + ok. +replace_file(cleanup,_Config) -> + logger:remove_handler(?STANDARD_HANDLER), + logger:remove_handler(logger_simple). 
+ +replace_disk_log(Config) -> + ok = logger:add_handler(logger_simple,logger_simple, + #{filter_default=>log, + logger_simple=>#{buffer=>true}}), + logger:emergency(M1=?str), + logger:alert(M2=?str,[]), + logger:error(?map_rep), + logger:info(?keyval_rep), + undefined = whereis(?STANDARD_HANDLER), + PrivDir = ?config(priv_dir,Config), + File = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)), + + application:set_env(kernel,logger_dest,{disk_log,File}), + application:set_env(kernel,logger_level,info), + + ok = logger:setup_standard_handler(), + true = is_pid(whereis(?STANDARD_HANDLER)), + ok = logger_disk_log_h:disk_log_sync(?STANDARD_HANDLER), + {ok,Bin} = file:read_file(File++".1"), + Lines = [unicode:characters_to_list(L) || + L <- binary:split(Bin,<<"\n">>,[global,trim])], + ["=EMERGENCY REPORT===="++_, + M1, + "=ALERT REPORT===="++_, + M2, + "=ERROR REPORT===="++_, + _, + _, + "=INFO REPORT===="++_, + _, + _|_] = Lines, % the tail might be an info report about opening the disk log + ok. +replace_disk_log(cleanup,_Config) -> + logger:remove_handler(?STANDARD_HANDLER), + logger:remove_handler(logger_simple). + diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl new file mode 100644 index 0000000000..e940e0a026 --- /dev/null +++ b/lib/kernel/test/logger_std_h_SUITE.erl @@ -0,0 +1,1396 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_std_h_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). +-include_lib("kernel/src/logger_h_common.hrl"). +-include_lib("stdlib/include/ms_transform.hrl"). +-include_lib("kernel/include/file.hrl"). + +-define(check_no_log, [] = test_server:messages_get()). +-define(check(Expected), + receive + {log,Expected} -> + [] = test_server:messages_get() + after 5000 -> + ct:fail({report_not_received, + {line,?LINE}, + {expected,Expected}, + {got,test_server:messages_get()}}) + end). + +-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(bin(Msg), list_to_binary(Msg++"\n")). +-define(domain,#{domain=>[?MODULE]}). + +-define(FILESYNC_REP_INT, if is_atom(?FILESYNC_REPEAT_INTERVAL) -> 5500; + true -> ?FILESYNC_REPEAT_INTERVAL + 500 + end). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + timer:start(), % to avoid progress report + {ok,{?STANDARD_HANDLER,#{formatter:=OrigFormatter}}} = + logger:get_handler_config(?STANDARD_HANDLER), + [{formatter,OrigFormatter}|Config]. + +end_per_suite(Config) -> + {OrigMod,OrigConf} = proplists:get_value(formatter,Config), + logger:set_handler_config(?STANDARD_HANDLER,formatter,{OrigMod,OrigConf}), + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. 
+ +init_per_testcase(TestHooksCase, Config) when + TestHooksCase == write_failure; + TestHooksCase == sync_failure -> + if ?TEST_HOOKS_TAB == undefined -> + {skip,"Define the TEST_HOOKS macro to run this test"}; + true -> + ct:print("********** ~w **********", [TestHooksCase]), + Config + end; +init_per_testcase(TestCase, Config) -> + ct:print("********** ~w **********", [TestCase]), + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [add_remove_instance_tty, + add_remove_instance_standard_io, + add_remove_instance_standard_error, + add_remove_instance_file1, + add_remove_instance_file2, + default_formatter, + errors, + formatter_fail, + config_fail, + crash_std_h_to_file, + crash_std_h_to_disk_log, + bad_input, + info_and_reset, + reconfig, + file_opts, + filesync, + write_failure, + sync_failure, + op_switch_to_sync_file, + op_switch_to_sync_tty, + op_switch_to_drop_file, + op_switch_to_drop_tty, + op_switch_to_flush_file, + op_switch_to_flush_tty, + limit_burst_disabled, + limit_burst_enabled_one, + limit_burst_enabled_period, + kill_disabled, + qlen_kill_new, + qlen_kill_std, + mem_kill_new, + mem_kill_std, + restart_after, + handler_requests_under_load + ]. + +add_remove_instance_tty(_Config) -> + {error,{handler_not_added,{invalid_config,logger_std_h,{type,tty}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{type => tty}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + ok. + +add_remove_instance_standard_io(_Config) -> + add_remove_instance_nofile(standard_io). +add_remove_instance_standard_io(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_standard_error(_Config) -> + add_remove_instance_nofile(standard_error). +add_remove_instance_standard_error(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_file1(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,"stdlog1.txt"), + Type = {file,Log}, + add_remove_instance_file(Log, Type). +add_remove_instance_file1(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_file2(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,"stdlog2.txt"), + Type = {file,Log,[raw,append]}, + add_remove_instance_file(Log, Type). +add_remove_instance_file2(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_file(Log, Type) -> + ok = logger:add_handler(?MODULE, + logger_std_h, + #{logger_std_h => #{type => Type}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + Pid = whereis(?MODULE), + true = is_pid(Pid), + logger:info(M1=?msg,?domain), + ?check(M1), + B1 = ?bin(M1), + try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT), + ok = logger:remove_handler(?MODULE), + timer:sleep(500), + undefined = whereis(?MODULE), + logger:info(?msg,?domain), + ?check_no_log, + try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT), + ok. + +default_formatter(_Config) -> + ok = logger:set_handler_config(?STANDARD_HANDLER,formatter, + {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}), + ct:capture_start(), + logger:info(M1=?msg), + timer:sleep(100), + ct:capture_stop(), + [Msg] = ct:capture_get(), + match = re:run(Msg,"=INFO REPORT====.*\n"++M1,[{capture,none}]), + ok. 
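The instance tests above all follow one pattern: add a logger_std_h handler whose type is standard_io, standard_error, {file,Log} or {file,Log,Modes}, log a message through it, and check the output before and after logger:remove_handler/1. Condensed into a plain usage sketch, not part of this diff (handler id and path are placeholders):

-module(std_h_instance_demo).
-export([run/1]).

%% Sketch: a file-backed logger_std_h instance, using the same config keys
%% as add_remove_instance_file/2 above. The id my_h is a placeholder.
run(Log) ->
    ok = logger:add_handler(my_h, logger_std_h,
                            #{logger_std_h => #{type => {file, Log}},
                              filter_default => log}),
    logger:info("hello from my_h"),
    ok = logger_std_h:filesync(my_h),      % make sure the line is on disk
    {ok, Bin} = file:read_file(Log),
    ok = logger:remove_handler(my_h),
    Bin.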
+ +errors(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,?FUNCTION_NAME), + + ok = logger:add_handler(?MODULE,logger_std_h,#{}), + {error,{already_exist,?MODULE}} = + logger:add_handler(?MODULE,logger_std_h,#{}), + + {error,{not_found,no_such_name}} = logger:remove_handler(no_such_name), + + ok = logger:remove_handler(?MODULE), + {error,{not_found,?MODULE}} = logger:remove_handler(?MODULE), + + {error, + {handler_not_added, + {invalid_config,logger_std_h,{type,faulty_type}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{type => faulty_type}}), + + NoDir = lists:concat(["/",?MODULE,"_dir"]), + {error, + {handler_not_added,{{open_failed,NoDir,eacces},_}}} = + logger:add_handler(myh2,logger_std_h, + #{logger_std_h=>#{type=>{file,NoDir}}}), + + {error, + {handler_not_added,{{open_failed,Log,_},_}}} = + logger:add_handler(myh3,logger_std_h, + #{logger_std_h=>#{type=>{file,Log,[bad_file_opt]}}}), + + ok = logger:info(?msg). + +errors(cleanup,_Config) -> + logger:remove_handler(?MODULE). + +formatter_fail(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,?FUNCTION_NAME), + + %% no formatter + ok = logger:add_handler(?MODULE, + logger_std_h, + #{logger_std_h => #{type => {file,Log}}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}), + Pid = whereis(?MODULE), + true = is_pid(Pid), + {ok,#{handlers:=H}} = logger:get_logger_config(), + true = lists:member(?MODULE,H), + + %% Formatter is added automatically + {ok,{_,#{formatter:={logger_formatter,_}}}} = + logger:get_handler_config(?MODULE), + logger:info(M1=?msg,?domain), + Got1 = try_match_file(Log,"=INFO REPORT====.*\n"++M1,5000), + + ok = logger:set_handler_config(?MODULE,formatter,{nonexistingmodule,#{}}), + logger:info(M2=?msg,?domain), + Got2 = try_match_file(Log, + Got1++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M2, + 5000), + + ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,crash}), + logger:info(M3=?msg,?domain), + Got3 = try_match_file(Log, + Got2++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M3, + 5000), + + ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,bad_return}), + logger:info(?msg,?domain), + try_match_file(Log, + Got3++"FORMATTER ERROR: bad_return_value", + 5000), + + %% Check that handler is still alive and was never dead + Pid = whereis(?MODULE), + {ok,#{handlers:=H}} = logger:get_logger_config(), + + ok. + +formatter_fail(cleanup,_Config) -> + logger:remove_handler(?MODULE). 
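formatter_fail depends on the handler surviving a broken formatter by printing FORMATTER CRASH or FORMATTER ERROR lines instead of dying. A formatter in this API is a {Module,FConfig} pair where Module:format(LogEvent,FConfig) returns chardata, as the suite's own format/2 callback near the end of this file does. For comparison, a minimal well-behaved formatter (sketch, not part of this diff; the module name is a placeholder):

-module(oneline_formatter).
-export([format/2]).

%% Sketch: logger_std_h calls Module:format(LogEvent, FConfig) and writes the
%% returned chardata; msg is {string,S}, {report,R} or {Format,Args}
%% (cf. the suite's own format/2 callback at the end of this file).
format(#{level := Level, msg := {string, Str}}, _FConfig) ->
    [atom_to_list(Level), ": ", Str, "\n"];
format(#{level := Level, msg := {report, Report}}, _FConfig) ->
    [atom_to_list(Level), ": ", io_lib:format("~p", [Report]), "\n"];
format(#{level := Level, msg := {Format, Args}}, _FConfig) when is_list(Format) ->
    [atom_to_list(Level), ": ", io_lib:format(Format, Args), "\n"].

It would be selected with formatter => {oneline_formatter, #{}} in the handler configuration.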
+ +config_fail(_Config) -> + {error,{handler_not_added,{invalid_config,logger_std_h,{bad,bad}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{bad => bad}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + {error,{handler_not_added,{invalid_config,logger_std_h, + {restart_type,bad}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{restart_type => bad}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + {error,{handler_not_added,{invalid_levels,{42,42,_}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{toggle_sync_qlen=>42, + drop_new_reqs_qlen=>42}}), + + ok = logger:add_handler(?MODULE,logger_std_h, + #{filter_default=>log, + formatter=>{?MODULE,self()}}), + {error,{illegal_config_change,_,_}} = + logger:set_handler_config(?MODULE,logger_std_h, + #{type=>{file,"file"}}), + {error,{illegal_config_change,_,_}} = + logger:set_handler_config(?MODULE,id,bad), + {error,{invalid_levels,_}} = + logger:set_handler_config(?MODULE,logger_std_h, + #{toggle_sync_qlen=>100, + flush_reqs_qlen=>99}), + {error,{invalid_config,logger_std_h,{filesync_rep_int,2000}}} = + logger:set_handler_config(?MODULE, logger_std_h, + #{filesync_rep_int => 2000}), + ok. + +config_fail(cleanup,_Config) -> + logger:remove_handler(?MODULE). + +crash_std_h_to_file(Config) -> + crash_std_h(Config,?FUNCTION_NAME,logger_dest,file). +crash_std_h_to_file(cleanup,_Config) -> + crash_std_h(cleanup). + +crash_std_h_to_disk_log(Config) -> + crash_std_h(Config,?FUNCTION_NAME,logger_dest,disk_log). +crash_std_h_to_disk_log(cleanup,_Config) -> + crash_std_h(cleanup). + +crash_std_h(Config,Func,Var,Type) -> + Dir = ?config(priv_dir,Config), + File = lists:concat([?MODULE,"_",Func,".log"]), + Log = filename:join(Dir,File), + Pa = filename:dirname(code:which(?MODULE)), + TypeAndLog = + case os:type() of + {win32,_} -> + lists:concat([" {",Type,",\\\"",Log,"\\\"}"]); + _ -> + lists:concat([" \'{",Type,",\"",Log,"\"}\'"]) + end, + Args = lists:concat([" -kernel ",Var,TypeAndLog," -pa ",Pa]), + Name = lists:concat([?MODULE,"_",Func]), + ct:pal("Starting ~p with ~tp", [Name,Args]), + %% Start a node which prints kernel logs to the destination specified by Type + {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]), + Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]), + ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, + {?MODULE,self()}]), + ok = log_on_remote_node(Node,"dummy1"), + ?check("dummy1"), + {ok,Bin1} = sync_and_read(Node,Type,Log), + <<"dummy1\n">> = binary:part(Bin1,{byte_size(Bin1),-7}), + + %% Kill the logger_std_h process + exit(Pid, kill), + + %% Wait a bit, then check that it is gone + timer:sleep(2000), + undefined = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]), + + %% Check that file is not empty + {ok,Bin2} = sync_and_read(Node,Type,Log), + <<"dummy1\n">> = binary:part(Bin2,{byte_size(Bin2),-7}), + ok. + +%% Can not use rpc:call here, since the code would execute on a +%% process with group_leader on this (the calling) node, and thus +%% logger would send the log event to the logger process here instead +%% of logging it itself. +log_on_remote_node(Node,Msg) -> + _ = spawn_link(Node, + fun() -> erlang:group_leader(whereis(user),self()), + logger:info(Msg) + end), + ok. + + +crash_std_h(cleanup) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. 
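crash_std_h boots a peer node with the pre-release -kernel logger_dest {file|disk_log, Path} argument and checks that killing the handler process does not lose what was already written. The same destination switch is performed programmatically by replace_file and replace_disk_log earlier in this diff; a sketch of that sequence, not part of this diff and only meaningful while no standard handler is installed yet, as those tests assert:

-module(std_dest_demo).
-export([to_file/1]).

%% Sketch: point the standard handler at a file, as replace_file/1 does.
%% Assumes the pre-release logger_dest/logger_level parameters and
%% logger:setup_standard_handler/0 used in this diff; later releases renamed them.
to_file(Path) ->
    application:set_env(kernel, logger_dest, {file, Path}),
    application:set_env(kernel, logger_level, info),
    ok = logger:setup_standard_handler().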
+ +sync_and_read(Node,disk_log,Log) -> + rpc:call(Node,logger_disk_log_h,disk_log_sync,[?STANDARD_HANDLER]), + case file:read_file(Log ++ ".1") of + {ok,<<>>} -> + timer:sleep(5000), + file:read_file(Log ++ ".1"); + Ok -> + Ok + end; +sync_and_read(Node,file,Log) -> + rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]), + case file:read_file(Log) of + {ok,<<>>} -> + timer:sleep(5000), + file:read_file(Log); + Ok -> + Ok + end. + +bad_input(_Config) -> + {error,{badarg,{filesync,["BadType"]}}} = logger_std_h:filesync("BadType"), + {error,{badarg,{info,["BadType"]}}} = logger_std_h:info("BadType"), + {error,{badarg,{reset,["BadType"]}}} = logger_std_h:reset("BadType"). + + +info_and_reset(_Config) -> + #{id := ?STANDARD_HANDLER} = logger_std_h:info(?STANDARD_HANDLER), + ok = logger_std_h:reset(?STANDARD_HANDLER). + +reconfig(Config) -> + Dir = ?config(priv_dir,Config), + ok = logger:add_handler(?MODULE, + logger_std_h, + #{logger_std_h => #{type => standard_io}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + #{id := ?MODULE, + type := standard_io, + file_ctrl_pid := FileCtrlPid, + toggle_sync_qlen := ?TOGGLE_SYNC_QLEN, + drop_new_reqs_qlen := ?DROP_NEW_REQS_QLEN, + flush_reqs_qlen := ?FLUSH_REQS_QLEN, + enable_burst_limit := ?ENABLE_BURST_LIMIT, + burst_limit_size := ?BURST_LIMIT_SIZE, + burst_window_time := ?BURST_WINDOW_TIME, + enable_kill_overloaded := ?ENABLE_KILL_OVERLOADED, + handler_overloaded_qlen := ?HANDLER_OVERLOADED_QLEN, + handler_overloaded_mem := ?HANDLER_OVERLOADED_MEM, + handler_restart_after := ?HANDLER_RESTART_AFTER, + filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} = + logger_std_h:info(?MODULE), + + ok = logger:set_handler_config(?MODULE, logger_std_h, + #{toggle_sync_qlen => 1, + drop_new_reqs_qlen => 2, + flush_reqs_qlen => 3, + enable_burst_limit => false, + burst_limit_size => 10, + burst_window_time => 10, + enable_kill_overloaded => true, + handler_overloaded_qlen => 100000, + handler_overloaded_mem => 10000000, + handler_restart_after => never, + filesync_repeat_interval => no_repeat}), + #{id := ?MODULE, + type := standard_io, + file_ctrl_pid := FileCtrlPid, + toggle_sync_qlen := 1, + drop_new_reqs_qlen := 2, + flush_reqs_qlen := 3, + enable_burst_limit := false, + burst_limit_size := 10, + burst_window_time := 10, + enable_kill_overloaded := true, + handler_overloaded_qlen := 100000, + handler_overloaded_mem := 10000000, + handler_restart_after := never, + filesync_repeat_interval := no_repeat} = logger_std_h:info(?MODULE), + ok. + +reconfig(cleanup, _Config) -> + logger:remove_handler(?MODULE). + + +file_opts(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])), + BadFileOpts = [raw], + BadType = {file,Log,BadFileOpts}, + {error,{handler_not_added,{{open_failed,Log,enoent},_}}} = + logger:add_handler(?MODULE, logger_std_h, + #{logger_std_h => #{type => BadType}}), + + OkFileOpts = [raw,append], + OkType = {file,Log,OkFileOpts}, + ok = logger:add_handler(?MODULE, + logger_std_h, + #{logger_std_h => #{type => OkType}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + + #{type := OkType} = logger_std_h:info(?MODULE), + logger:info(M1=?msg,?domain), + ?check(M1), + B1 = ?bin(M1), + try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT), + ok. +file_opts(cleanup, _Config) -> + logger:remove_handler(?MODULE). 
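reconfig reads the handler defaults back with logger_std_h:info/1 and then overrides every overload-protection parameter through logger:set_handler_config(Id, logger_std_h, Map). Trimmed down to the calls a user would make (sketch, not part of this diff; option names are the pre-release ones used throughout this suite):

-module(reconfig_demo).
-export([tune/0]).

%% Sketch: inspect and retune a running logger_std_h instance (id my_h assumed).
tune() ->
    #{toggle_sync_qlen := _Old} = logger_std_h:info(my_h),
    ok = logger:set_handler_config(my_h, logger_std_h,
                                   #{toggle_sync_qlen   => 10,
                                     drop_new_reqs_qlen => 200,
                                     flush_reqs_qlen    => 1000}),
    #{toggle_sync_qlen := 10} = logger_std_h:info(my_h),
    ok.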
+ + +filesync(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])), + Type = {file,Log}, + ok = logger:add_handler(?MODULE, + logger_std_h, + #{logger_std_h => #{type => Type}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + Tester = self(), + TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) -> + Pid ! {trace,Mod,Func,Details}, + Pid; + ({trace,TPid,'receive',Received}, Pid) -> + Pid ! {trace,TPid,Received}, + Pid + end, + {ok,_} = dbg:tracer(process, {TraceFun, Tester}), + FileCtrlPid = maps:get(file_ctrl_pid , logger_std_h:info(?MODULE)), + {ok,_} = dbg:p(FileCtrlPid, [c]), + {ok,_} = dbg:tpl(logger_std_h, write_to_dev, 5, []), + {ok,_} = dbg:tpl(logger_std_h, sync_dev, 4, []), + {ok,_} = dbg:tp(file, datasync, 1, []), + + logger:info("first", ?domain), + %% wait for automatic filesync + timer:sleep(?FILESYNC_REP_INT), + Expected1 = [{log,"first"}, {trace,logger_std_h,write_to_dev}, + {trace,logger_std_h,sync_dev}, {trace,file,datasync}], + + logger:info("second", ?domain), + %% do explicit filesync + logger_std_h:filesync(?MODULE), + %% a second filesync should be ignored + logger_std_h:filesync(?MODULE), + Expected2 = [{log,"second"}, {trace,logger_std_h,write_to_dev}, + {trace,logger_std_h,sync_dev}, {trace,file,datasync}], + + %% check that if there's no repeated filesync active, + %% a filesync is still performed when handler goes idle + logger:set_handler_config(?MODULE, logger_std_h, + #{filesync_repeat_interval => no_repeat}), + no_repeat = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)), + logger:info("third", ?domain), + timer:sleep(?IDLE_DETECT_TIME_MSEC*2), + logger:info("fourth", ?domain), + %% wait for automatic filesync + timer:sleep(?IDLE_DETECT_TIME_MSEC*2), + Expected3 = [{log,"third"}, {trace,logger_std_h,write_to_dev}, + {log,"fourth"}, {trace,logger_std_h,write_to_dev}, + {trace,logger_std_h,sync_dev}, {trace,file,datasync}], + + dbg:stop_clear(), + + %% verify that filesync has been performed as expected + Received1 = lists:map(fun({trace,M,F,_}) -> {trace,M,F}; + (Other) -> Other + end, test_server:messages_get()), + ct:pal("Trace #1 =~n~p", [Received1]), + Received1 = Expected1 ++ Expected2 ++ Expected3, + + try_read_file(Log, {ok,<<"first\nsecond\nthird\nfourth\n">>}, 1000), + + {ok,_} = dbg:tracer(process, {TraceFun, Tester}), + {ok,_} = dbg:p(whereis(?MODULE), [c]), + {ok,_} = dbg:tpl(logger_std_h, handle_cast, 2, []), + + %% switch repeated filesync on and verify that the looping works + SyncInt = 1000, + WaitT = 4500, + logger:set_handler_config(?MODULE, logger_std_h, + #{filesync_repeat_interval => SyncInt}), + SyncInt = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)), + timer:sleep(WaitT), + logger:set_handler_config(?MODULE, logger_std_h, + #{filesync_repeat_interval => no_repeat}), + dbg:stop_clear(), + + Received2 = lists:map(fun({trace,_M,handle_cast,[{Op,_},_]}) -> {trace,Op}; + (Other) -> Other + end, test_server:messages_get()), + ct:pal("Trace #2 =~n~p", [Received2]), + OneSync = [{trace,repeated_filesync}], + %% receive 1 initial repeated_filesync, then 1 per sec + Received2 = + lists:flatten([OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]), + ok. +filesync(cleanup, _Config) -> + logger:remove_handler(?MODULE). 
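The filesync case covers three sync paths: the repeated sync driven by filesync_repeat_interval, an explicit logger_std_h:filesync/1, and the sync performed when the handler goes idle. For a handler of your own, the two knobs look roughly like this (sketch, not part of this diff; id and path are placeholders):

-module(filesync_demo).
-export([run/1]).

%% Sketch: explicit sync plus turning the repeated sync off and back on.
run(Log) ->
    ok = logger:add_handler(my_h, logger_std_h,
                            #{logger_std_h => #{type => {file, Log}},
                              filter_default => log}),
    logger:info("flushed on demand"),
    ok = logger_std_h:filesync(my_h),                        % sync right now
    ok = logger:set_handler_config(my_h, logger_std_h,
                                   #{filesync_repeat_interval => no_repeat}),
    ok = logger:set_handler_config(my_h, logger_std_h,
                                   #{filesync_repeat_interval => 1000}),   % sync every second
    ok = logger:remove_handler(my_h).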
+ +write_failure(Config) -> + Dir = ?config(priv_dir, Config), + File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), + Log = filename:join(Dir, File), + Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [file_write,ok]), + + ok = log_on_remote_node(Node, "Logged1"), + rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]), + ?check_no_log, + try_read_file(Log, {ok,<<"Logged1\n">>}, ?FILESYNC_REP_INT), + + rpc:call(Node, ?MODULE, set_result, [file_write,{error,terminated}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + ?check({error,{?STANDARD_HANDLER,write,Log,{error,terminated}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [file_write,{error,eacces}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + ?check({error,{?STANDARD_HANDLER,write,Log,{error,eacces}}}), + + rpc:call(Node, ?MODULE, set_result, [file_write,ok]), + ok = log_on_remote_node(Node, "Logged2"), + rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]), + ?check_no_log, + try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, ?FILESYNC_REP_INT), + ok. +write_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. + +sync_failure(Config) -> + Dir = ?config(priv_dir, Config), + File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), + Log = filename:join(Dir, File), + Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]), + + SyncInt = 500, + ok = rpc:call(Node, logger, set_handler_config, + [?STANDARD_HANDLER, logger_std_h, + #{filesync_repeat_interval => SyncInt}]), + Info = rpc:call(Node, logger_std_h, info, [?STANDARD_HANDLER]), + SyncInt = maps:get(filesync_repeat_interval, Info), + + ok = log_on_remote_node(Node, "Logged1"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,terminated}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,terminated}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,eacces}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,eacces}}}), + + rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]), + ok = log_on_remote_node(Node, "Logged2"), + ?check_no_log, + ok. +sync_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. 
+ +start_std_h_on_new_node(_Config, Func, Log) -> + Pa = filename:dirname(code:which(?MODULE)), + Dest = + case os:type() of + {win32,_} -> + lists:concat([" {file,\\\"",Log,"\\\"}"]); + _ -> + lists:concat([" \'{file,\"",Log,"\"}\'"]) + end, + Args = lists:concat([" -kernel ",logger_dest,Dest," -pa ",Pa]), + Name = lists:concat([?MODULE,"_",Func]), + ct:pal("Starting ~s with ~tp", [Name,Args]), + {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]), + Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]), + true = is_pid(Pid), + ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, + {?MODULE,nl}]), + Node. + +%% functions for test hook macros to be called by rpc +set_internal_log(Mod, Func) -> + ?set_internal_log({Mod,Func}). +set_result(Op, Result) -> + ?set_result(Op, Result). +set_defaults() -> + ?set_defaults(). + +%% internal log function that sends the term to the test case process +internal_log(Type, Term) -> + [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester), + Tester ! {log,{Type,Term}}, + logger:internal_log(Type, Term), + ok. + + +%%%----------------------------------------------------------------- +%%% Overload protection tests + +op_switch_to_sync_file(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 3, + drop_new_reqs_qlen => 501, + flush_reqs_qlen => 2000, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + %% TRecvPid = start_op_trace(), + NumOfReqs = 500, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + NumOfReqs = count_lines(Log), + %% true = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(async,Events) end), + %% true = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(sync,Events) end), + %% true = analyse_trace(TRecvPid, + %% fun(Events) -> find_switch(async,sync,Events) end), + %% false = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(drop,Events) end), + %% false = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(flush,Events) end), + ok = file:delete(Log), + %% stop_op_trace(TRecvPid), + ok. +op_switch_to_sync_file(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_sync_tty(Config) -> + {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config), + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 3, + drop_new_reqs_qlen => 501, + flush_reqs_qlen => 2000, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 500, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + ok. +op_switch_to_sync_tty(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
+ +op_switch_to_drop_file(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 3, + flush_reqs_qlen => 600, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + %% TRecvPid = start_op_trace(), + NumOfReqs = 500, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages dropped = ~w (~w)", + [NumOfReqs-Logged,NumOfReqs]), + true = (Logged < NumOfReqs), + %% true = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(async,Events) end), + %% true = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(drop,Events) end), + %% false = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(flush,Events) end), + %% true = analyse_trace(TRecvPid, + %% fun(Events) -> find_switch(async,drop,Events) + %% orelse find_switch(sync,drop,Events) + %% end), + ok = file:delete(Log), + %% stop_op_trace(TRecvPid), + ok. +op_switch_to_drop_file(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_drop_tty(Config) -> + {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config), + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 3, + flush_reqs_qlen => 600, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 500, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + ok. +op_switch_to_drop_tty(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_flush_file() -> + [{timetrap,{seconds,60}}]. +op_switch_to_flush_file(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + + %% it's important that both async and sync requests have been queued + %% when the flush happens (verify with coverage of flush_log_requests/2) + + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 99, + flush_reqs_qlen => 100, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 10000, + Procs = 100, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages flushed/dropped = ~w (~w)", + [(NumOfReqs*Procs)-Logged,NumOfReqs*Procs]), + true = (Logged < (NumOfReqs*Procs)), + + %%! --- Thu Apr 12 13:46:00 2018 --- peppe was here! + %%! TODO: Verify that handler has switched to flush mode + + ok = file:delete(Log), + ok. +op_switch_to_flush_file(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_flush_tty(Config) -> + {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config), + + %% it's important that both async and sync requests have been queued + %% when the flush happens (verify with coverage of flush_log_requests/2) + + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 99, + flush_reqs_qlen => 100, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 10000, + Procs = 10, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), + ok. +op_switch_to_flush_tty(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
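The op_switch_* cases force the handler through its overload modes by shrinking the three queue-length thresholds: at toggle_sync_qlen it switches from asynchronous to synchronous requests, at drop_new_reqs_qlen it starts dropping new requests, and at flush_reqs_qlen it flushes the message queue. The invalid_levels errors in config_fail/1 indicate the three values are expected to be increasing. A deliberately tight configuration in the spirit of these tests (sketch, not part of this diff):

-module(overload_demo).
-export([tight/0]).

%% Sketch: thresholds that make the sync/drop/flush modes easy to reach,
%% mirroring the values the op_switch_* tests install (id my_h assumed).
tight() ->
    logger:set_handler_config(my_h, logger_std_h,
                              #{toggle_sync_qlen   => 2,    % go synchronous almost at once
                                drop_new_reqs_qlen => 100,  % then start dropping new requests
                                flush_reqs_qlen    => 200,  % finally flush the message queue
                                enable_burst_limit => false}).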
+ +limit_burst_disabled(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{enable_burst_limit => false, + burst_limit_size => 10, + burst_window_time => 2000, + drop_new_reqs_qlen => 200, + flush_reqs_qlen => 300}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file:delete(Log), + NumOfReqs = Logged. +limit_burst_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +limit_burst_enabled_one(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{enable_burst_limit => true, + burst_limit_size => ReqLimit, + burst_window_time => 2000, + drop_new_reqs_qlen => 200, + flush_reqs_qlen => 300}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file:delete(Log), + ReqLimit = Logged. +limit_burst_enabled_one(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +limit_burst_enabled_period(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + BurstTWin = 1000, + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{enable_burst_limit => true, + burst_limit_size => ReqLimit, + burst_window_time => BurstTWin, + drop_new_reqs_qlen => 20000, + flush_reqs_qlen => 20001}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + + Windows = 3, + Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + ok = file:delete(Log), + true = (Logged > (ReqLimit*Windows)) andalso + (Logged < (ReqLimit*(Windows+2))). +limit_burst_enabled_period(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +kill_disabled(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>false, + handler_overloaded_qlen=>10, + handler_overloaded_mem=>100}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file:delete(Log), + true = is_pid(whereis(?MODULE)), + ok. +kill_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
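limit_burst_enabled_one and limit_burst_enabled_period isolate the burst limiter: with enable_burst_limit set, at most burst_limit_size events are written per burst_window_time milliseconds and the surplus is discarded. Enabling it on a handler (sketch, not part of this diff):

-module(burst_demo).
-export([limit/0]).

%% Sketch: allow at most 50 writes per 2-second window (id my_h assumed).
limit() ->
    logger:set_handler_config(my_h, logger_std_h,
                              #{enable_burst_limit => true,
                                burst_limit_size   => 50,
                                burst_window_time  => 2000}).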
+ +qlen_kill_new(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + Pid0 = whereis(?MODULE), + {_,Mem0} = process_info(Pid0, memory), + RestartAfter = 2000, + NewHConfig = + HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>10, + handler_overloaded_mem=>Mem0+50000, + handler_restart_after=>RestartAfter}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + MRef = erlang:monitor(process, Pid0), + NumOfReqs = 100, + Procs = 2, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), + %% send_burst({n,NumOfReqs}, seq, {chars,79}, info), + receive + {'DOWN', MRef, _, _, Info} -> + case Info of + {shutdown,{overloaded,?MODULE,QLen,Mem}} -> + ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]); + killed -> + ct:pal("Slow shutdown, handler process was killed!", []) + end, + timer:sleep(RestartAfter + 1000), + true = is_pid(whereis(?MODULE)), + ok + after + 5000 -> + Info = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info]), + ct:fail("Handler not dead! It should not have survived this!") + end. +qlen_kill_new(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +%% choke the standard handler on remote node to verify the termination +%% works as expected +qlen_kill_std(Config) -> + %%! HERE + %% Dir = ?config(priv_dir, Config), + %% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), + %% Log = filename:join(Dir, File), + %% Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log), + %% ok = rpc:call(Node, logger, set_handler_config, + %% [?STANDARD_HANDLER, logger_std_h, + %% #{enable_kill_overloaded=>true, + %% handler_overloaded_qlen=>10, + %% handler_overloaded_mem=>100000}]), + {skip,"Not done yet"}. + +mem_kill_new(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + Pid0 = whereis(?MODULE), + {_,Mem0} = process_info(Pid0, memory), + RestartAfter = 2000, + NewHConfig = + HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>50000, + handler_overloaded_mem=>Mem0+500, + handler_restart_after=>RestartAfter}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + MRef = erlang:monitor(process, Pid0), + NumOfReqs = 100, + Procs = 2, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), + %% send_burst({n,NumOfReqs}, seq, {chars,79}, info), + receive + {'DOWN', MRef, _, _, Info} -> + case Info of + {shutdown,{overloaded,?MODULE,QLen,Mem}} -> + ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]); + killed -> + ct:pal("Slow shutdown, handler process was killed!", []) + end, + timer:sleep(RestartAfter * 2), + true = is_pid(whereis(?MODULE)), + ok + after + 5000 -> + Info = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info]), + ct:fail("Handler not dead! It should not have survived this!") + end. +mem_kill_new(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +%% choke the standard handler on remote node to verify the termination +%% works as expected +mem_kill_std(Config) -> + {skip,"Not done yet"}. 
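qlen_kill_new and mem_kill_new exercise the last line of defence: with enable_kill_overloaded set, the handler terminates with {shutdown,{overloaded,Id,QLen,Mem}} once its message-queue length or memory use passes the configured limits, and is restarted after handler_restart_after milliseconds unless that value is never. Enabling it (sketch, not part of this diff; the limits are illustrative):

-module(kill_demo).
-export([enable/0]).

%% Sketch: let an overloaded handler terminate itself and restart after 5 s
%% (id my_h assumed; limits are illustrative, memory is in bytes as above).
enable() ->
    logger:set_handler_config(my_h, logger_std_h,
                              #{enable_kill_overloaded  => true,
                                handler_overloaded_qlen => 20000,
                                handler_overloaded_mem  => 50 * 1024 * 1024,
                                handler_restart_after   => 5000}).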
+ +restart_after(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig1 = + HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>10, + handler_restart_after=>never}}, + ok = logger:set_handler_config(?MODULE, NewHConfig1), + MRef1 = erlang:monitor(process, whereis(?MODULE)), + %% kill handler + send_burst({n,100}, {spawn,2,0}, {chars,79}, info), + receive + {'DOWN', MRef1, _, _, _Info1} -> + timer:sleep(?HANDLER_RESTART_AFTER + 1000), + undefined = whereis(?MODULE), + ok + after + 5000 -> + ct:fail("Handler not dead! It should not have survived this!") + end, + + {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + RestartAfter = 2000, + NewHConfig2 = + HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>10, + handler_restart_after=>RestartAfter}}, + ok = logger:set_handler_config(?MODULE, NewHConfig2), + Pid0 = whereis(?MODULE), + MRef2 = erlang:monitor(process, Pid0), + %% kill handler + send_burst({n,100}, {spawn,2,0}, {chars,79}, info), + receive + {'DOWN', MRef2, _, _, _Info2} -> + timer:sleep(RestartAfter + 1000), + Pid1 = whereis(?MODULE), + true = is_pid(Pid1), + false = (Pid1 == Pid0), + ok + after + 5000 -> + ct:fail("Handler not dead! It should not have survived this!") + end, + ok. +restart_after(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +%% send handler requests (filesync, info, reset, change_config) +%% during high load to verify that sync, dropping and flushing is +%% handled correctly. +handler_requests_under_load() -> + [{timetrap,{seconds,60}}]. +handler_requests_under_load(Config) -> + {Log,HConfig,StdHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 1000, + flush_reqs_qlen => 2000, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]}, + {info,[]}, + {reset,[]}, + {change_config,[]}]) + end), + Sent = send_burst({t,10000}, seq, {chars,79}, info), + Pid ! {self(),finish}, + ReqResult = receive {Pid,Result} -> Result end, + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + FindError = fun(Res) -> + [E || E <- Res, + is_tuple(E) andalso (element(1,E) == error)] + end, + Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult], + NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult), + ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]), + ok = file:delete(Log). +handler_requests_under_load(cleanup, Config) -> + ok = stop_handler(?MODULE). + +send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) -> + receive + {From,finish} -> + From ! {self(),Reqs} + after + TO -> + Result = + case Req of + change_config -> + logger:set_handler_config(HName, logger_std_h, + #{enable_kill_overloaded => + false}); + Func -> + logger_std_h:Func(HName) + end, + send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}]) + end. 
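handler_requests_under_load interleaves filesync, info, reset and config-change requests with a ten-second message burst to show that the handler keeps serving requests while it is busy dropping and flushing. The request API used there, outside the suite (sketch, not part of this diff; id my_h assumed):

-module(requests_demo).
-export([poke/0]).

%% Sketch: the runtime request API exercised under load above.
poke() ->
    ok = logger_std_h:filesync(my_h),           % push buffered output to disk
    #{id := my_h} = logger_std_h:info(my_h),    % current handler state
    ok = logger_std_h:reset(my_h).              % reset internal state/stats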
+ + +%%%----------------------------------------------------------------- +%%% +start_handler(Name, TTY, Config) when TTY == standard_io; + TTY == standard_error-> + ok = logger:add_handler(Name, + logger_std_h, + #{logger_std_h => #{type => TTY}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([Name]), + formatter=>{?MODULE,op}}), + {ok,{_,HConfig = #{logger_std_h := StdHConfig}}} = + logger:get_handler_config(Name), + {HConfig,StdHConfig}; + +start_handler(Name, FuncName, Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir, lists:concat([FuncName,".log"])), + ct:pal("Logging to ~tp", [Log]), + Type = {file,Log}, + ok = logger:add_handler(Name, + logger_std_h, + #{logger_std_h => #{type => Type}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([Name]), + formatter=>{?MODULE,op}}), + {ok,{_,HConfig = #{logger_std_h := StdHConfig}}} = + logger:get_handler_config(Name), + {Log,HConfig,StdHConfig}. + +stop_handler(Name) -> + ok = logger:remove_handler(Name), + ct:pal("Handler ~p stopped!", [Name]). + +count_lines(File) -> + wait_until_written(File, -1), + count_lines1(File). + +wait_until_written(File, Sz) -> + timer:sleep(2000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz}} -> + timer:sleep(1000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz1}} -> + ok; + {ok,#file_info{size = Sz2}} -> + wait_until_written(File, Sz2) + end; + {ok,#file_info{size = Sz1}} -> + wait_until_written(File, Sz1) + end. + +count_lines1(File) -> + Counter = fun Cnt(Dev,LC) -> + case file:read_line(Dev) of + eof -> LC; + _ -> Cnt(Dev,LC+1) + end + end, + {_,Dev} = file:open(File, [read]), + Lines = Counter(Dev, 0), + file:close(Dev), + Lines. + +send_burst(NorT, Type, {chars,Sz}, Class) -> + Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)], + case NorT of + {n,N} -> + %% process_flag(priority, high), + send_n_burst(N, Type, Text, Class), + %% process_flag(priority, normal), + N; + {t,T} -> + ct:pal("Sending messages sequentially for ~w ms", [T]), + T0 = erlang:monotonic_time(millisecond), + send_t_burst(T0, T, Text, Class, 0) + end. + +send_n_burst(0, _, _Text, _Class) -> + ok; +send_n_burst(N, seq, Text, Class) -> + ok = logger:Class(Text, ?domain), + send_n_burst(N-1, seq, Text, Class); +send_n_burst(N, {spawn,Ps,TO}, Text, Class) -> + ct:pal("~w processes each sending ~w messages", [Ps,N]), + PerProc = fun() -> + send_n_burst(N, seq, Text, Class) + end, + MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end, + monitor(process,spawn_link(PerProc)) end || + _ <- lists:seq(1,Ps)], + lists:foreach(fun(MRef) -> + receive + {'DOWN', MRef, _, _, _} -> + ok + end + end, MRefs), + ct:pal("Message burst sent", []), + ok. + +send_t_burst(T0, T, Text, Class, N) -> + T1 = erlang:monotonic_time(millisecond), + if (T1-T0) > T -> + N; + true -> + ok = logger:Class(Text, ?domain), + send_t_burst(T0, T, Text, Class, N+1) + end. + +%%%----------------------------------------------------------------- +%%% Formatter callback +%%% Using this to send the formatted string back to the test case +%%% process - so it can check for logged events. 
+format(_,bad_return) -> + bad_return; +format(_,crash) -> + erlang:error(formatter_crashed); +format(#{msg:={string,String0}},no_nl) -> + String = unicode:characters_to_list(String0), + String; +format(#{msg:={string,String0}},nl) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={string,String0}},op) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={report,#{label:={supervisor,progress}}}},op) -> + ""; +format(#{msg:={report,#{label:={gen_server,terminate}}}},op) -> + ""; +format(#{msg:={report,#{label:={proc_lib,crash}}}},op) -> + ""; +format(#{msg:={F,A}},Pid) when is_list(F), is_list(A) -> + String = lists:flatten(io_lib:format(F,A)), + Pid ! {log,String}, + String++"\n"; +format(#{msg:={string,String0}},Pid) -> + String = unicode:characters_to_list(String0), + Pid ! {log,String}, + String++"\n". + +add_remove_instance_nofile(Type) -> + ok = logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{type => Type}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + Pid = whereis(?MODULE), + true = is_pid(Pid), + group_leader(group_leader(),Pid), % to get printouts in test log + logger:info(M1=?msg,?domain), + ?check(M1), + %% check that filesync doesn't do damage even if not relevant + ok = logger_std_h:filesync(?MODULE), + ok = logger:remove_handler(?MODULE), + timer:sleep(500), + undefined = whereis(?MODULE), + logger:info(?msg,?domain), + ?check_no_log, + ok. + +logger_std_h_remove() -> + logger:remove_handler(?MODULE). +logger_std_h_remove(Id) -> + logger:remove_handler(Id). + +try_read_file(FileName, Expected, Time) when Time > 0 -> + case file:read_file(FileName) of + Expected -> + ok; + Error = {error,_Reason} -> + ct:pal("Can't read ~tp: ~tp", [FileName,Error]), + erlang:error(Error); + Got -> + ct:pal("try_read_file got ~tp", [Got]), + timer:sleep(500), + try_read_file(FileName, Expected, Time-500) + end; +try_read_file(FileName, Expected, _) -> + ct:pal("Missing pattern ~tp in ~tp", [Expected,FileName]), + erlang:error({error,missing_expected_pattern}). + +try_match_file(FileName, Pattern, Time) -> + try_match_file(FileName, Pattern, Time, <<>>). + +try_match_file(FileName, Pattern, Time, _) when Time > 0 -> + case file:read_file(FileName) of + {ok, Bin} -> + case re:run(Bin,Pattern,[{capture,none}]) of + match -> + unicode:characters_to_list(Bin); + _ -> + timer:sleep(100), + try_match_file(FileName, Pattern, Time-100, Bin) + end; + Error -> + erlang:error(Error) + end; +try_match_file(_,Pattern,_,Incorrect) -> + ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n", + [Pattern,Incorrect]), + erlang:error({error,not_matching_pattern,Pattern,Incorrect}). + +%%%----------------------------------------------------------------- +%%% +start_op_trace() -> + TraceFun = fun({trace,_,call,{_Mod,Func,Details}}, Pid) -> + Pid ! {trace_call,Func,Details}, + Pid; + ({trace,_,return_from,{_Mod,Func,_},RetVal}, Pid) -> + Pid ! 
{trace_return,Func,RetVal}, + Pid + end, + TRecvPid = spawn_link(fun() -> trace_receiver(5000) end), + {ok,_} = dbg:tracer(process, {TraceFun, TRecvPid}), + + {ok,_} = dbg:p(whereis(?MODULE), [c]), + {ok,_} = dbg:p(self(), [c]), + + MS1 = dbg:fun2ms(fun([_]) -> return_trace() end), + {ok,_} = dbg:tp(logger_h_common, check_load, 1, MS1), + + {ok,_} = dbg:tpl(logger_h_common, flush_log_requests, 2, []), + + MS2 = dbg:fun2ms(fun([_,mode]) -> return_trace() end), + {ok,_} = dbg:tpl(ets, lookup, 2, MS2), + + ct:pal("Tracing started!", []), + TRecvPid. + +stop_op_trace(TRecvPid) -> + dbg:stop_clear(), + unlink(TRecvPid), + exit(TRecvPid, kill), + ok. + +find_mode(flush, Events) -> + lists:any(fun({trace_call,flush_log_requests,[_,_]}) -> true; + (_) -> false + end, Events); +find_mode(Mode, Events) -> + lists:keymember([{mode,Mode}], 3, Events). + +find_switch(From, To, Events) -> + try lists:foldl(fun({trace_return,check_load,{To,_,_,_}}, + {trace_call,check_load,[#{mode := From}]}) -> + throw(match); + (Event, _) -> + Event + end, undefined, Events) of + _ -> false + catch + throw:match -> true + end. + +analyse_trace(TRecvPid, TestFun) -> + TRecvPid ! {test,self(),TestFun}, + receive + {result,TRecvPid,Result} -> + Result + after + 60000 -> + fails + end. + +trace_receiver(IdleT) -> + Msgs = receive_until_idle(IdleT, 5, []), + ct:pal("~w trace events generated", [length(Msgs)]), + analyse(Msgs). + +receive_until_idle(IdleT, WaitN, Msgs) -> + receive + Msg = {trace_call,_,_} -> + receive_until_idle(IdleT, 5, [Msg | Msgs]); + Msg = {trace_return,_,_} -> + receive_until_idle(IdleT, 5, [Msg | Msgs]) + after + IdleT -> + if WaitN == 0 -> + Msgs; + true -> + receive_until_idle(IdleT, WaitN-1, Msgs) + end + end. + +analyse(Msgs) -> + receive + {test,From,TestFun} -> + From ! {result,self(),TestFun(Msgs)}, + analyse(Msgs) + end. |