Diffstat (limited to 'lib/kernel')
-rw-r--r--  lib/kernel/doc/src/application.xml                            35
-rw-r--r--  lib/kernel/doc/src/kernel_app.xml                             18
-rw-r--r--  lib/kernel/doc/src/logger.xml                                 28
-rw-r--r--  lib/kernel/doc/src/logger_chapter.xml                         14
-rw-r--r--  lib/kernel/doc/src/logger_std_h.xml                          122
-rw-r--r--  lib/kernel/doc/src/notes.xml                                 198
-rw-r--r--  lib/kernel/src/application.erl                                22
-rw-r--r--  lib/kernel/src/application_controller.erl                    101
-rw-r--r--  lib/kernel/src/code_server.erl                                10
-rw-r--r--  lib/kernel/src/inet_db.erl                                     7
-rw-r--r--  lib/kernel/src/kernel.app.src                                  2
-rw-r--r--  lib/kernel/src/kernel.appup.src                               10
-rw-r--r--  lib/kernel/src/kernel.erl                                     72
-rw-r--r--  lib/kernel/src/logger.erl                                     10
-rw-r--r--  lib/kernel/src/logger_formatter.erl                           71
-rw-r--r--  lib/kernel/src/logger_h_common.erl                            37
-rw-r--r--  lib/kernel/src/logger_olp.erl                                  5
-rw-r--r--  lib/kernel/src/logger_olp.hrl                                  7
-rw-r--r--  lib/kernel/src/logger_simple_h.erl                             4
-rw-r--r--  lib/kernel/src/logger_std_h.erl                              503
-rw-r--r--  lib/kernel/test/application_SUITE.erl                         90
-rw-r--r--  lib/kernel/test/code_SUITE.erl                                10
-rw-r--r--  lib/kernel/test/gen_sctp_SUITE.erl                            10
-rw-r--r--  lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c     1
-rw-r--r--  lib/kernel/test/gen_tcp_misc_SUITE.erl                        37
-rw-r--r--  lib/kernel/test/inet_SUITE.erl                                31
-rw-r--r--  lib/kernel/test/kernel.spec                                    1
-rw-r--r--  lib/kernel/test/kernel_config_SUITE.erl                       21
-rw-r--r--  lib/kernel/test/logger_SUITE.erl                              18
-rw-r--r--  lib/kernel/test/logger_formatter_SUITE.erl                     2
-rw-r--r--  lib/kernel/test/logger_std_h_SUITE.erl                       428
-rw-r--r--  lib/kernel/test/logger_stress_SUITE.erl                       94
-rw-r--r--  lib/kernel/vsn.mk                                              2
33 files changed, 1682 insertions, 339 deletions
diff --git a/lib/kernel/doc/src/application.xml b/lib/kernel/doc/src/application.xml
index 4e32c1a3a5..83a83ebad2 100644
--- a/lib/kernel/doc/src/application.xml
+++ b/lib/kernel/doc/src/application.xml
@@ -238,6 +238,41 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
</desc>
</func>
<func>
+ <name name="set_env" arity="1" since="OTP 21.3"/>
+ <name name="set_env" arity="2" since="OTP 21.3"/>
+ <fsummary>Sets the configuration parameters of multiple applications.</fsummary>
+ <desc>
+ <p>Sets the configuration <c><anno>Config</anno></c> for multiple
+ applications. It is equivalent to calling <c>set_env/4</c> on
+ each application individually, except it is more efficient.
+ The given <c><anno>Config</anno></c> is validated before the
+ configuration is set.</p>
+ <p><c>set_env/2</c> uses the standard <c>gen_server</c> time-out
+ value (5000 ms). Option <c>timeout</c> can be specified
+ if another time-out value is useful, for example, in situations
+ where the application controller is heavily loaded.</p>
+ <p>Option <c>persistent</c> can be set to <c>true</c>
+ to guarantee that parameters set with <c>set_env/2</c>
+ are not overridden by those defined in the application resource
+ file on load. This means that persistent values will stick after the application
+ is loaded and also on application reload.</p>
+ <p>If an application is given more than once or if an application
+ has the same key given more than once, the behaviour is undefined
+ and a warning message will be logged. In future releases, an error
+ will be raised.</p>
+ <p><c>set_env/1</c> is equivalent to <c>set_env(Config, [])</c>.</p>
+ <warning>
+ <p>Use this function only if you know what you are doing,
+ that is, on your own applications. It is very
+ application-dependent and
+ configuration parameter-dependent when and how often
+ the value is read by the application. Careless use
+ of this function can put the application in a
+ weird, inconsistent, and malfunctioning state.</p>
+ </warning>
+ </desc>
+ </func>
+ <func>
<name name="permit" arity="2" since=""/>
<fsummary>Change the permission for an application to run at a node.</fsummary>
<desc>
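For reference, a minimal usage sketch of the new set_env/1 and set_env/2 calls documented above; the application names myapp and other_app and their parameters are hypothetical:

    %% Configure several applications in one validated call.
    1> application:set_env([{myapp,     [{log_level, debug}, {port, 8080}]},
                            {other_app, [{cache_size, 1024}]}]).
    ok
    %% Same, but persisted across application (re)load, and with a longer
    %% timeout for a heavily loaded application controller.
    2> application:set_env([{myapp, [{port, 8081}]}],
                           [{persistent, true}, {timeout, 10000}]).
    ok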
diff --git a/lib/kernel/doc/src/kernel_app.xml b/lib/kernel/doc/src/kernel_app.xml
index 15dbdb47dc..7f9609d5c1 100644
--- a/lib/kernel/doc/src/kernel_app.xml
+++ b/lib/kernel/doc/src/kernel_app.xml
@@ -369,6 +369,14 @@ MaxT = TickTime + TickTime / 4</code>
performed. This option ensures that <c>global</c> is
synchronized.</p>
</item>
+ <tag><c>start_distribution = true | false</c></tag>
+ <item>
+ <p>Starts all distribution services, such as <c>rpc</c>,
+ <c>global</c>, and <c>net_kernel</c> if the parameter is
+ <c>true</c>. This parameter is to be set to <c>false</c>
+ for systems that want to disable all distribution functionality.</p>
+ <p>Defaults to <c>true</c>.</p>
+ </item>
<tag><c>start_dist_ac = true | false</c></tag>
<item>
<p>Starts the <c>dist_ac</c> server if the parameter is
@@ -510,11 +518,13 @@ MaxT = TickTime + TickTime / 4</code>
parameters for Logger are not set.</p>
<taglist>
<tag><c>error_logger</c></tag>
- <item>Replaced by setting the type of the default
- <seealso marker="logger_std_h#type"><c>logger_std_h</c></seealso>
- to the same value. Example:
+ <item>Replaced by setting the <seealso
+ marker="logger_std_h#type"><c>type</c></seealso>, and possibly
+ <seealso marker="logger_std_h#file"><c>file</c></seealso> and
+ <seealso marker="logger_std_h#modes"><c>modes</c></seealso>
+ parameters of the default <c>logger_std_h</c> handler. Example:
<code type="none">
-erl -kernel logger '[{handler,default,logger_std_h,#{config=>#{type=>{file,"/tmp/erlang.log"}}}}]'
+erl -kernel logger '[{handler,default,logger_std_h,#{config=>#{file=>"/tmp/erlang.log"}}}]'
</code>
</item>
<tag><c>error_logger_format_depth</c></tag>
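As an aside, a hedged sketch of how the new start_distribution parameter described above could be set, either in a sys.config file or on the command line:

    %% sys.config (sketch): boot a node with all distribution services disabled
    [{kernel, [{start_distribution, false}]}].

    %% or, equivalently, on the command line:
    %% erl -kernel start_distribution false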
diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml
index df2d081d76..2a060ce1f9 100644
--- a/lib/kernel/doc/src/logger.xml
+++ b/lib/kernel/doc/src/logger.xml
@@ -66,7 +66,7 @@ logger:error("error happened because: ~p", [Reason]). % Without macro
[{kernel,
[{logger,
[{handler, default, logger_std_h,
- #{config => #{type => {file, "path/to/file.log"}}}}]}]}].
+ #{config => #{file => "path/to/file.log"}}}]}]}].
</code>
<p>
For more information about:
@@ -190,7 +190,7 @@ logger:error("error happened because: ~p", [Reason]). % Without macro
<list>
<item><c>pid => self()</c></item>
<item><c>gl => group_leader()</c></item>
- <item><c>time => erlang:system_time(microsecond)</c></item>
+ <item><c>time => logger:timestamp()</c></item>
</list>
<p>When a log macro is used, Logger also inserts location
information:</p>
@@ -288,8 +288,8 @@ logger:error("error happened because: ~p", [Reason]). % Without macro
<name name="timestamp"/>
<desc>
<p>A timestamp produced
- with <seealso marker="erts:erlang#system_time-1">
- <c>erlang:system_time(microsecond)</c></seealso>.</p>
+ with <seealso marker="#timestamp-0">
+ <c>logger:timestamp()</c></seealso>.</p>
</desc>
</datatype>
</datatypes>
@@ -1117,6 +1117,24 @@ logger:set_proxy_config(maps:merge(Old, Config)).
a key-value list before formatting as such.</p>
</desc>
</func>
+
+ <func>
+ <name name="timestamp" arity="0" since="OTP 21.3"/>
+ <fsummary>Return a timestamp to insert in meta data for a log
+ event.</fsummary>
+ <desc>
+ <p>Return a timestamp that can be inserted as the <c>time</c>
+ field in the meta data for a log event. It is produced with
+ <seealso marker="kernel:os#system_time-1">
+ <c>os:system_time(microsecond)</c></seealso>.</p>
+ <p>Notice that Logger automatically inserts a timestamp in the
+ meta data unless it already exists. This function is
+ exported for the rare case when the timestamp must be taken
+ at a different point in time than when the log event is
+ issued.</p>
+ </desc>
+ </func>
+
</funcs>
<section>
@@ -1333,5 +1351,3 @@ logger:set_proxy_config(maps:merge(Old, Config)).
</p>
</section>
</erlref>
-
-
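A sketch of the rare case mentioned for logger:timestamp/0 above, where the timestamp is captured earlier than the log call itself; handle_event/1 and process/1 are made-up names for illustration:

    handle_event(Event) ->
        %% Capture the time as early as possible...
        T = logger:timestamp(),
        Result = process(Event),   % process/1 is hypothetical and may take a while
        %% ...and attach it as the 'time' metadata of the log event afterwards.
        logger:info("event ~p handled: ~p", [Event, Result], #{time => T}).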
diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml
index 03b9edcf8f..8458ffa042 100644
--- a/lib/kernel/doc/src/logger_chapter.xml
+++ b/lib/kernel/doc/src/logger_chapter.xml
@@ -801,7 +801,7 @@ logger:debug(#{got => connection_request, id => Id, state => State},
[{kernel,
[{logger,
[{handler, default, logger_std_h, % {handler, HandlerId, Module,
- #{config => #{type => {file,"log/erlang.log"}}}} % Config}
+ #{config => #{file => "log/erlang.log"}}} % Config}
]}]}].
</code>
<p>Modify the default handler to print each log event as a
@@ -831,10 +831,10 @@ logger:debug(#{got => connection_request, id => Id, state => State},
[{logger,
[{handler, default, logger_std_h,
#{level => error,
- config => #{type => {file, "log/erlang.log"}}}},
+ config => #{file => "log/erlang.log"}}},
{handler, info, logger_std_h,
#{level => debug,
- config => #{type => {file, "log/debug.log"}}}}
+ config => #{file => "log/debug.log"}}}
]}]}].
</code>
</section>
@@ -1004,10 +1004,10 @@ ok</pre>
<p>Then, add a new handler which prints to file. You can use the
handler
module <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>,
- and specify type <c>{file,File}</c>.:</p>
+ and configure it to log to file:</p>
<pre>
-4> <input>Config = #{config => #{type => {file,"./info.log"}}, level => info}.</input>
-#{config => #{type => {file,"./info.log"}},level => info}
+4> <input>Config = #{config => #{file => "./info.log"}, level => info}.</input>
+#{config => #{file => "./info.log"},level => info}
5> <input>logger:add_handler(myhandler, logger_std_h, Config).</input>
ok</pre>
<p>Since <c>filter_default</c> defaults to <c>log</c>, this
@@ -1246,7 +1246,7 @@ do_log(Fd, LogEvent, #{formatter := {FModule, FConfig}}) ->
<p>A configuration example:</p>
<code type="none">
logger:add_handler(my_standard_h, logger_std_h,
- #{config => #{type => {file,"./system_info.log"},
+ #{config => #{file => "./system_info.log",
sync_mode_qlen => 100,
drop_mode_qlen => 1000,
flush_qlen => 2000}}).
diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml
index fcd180abd6..5ed1a2f210 100644
--- a/lib/kernel/doc/src/logger_std_h.xml
+++ b/lib/kernel/doc/src/logger_std_h.xml
@@ -55,30 +55,105 @@
is stored in a sub map with the key <c>config</c>, and can contain the
following parameters:</p>
<taglist>
- <tag><marker id="type"/><c>type</c></tag>
+ <tag><marker id="type"/><c>type = standard_io | standard_error | file</c></tag>
<item>
- <p>This has the value <c>standard_io</c>, <c>standard_error</c>,
- <c>{file,LogFileName}</c>, or <c>{file,LogFileName,LogFileOpts}</c>.</p>
- <p>If <c>LogFileOpts</c> is specified, it replaces the default
- list of options used when opening the log file. The default
- list is <c>[raw,append,delayed_write]</c>. One reason to do
- so can be to change <c>append</c> to, for
- example, <c>write</c>, ensuring that the old log is
- truncated when a node is restarted. See the reference manual
- for <seealso marker="file#open-2"><c>file:open/2</c></seealso>
- for more information about file options.</p>
+ <p>Specifies the log destination.</p>
+ <p>The value is set when the handler is added, and it can not
+ be changed in runtime.</p>
+ <p>Defaults to <c>standard_io</c>, unless
+ parameter <seealso marker="#file"><c>file</c></seealso> is
+ given, in which case it defaults to <c>file</c>.</p>
+ </item>
+ <tag><marker id="file"/><c>file = </c><seealso marker="file#type-filename"><c>file:filename()</c></seealso></tag>
+ <item>
+ <p>This specifies the name of the log file when the handler is
+ of type <c>file</c>.</p>
+ <p>The value is set when the handler is added, and it can not
+ be changed in runtime.</p>
+ <p>Defaults to the same name as the handler identity, in the
+ current directory.</p>
+ </item>
+ <tag><marker id="modes"/><c>modes = [</c><seealso marker="file#type-mode"><c>file:mode()</c></seealso><c>]</c></tag>
+ <item>
+ <p>This specifies the file modes to use when opening the log
+ file,
+ see <seealso marker="file#open-2"><c>file:open/2</c></seealso>.
+ If <c>modes</c> are not specified, the default list used
+ is <c>[raw,append,delayed_write]</c>. If <c>modes</c> are
+ specified, the list replaces the default modes list with the
+ following adjustments:</p>
+ <list>
+ <item>
+ If <c>raw</c> is not found in the list, it is added.
+ </item>
+ <item>
+ If none of <c>write</c>, <c>append</c> or <c>exclusive</c> is
+ found in the list, <c>append</c> is added.</item>
+ <item>If none of <c>delayed_write</c>
+ or <c>{delayed_write,Size,Delay}</c> is found in the
+ list, <c>delayed_write</c> is added.</item>
+ </list>
<p>Log files are always UTF-8 encoded. The encoding can not be
- changed by setting the option <c>{encoding,Encoding}</c>
- in <c>LogFileOpts</c>.</p>
- <p>Notice that the standard handler does not have support for
- circular logging. Use the disk_log handler,
- <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>,
- for this.</p>
+ changed by setting the mode <c>{encoding,Encoding}</c>.</p>
<p>The value is set when the handler is added, and it can not
be changed in runtime.</p>
- <p>Defaults to <c>standard_io</c>.</p>
+ <p>Defaults to <c>[raw,append,delayed_write]</c>.</p>
+ </item>
+ <tag><marker id="max_no_bytes"/><c>max_no_bytes = pos_integer() | infinity</c></tag>
+ <item>
+ <p>This parameter specifies if the log file should be rotated
+ or not. The value <c>infinity</c> means the log file will
+ grow indefinitely, while an integer value specifies at which
+ file size (bytes) the file is rotated.</p>
+ <p>Defaults to <c>infinity</c>.</p>
+ </item>
+ <tag><marker id="max_no_files"/><c>max_no_files = non_neg_integer()</c></tag>
+ <item>
+ <p>This parameter specifies the number of rotated log file
+ archives to keep. This has meaning only
+ if <seealso marker="#max_no_bytes"><c>max_no_bytes</c></seealso>
+ is set to an integer value.</p>
+ <p>The log archives are
+ named <c>FileName.0</c>, <c>FileName.1</c>,
+ ... <c>FileName.N</c>, where <c>FileName</c> is the name of
+ the current log file. <c>FileName.0</c> is the newest of the
+ archives. The maximum value for <c>N</c> is the value
+ of <c>max_no_files</c> minus 1.</p>
+ <p>Notice that setting this value to <c>0</c> does not turn off
+ rotation. It only specifies that no archives are kept.</p>
+ <p>Defaults to <c>0</c>.</p>
+ </item>
+ <tag><marker id="compress_on_rotate"/><c>compress_on_rotate = boolean()</c></tag>
+ <item>
+ <p>This parameter specifies if the rotated log file archives
+ shall be compressed or not. If set to <c>true</c>, all
+ archives are compressed with <c>gzip</c>, and renamed
+ to <c>FileName.N.gz</c>.</p>
+ <p><c>compress_on_rotate</c> has no meaning if <seealso
+ marker="#max_no_bytes"><c>max_no_bytes</c></seealso> has the
+ value <c>infinity</c>.</p>
+ <p>Defaults to <c>false</c>.</p>
+ </item>
+ <tag><marker id="file_check"/><c>file_check = non_neg_integer()</c></tag>
+ <item>
+ <p>When <c>logger_std_h</c> logs to a file, it reads the file
+ information of the log file prior to each write
+ operation. This is to make sure the file still exists and
+ has the same inode as when it was opened. This implies some
+ performance loss, but ensures that no log events are lost in
+ the case when the file has been removed or renamed by an
+ external actor.</p>
+ <p>In order to allow minimizing the performance loss, the
+ <c>file_check</c> parameter can be set to a positive integer
+ value, <c>N</c>. The handler will then skip reading the file
+ information prior to writing, as long as no more
+ than <c>N</c> milliseconds have passed since it was last
+ read.</p>
+ <p>Notice that the risk of losing log events grows when
+ the <c>file_check</c> value grows.</p>
+ <p>Defaults to 0.</p>
</item>
- <tag><c>filesync_repeat_interval</c></tag>
+ <tag><c>filesync_repeat_interval = pos_integer() | no_repeat</c></tag>
<item>
<p>This value, in milliseconds, specifies how often the handler does
a file sync operation to write buffered data to disk. The handler attempts
@@ -97,12 +172,13 @@
standard handler and the disk_log handler, and are documented in the
<seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
</seealso>.</p>
- <p>Notice that if changing the configuration of the handler in runtime,
- the <c>type</c> parameter must not be modified.</p>
+ <p>Notice that if changing the configuration of the handler in
+ runtime, the <c>type</c>, <c>file</c>, or <c>modes</c> parameters
+ must not be modified.</p>
<p>Example of adding a standard handler:</p>
<code type="none">
logger:add_handler(my_standard_h, logger_std_h,
- #{config => #{type => {file,"./system_info.log"},
+ #{config => #{file => "./system_info.log",
filesync_repeat_interval => 1000}}).
</code>
<p>To set the default handler, that starts initially with
@@ -110,7 +186,7 @@ logger:add_handler(my_standard_h, logger_std_h,
change the Kernel default logger configuration. Example:</p>
<code type="none">
erl -kernel logger '[{handler,default,logger_std_h,
- #{config => #{type => {file,"./log.log"}}}}]'
+ #{config => #{file => "./log.log"}}}]'
</code>
<p>An example of how to replace the standard handler with a disk_log handler
at startup is found in the
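Pulling the rotation-related parameters above together, a hedged example of a handler configuration with rotation, compression, and a relaxed file check (all values are arbitrary):

    logger:add_handler(my_rotating_h, logger_std_h,
                       #{config => #{file => "./rotating.log",
                                     max_no_bytes => 10000000,  % rotate at ~10 MB
                                     max_no_files => 5,         % keep 5 archives
                                     compress_on_rotate => true,
                                     file_check => 1000}}).     % check the inode at most once per second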
diff --git a/lib/kernel/doc/src/notes.xml b/lib/kernel/doc/src/notes.xml
index 021ecfa40d..0c187eb19f 100644
--- a/lib/kernel/doc/src/notes.xml
+++ b/lib/kernel/doc/src/notes.xml
@@ -31,6 +31,204 @@
</header>
<p>This document describes the changes made to the Kernel application.</p>
+<section><title>Kernel 6.3</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ If, for example, the <c>/etc/hosts</c> file did not come
+ into existence until after the kernel application had
+ started, its content was never read. This bug has now been
+ corrected.</p>
+ <p>
+ Own Id: OTP-14702 Aux Id: PR-2066 </p>
+ </item>
+ <item>
+ <p>
+ Fix bug where doing <c>seq_trace:reset_trace()</c> while
+ another process was doing a garbage collection could
+ cause the run-time system to segfault.</p>
+ <p>
+ Own Id: OTP-15490</p>
+ </item>
+ <item>
+ <p>
+ Fix <c>erl_epmd:port_please</c> spec to include
+ <c>atom()</c> and <c>string()</c>.</p>
+ <p>
+ Own Id: OTP-15557 Aux Id: PR-2117 </p>
+ </item>
+ <item>
+ <p>
+ The Logger handler logger_std_h now keeps track of the
+ inode of its log file in order to re-open the file if the
+ inode changes. This may happen, for instance, if the log
+ file is opened and saved by an editor.</p>
+ <p>
+ Own Id: OTP-15578 Aux Id: ERL-850 </p>
+ </item>
+ <item>
+ <p>
+ Earlier, user specific file modes given to the Logger
+ handler <c>logger_std_h</c> were accepted without any
+ control. This is now changed, so Logger will adjust the
+ file modes as follows:</p>
+ <p>
+ - If <c>raw</c> is not found in the list, it is
+ added.<br/> - If none of <c>write</c>, <c>append</c> or
+ <c>exclusive</c> are found in the list, <c>append</c> is
+ added.<br/> - If none of <c>delayed_write</c> or
+ <c>{delayed_write,Size,Delay}</c> are found in the list,
+ <c>delayed_write</c> is added.</p>
+ <p>
+ Own Id: OTP-15602</p>
+ </item>
+ </list>
+ </section>
+
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>
+ The standard logger handler, <c>logger_std_h</c>, now has
+ a new internal feature for log rotation. The rotation
+ scheme is as follows:</p>
+ <p>
+ The log file to which the handler currently writes always
+ has the same name, i.e. the name which is configured for
+ the handler. The archived files have the same name, but
+ with extension ".N", where N is an integer. The newest
+ archive has extension ".0", and the oldest archive has
+ the highest number. </p>
+ <p>
+ The size at which the log file is rotated, and the number
+ of archive files that are kept, is specified with the
+ handler specific configuration parameters
+ <c>max_no_bytes</c> and <c>max_no_files</c> respectively. </p>
+ <p>
+ Archives can be compressed, in which case they get a
+ ".gz" file extension after the integer. Compression is
+ specified with the handler specific configuration
+ parameter <c>compress_on_rotate</c>.</p>
+ <p>
+ Own Id: OTP-15479</p>
+ </item>
+ <item>
+ <p>
+ The new functions <c>logger:i/0</c> and <c>logger:i/1</c>
+ are added. These provide the same information as
+ <c>logger:get_config/0</c> and other
+ <c>logger:get_*_config</c> functions, but the information
+ is more logically sorted and more readable.</p>
+ <p>
+ Own Id: OTP-15600</p>
+ </item>
+ <item>
+ <p>
+ Logger is now protected against overload due to massive
+ amounts of log events from the emulator or from remote
+ nodes.</p>
+ <p>
+ Own Id: OTP-15601</p>
+ </item>
+ <item>
+ <p>
+ Logger now uses os:system_time/1 instead of
+ erlang:system_time/1 to generate log event timestamps.</p>
+ <p>
+ Own Id: OTP-15625</p>
+ </item>
+ <item>
+ <p>
+ Add functions <c>application:set_env/1</c> and
+ <c>application:set_env/2</c>. These take a list of
+ application configuration parameters, and the behaviour
+ is equivalent to calling <c>application:set_env/4</c>
+ individually for each application/key combination, except
+ it is more efficient.</p>
+ <p>
+ <c>set_env/1,2</c> warns about duplicated applications or
+ keys. The warning is also emitted during boot, if
+ applications or keys are duplicated within one
+ configuration file, e.g. sys.config.</p>
+ <p>
+ Own Id: OTP-15642 Aux Id: PR-2164 </p>
+ </item>
+ <item>
+ <p>
+ Handler specific configuration parameters for the
+ standard handler <c>logger_std_h</c> are changed to be
+ more intuitive and more similar to the disk_log handler.</p>
+ <p>
+ Earlier there was only one parameter, <c>type</c>, which
+ could have the values <c>standard_io</c>,
+ <c>standard_error</c>, <c>{file,FileName}</c> or
+ <c>{file,FileName,Modes}</c>.</p>
+ <p>
+ This is now changed, so the following parameters are
+ allowed:</p>
+ <p>
+ <c>type = standard_io | standard_error | file</c><br/>
+ <c>file = file:filename()</c><br/> <c>modes =
+ [file:mode()]</c></p>
+ <p>
+ All parameters are optional. <c>type</c> defaults to
+ <c>standard_io</c>, unless a file name is given, in which
+ case it defaults to <c>file</c>. If <c>type</c> is set to
+ <c>file</c>, the file name defaults to the same as the
+ handler id.</p>
+ <p>
+ The potential incompatibility is that
+ <c>logger:get_config/0</c> and
+ <c>logger:get_handler_config/1</c> now returns the new
+ parameters, even if the configuration was set with the
+ old variant, e.g. <c>#{type=>{file,FileName}}</c>.</p>
+ <p>
+ *** POTENTIAL INCOMPATIBILITY ***</p>
+ <p>
+ Own Id: OTP-15662</p>
+ </item>
+ <item>
+ <p>
+ The new configuration parameter <c>file_check</c> is
+ added to the Logger handler <c>logger_std_h</c>. This
+ parameter specifies how long (in milliseconds) the
+ handler may wait before checking if the log file still
+ exists and the inode is the same as when it was opened.</p>
+ <p>
+ The default value is 0, which means that this check is
+ done prior to each write operation. Setting a higher
+ number may improve performance, but adds the risk of
+ losing log events.</p>
+ <p>
+ Own Id: OTP-15663</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>Kernel 6.2.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ When setting the <c>recbuf</c> size of an inet socket,
+ the <c>buffer</c> is also automatically increased. Fix a bug
+ where the auto adjustment of inet buffer size would be
+ triggered even if an explicit inet buffer size had
+ already been set.</p>
+ <p>
+ Own Id: OTP-15651 Aux Id: ERIERL-304 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Kernel 6.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/kernel/src/application.erl b/lib/kernel/src/application.erl
index bc6be2f8f5..5c2e981e4b 100644
--- a/lib/kernel/src/application.erl
+++ b/lib/kernel/src/application.erl
@@ -25,7 +25,7 @@
which_applications/0, which_applications/1,
loaded_applications/0, permit/2]).
-export([ensure_started/1, ensure_started/2]).
--export([set_env/3, set_env/4, unset_env/2, unset_env/3]).
+-export([set_env/1, set_env/2, set_env/3, set_env/4, unset_env/2, unset_env/3]).
-export([get_env/1, get_env/2, get_env/3, get_all_env/0, get_all_env/1]).
-export([get_key/1, get_key/2, get_all_key/0, get_all_key/1]).
-export([get_application/0, get_application/1, info/0]).
@@ -279,6 +279,26 @@ loaded_applications() ->
info() ->
application_controller:info().
+-spec set_env(Config) -> 'ok' when
+ Config :: [{Application, Env}],
+ Application :: atom(),
+ Env :: [{Par :: atom(), Val :: term()}].
+
+set_env(Config) when is_list(Config) ->
+ set_env(Config, []).
+
+-spec set_env(Config, Opts) -> 'ok' when
+ Config :: [{Application, Env}],
+ Application :: atom(),
+ Env :: [{Par :: atom(), Val :: term()}],
+ Opts :: [{timeout, timeout()} | {persistent, boolean()}].
+
+set_env(Config, Opts) when is_list(Config), is_list(Opts) ->
+ case application_controller:set_env(Config, Opts) of
+ ok -> ok;
+ {error, Msg} -> erlang:error({badarg, Msg}, [Config, Opts])
+ end.
+
-spec set_env(Application, Par, Val) -> 'ok' when
Application :: atom(),
Par :: atom(),
diff --git a/lib/kernel/src/application_controller.erl b/lib/kernel/src/application_controller.erl
index a074d2e74b..7715dca7c6 100644
--- a/lib/kernel/src/application_controller.erl
+++ b/lib/kernel/src/application_controller.erl
@@ -26,7 +26,7 @@
control_application/1,
change_application_data/2, prep_config_change/0, config_change/1,
which_applications/0, which_applications/1,
- loaded_applications/0, info/0,
+ loaded_applications/0, info/0, set_env/2,
get_pid_env/2, get_env/2, get_pid_all_env/1, get_all_env/1,
get_pid_key/2, get_key/2, get_pid_all_key/1, get_all_key/1,
get_master/1, get_application/1, get_application_module/1,
@@ -345,9 +345,6 @@ get_all_env(AppName) ->
map(fun([Key, Val]) -> {Key, Val} end,
ets:match(ac_tab, {{env, AppName, '$1'}, '$2'})).
-
-
-
get_pid_key(Master, Key) ->
case ets:match(ac_tab, {{application_master, '$1'}, Master}) of
[[AppName]] -> get_key(AppName, Key);
@@ -461,6 +458,15 @@ permit_application(ApplName, Flag) ->
{permit_application, ApplName, Flag},
infinity).
+set_env(Config, Opts) ->
+ case check_conf_data(Config) of
+ ok ->
+ Timeout = proplists:get_value(timeout, Opts, 5000),
+ gen_server:call(?AC, {set_env, Config, Opts}, Timeout);
+
+ {error, _} = Error ->
+ Error
+ end.
set_env(AppName, Key, Val) ->
gen_server:call(?AC, {set_env, AppName, Key, Val, []}).
@@ -528,19 +534,15 @@ check_conf_data([]) ->
check_conf_data(ConfData) when is_list(ConfData) ->
[Application | ConfDataRem] = ConfData,
case Application of
- {kernel, List} when is_list(List) ->
- case check_para_kernel(List) of
- ok ->
- check_conf_data(ConfDataRem);
- Error1 ->
- Error1
- end;
{AppName, List} when is_atom(AppName), is_list(List) ->
- case check_para(List, atom_to_list(AppName)) of
- ok ->
- check_conf_data(ConfDataRem);
- Error2 ->
- Error2
+ case lists:keymember(AppName, 1, ConfDataRem) of
+ true ->
+ {error, "duplicate application config: " ++ atom_to_list(AppName)};
+ false ->
+ case check_para(List, AppName) of
+ ok -> check_conf_data(ConfDataRem);
+ Error -> Error
+ end
end;
{AppName, List} when is_list(List) ->
ErrMsg = "application: "
@@ -553,36 +555,39 @@ check_conf_data(ConfData) when is_list(ConfData) ->
++ "; parameters must be a list",
{error, ErrMsg};
Else ->
- ErrMsg = "invalid application name: " ++
- lists:flatten(io_lib:format(" ~tp",[Else])),
+ ErrMsg = "invalid application config: "
+ ++ lists:flatten(io_lib:format("~tp",[Else])),
{error, ErrMsg}
end;
check_conf_data(_ConfData) ->
- {error, 'configuration must be a list ended by <dot><whitespace>'}.
-
+ {error, "configuration must be a list ended by <dot><whitespace>"}.
-%% Special check of distributed parameter for kernel
-check_para_kernel([]) ->
+
+check_para([], _AppName) ->
ok;
-check_para_kernel([{distributed, Apps} | ParaList]) when is_list(Apps) ->
- case check_distributed(Apps) of
- {error, _ErrorMsg} = Error ->
- Error;
- _ ->
- check_para_kernel(ParaList)
+check_para([{Para, Val} | ParaList], AppName) when is_atom(Para) ->
+ case lists:keymember(Para, 1, ParaList) of
+ true ->
+ ErrMsg = "application: " ++ atom_to_list(AppName)
+ ++ "; duplicate parameter: " ++ atom_to_list(Para),
+ {error, ErrMsg};
+ false ->
+ case check_para_value(Para, Val, AppName) of
+ ok -> check_para(ParaList, AppName);
+ {error, _} = Error -> Error
+ end
end;
-check_para_kernel([{distributed, _Apps} | _ParaList]) ->
- {error, "application: kernel; erroneous parameter: distributed"};
-check_para_kernel([{Para, _Val} | ParaList]) when is_atom(Para) ->
- check_para_kernel(ParaList);
-check_para_kernel([{Para, _Val} | _ParaList]) ->
- {error, "application: kernel; invalid parameter: " ++
+check_para([{Para, _Val} | _ParaList], AppName) ->
+ {error, "application: " ++ atom_to_list(AppName) ++ "; invalid parameter name: " ++
lists:flatten(io_lib:format("~tp",[Para]))};
-check_para_kernel(Else) ->
- {error, "application: kernel; invalid parameter list: " ++
+check_para([Else | _ParaList], AppName) ->
+ {error, "application: " ++ atom_to_list(AppName) ++ "; invalid parameter: " ++
lists:flatten(io_lib:format("~tp",[Else]))}.
-
+check_para_value(distributed, Apps, kernel) -> check_distributed(Apps);
+check_para_value(_Para, _Val, _AppName) -> ok.
+
+%% Special check of distributed parameter for kernel
check_distributed([]) ->
ok;
check_distributed([{App, List} | Apps]) when is_atom(App), is_list(List) ->
@@ -595,18 +600,6 @@ check_distributed(_Else) ->
{error, "application: kernel; erroneous parameter: distributed"}.
-check_para([], _AppName) ->
- ok;
-check_para([{Para, _Val} | ParaList], AppName) when is_atom(Para) ->
- check_para(ParaList, AppName);
-check_para([{Para, _Val} | _ParaList], AppName) ->
- {error, "application: " ++ AppName ++ "; invalid parameter: " ++
- lists:flatten(io_lib:format("~tp",[Para]))};
-check_para([Else | _ParaList], AppName) ->
- {error, "application: " ++ AppName ++ "; invalid parameter: " ++
- lists:flatten(io_lib:format("~tp",[Else]))}.
-
-
-type calls() :: 'info' | 'prep_config_change' | 'which_applications'
| {'config_change' | 'control_application' |
'load_application' | 'start_type' | 'stop_application' |
@@ -863,6 +856,16 @@ handle_call(which_applications, _From, S) ->
end, S#state.running),
{reply, Reply, S};
+handle_call({set_env, Config, Opts}, _From, S) ->
+ _ = [add_env(AppName, Env) || {AppName, Env} <- Config],
+
+ case proplists:get_value(persistent, Opts, false) of
+ true ->
+ {reply, ok, S#state{conf_data = merge_env(S#state.conf_data, Config)}};
+ false ->
+ {reply, ok, S}
+ end;
+
handle_call({set_env, AppName, Key, Val, Opts}, _From, S) ->
ets:insert(ac_tab, {{env, AppName, Key}, Val}),
case proplists:get_value(persistent, Opts, false) of
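A sketch of what the validation above rejects; the application name myapp and its keys are hypothetical:

    %% Per check_conf_data/1 and check_para/2 above, duplicates and malformed
    %% entries yield descriptive {error, Msg} tuples, for example:
    %%   {error, "duplicate application config: myapp"}
    %%   {error, "application: myapp; duplicate parameter: key"}
    %% application:set_env/1,2 turns any such error into a badarg exception:
    application:set_env([{myapp, [{key, 1}, {key, 2}]}]).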
diff --git a/lib/kernel/src/code_server.erl b/lib/kernel/src/code_server.erl
index 1b4a67ecb7..68e1205301 100644
--- a/lib/kernel/src/code_server.erl
+++ b/lib/kernel/src/code_server.erl
@@ -1434,19 +1434,25 @@ all_loaded(Db) ->
-spec error_msg(io:format(), [term()]) -> 'ok'.
error_msg(Format, Args) ->
+ %% This is equal to calling logger:error/3 which we don't want to
+ %% do from code_server. We don't want to call logger:timestamp()
+ %% either.
logger ! {log,error,Format,Args,
#{pid=>self(),
gl=>group_leader(),
- time=>erlang:system_time(microsecond),
+ time=>os:system_time(microsecond),
error_logger=>#{tag=>error}}},
ok.
-spec info_msg(io:format(), [term()]) -> 'ok'.
info_msg(Format, Args) ->
+ %% This is equal to calling logger:info/3 which we don't want to
+ %% do from code_server. We don't want to call logger:timestamp()
+ %% either.
logger ! {log,info,Format,Args,
#{pid=>self(),
gl=>group_leader(),
- time=>erlang:system_time(microsecond),
+ time=>os:system_time(microsecond),
error_logger=>#{tag=>info_msg}}},
ok.
diff --git a/lib/kernel/src/inet_db.erl b/lib/kernel/src/inet_db.erl
index 6cbb6ac2da..3f5a2ea5ee 100644
--- a/lib/kernel/src/inet_db.erl
+++ b/lib/kernel/src/inet_db.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2019. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -1223,7 +1223,10 @@ handle_set_file(Option, Fname, TagTm, TagInfo, ParseFun, From,
{ok, B, _} -> B;
_ -> <<>>
end;
- _ -> <<>>
+ _ ->
+ ets:insert(Db, {TagInfo, undefined}),
+ TimeZero = - (?RES_FILE_UPDATE_TM + 1), % Early enough
+ ets:insert(Db, {TagTm, TimeZero})
end,
handle_set_file(ParseFun, Bin, From, State);
false -> {reply,error,State}
diff --git a/lib/kernel/src/kernel.app.src b/lib/kernel/src/kernel.app.src
index 4b48f6cd1d..8fe6bdd1ca 100644
--- a/lib/kernel/src/kernel.app.src
+++ b/lib/kernel/src/kernel.app.src
@@ -147,6 +147,6 @@
{logger_sasl_compatible, false}
]},
{mod, {kernel, []}},
- {runtime_dependencies, ["erts-10.1", "stdlib-3.5", "sasl-3.0"]}
+ {runtime_dependencies, ["erts-10.2.5", "stdlib-3.5", "sasl-3.0"]}
]
}.
diff --git a/lib/kernel/src/kernel.appup.src b/lib/kernel/src/kernel.appup.src
index ccf0a82ced..8fa3f5c588 100644
--- a/lib/kernel/src/kernel.appup.src
+++ b/lib/kernel/src/kernel.appup.src
@@ -40,7 +40,10 @@
{<<"^6\\.0\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
{<<"^6\\.1$">>,[restart_new_emulator]},
{<<"^6\\.1\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
- {<<"^6\\.1\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]}],
+ {<<"^6\\.1\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.2$">>,[restart_new_emulator]},
+ {<<"^6\\.2\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^6\\.2\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]}],
[{<<"^5\\.3$">>,[restart_new_emulator]},
{<<"^5\\.3\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
{<<"^5\\.3\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
@@ -54,4 +57,7 @@
{<<"^6\\.0\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
{<<"^6\\.1$">>,[restart_new_emulator]},
{<<"^6\\.1\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
- {<<"^6\\.1\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]}]}.
+ {<<"^6\\.1\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.2$">>,[restart_new_emulator]},
+ {<<"^6\\.2\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^6\\.2\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]}]}.
diff --git a/lib/kernel/src/kernel.erl b/lib/kernel/src/kernel.erl
index c68d04e279..111d103df2 100644
--- a/lib/kernel/src/kernel.erl
+++ b/lib/kernel/src/kernel.erl
@@ -145,26 +145,11 @@ init([]) ->
{ok, {SupFlags,
[Code, File, StdError, User, LoggerSup, Config, RefC, SafeSup]}};
_ ->
- Rpc = #{id => rex,
- start => {rpc, start_link, []},
- restart => permanent,
- shutdown => 2000,
- type => worker,
- modules => [rpc]},
-
- Global = #{id => global_name_server,
- start => {global, start_link, []},
- restart => permanent,
- shutdown => 2000,
- type => worker,
- modules => [global]},
-
- GlGroup = #{id => global_group,
- start => {global_group,start_link,[]},
- restart => permanent,
- shutdown => 2000,
- type => worker,
- modules => [global_group]},
+ DistChildren =
+ case application:get_env(kernel, start_distribution) of
+ {ok, false} -> [];
+ _ -> start_distribution()
+ end,
InetDb = #{id => inet_db,
start => {inet_db, start_link, []},
@@ -173,13 +158,6 @@ init([]) ->
type => worker,
modules => [inet_db]},
- NetSup = #{id => net_sup,
- start => {erl_distribution, start_link, []},
- restart => permanent,
- shutdown => infinity,
- type => supervisor,
- modules => [erl_distribution]},
-
SigSrv = #{id => erl_signal_server,
start => {gen_event, start_link, [{local, erl_signal_server}]},
restart => permanent,
@@ -187,14 +165,11 @@ init([]) ->
type => worker,
modules => dynamic},
- DistAC = start_dist_ac(),
-
Timer = start_timer(),
{ok, {SupFlags,
- [Code, Rpc, Global, InetDb | DistAC] ++
- [NetSup, GlGroup, File, SigSrv,
- StdError, User, Config, RefC, SafeSup, LoggerSup] ++ Timer}}
+ [Code, InetDb | DistChildren] ++
+ [File, SigSrv, StdError, User, Config, RefC, SafeSup, LoggerSup] ++ Timer}}
end;
init(safe) ->
SupFlags = #{strategy => one_for_one,
@@ -213,6 +188,39 @@ init(safe) ->
{ok, {SupFlags, Boot ++ DiskLog ++ Pg2}}.
+start_distribution() ->
+ Rpc = #{id => rex,
+ start => {rpc, start_link, []},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [rpc]},
+
+ Global = #{id => global_name_server,
+ start => {global, start_link, []},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [global]},
+
+ DistAC = start_dist_ac(),
+
+ NetSup = #{id => net_sup,
+ start => {erl_distribution, start_link, []},
+ restart => permanent,
+ shutdown => infinity,
+ type => supervisor,
+ modules => [erl_distribution]},
+
+ GlGroup = #{id => global_group,
+ start => {global_group,start_link,[]},
+ restart => permanent,
+ shutdown => 2000,
+ type => worker,
+ modules => [global_group]},
+
+ [Rpc, Global | DistAC] ++ [NetSup, GlGroup].
+
start_dist_ac() ->
Spec = [#{id => dist_ac,
start => {dist_ac,start_link,[]},
diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl
index 7d36640f52..38bd2f481c 100644
--- a/lib/kernel/src/logger.erl
+++ b/lib/kernel/src/logger.erl
@@ -61,6 +61,7 @@
-export([set_process_metadata/1, update_process_metadata/1,
unset_process_metadata/0, get_process_metadata/0]).
-export([i/0, i/1]).
+-export([timestamp/0]).
%% Basic report formatting
-export([format_report/1, format_otp_report/1]).
@@ -154,7 +155,8 @@
filter_return/0,
config_handler/0,
formatter_config/0,
- olp_config/0]).
+ olp_config/0,
+ timestamp/0]).
%%%-----------------------------------------------------------------
%%% API
@@ -354,6 +356,10 @@ internal_log(Level,Term) when is_atom(Level) ->
erlang:display_string("Logger - "++ atom_to_list(Level) ++ ": "),
erlang:display(Term).
+-spec timestamp() -> timestamp().
+timestamp() ->
+ os:system_time(microsecond).
+
%%%-----------------------------------------------------------------
%%% Configuration
-spec add_primary_filter(FilterId,Filter) -> ok | {error,term()} when
@@ -1129,7 +1135,7 @@ proc_meta() ->
default(pid) -> self();
default(gl) -> group_leader();
-default(time) -> erlang:system_time(microsecond).
+default(time) -> timestamp().
%% Remove everything upto and including this module from the stacktrace
filter_stacktrace(Module,[{Module,_,_,_}|_]) ->
diff --git a/lib/kernel/src/logger_formatter.erl b/lib/kernel/src/logger_formatter.erl
index ded89bac9f..8696adbd72 100644
--- a/lib/kernel/src/logger_formatter.erl
+++ b/lib/kernel/src/logger_formatter.erl
@@ -64,7 +64,7 @@ format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0)
Config;
Size0 ->
Size =
- case Size0 - string:length([B,A]) of
+ case Size0 - io_lib:chars_length([B,A]) of
S when S>=0 -> S;
_ -> 0
end,
@@ -75,7 +75,11 @@ format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0)
true ->
%% Trim leading and trailing whitespaces, and replace
%% newlines with ", "
- re:replace(string:trim(MsgStr0),",?\r?\n\s*",", ",
+ T = lists:reverse(
+ trim(
+ lists:reverse(
+ trim(MsgStr0,false)),true)),
+ re:replace(T,",?\r?\n\s*",", ",
[{return,list},global,unicode]);
_false ->
MsgStr0
@@ -83,7 +87,26 @@ format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0)
true ->
""
end,
- truncate([B,MsgStr,A],maps:get(max_size,Config)).
+ truncate(B,MsgStr,A,maps:get(max_size,Config)).
+
+trim([H|T],Rev) when H==$\s; H==$\r; H==$\n ->
+ trim(T,Rev);
+trim([H|T],false) when is_list(H) ->
+ case trim(H,false) of
+ [] ->
+ trim(T,false);
+ TrimmedH ->
+ [TrimmedH|T]
+ end;
+trim([H|T],true) when is_list(H) ->
+ case trim(lists:reverse(H),true) of
+ [] ->
+ trim(T,true);
+ TrimmedH ->
+ [lists:reverse(TrimmedH)|T]
+ end;
+trim(String,_) ->
+ String.
do_format(Level,Data,[level|Format],Config) ->
[to_string(level,Level,Config)|do_format(Level,Data,Format,Config)];
@@ -239,21 +262,47 @@ chardata_to_list(Chardata) ->
throw(Error)
end.
-truncate(String,unlimited) ->
- String;
-truncate(String,Size) ->
- Length = string:length(String),
+truncate(B,Msg,A,unlimited) ->
+ [B,Msg,A];
+truncate(B,Msg,A,Size) ->
+ String = [B,Msg,A],
+ Length = io_lib:chars_length(String),
if Length>Size ->
- case lists:reverse(lists:flatten(String)) of
- [$\n|_] ->
- string:slice(String,0,Size-4)++"...\n";
+ {Last,FlatString} =
+ case A of
+ [] ->
+ case Msg of
+ [] ->
+ {get_last(B),lists:flatten(B)};
+ _ ->
+ {get_last(Msg),lists:flatten([B,Msg])}
+ end;
+ _ ->
+ {get_last(A),lists:flatten(String)}
+ end,
+ case Last of
+ $\n->
+ lists:sublist(FlatString,1,Size-4)++"...\n";
_ ->
- string:slice(String,0,Size-3)++"..."
+ lists:sublist(FlatString,1,Size-3)++"..."
end;
true ->
String
end.
+get_last(L) ->
+ get_first(lists:reverse(L)).
+
+get_first([]) ->
+ error;
+get_first([C|_]) when is_integer(C) ->
+ C;
+get_first([L|Rest]) when is_list(L) ->
+ case get_last(L) of
+ error -> get_first(Rest);
+ First -> First
+ end.
+
%% SysTime is the system time in microseconds
format_time(SysTime,#{time_offset:=Offset,time_designator:=Des})
when is_integer(SysTime) ->
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl
index e69f6de38d..16946ff97c 100644
--- a/lib/kernel/src/logger_h_common.erl
+++ b/lib/kernel/src/logger_h_common.erl
@@ -142,8 +142,9 @@ changing_config(SetOrUpdate,
maps:with(?OLP_KEYS,NewHConfig0)),
case logger_olp:set_opts(Olp,NewOlpOpts) of
ok ->
- maybe_set_repeated_filesync(Olp,OldCommonConfig,
- NewCommonConfig),
+ logger_olp:cast(Olp, {config_changed,
+ NewCommonConfig,
+ NewHandlerConfig}),
ReadOnly = maps:with(?READ_ONLY_KEYS,OldHConfig),
NewHConfig =
maps:merge(
@@ -281,11 +282,24 @@ handle_cast(repeated_filesync,
State#{handler_state => HS, last_op => sync}
end,
{noreply,set_repeated_filesync(State1)};
-
-handle_cast({set_repeated_filesync,FSyncInt},State) ->
- State1 = State#{filesync_repeat_interval=>FSyncInt},
- State2 = set_repeated_filesync(cancel_repeated_filesync(State1)),
- {noreply, State2}.
+handle_cast({config_changed, CommonConfig, HConfig},
+ State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState,
+ filesync_repeat_interval := OldFSyncInt}) ->
+ State1 =
+ case maps:get(filesync_repeat_interval,CommonConfig) of
+ OldFSyncInt ->
+ State;
+ FSyncInt ->
+ set_repeated_filesync(
+ cancel_repeated_filesync(
+ State#{filesync_repeat_interval=>FSyncInt}))
+ end,
+ HS = try Module:config_changed(Name, HConfig, HandlerState)
+ catch error:undef -> HandlerState
+ end,
+ {noreply, State1#{handler_state => HS}}.
handle_info(Info, #{id := Name, module := Module,
handler_state := HandlerState} = State) ->
@@ -351,7 +365,7 @@ log_handler_info(Name, Format, Args, #{module:=Module,
{ok,Conf} -> Conf;
_ -> #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}
end,
- Meta = #{time=>erlang:system_time(microsecond)},
+ Meta = #{time=>logger:timestamp()},
Bin = log_to_binary(#{level => notice,
msg => {Format,Args},
meta => Meta}, Config),
@@ -447,10 +461,3 @@ cancel_repeated_filesync(State) ->
end.
error_notify(Term) ->
?internal_log(error, Term).
-
-maybe_set_repeated_filesync(_Olp,
- #{filesync_repeat_interval:=FSyncInt},
- #{filesync_repeat_interval:=FSyncInt}) ->
- ok;
-maybe_set_repeated_filesync(Olp,_,#{filesync_repeat_interval:=FSyncInt}) ->
- logger_olp:cast(Olp,{set_repeated_filesync,FSyncInt}).
diff --git a/lib/kernel/src/logger_olp.erl b/lib/kernel/src/logger_olp.erl
index 009280a9c9..8365383fe2 100644
--- a/lib/kernel/src/logger_olp.erl
+++ b/lib/kernel/src/logger_olp.erl
@@ -515,10 +515,11 @@ check_load(State = #{id:=_Name, mode_ref := ModeRef, mode := Mode,
end,
State1 = ?update_other(drops,DROPS,_NewDrops,State),
State2 = ?update_max_qlen(QLen,State1),
- State3 = maybe_notify_mode_change(Mode1,State2),
+ State3 = ?update_max_mem(Mem,State2),
+ State4 = maybe_notify_mode_change(Mode1,State3),
{Mode1, QLen, Mem,
?update_other(flushes,FLUSHES,_NewFlushes,
- State3#{last_qlen => QLen})}.
+ State4#{last_qlen => QLen})}.
limit_burst(#{burst_limit_enable := false}=State) ->
{true,State};
diff --git a/lib/kernel/src/logger_olp.hrl b/lib/kernel/src/logger_olp.hrl
index 9b4f5ebf27..d68b5c048d 100644
--- a/lib/kernel/src/logger_olp.hrl
+++ b/lib/kernel/src/logger_olp.hrl
@@ -114,12 +114,16 @@
flushes => 0, flushed => 0, drops => 0,
burst_drops => 0, casts => 0, calls => 0,
writes => 0, max_qlen => 0, max_time => 0,
- freq => {TIME,0,0}} end).
+ max_mem => 0, freq => {TIME,0,0}} end).
-define(update_max_qlen(QLEN, STATE),
begin #{max_qlen := QLEN0} = STATE,
STATE#{max_qlen => ?max(QLEN0,QLEN)} end).
+ -define(update_max_mem(MEM, STATE),
+ begin #{max_mem := MEM0} = STATE,
+ STATE#{max_mem => ?max(MEM0,MEM)} end).
+
-define(update_calls_or_casts(CALL_OR_CAST, INC, STATE),
case CALL_OR_CAST of
cast ->
@@ -154,6 +158,7 @@
-else. % DEFAULT!
-define(merge_with_stats(STATE), STATE).
-define(update_max_qlen(_QLEN, STATE), STATE).
+ -define(update_max_mem(_MEM, STATE), STATE).
-define(update_calls_or_casts(_CALL_OR_CAST, _INC, STATE), STATE).
-define(update_max_time(_TIME, STATE), STATE).
-define(update_other(_OTHER, _VAR, _INCVAL, STATE), STATE).
diff --git a/lib/kernel/src/logger_simple_h.erl b/lib/kernel/src/logger_simple_h.erl
index fe181722f3..a0d51dba25 100644
--- a/lib/kernel/src/logger_simple_h.erl
+++ b/lib/kernel/src/logger_simple_h.erl
@@ -69,7 +69,7 @@ log(#{msg:=_,meta:=#{time:=_}}=Log,_Config) ->
do_log(
#{level=>error,
msg=>{report,{error,simple_handler_process_dead}},
- meta=>#{time=>erlang:system_time(microsecond)}}),
+ meta=>#{time=>logger:timestamp()}}),
do_log(Log);
_ ->
?MODULE ! {log,Log}
@@ -129,7 +129,7 @@ drop_msg(0) ->
drop_msg(N) ->
[#{level=>info,
msg=>{"Simple handler buffer full, dropped ~w messages",[N]},
- meta=>#{time=>erlang:system_time(microsecond)}}].
+ meta=>#{time=>logger:timestamp()}}].
%%%-----------------------------------------------------------------
%%% Internal
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
index 65f5b3876e..c8f1acfca4 100644
--- a/lib/kernel/src/logger_std_h.erl
+++ b/lib/kernel/src/logger_std_h.erl
@@ -29,7 +29,7 @@
-export([filesync/1]).
%% logger_h_common callbacks
--export([init/2, check_config/4, reset_state/2,
+-export([init/2, check_config/4, config_changed/3, reset_state/2,
filesync/3, write/4, handle_info/3, terminate/3]).
%% logger callbacks
@@ -105,85 +105,169 @@ filter_config(Config) ->
%%%===================================================================
%%% logger_h_common callbacks
%%%===================================================================
-init(Name, #{type := Type}) ->
- case open_log_file(Name, Type) of
+init(Name, Config) ->
+ MyConfig = maps:with([type,file,modes,file_check,max_no_bytes,
+ max_no_files,compress_on_rotate],Config),
+ case file_ctrl_start(Name, MyConfig) of
{ok,FileCtrlPid} ->
- {ok,#{type=>Type,file_ctrl_pid=>FileCtrlPid}};
+ {ok,MyConfig#{file_ctrl_pid=>FileCtrlPid}};
Error ->
Error
end.
-check_config(_Name,set,undefined,NewHConfig) ->
- check_config(maps:merge(get_default_config(),NewHConfig));
-check_config(_Name,SetOrUpdate,OldHConfig,NewHConfig0) ->
- WriteOnce = maps:with([type],OldHConfig),
+check_config(Name,set,undefined,NewHConfig) ->
+ check_h_config(merge_default_config(Name,normalize_config(NewHConfig)));
+check_config(Name,SetOrUpdate,OldHConfig,NewHConfig0) ->
+ WriteOnce = maps:with([type,file,modes],OldHConfig),
Default =
case SetOrUpdate of
set ->
%% Do not reset write-once fields to defaults
- maps:merge(get_default_config(),WriteOnce);
+ merge_default_config(Name,WriteOnce);
update ->
OldHConfig
end,
- NewHConfig = maps:merge(Default, NewHConfig0),
+ NewHConfig = maps:merge(Default, normalize_config(NewHConfig0)),
%% Fail if write-once fields are changed
- case maps:with([type],NewHConfig) of
+ case maps:with([type,file,modes],NewHConfig) of
WriteOnce ->
- check_config(NewHConfig);
+ check_h_config(NewHConfig);
Other ->
{error,{illegal_config_change,?MODULE,WriteOnce,Other}}
end.
-check_config(#{type:=Type}=HConfig) ->
- case check_h_config(maps:to_list(HConfig)) of
- ok when is_atom(Type) ->
- {ok,HConfig#{filesync_repeat_interval=>no_repeat}};
+check_h_config(HConfig) ->
+ case check_h_config(maps:get(type,HConfig),maps:to_list(HConfig)) of
ok ->
- {ok,HConfig};
+ {ok,fix_file_opts(HConfig)};
{error,{Key,Value}} ->
{error,{invalid_config,?MODULE,#{Key=>Value}}}
end.
-check_h_config([{type,Type} | Config]) when Type == standard_io;
- Type == standard_error ->
- check_h_config(Config);
-check_h_config([{type,{file,File}} | Config]) when is_list(File) ->
- check_h_config(Config);
-check_h_config([{type,{file,File,Modes}} | Config]) when is_list(File),
- is_list(Modes) ->
- check_h_config(Config);
-check_h_config([Other | _]) ->
+check_h_config(Type,[{type,Type} | Config]) when Type =:= standard_io;
+ Type =:= standard_error;
+ Type =:= file ->
+ check_h_config(Type,Config);
+check_h_config(file,[{file,File} | Config]) when is_list(File) ->
+ check_h_config(file,Config);
+check_h_config(file,[{modes,Modes} | Config]) when is_list(Modes) ->
+ check_h_config(file,Config);
+check_h_config(file,[{max_no_bytes,Size} | Config])
+ when (is_integer(Size) andalso Size>0) orelse Size=:=infinity ->
+ check_h_config(file,Config);
+check_h_config(file,[{max_no_files,Num} | Config]) when is_integer(Num), Num>=0 ->
+ check_h_config(file,Config);
+check_h_config(file,[{compress_on_rotate,Bool} | Config]) when is_boolean(Bool) ->
+ check_h_config(file,Config);
+check_h_config(file,[{file_check,FileCheck} | Config])
+ when is_integer(FileCheck), FileCheck>=0 ->
+ check_h_config(file,Config);
+check_h_config(_Type,[Other | _]) ->
{error,Other};
-check_h_config([]) ->
+check_h_config(_Type,[]) ->
ok.
-get_default_config() ->
- #{type => standard_io}.
+normalize_config(#{type:={file,File}}=HConfig) ->
+ HConfig#{type=>file,file=>File};
+normalize_config(#{type:={file,File,Modes}}=HConfig) ->
+ HConfig#{type=>file,file=>File,modes=>Modes};
+normalize_config(HConfig) ->
+ HConfig.
+
+merge_default_config(Name,#{type:=Type}=HConfig) ->
+ merge_default_config(Name,Type,HConfig);
+merge_default_config(Name,#{file:=_}=HConfig) ->
+ merge_default_config(Name,file,HConfig);
+merge_default_config(Name,HConfig) ->
+ merge_default_config(Name,standard_io,HConfig).
+
+merge_default_config(Name,Type,HConfig) ->
+ maps:merge(get_default_config(Name,Type),HConfig).
+
+get_default_config(Name,file) ->
+ #{type => file,
+ file => atom_to_list(Name),
+ modes => [raw,append],
+ file_check => 0,
+ max_no_bytes => infinity,
+ max_no_files => 0,
+ compress_on_rotate => false};
+get_default_config(_Name,Type) ->
+ #{type => Type}.
+
+fix_file_opts(#{modes:=Modes}=HConfig) ->
+ HConfig#{modes=>fix_modes(Modes)};
+fix_file_opts(HConfig) ->
+ HConfig#{filesync_repeat_interval=>no_repeat}.
+
+fix_modes(Modes) ->
+ %% Ensure write|append|exclusive
+ Modes1 =
+ case [M || M <- Modes,
+ lists:member(M,[write,append,exclusive])] of
+ [] -> [append|Modes];
+ _ -> Modes
+ end,
+ %% Ensure raw
+ Modes2 =
+ case lists:member(raw,Modes) of
+ false -> [raw|Modes1];
+ true -> Modes1
+ end,
+ %% Ensure delayed_write
+ case lists:partition(fun(delayed_write) -> true;
+ ({delayed_write,_,_}) -> true;
+ (_) -> false
+ end, Modes2) of
+ {[],_} ->
+ [delayed_write|Modes2];
+ _ ->
+ Modes2
+ end.
-filesync(_Name, _Mode, #{type := Type}=State) when is_atom(Type) ->
- {ok,State};
-filesync(_Name, async, #{file_ctrl_pid := FileCtrlPid} = State) ->
- ok = file_ctrl_filesync_async(FileCtrlPid),
- {ok,State};
-filesync(_Name, sync, #{file_ctrl_pid := FileCtrlPid} = State) ->
- Result = file_ctrl_filesync_sync(FileCtrlPid),
+config_changed(_Name,
+ #{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress},
+ #{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress}=State) ->
+ State;
+config_changed(_Name,
+ #{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress},
+ #{file_ctrl_pid := FileCtrlPid} = State) ->
+ FileCtrlPid ! {update_config,#{file_check=>FileCheck,
+ max_no_bytes=>Size,
+ max_no_files=>Count,
+ compress_on_rotate=>Compress}},
+ State#{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress};
+config_changed(_Name,_NewHConfig,State) ->
+ State.
+
+filesync(_Name, SyncAsync, #{file_ctrl_pid := FileCtrlPid} = State) ->
+ Result = file_ctrl_filesync(SyncAsync, FileCtrlPid),
{Result,State}.
-write(_Name, async, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
- ok = file_write_async(FileCtrlPid, Bin),
- {ok,State};
-write(_Name, sync, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
- Result = file_write_sync(FileCtrlPid, Bin),
+write(_Name, SyncAsync, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
+ Result = file_write(SyncAsync, FileCtrlPid, Bin),
{Result,State}.
reset_state(_Name, State) ->
State.
-handle_info(_Name, {'EXIT',Pid,Why}, #{type := FileInfo, file_ctrl_pid := Pid}) ->
+handle_info(_Name, {'EXIT',Pid,Why}, #{file_ctrl_pid := Pid}=State) ->
%% file_ctrl_pid died, file error, terminate handler
- exit({error,{write_failed,FileInfo,Why}});
+ exit({error,{write_failed,maps:with([type,file,modes],State),Why}});
handle_info(_, _, State) ->
State.
@@ -211,27 +295,33 @@ terminate(_Name, _Reason, #{file_ctrl_pid:=FWPid}) ->
%%%-----------------------------------------------------------------
%%%
-open_log_file(HandlerName, FileInfo) ->
- case file_ctrl_start(HandlerName, FileInfo) of
- OK = {ok,_FileCtrlPid} -> OK;
- Error -> Error
- end.
-
-do_open_log_file({file,FileName}) ->
- do_open_log_file({file,FileName,[raw,append,delayed_write]});
-
-do_open_log_file({file,FileName,[]}) ->
- do_open_log_file({file,FileName,[raw,append,delayed_write]});
-
-do_open_log_file({file,FileName,Modes}) ->
+open_log_file(HandlerName,#{type:=file,
+ file:=FileName,
+ modes:=Modes,
+ file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress}) ->
try
case filelib:ensure_dir(FileName) of
ok ->
case file:open(FileName, Modes) of
{ok, Fd} ->
{ok,#file_info{inode=INode}} =
- file:read_file_info(FileName),
- {ok, {Fd, INode}};
+ file:read_file_info(FileName,[raw]),
+ UpdateModes = [append | Modes--[write,append,exclusive]],
+ State0 = #{handler_name=>HandlerName,
+ file_name=>FileName,
+ modes=>UpdateModes,
+ file_check=>FileCheck,
+ fd=>Fd,
+ inode=>INode,
+ last_check=>timestamp(),
+ synced=>false,
+ write_res=>ok,
+ sync_res=>ok},
+ State = update_rotation({Size,Count,Compress},State0),
+ {ok,State};
Error ->
Error
end;
@@ -242,21 +332,23 @@ do_open_log_file({file,FileName,Modes}) ->
_:Reason -> {error,Reason}
end.
-close_log_file(Std) when Std == standard_io; Std == standard_error ->
- ok;
-close_log_file({Fd,_}) ->
+close_log_file(#{fd:=Fd}) ->
_ = file:datasync(Fd),
- _ = file:close(Fd).
+ _ = file:close(Fd),
+ ok;
+close_log_file(_) ->
+ ok.
+
%%%-----------------------------------------------------------------
%%% File control process
-file_ctrl_start(HandlerName, FileInfo) ->
+file_ctrl_start(HandlerName, HConfig) ->
Starter = self(),
FileCtrlPid =
spawn_link(fun() ->
- file_ctrl_init(HandlerName, FileInfo, Starter)
+ file_ctrl_init(HandlerName, HConfig, Starter)
end),
receive
{FileCtrlPid,ok} ->
@@ -271,18 +363,16 @@ file_ctrl_start(HandlerName, FileInfo) ->
file_ctrl_stop(Pid) ->
Pid ! stop.
-file_write_async(Pid, Bin) ->
+file_write(async, Pid, Bin) ->
Pid ! {log,Bin},
- ok.
-
-file_write_sync(Pid, Bin) ->
+ ok;
+file_write(sync, Pid, Bin) ->
file_ctrl_call(Pid, {log,Bin}).
-file_ctrl_filesync_async(Pid) ->
+file_ctrl_filesync(async, Pid) ->
Pid ! filesync,
- ok.
-
-file_ctrl_filesync_sync(Pid) ->
+ ok;
+file_ctrl_filesync(sync, Pid) ->
file_ctrl_call(Pid, filesync).
file_ctrl_call(Pid, Msg) ->
@@ -299,98 +389,255 @@ file_ctrl_call(Pid, Msg) ->
{error,{no_response,Pid}}
end.
-file_ctrl_init(HandlerName, FileInfo, Starter) when is_tuple(FileInfo) ->
+file_ctrl_init(HandlerName,
+ #{type:=file,
+ file:=FileName} = HConfig,
+ Starter) ->
process_flag(message_queue_data, off_heap),
- FileName = element(2, FileInfo),
- case do_open_log_file(FileInfo) of
- {ok,File} ->
+ case open_log_file(HandlerName,HConfig) of
+ {ok,State} ->
Starter ! {self(),ok},
- file_ctrl_loop(File, FileName, false, ok, ok, HandlerName);
+ file_ctrl_loop(State);
{error,Reason} ->
Starter ! {self(),{error,{open_failed,FileName,Reason}}}
end;
-file_ctrl_init(HandlerName, StdDev, Starter) ->
+file_ctrl_init(HandlerName, #{type:=StdDev}, Starter) ->
Starter ! {self(),ok},
- file_ctrl_loop(StdDev, StdDev, false, ok, ok, HandlerName).
+ file_ctrl_loop(#{handler_name=>HandlerName,dev=>StdDev}).
-file_ctrl_loop(File, DevName, Synced,
- PrevWriteResult, PrevSyncResult, HandlerName) ->
+file_ctrl_loop(State) ->
receive
%% asynchronous event
{log,Bin} ->
- File1 = ensure(File, DevName),
- Result = write_to_dev(File1, Bin, DevName,
- PrevWriteResult, HandlerName),
- file_ctrl_loop(File1, DevName, false,
- Result, PrevSyncResult, HandlerName);
+ State1 = write_to_dev(Bin,State),
+ file_ctrl_loop(State1);
%% synchronous event
{{log,Bin},{From,MRef}} ->
- File1 = ensure(File, DevName),
- Result = write_to_dev(File1, Bin, DevName,
- PrevWriteResult, HandlerName),
+ State1 = ensure_file(State),
+ State2 = write_to_dev(Bin,State1),
From ! {MRef,ok},
- file_ctrl_loop(File1, DevName, false,
- Result, PrevSyncResult, HandlerName);
+ file_ctrl_loop(State2);
filesync ->
- File1 = ensure(File, DevName),
- Result = sync_dev(File1, DevName, Synced,
- PrevSyncResult, HandlerName),
- file_ctrl_loop(File1, DevName, true,
- PrevWriteResult, Result, HandlerName);
+ State1 = sync_dev(State),
+ file_ctrl_loop(State1);
{filesync,{From,MRef}} ->
- File1 = ensure(File, DevName),
- Result = sync_dev(File1, DevName, Synced,
- PrevSyncResult, HandlerName),
+ State1 = ensure_file(State),
+ State2 = sync_dev(State1),
From ! {MRef,ok},
- file_ctrl_loop(File1, DevName, true,
- PrevWriteResult, Result, HandlerName);
+ file_ctrl_loop(State2);
+
+ {update_config,#{file_check:=FileCheck,
+ max_no_bytes:=Size,
+ max_no_files:=Count,
+ compress_on_rotate:=Compress}} ->
+ State1 = update_rotation({Size,Count,Compress},State),
+ file_ctrl_loop(State1#{file_check=>FileCheck});
stop ->
- _ = close_log_file(File),
+ close_log_file(State),
stopped
end.
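+%% Re-check the log file at most once per 'file_check' milliseconds;
+%% file_check=0 means check before every write.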
+maybe_ensure_file(#{file_check:=0}=State) ->
+ ensure_file(State);
+maybe_ensure_file(#{last_check:=T0,file_check:=CheckInt}=State)
+ when is_integer(CheckInt) ->
+ T = timestamp(),
+ if T-T0 > CheckInt -> ensure_file(State);
+ true -> State
+ end;
+maybe_ensure_file(State) ->
+ State.
+
%% In order to play well with tools like logrotate, we need to be able
%% to re-create the file if it has disappeared (e.g. if rotated by
%% logrotate)
-ensure(Fd,DevName) when is_atom(DevName) ->
- Fd;
-ensure({Fd,INode},FileName) ->
- case file:read_file_info(FileName) of
- {ok,#file_info{inode=INode}} ->
- {Fd,INode};
+ensure_file(#{fd:=Fd0,inode:=INode0,file_name:=FileName,modes:=Modes}=State) ->
+ case file:read_file_info(FileName,[raw]) of
+ {ok,#file_info{inode=INode0}} ->
+ State#{last_check=>timestamp()};
_ ->
- _ = file:close(Fd),
- _ = file:close(Fd), % delayed_write cause close not to close
- case do_open_log_file({file,FileName}) of
- {ok,File} ->
- File;
+ close_log_file(Fd0),
+ case file:open(FileName,Modes) of
+ {ok,Fd} ->
+ {ok,#file_info{inode=INode}} =
+ file:read_file_info(FileName,[raw]),
+ State#{fd=>Fd,inode=>INode,
+ last_check=>timestamp(),
+ synced=>true,sync_res=>ok};
Error ->
exit({could_not_reopen_file,Error})
end
- end.
+ end;
+ensure_file(State) ->
+ State.
-write_to_dev(DevName, Bin, _DevName, _PrevWriteResult, _HandlerName)
- when is_atom(DevName) ->
- io:put_chars(DevName, Bin);
-write_to_dev({Fd,_}, Bin, FileName, PrevWriteResult, HandlerName) ->
+write_to_dev(Bin,#{dev:=DevName}=State) ->
+ io:put_chars(DevName, Bin),
+ State;
+write_to_dev(Bin, State) ->
+ State1 = #{fd:=Fd} = maybe_ensure_file(State),
Result = ?file_write(Fd, Bin),
- maybe_notify_error(write,Result,PrevWriteResult,FileName,HandlerName).
+ State2 = maybe_rotate_file(Bin,State1),
+ maybe_notify_error(write,Result,State2),
+ State2#{synced=>false,write_res=>Result}.
-sync_dev(_, _FileName, true, PrevSyncResult, _HandlerName) ->
- PrevSyncResult;
-sync_dev({Fd,_}, FileName, false, PrevSyncResult, HandlerName) ->
+sync_dev(#{synced:=false}=State) ->
+ State1 = #{fd:=Fd} = maybe_ensure_file(State),
Result = ?file_datasync(Fd),
- maybe_notify_error(filesync,Result,PrevSyncResult,FileName,HandlerName).
+ maybe_notify_error(filesync,Result,State1),
+ State1#{synced=>true,sync_res=>Result};
+sync_dev(State) ->
+ State.
-maybe_notify_error(_Op, ok, _PrevResult, _FileName, _HandlerName) ->
+update_rotation({infinity,_,_},State) ->
+ maybe_remove_archives(0,State),
+ maps:remove(rotation,State);
+update_rotation({Size,Count,Compress},#{file_name:=FileName} = State) ->
+ maybe_remove_archives(Count,State),
+ {ok,#file_info{size=CurrSize}} = file:read_file_info(FileName,[raw]),
+ State1 = State#{rotation=>#{size=>Size,
+ count=>Count,
+ compress=>Compress,
+ curr_size=>CurrSize}},
+ maybe_update_compress(0,State1),
+ maybe_rotate_file(0,State1).
+
+maybe_remove_archives(Count,#{file_name:=FileName}=State) ->
+ Archive = rot_file_name(FileName,Count,false),
+ CompressedArchive = rot_file_name(FileName,Count,true),
+ case {file:read_file_info(Archive,[raw]),
+ file:read_file_info(CompressedArchive,[raw])} of
+ {{error,enoent},{error,enoent}} ->
+ ok;
+ _ ->
+ _ = file:delete(Archive),
+ _ = file:delete(CompressedArchive),
+ maybe_remove_archives(Count+1,State)
+ end.
+
+maybe_update_compress(Count,#{rotation:=#{count:=Count}}) ->
+ ok;
+maybe_update_compress(N,#{file_name:=FileName,
+ rotation:=#{compress:=Compress}}=State) ->
+ Archive = rot_file_name(FileName,N,not Compress),
+ case file:read_file_info(Archive,[raw]) of
+ {ok,_} when Compress ->
+ compress_file(Archive);
+ {ok,_} ->
+ decompress_file(Archive);
+ _ ->
+ ok
+ end,
+ maybe_update_compress(N+1,State).
+
+maybe_rotate_file(Bin,#{rotation:=_}=State) when is_binary(Bin) ->
+ maybe_rotate_file(byte_size(Bin),State);
+maybe_rotate_file(AddSize,#{rotation:=#{size:=RotSize,
+ curr_size:=CurrSize}=Rotation}=State) ->
+ NewSize = CurrSize + AddSize,
+ if NewSize>RotSize ->
+ rotate_file(State#{rotation=>Rotation#{curr_size=>NewSize}});
+ true ->
+ State#{rotation=>Rotation#{curr_size=>NewSize}}
+ end;
+maybe_rotate_file(_Bin,State) ->
+ State.
+
+rotate_file(#{fd:=Fd0,file_name:=FileName,modes:=Modes,rotation:=Rotation}=State) ->
+ State1 = sync_dev(State),
+ _ = file:close(Fd0),
+ _ = file:close(Fd0),
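+    %% With delayed_write, the first close may not actually close the
+    %% file, so close twice.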
+ rotate_files(FileName,maps:get(count,Rotation),maps:get(compress,Rotation)),
+ case file:open(FileName,Modes) of
+ {ok,Fd} ->
+ {ok,#file_info{inode=INode}} = file:read_file_info(FileName,[raw]),
+ State1#{fd=>Fd,inode=>INode,rotation=>Rotation#{curr_size=>0}};
+ Error ->
+ exit({could_not_reopen_file,Error})
+ end.
+
+rotate_files(FileName,0,_Compress) ->
+ _ = file:delete(FileName),
+ ok;
+rotate_files(FileName,1,Compress) ->
+ FileName0 = FileName++".0",
+ _ = file:rename(FileName,FileName0),
+ if Compress -> compress_file(FileName0);
+ true -> ok
+ end,
ok;
-maybe_notify_error(_Op, PrevResult, PrevResult, _FileName, _HandlerName) ->
+rotate_files(FileName,Count,Compress) ->
+ _ = file:rename(rot_file_name(FileName,Count-2,Compress),
+ rot_file_name(FileName,Count-1,Compress)),
+ rotate_files(FileName,Count-1,Compress).
+
+rot_file_name(FileName,Count,false) ->
+ FileName ++ "." ++ integer_to_list(Count);
+rot_file_name(FileName,Count,true) ->
+ rot_file_name(FileName,Count,false) ++ ".gz".
+
+compress_file(FileName) ->
+ {ok,In} = file:open(FileName,[read,binary]),
+ {ok,Out} = file:open(FileName++".gz",[write]),
+ Z = zlib:open(),
+ zlib:deflateInit(Z, default, deflated, 31, 8, default),
+ compress_data(Z,In,Out),
+ zlib:deflateEnd(Z),
+ zlib:close(Z),
+ _ = file:close(In),
+ _ = file:close(Out),
+ _ = file:delete(FileName),
+ ok.
+
+compress_data(Z,In,Out) ->
+ case file:read(In,100000) of
+ {ok,Data} ->
+ Compressed = zlib:deflate(Z, Data),
+ _ = file:write(Out,Compressed),
+ compress_data(Z,In,Out);
+ eof ->
+ Compressed = zlib:deflate(Z, <<>>, finish),
+ _ = file:write(Out,Compressed),
+ ok
+ end.
+
+decompress_file(FileName) ->
+ {ok,In} = file:open(FileName,[read,binary]),
+ {ok,Out} = file:open(filename:rootname(FileName,".gz"),[write]),
+ Z = zlib:open(),
+ zlib:inflateInit(Z, 31),
+ decompress_data(Z,In,Out),
+ zlib:inflateEnd(Z),
+ zlib:close(Z),
+ _ = file:close(In),
+ _ = file:close(Out),
+ _ = file:delete(FileName),
+ ok.
+
+decompress_data(Z,In,Out) ->
+ case file:read(In,1000) of
+ {ok,Data} ->
+ Decompressed = zlib:inflate(Z, Data),
+ _ = file:write(Out,Decompressed),
+ decompress_data(Z,In,Out);
+ eof ->
+ ok
+ end.
+
+maybe_notify_error(_Op, ok, _State) ->
+ ok;
+maybe_notify_error(Op, Result, #{write_res:=WR,sync_res:=SR})
+ when (Op==write andalso Result==WR) orelse
+ (Op==filesync andalso Result==SR) ->
%% don't report same error twice
- PrevResult;
-maybe_notify_error(Op, Error, _PrevResult, FileName, HandlerName) ->
+ ok;
+maybe_notify_error(Op, Error, #{handler_name:=HandlerName,file_name:=FileName}) ->
logger_h_common:error_notify({HandlerName,Op,FileName,Error}),
- Error.
+ ok.
+
+timestamp() ->
+ erlang:monotonic_time(millisecond).
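For reference, the rotation settings added above (max_no_bytes, max_no_files, compress_on_rotate) are ordinary handler-config keys of logger_std_h. A minimal sketch of enabling rotation (the handler id, file name and limits below are arbitrary examples, not taken from this patch):

    %% Sketch only: rotate when the file exceeds ~10 kB, keep two
    %% compressed archives (my.log.0.gz is the most recent one).
    ok = logger:add_handler(my_rotating_h, logger_std_h,
                            #{config => #{file => "my.log",
                                          max_no_bytes => 10000,
                                          max_no_files => 2,
                                          compress_on_rotate => true}}),
    %% The same keys can be changed at run time:
    ok = logger:update_handler_config(my_rotating_h, config,
                                      #{max_no_files => 5}).

As rot_file_name/3 above shows, archives are named <file>.0, <file>.1, ... (with a .gz suffix when compressed), index 0 being the most recently rotated file.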
diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl
index 5c35b82207..1ab554db7c 100644
--- a/lib/kernel/test/application_SUITE.erl
+++ b/lib/kernel/test/application_SUITE.erl
@@ -31,6 +31,7 @@
otp_3002/1, otp_3184/1, otp_4066/1, otp_4227/1, otp_5363/1,
otp_5606/1,
start_phases/1, get_key/1, get_env/1,
+ set_env/1, set_env_persistent/1, set_env_errors/1,
permit_false_start_local/1, permit_false_start_dist/1, script_start/1,
nodedown_start/1, init2973/0, loop2973/0, loop5606/1]).
@@ -55,6 +56,7 @@ all() ->
load_use_cache, ensure_started, {group, reported_bugs}, start_phases,
script_start, nodedown_start, permit_false_start_local,
permit_false_start_dist, get_key, get_env, ensure_all_started,
+ set_env, set_env_persistent, set_env_errors,
{group, distr_changed}, config_change, shutdown_func, shutdown_timeout,
shutdown_deadlock, config_relative_paths,
persistent_env].
@@ -1944,6 +1946,94 @@ get_appls([_ | T], Res) ->
get_appls([], Res) ->
Res.
+%% Test set_env/1.
+set_env(Conf) when is_list(Conf) ->
+ ok = application:set_env([{appinc, [{own2, persist}, {not_in_app, persist}]},
+ {unknown_app, [{key, persist}]}]),
+
+ %% own_env1 and own2 are set in appinc
+ undefined = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, val2} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ %% On reload, values are lost
+ ok = application:unload(appinc),
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, val2} = application:get_env(appinc, own2),
+ undefined = application:get_env(appinc, not_in_app),
+
+ %% Clean up
+ ok = application:unload(appinc).
+
+%% Test set_env/2 with persistent true.
+set_env_persistent(Conf) when is_list(Conf) ->
+ Opts = [{persistent, true}],
+ ok = application:set_env([{appinc, [{own2, persist}, {not_in_app, persist}]},
+ {unknown_app, [{key, persist}]}], Opts),
+
+ %% own_env1 and own2 are set in appinc
+ undefined = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ %% On reload, values are not lost
+ ok = application:unload(appinc),
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+
+ %% Clean up
+ ok = application:unload(appinc).
+
+set_env_errors(Conf) when is_list(Conf) ->
+ "application: 1; application name must be an atom" =
+ badarg_msg(fun() -> application:set_env([{1, []}]) end),
+
+ "application: foo; parameters must be a list" =
+ badarg_msg(fun() -> application:set_env([{foo, bar}]) end),
+
+ "invalid application config: foo_bar" =
+ badarg_msg(fun() -> application:set_env([foo_bar]) end),
+
+ "application: foo; invalid parameter name: 1" =
+ badarg_msg(fun() -> application:set_env([{foo, [{1, 2}]}]) end),
+
+ "application: foo; invalid parameter: config" =
+ badarg_msg(fun() -> application:set_env([{foo, [config]}]) end),
+
+ "application: kernel; erroneous parameter: distributed" =
+ badarg_msg(fun() -> application:set_env([{kernel, [{distributed, config}]}]) end),
+
+ "duplicate application config: foo" =
+ badarg_msg(fun() -> application:set_env([{foo, []}, {foo, []}]) end),
+
+ "application: foo; duplicate parameter: bar" =
+ badarg_msg(fun() -> application:set_env([{foo, [{bar, baz}, {bar, bat}]}]) end),
+
+ ok.
+
+badarg_msg(Fun) ->
+ try Fun() of
+ _ -> ct:fail(try_succeeded)
+ catch
+ error:{badarg, Msg} -> Msg
+ end.
%% Test set_env/4 and unset_env/3 with persistent true.
persistent_env(Conf) when is_list(Conf) ->
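As the new set_env/1,2 test cases above show, the argument is a list of {Application, [{Key, Value}]} pairs, optionally followed by an options list. A small usage sketch (application and parameter names are made up for illustration):

    %% Sketch only: set parameters for two applications at once and
    %% keep them across load/unload with the 'persistent' option.
    ok = application:set_env([{my_app,    [{pool_size, 10}]},
                              {other_app, [{debug, true}]}],
                             [{persistent, true}]),
    {ok, 10} = application:get_env(my_app, pool_size).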
diff --git a/lib/kernel/test/code_SUITE.erl b/lib/kernel/test/code_SUITE.erl
index 64e0b9d8dd..99fecbe970 100644
--- a/lib/kernel/test/code_SUITE.erl
+++ b/lib/kernel/test/code_SUITE.erl
@@ -140,6 +140,11 @@ end_per_testcase(on_load_embedded, Config) ->
LinkName = proplists:get_value(link_name, Config),
_ = del_link(LinkName),
end_per_testcase(Config);
+end_per_testcase(upgrade, Config) ->
+ %% Make sure tracing is turned off even if the test times out.
+ erlang:trace_pattern({error_handler,undefined_function,3}, false, [global]),
+ erlang:trace(self(), false, [call]),
+ end_per_testcase(Config);
end_per_testcase(_Func, Config) ->
end_per_testcase(Config).
@@ -1556,6 +1561,11 @@ on_load_update_code_1(3, Mod) ->
%% Test -on_load while trace feature 'on_load' is enabled (OTP-14612)
on_load_trace_on_load(Config) ->
+ %% 'on_load' enables tracing for all newly loaded modules, so we make a dry
+ %% run to ensure that ancillary modules like 'merl' won't be loaded during
+ %% the actual test.
+ on_load_update(Config),
+
Papa = self(),
Tracer = spawn_link(fun F() -> receive M -> Papa ! M end, F() end),
{tracer,[]} = erlang:trace_info(self(),tracer),
diff --git a/lib/kernel/test/gen_sctp_SUITE.erl b/lib/kernel/test/gen_sctp_SUITE.erl
index a0ae792ba9..e4c489bd10 100644
--- a/lib/kernel/test/gen_sctp_SUITE.erl
+++ b/lib/kernel/test/gen_sctp_SUITE.erl
@@ -1459,11 +1459,11 @@ do_open_and_connect(ServerAddresses, AddressToConnectTo) ->
do_open_and_connect(ServerAddresses, AddressToConnectTo, Fun).
%%
do_open_and_connect(ServerAddresses, AddressToConnectTo, Fun) ->
- ServerFamily = get_family_by_addrs(ServerAddresses),
+ {ServerFamily, ServerOpts} = get_family_by_addrs(ServerAddresses),
io:format("Serving ~p addresses: ~p~n",
[ServerFamily, ServerAddresses]),
S1 = ok(gen_sctp:open(0, [{ip,Addr} || Addr <- ServerAddresses] ++
- [ServerFamily])),
+ [ServerFamily|ServerOpts])),
ok = gen_sctp:listen(S1, true),
P1 = ok(inet:port(S1)),
ClientFamily = get_family_by_addr(AddressToConnectTo),
@@ -1493,9 +1493,9 @@ do_open_and_connect(ServerAddresses, AddressToConnectTo, Fun) ->
%% If at least one of the addresses is an ipv6 address, return inet6, else inet.
get_family_by_addrs(Addresses) ->
case lists:usort([get_family_by_addr(Addr) || Addr <- Addresses]) of
- [inet, inet6] -> inet6;
- [inet] -> inet;
- [inet6] -> inet6
+ [inet, inet6] -> {inet6, [{ipv6_v6only, false}]};
+ [inet] -> {inet, []};
+ [inet6] -> {inet6, []}
end.
get_family_by_addr(Addr) when tuple_size(Addr) =:= 4 -> inet;
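The change above adds {ipv6_v6only, false} when the served addresses mix IPv4 and IPv6, so the inet6 socket also accepts IPv4 traffic. A stand-alone sketch of such an open call (port 0, no specific addresses; whether this is honoured depends on OS support for ipv6_v6only=false):

    %% Sketch only: a dual-stack SCTP listen socket.
    {ok, S} = gen_sctp:open(0, [inet6, {ipv6_v6only, false}]),
    ok = gen_sctp:listen(S, true).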
diff --git a/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c b/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c
index b91dca61d4..96938f9071 100644
--- a/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c
+++ b/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c
@@ -30,6 +30,7 @@
#define sock_close(s) closesocket(s)
#else
#include <sys/socket.h>
+#include <unistd.h>
#define sock_close(s) close(s)
#endif
diff --git a/lib/kernel/test/gen_tcp_misc_SUITE.erl b/lib/kernel/test/gen_tcp_misc_SUITE.erl
index 52edfaee29..edf30448c4 100644
--- a/lib/kernel/test/gen_tcp_misc_SUITE.erl
+++ b/lib/kernel/test/gen_tcp_misc_SUITE.erl
@@ -2086,8 +2086,39 @@ test_pktoptions(Family, Spec, CheckConnect, OSType, OSVer) ->
%%% {ok,<<"hi">>} = gen_tcp:recv(S1, 2, Timeout),
%%
%% Verify returned remote options
- {ok,[{pktoptions,OptsVals1}]} = inet:getopts(S1, [pktoptions]),
- {ok,[{pktoptions,OptsVals2}]} = inet:getopts(S2, [pktoptions]),
+ VerifyRemOpts =
+ fun(S, Role) ->
+ case inet:getopts(S, [pktoptions]) of
+ {ok, [{pktoptions, PktOpts1}]} ->
+ PktOpts1;
+ {ok, UnexpOK1} ->
+ io:format("Unexpected OK (~w): "
+ "~n ~p"
+ "~n", [Role, UnexpOK1]),
+ exit({unexpected_getopts_ok,
+ Role,
+ Spec,
+ TrueRecvOpts,
+ OptsVals,
+ OptsValsDefault,
+ UnexpOK1});
+ {error, UnexpERR1} ->
+ io:format("Unexpected ERROR (~w): "
+ "~n ~p"
+ "~n", [Role, UnexpERR1]),
+ exit({unexpected_getopts_failure,
+ Role,
+ Spec,
+ TrueRecvOpts,
+ OptsVals,
+ OptsValsDefault,
+ UnexpERR1})
+ end
+ end,
+ OptsVals1 = VerifyRemOpts(S1, dest),
+ OptsVals2 = VerifyRemOpts(S2, orig),
+ %% {ok,[{pktoptions,OptsVals1}]} = inet:getopts(S1, [pktoptions]),
+ %% {ok,[{pktoptions,OptsVals2}]} = inet:getopts(S2, [pktoptions]),
(Result1 = sets_eq(OptsVals1, OptsVals))
orelse io:format(
"Accept differs: ~p neq ~p~n", [OptsVals1,OptsVals]),
@@ -3430,7 +3461,7 @@ wait(Mref) ->
%% OTP-15536
%% Test that send error works correctly for delay_send
-delay_send_error(Config) ->
+delay_send_error(_Config) ->
{ok, LS} = gen_tcp:listen(0, [{reuseaddr, true}, {packet, 1}, {active, false}]),
{ok,{{0,0,0,0},PortNum}}=inet:sockname(LS),
P = spawn_link(
diff --git a/lib/kernel/test/inet_SUITE.erl b/lib/kernel/test/inet_SUITE.erl
index 8b33f4a679..44ec7e7076 100644
--- a/lib/kernel/test/inet_SUITE.erl
+++ b/lib/kernel/test/inet_SUITE.erl
@@ -21,6 +21,7 @@
-include_lib("common_test/include/ct.hrl").
-include_lib("kernel/include/inet.hrl").
+-include_lib("kernel/src/inet_res.hrl").
-include_lib("kernel/src/inet_dns.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
@@ -34,7 +35,7 @@
ipv4_to_ipv6/0, ipv4_to_ipv6/1,
host_and_addr/0, host_and_addr/1,
t_gethostnative/1,
- gethostnative_parallell/1, cname_loop/1,
+ gethostnative_parallell/1, cname_loop/1, missing_hosts_reload/1,
gethostnative_soft_restart/0, gethostnative_soft_restart/1,
gethostnative_debug_level/0, gethostnative_debug_level/1,
lookup_bad_search_option/1,
@@ -56,7 +57,7 @@ all() ->
[t_gethostbyaddr, t_gethostbyname, t_getaddr,
t_gethostbyaddr_v6, t_gethostbyname_v6, t_getaddr_v6,
ipv4_to_ipv6, host_and_addr, {group, parse},
- t_gethostnative, gethostnative_parallell, cname_loop,
+ t_gethostnative, gethostnative_parallell, cname_loop, missing_hosts_reload,
gethostnative_debug_level, gethostnative_soft_restart,
lookup_bad_search_option,
getif, getif_ifr_name_overflow, getservbyname_overflow,
@@ -840,6 +841,32 @@ cname_loop(Config) when is_list(Config) ->
ok.
+%% Test that the hosts file gets reloaded correctly in the case when it
+%% was missing during the initial startup
+missing_hosts_reload(Config) when is_list(Config) ->
+ RootDir = proplists:get_value(priv_dir,Config),
+ HostsFile = filename:join(RootDir, atom_to_list(?MODULE) ++ ".hosts"),
+ InetRc = filename:join(RootDir, "inetrc"),
+ ok = file:write_file(InetRc, "{hosts_file, \"" ++ HostsFile ++ "\"}.\n"),
+ {error, enoent} = file:read_file_info(HostsFile),
+ % start a node
+ Pa = filename:dirname(code:which(?MODULE)),
+ {ok, TestNode} = test_server:start_node(?MODULE, slave,
+ [{args, "-pa " ++ Pa ++ " -kernel inetrc '\"" ++ InetRc ++ "\"'"}]),
+ % ensure it has our RC
+ Rc = rpc:call(TestNode, inet_db, get_rc, []),
+ {hosts_file, HostsFile} = lists:keyfind(hosts_file, 1, Rc),
+ % ensure it does not resolve
+ {error, nxdomain} = rpc:call(TestNode, inet_hosts, gethostbyname, ["somehost"]),
+ % write hosts file
+ ok = file:write_file(HostsFile, "1.2.3.4 somehost"),
+ % wait for cached timestamp to expire
+ timer:sleep(?RES_FILE_UPDATE_TM * 1000 + 100),
+ % ensure it DOES resolve
+ {ok,{hostent,"somehost",[],inet,4,[{1,2,3,4}]}} =
+ rpc:call(TestNode, inet_hosts, gethostbyname, ["somehost"]),
+ % cleanup
+ true = test_server:stop_node(TestNode).
%% These must be run in the whole suite since they need
%% the host list and require inet_gethost_native to be started.
diff --git a/lib/kernel/test/kernel.spec b/lib/kernel/test/kernel.spec
index 62afc9f97b..eaa17f3a59 100644
--- a/lib/kernel/test/kernel.spec
+++ b/lib/kernel/test/kernel.spec
@@ -2,3 +2,4 @@
{config, "../test_server/ts.unix.config"}.
{suites,"../kernel_test", all}.
+{skip_suites,"../kernel_test",[logger_stress_SUITE],"Benchmarks only"}.
diff --git a/lib/kernel/test/kernel_config_SUITE.erl b/lib/kernel/test/kernel_config_SUITE.erl
index 9207025a2c..57c44c2498 100644
--- a/lib/kernel/test/kernel_config_SUITE.erl
+++ b/lib/kernel/test/kernel_config_SUITE.erl
@@ -21,7 +21,8 @@
-include_lib("common_test/include/ct.hrl").
--export([all/0, suite/0,groups/0,init_per_group/2,end_per_group/2, sync/1]).
+-export([all/0, suite/0,groups/0,init_per_group/2,end_per_group/2,
+ start_distribution_false/1, sync/1]).
-export([init_per_suite/1, end_per_suite/1]).
@@ -30,7 +31,7 @@ suite() ->
{timetrap,{minutes,2}}].
all() ->
- [sync].
+ [sync, start_distribution_false].
groups() ->
[].
@@ -59,12 +60,9 @@ from(H, [H | T]) -> T;
from(H, [_ | T]) -> from(H, T);
from(_, []) -> [].
-%%-----------------------------------------------------------------
-%% Test suite for sync_nodes. This is quite tricky.
-%%
+%% Test sync_nodes. This is quite tricky.
%% Should be started in a CC view with:
%% erl -sname XXX where XX not in [cp1, cp2]
-%%-----------------------------------------------------------------
sync(Conf) when is_list(Conf) ->
%% Write a config file
Dir = proplists:get_value(priv_dir,Conf),
@@ -106,9 +104,18 @@ wait_for_node(Node) ->
_Other -> wait_for_node(Node)
end.
-
stop_node(Node) ->
M = list_to_atom(lists:concat([Node,
[$@],
from($@,atom_to_list(node()))])),
rpc:cast(M, erlang, halt, []).
+
+start_distribution_false(Config) when is_list(Config) ->
+ %% When distribution is disabled, -sname/-name has no effect
+ Str = os:cmd(ct:get_progname()
+ ++ " -kernel start_distribution false"
+ ++ " -sname no_distribution"
+ ++ " -eval \"erlang:display(node())\""
+ ++ " -noshell -s erlang halt"),
+ "'nonode@nohost'" ++ _ = Str,
+ ok.
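The new test exercises the kernel parameter start_distribution; the same effect can be configured statically. A sketch of a sys.config entry (assuming the parameter is set at boot rather than on the command line as in the test):

    %% sys.config sketch: boot without starting distribution, so node()
    %% stays 'nonode@nohost' even if -sname/-name is given.
    [{kernel, [{start_distribution, false}]}].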
diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl
index 2dad651f9c..035e5d8974 100644
--- a/lib/kernel/test/logger_SUITE.erl
+++ b/lib/kernel/test/logger_SUITE.erl
@@ -899,14 +899,14 @@ process_metadata(_Config) ->
undefined = logger:get_process_metadata(),
{error,badarg} = ?TRY(logger:set_process_metadata(bad)),
ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
- Time = erlang:system_time(microsecond),
+ Time = logger:timestamp(),
ProcMeta = #{time=>Time,line=>0,custom=>proc},
ok = logger:set_process_metadata(ProcMeta),
S1 = ?str,
?LOG_NOTICE(S1,#{custom=>macro}),
check_logged(notice,S1,#{time=>Time,line=>0,custom=>macro}),
- Time2 = erlang:system_time(microsecond),
+ Time2 = logger:timestamp(),
S2 = ?str,
?LOG_NOTICE(S2,#{time=>Time2,line=>1,custom=>macro}),
check_logged(notice,S2,#{time=>Time2,line=>1,custom=>macro}),
@@ -1048,8 +1048,11 @@ kernel_config(Config) ->
ok = rpc:call(Node,logger,internal_init_logger,[]),
ok = rpc:call(Node,logger,add_handlers,[kernel]),
#{primary:=#{filter_default:=log,filters:=[]},
- handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F}}}],
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=Modes}}],
module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+ [append,delayed_write,raw] = lists:sort(Modes),
+
%% Same, but using 'logger' parameter instead of 'error_logger'
ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
@@ -1060,26 +1063,27 @@ kernel_config(Config) ->
ok = rpc:call(Node,logger,internal_init_logger,[]),
ok = rpc:call(Node,logger,add_handlers,[kernel]),
#{primary:=#{filter_default:=log,filters:=[]},
- handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F}}}],
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=Modes}}],
module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
%% Same, but with type={file,File,Modes}
ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
- M = [raw,write,delayed_write],
+ M = [raw,write],
ok = rpc:call(Node,application,set_env,[kernel,logger,
[{handler,default,logger_std_h,
#{config=>#{type=>{file,F,M}}}}]]),
ok = rpc:call(Node,logger,internal_init_logger,[]),
ok = rpc:call(Node,logger,add_handlers,[kernel]),
#{primary:=#{filter_default:=log,filters:=[]},
- handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F,M}}}],
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=[delayed_write|M]}}],
module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
%% Same, but with disk_log handler
ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
- M = [raw,write,delayed_write],
ok = rpc:call(Node,application,set_env,[kernel,logger,
[{handler,default,logger_disk_log_h,
#{config=>#{file=>F}}}]]),
diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl
index 8c13f0f908..83e3e6c40a 100644
--- a/lib/kernel/test/logger_formatter_SUITE.erl
+++ b/lib/kernel/test/logger_formatter_SUITE.erl
@@ -867,7 +867,7 @@ my_try(Fun) ->
try Fun() catch C:R:S -> {C,R,hd(S)} end.
timestamp() ->
- erlang:system_time(microsecond).
+ logger:timestamp().
%% necessary?
add_time(#{time:=_}=Meta) ->
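Both suites above now use logger:timestamp() instead of erlang:system_time(microsecond) when an explicit time is needed in log metadata. A minimal sketch, assuming kernel/include/logger.hrl is included (the message text is arbitrary):

    %% Sketch: attach an explicit timestamp to a log event's metadata.
    Time = logger:timestamp(),
    ?LOG_NOTICE("something happened", #{time => Time}).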
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
index b2c2c8ba67..0c5516f82b 100644
--- a/lib/kernel/test/logger_std_h_SUITE.erl
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -112,6 +112,8 @@ all() ->
add_remove_instance_standard_error,
add_remove_instance_file1,
add_remove_instance_file2,
+ add_remove_instance_file3,
+ add_remove_instance_file4,
default_formatter,
filter_config,
errors,
@@ -142,7 +144,12 @@ all() ->
restart_after,
handler_requests_under_load,
recreate_deleted_log,
- reopen_changed_log
+ reopen_changed_log,
+ rotate_size,
+ rotate_size_compressed,
+ rotate_size_reopen,
+ rotation_opts,
+ rotation_opts_restart_handler
].
add_remove_instance_tty(_Config) ->
@@ -179,10 +186,27 @@ add_remove_instance_file2(Config) ->
add_remove_instance_file2(cleanup,_Config) ->
logger_std_h_remove().
-add_remove_instance_file(Log, Type) ->
+add_remove_instance_file3(_Config) ->
+ Log = atom_to_list(?MODULE),
+ StdHConfig = #{type=>file},
+ add_remove_instance_file(Log, StdHConfig).
+add_remove_instance_file3(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file4(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog4.txt"),
+ StdHConfig = #{file=>Log,modes=>[]},
+ add_remove_instance_file(Log, StdHConfig).
+add_remove_instance_file4(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file(Log, Type) when not is_map(Type) ->
+ add_remove_instance_file(Log,#{type=>Type});
+add_remove_instance_file(Log, StdHConfig) when is_map(StdHConfig) ->
ok = logger:add_handler(?MODULE,
logger_std_h,
- #{config => #{type => Type},
+ #{config => StdHConfig,
filter_default=>stop,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
@@ -257,9 +281,10 @@ errors(Config) ->
end,
{error,
- {handler_not_added,{open_failed,Log,_}}} =
+ {handler_not_added,
+ {invalid_config,logger_std_h,#{modes:=bad_file_opt}}}} =
logger:add_handler(myh3,logger_std_h,
- #{config=>#{type=>{file,Log,[bad_file_opt]}}}),
+ #{config=>#{type=>{file,Log,bad_file_opt}}}),
ok = logger:notice(?msg).
@@ -607,24 +632,51 @@ reconfig(cleanup, _Config) ->
file_opts(Config) ->
Dir = ?config(priv_dir,Config),
Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
- BadFileOpts = [raw],
- BadType = {file,Log,BadFileOpts},
- {error,{handler_not_added,{open_failed,Log,enoent}}} =
- logger:add_handler(?MODULE, logger_std_h,
- #{config => #{type => BadType}}),
+ MissingOpts = [raw],
+ Type1 = {file,Log,MissingOpts},
+ ok = logger:add_handler(?MODULE, logger_std_h,
+ #{config => #{type => Type1}}),
+ {ok,#{config:=#{type:=file,file:=Log,modes:=Modes1}}} =
+ logger:get_handler_config(?MODULE),
+ [append,delayed_write,raw] = lists:sort(Modes1),
+ ok = logger:remove_handler(?MODULE),
OkFileOpts = [raw,append],
OkType = {file,Log,OkFileOpts},
ok = logger:add_handler(?MODULE,
logger_std_h,
- #{config => #{type => OkType},
+ #{config => #{type => OkType}, % old format
filter_default=>log,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
- #{cb_state := #{handler_state := #{type := OkType}}} =
+ ModOpts = [delayed_write|OkFileOpts],
+ #{cb_state := #{handler_state := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
logger_olp:info(h_proc_name()),
- {ok,#{config := #{type := OkType}}} = logger:get_handler_config(?MODULE),
+ {ok,#{config := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => file,
+ file => Log,
+ modes => OkFileOpts}, % new format
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ #{cb_state := #{handler_state := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
+ logger:get_handler_config(?MODULE),
logger:notice(M1=?msg,?domain),
?check(M1),
B1 = ?bin(M1),
@@ -640,13 +692,14 @@ sync(Config) ->
Type = {file,Log},
ok = logger:add_handler(?MODULE,
logger_std_h,
- #{config => #{type => Type},
+ #{config => #{type => Type,
+ file_check => 10000},
filter_default=>log,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,nl}}),
%% check repeated filesync happens
- start_tracer([{logger_std_h, write_to_dev, 5},
+ start_tracer([{logger_std_h, write_to_dev, 2},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"first\n">>},
{file,datasync}]),
@@ -656,7 +709,7 @@ sync(Config) ->
check_tracer(filesync_rep_int()*2),
%% check that explicit filesync is only done once
- start_tracer([{logger_std_h, write_to_dev, 5},
+ start_tracer([{logger_std_h, write_to_dev, 2},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"second\n">>},
{file,datasync},
@@ -675,7 +728,7 @@ sync(Config) ->
#{filesync_repeat_interval => no_repeat}),
no_repeat = maps:get(filesync_repeat_interval,
maps:get(cb_state, logger_olp:info(h_proc_name()))),
- start_tracer([{logger_std_h, write_to_dev, 5},
+ start_tracer([{logger_std_h, write_to_dev, 2},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"third\n">>},
{file,datasync},
@@ -1285,6 +1338,331 @@ reopen_changed_log(Config) ->
reopen_changed_log(cleanup, _Config) ->
ok = stop_handler(?MODULE).
+rotate_size(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,#{config=>#{max_no_bytes=>1000,
+ max_no_files=>2}}),
+
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+
+ logger:notice(Str,?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,51)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ logger:notice("bbbb",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1005}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ ok.
+rotate_size(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotate_size_compressed(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,
+ #{config=>#{max_no_bytes=>1000,
+ max_no_files=>2,
+ compress_on_rotate=>true}}),
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {error,enoent} = file:read_file_info(Log++".0.gz"),
+
+ logger:notice(Str,?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".1.gz"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,51)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ logger:notice("bbbb",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=38}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ ok.
+rotate_size_compressed(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotate_size_reopen(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,#{config=>#{max_no_bytes=>1000,
+ max_no_files=>2}}),
+
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,40)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=800}} = file:read_file_info(Log),
+
+ {ok,HConfig} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(?MODULE,maps:get(module,HConfig),HConfig),
+ {ok,#file_info{size=800}} = file:read_file_info(Log),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,40)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=580}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ ok.
+rotate_size_reopen(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotation_opts(Config) ->
+ {Log,_HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ #{max_no_bytes:=infinity,
+ max_no_files:=0,
+ compress_on_rotate:=false} = StdHConfig,
+
+ %% Test bad rotation config
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,#{max_no_bytes=>0}),
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,#{max_no_files=>infinity}),
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,
+ #{compress_on_rotate=>undefined}),
+
+
+ %% Test good rotation config - start with no rotation
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=200}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+ %% Turn on rotation, check that existing file is rotated since its
+ %% size exceeds max_no_bytes
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ Log0 = Log++".0",
+ {ok,#file_info{size=200}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,13)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Extend size and count and check that nothing changes with existing files
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>200,
+ max_no_files=>3}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Add more log events and see that extended size and count works
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=220}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".2"),
+ [_,_,_] = filelib:wildcard(Log++".*"),
+
+ %% Reduce count and check that archive files that exceed the new
+    %% count are removed
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_files=>1}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=220}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Extend size and count again, and turn on compression. Check
+ %% that archives are compressed
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2,
+ compress_on_rotate=>true}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ Log0gz = Log0++".gz",
+ {ok,#file_info{size=29}} = file:read_file_info(Log0gz),
+ [Log0gz] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,13)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=29}} = file:read_file_info(Log0gz),
+ {ok,#file_info{size=29}} = file:read_file_info(Log++".1.gz"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Reduce count and turn off compression. Check that archives that
+    %% exceed the new count are removed, and the rest are
+ %% uncompressed.
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_files=>1,
+ compress_on_rotate=>false}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Check that config and handler state agree on the current rotation settings
+ {ok,#{config:=#{max_no_bytes:=100,
+ max_no_files:=1,
+ compress_on_rotate:=false}}} =
+ logger:get_handler_config(?MODULE),
+ #{cb_state:=#{handler_state:=#{max_no_bytes:=100,
+ max_no_files:=1,
+ compress_on_rotate:=false}}} =
+ logger_olp:info(h_proc_name()),
+ ok.
+rotation_opts(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotation_opts_restart_handler(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2}),
+
+ %% Fill all logs
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,15)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=60}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Stop/start handler and turn off rotation. Check that archives are removed.
+ {ok,#{config:=StdHConfig1}=HConfig1} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig1#{config=>StdHConfig1#{max_no_bytes=>infinity}}),
+ timer:sleep(100),
+ {ok,#file_info{size=60}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+ %% Add some log events and check that file is no longer rotated.
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=260}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+    %% Stop/start handler and turn on rotation. Check that the file is rotated.
+ {ok,#{config:=StdHConfig2}=HConfig2} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig2#{config=>StdHConfig2#{max_no_bytes=>100,
+ max_no_files=>2}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=260}} = file:read_file_info(Log++".0"),
+ [_] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=80}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=260}} = file:read_file_info(Log++".1"),
+
+ %% Stop/start handler, reduce count and turn on compression. Check
+ %% that excess archives are removed, and the rest compressed.
+ {ok,#{config:=StdHConfig3}=HConfig3} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig3#{config=>StdHConfig3#{max_no_bytes=>75,
+ max_no_files=>1,
+ compress_on_rotate=>true}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=29}} = file:read_file_info(Log++".0.gz"),
+ [_] = filelib:wildcard(Log++".*"),
+
+    %% Stop/start handler and turn off compression. Check that archives
+ %% are decompressed.
+ {ok,#{config:=StdHConfig4}=HConfig4} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig4#{config=>StdHConfig4#{compress_on_rotate=>false}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=80}} = file:read_file_info(Log++".0"),
+ [_] = filelib:wildcard(Log++".*"),
+
+ ok.
+rotation_opts_restart_handler(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
%%%-----------------------------------------------------------------
%%%
send_requests(TO, Reqs = [{Mod,Func,Args,Res}|Rs]) ->
@@ -1305,8 +1683,8 @@ start_handler(Name, TTY, _Config) when TTY == standard_io;
ok = logger:add_handler(Name,
logger_std_h,
#{config => #{type => TTY},
- filter_default=>log,
- filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ filter_default=>stop,
+ filters=>filter_only_this_domain(Name),
formatter=>{?MODULE,op}}),
{ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name),
{HConfig,StdHConfig};
@@ -1320,12 +1698,17 @@ start_handler(Name, FuncName, Config) ->
ok = logger:add_handler(Name,
logger_std_h,
#{config => #{type => Type},
- filter_default=>log,
- filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ filter_default=>stop,
+ filters=>filter_only_this_domain(Name),
formatter=>{?MODULE,op}}),
{ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name),
{Log,HConfig,StdHConfig}.
+
+filter_only_this_domain(Name) ->
+ [{remote_gl,{fun logger_filters:remote_gl/2,stop}},
+ {domain,{fun logger_filters:domain/2,{log,super,[Name]}}}].
+
stop_handler(Name) ->
R = logger:remove_handler(Name),
ct:pal("Handler ~p stopped! Result: ~p", [Name,R]),
@@ -1658,7 +2041,7 @@ tpl([]) ->
tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]}},
{Pid,[{Mod,Func,Op}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
-tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[_,Data,_,_,_]}},
+tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[Data,_]}},
{Pid,[{Mod,Func,Data}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Data});
tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) ->
@@ -1742,4 +2125,3 @@ filesync_rep_int() ->
file_delete(Log) ->
file:delete(Log).
-
diff --git a/lib/kernel/test/logger_stress_SUITE.erl b/lib/kernel/test/logger_stress_SUITE.erl
index 4072e8c86a..1a278fb1b2 100644
--- a/lib/kernel/test/logger_stress_SUITE.erl
+++ b/lib/kernel/test/logger_stress_SUITE.erl
@@ -67,6 +67,10 @@ all() ->
reject_events,
std_handler,
disk_log_handler,
+ std_handler_time,
+ std_handler_time_big,
+ disk_log_handler_time,
+ disk_log_handler_time_big,
emulator_events,
remote_events,
remote_to_disk_log,
@@ -123,6 +127,20 @@ std_handler(cleanup,_Config) ->
_ = file:delete("default.log"),
ok.
+%% Disable overload protection and just print a lot - measure time.
+%% The IOPS reported is the number of log events written per millisecond.
+std_handler_time(Config) ->
+ measure_handler_time(logger_std_h,#{type=>{file,"default.log"}},Config).
+std_handler_time(cleanup,_Config) ->
+ _ = file:delete("default.log"),
+ ok.
+
+std_handler_time_big(Config) ->
+ measure_handler_time_big(logger_std_h,#{type=>{file,"default.log"}},Config).
+std_handler_time_big(cleanup,_Config) ->
+ _ = file:delete("default.log"),
+ ok.
+
%% Cascading failure that produces gen_server and proc_lib reports -
%% how many of the produced log events are actually written to a log
%% with logger_disk_log_h wrap file handler.
@@ -140,6 +158,20 @@ disk_log_handler(cleanup,_Config) ->
[_ = file:delete(F) || F <- Files],
ok.
+%% Disable overload protection and just print a lot - measure time.
+%% The IOPS reported is the number of log events written per millisecond.
+disk_log_handler_time(Config) ->
+ measure_handler_time(logger_disk_log_h,#{type=>halt},Config).
+disk_log_handler_time(cleanup,_Config) ->
+ _ = file:delete("default"),
+ ok.
+
+disk_log_handler_time_big(Config) ->
+ measure_handler_time_big(logger_disk_log_h,#{type=>halt},Config).
+disk_log_handler_time_big(cleanup,_Config) ->
+ _ = file:delete("default"),
+ ok.
+
%% Cascading failure that produces log events from the emulator - how
%% many of the produced log events pass through the proxy.
emulator_events(Config) ->
@@ -221,6 +253,68 @@ remote_emulator_to_disk_log(cleanup,_Config) ->
%%%-----------------------------------------------------------------
%%% Internal functions
+measure_handler_time(Module,HCfg,Config) ->
+ measure_handler_time(Module,100000,small_fa(),millisecond,HCfg,#{},Config).
+
+measure_handler_time_big(Module,HCfg,Config) ->
+ FCfg = #{chars_limit=>4096, max_size=>1024},
+ measure_handler_time(Module,100,big_fa(),second,HCfg,FCfg,Config).
+
+measure_handler_time(Module,N,FA,Unit,HCfg,FCfg,Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(
+ Config,
+ [{logger,
+ [{handler,default,Module,
+ #{formatter => {logger_formatter,
+ maps:merge(#{legacy_header=>false,
+ single_line=>true},FCfg)},
+ config=>maps:merge(#{sync_mode_qlen => N+1,
+ drop_mode_qlen => N+1,
+ flush_qlen => N+1,
+ burst_limit_enable => false,
+ filesync_repeat_interval => no_repeat},
+ HCfg)}}]}]),
+ %% HPid = rpc:call(Node,erlang,whereis,[?name_to_reg_name(Module,default)]),
+ %% {links,[_,FCPid]} = rpc:call(Node,erlang,process_info,[HPid,links]),
+ T0 = erlang:monotonic_time(millisecond),
+ ok = rpc:call(Node,?MODULE,nlogs_wait,[N,FA,Module]),
+ %% ok = rpc:call(Node,fprof,apply,[fun ?MODULE:nlogs_wait/2,[N div 10,FA,Module],[{procs,[HPid,FCPid]}]]),
+ T1 = erlang:monotonic_time(millisecond),
+ T = T1-T0,
+ M = case Unit of
+ millisecond -> 1;
+ second -> 1000
+ end,
+ IOPS = M*N/T,
+    ct:pal("N: ~p~nT: ~p~nIOPS: ~.2f events per ~w",[N,T,IOPS,Unit]),
+ %% Stats = rpc:call(Node,logger_olp,info,[?name_to_reg_name(Module,default)]),
+ %% ct:pal("Stats: ~p",[Stats]),
+ ct_event:notify(#event{name = benchmark_data,
+ data = [{value,IOPS}]}),
+    {comment,io_lib:format("~.2f events written per ~w",[IOPS,Unit])}.
+
+nlogs_wait(N,{F,A},Module) ->
+ group_leader(whereis(user),self()),
+ [?LOG_NOTICE(F,A) || _ <- lists:seq(1,N)],
+ wait(Module).
+
+wait(Module) ->
+ case Module:filesync(default) of
+ {error,handler_busy} ->
+ wait(Module);
+ ok ->
+ ok
+ end.
+
+small_fa() ->
+ Str = "\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "[\\]^_`abcdefghijklmnopqr",
+ {"~ts",[Str]}.
+
+big_fa() ->
+ {"~p",[lists:duplicate(1048576,"a")]}.
+
nlogs(N) ->
group_leader(whereis(user),self()),
Str = "\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ"
diff --git a/lib/kernel/vsn.mk b/lib/kernel/vsn.mk
index 4b43c6ae9d..7bebe1ba70 100644
--- a/lib/kernel/vsn.mk
+++ b/lib/kernel/vsn.mk
@@ -1 +1 @@
-KERNEL_VSN = 6.2
+KERNEL_VSN = 6.3