27 files changed, 904 insertions, 625 deletions
diff --git a/erts/doc/src/erl_driver.xml b/erts/doc/src/erl_driver.xml index c790872fe4..e6c9905039 100644 --- a/erts/doc/src/erl_driver.xml +++ b/erts/doc/src/erl_driver.xml @@ -429,7 +429,7 @@ <taglist> <tag>Return types for driver callbacks</tag> <item> - <p>Rrewrite driver callback + <p>Rewrite driver callback <seealso marker="driver_entry#control"><c>control</c></seealso> to use return type <c>ErlDrvSSizeT</c> instead of <c>int</c>.</p> <p>Rewrite driver callback @@ -841,7 +841,7 @@ int suggested_stack_size;</code> <p>Thread options structure passed to <seealso marker="#erl_drv_thread_create"> <c>erl_drv_thread_create</c></seealso>. - The following fields exists:</p> + The following field exists:</p> <taglist> <tag><c>suggested_stack_size</c></tag> <item>A suggestion, in kilowords, on how large a stack to use. @@ -3220,6 +3220,6 @@ erl_drv_output_term(driver_mk_port(drvport), spec, sizeof(spec) / sizeof(spec[0] <seealso marker="erlang"><c>erlang(3)</c></seealso>, <seealso marker="kernel:erl_ddll"><c>erl_ddll(3)</c></seealso>, section <seealso marker="alt_dist">How to Implement an Alternative - Carrier for the Erlang Distribution></seealso> in the User's Guide</p> + Carrier for the Erlang Distribution</seealso> in the User's Guide</p> </section> </cref> diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c index bea7a0fe86..507cc989d2 100644 --- a/erts/emulator/beam/erl_message.c +++ b/erts/emulator/beam/erl_message.c @@ -379,7 +379,10 @@ queue_messages(Process* receiver, erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ); } - erts_proc_notify_new_message(receiver, receiver_locks); + if (last == &first->next) + erts_proc_notify_new_message(receiver, receiver_locks); + else + erts_proc_notify_new_sig(receiver, state, ERTS_PSFLG_ACTIVE); } static ERTS_INLINE diff --git a/erts/emulator/beam/erl_proc_sig_queue.c b/erts/emulator/beam/erl_proc_sig_queue.c index 5165cd22a5..6d81044c39 100644 --- a/erts/emulator/beam/erl_proc_sig_queue.c +++ b/erts/emulator/beam/erl_proc_sig_queue.c @@ -519,41 +519,42 @@ erts_aint32_t erts_enqueue_signals(Process *rp, ErtsMessage *first, return enqueue_signals(rp, first, last, last_next, num_msgs, in_state); } -static ERTS_INLINE void -ensure_dirty_proc_handled(Eterm pid, - erts_aint32_t state, - erts_aint32_t prio) +void +erts_make_dirty_proc_handled(Eterm pid, + erts_aint32_t state, + erts_aint32_t prio) { - if (state & (ERTS_PSFLG_DIRTY_RUNNING - | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { - Eterm *hp; - ErtsMessage *mp; - Process *sig_handler; + Eterm *hp; + ErtsMessage *mp; + Process *sig_handler; - if (prio < 0) - prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state); + ASSERT(state & (ERTS_PSFLG_DIRTY_RUNNING | + ERTS_PSFLG_DIRTY_RUNNING_SYS)); - switch (prio) { - case PRIORITY_MAX: - sig_handler = erts_dirty_process_signal_handler_max; - break; - case PRIORITY_HIGH: - sig_handler = erts_dirty_process_signal_handler_high; - break; - default: - sig_handler = erts_dirty_process_signal_handler; - break; - } + if (prio < 0) + prio = (int) ERTS_PSFLGS_GET_USR_PRIO(state); - /* Make sure signals are handled... */ - mp = erts_alloc_message(0, &hp); - erts_queue_message(sig_handler, 0, mp, pid, am_system); + switch (prio) { + case PRIORITY_MAX: + sig_handler = erts_dirty_process_signal_handler_max; + break; + case PRIORITY_HIGH: + sig_handler = erts_dirty_process_signal_handler_high; + break; + default: + sig_handler = erts_dirty_process_signal_handler; + break; } + + /* Make sure signals are handled... 
*/ + mp = erts_alloc_message(0, &hp); + erts_queue_message(sig_handler, 0, mp, pid, am_system); } static void check_push_msgq_len_offs_marker(Process *rp, ErtsSignal *sig); + static int proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op) { @@ -679,14 +680,7 @@ first_last_done: sig_enqueue_trace_cleanup(first, sig, last); } - if (!(state & (ERTS_PSFLG_EXITING - | ERTS_PSFLG_ACTIVE_SYS - | ERTS_PSFLG_SIG_IN_Q))) { - /* Schedule process... */ - state = erts_proc_sys_schedule(rp, state, 0); - } - - ensure_dirty_proc_handled(rp->common.id, state, -1); + erts_proc_notify_new_sig(rp, state, 0); if (!is_normal_sched) erts_proc_dec_refc(rp); @@ -742,7 +736,10 @@ maybe_elevate_sig_handling_prio(Process *c_p, Eterm other) if (res) { /* ensure handled if dirty executing... */ state = erts_atomic32_read_nob(&rp->state); - ensure_dirty_proc_handled(other, state, my_prio); + if (state & (ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { + erts_make_dirty_proc_handled(other, state, my_prio); + } } } } diff --git a/erts/emulator/beam/erl_proc_sig_queue.h b/erts/emulator/beam/erl_proc_sig_queue.h index 8b7cd35f61..8edd277309 100644 --- a/erts/emulator/beam/erl_proc_sig_queue.h +++ b/erts/emulator/beam/erl_proc_sig_queue.h @@ -744,6 +744,13 @@ erts_enqueue_signals(Process *rp, ErtsMessage *first, void erts_proc_sig_send_pending(ErtsSchedulerData* esdp); +/* SVERK Doc me up! */ +ERTS_GLB_INLINE void erts_proc_notify_new_sig(Process* rp, erts_aint32_t state, + erts_aint32_t enable_flag); + +void erts_make_dirty_proc_handled(Eterm pid, erts_aint32_t state, + erts_aint32_t prio); + typedef struct { Uint size; @@ -879,6 +886,24 @@ erts_proc_sig_fetch(Process *proc) return res; } +ERTS_GLB_INLINE void +erts_proc_notify_new_sig(Process* rp, erts_aint32_t state, + erts_aint32_t enable_flag) +{ + if (~(state & (ERTS_PSFLG_EXITING + | ERTS_PSFLG_ACTIVE_SYS + | ERTS_PSFLG_SIG_IN_Q)) + | (~state & enable_flag)) { + /* Schedule process... */ + state = erts_proc_sys_schedule(rp, state, enable_flag); + } + + if (state & (ERTS_PSFLG_DIRTY_RUNNING + | ERTS_PSFLG_DIRTY_RUNNING_SYS)) { + erts_make_dirty_proc_handled(rp->common.id, state, -1); + } +} + #endif /* ERTS_GLB_INLINE_INCL_FUNC_DEF */ #endif /* ERTS_PROC_SIG_QUEUE_H__ */ diff --git a/erts/emulator/nifs/unix/unix_prim_file.c b/erts/emulator/nifs/unix/unix_prim_file.c index 1637f9cb71..2b112dda76 100644 --- a/erts/emulator/nifs/unix/unix_prim_file.c +++ b/erts/emulator/nifs/unix/unix_prim_file.c @@ -512,8 +512,8 @@ int efile_sync(efile_data_t *d, int data_only) { } int efile_advise(efile_data_t *d, Sint64 offset, Sint64 length, enum efile_advise_t advise) { - efile_unix_t *u = (efile_unix_t*)d; #ifdef HAVE_POSIX_FADVISE + efile_unix_t *u = (efile_unix_t*)d; int p_advise; switch(advise) { diff --git a/erts/emulator/sys/unix/erl_unix_sys.h b/erts/emulator/sys/unix/erl_unix_sys.h index e367d565a7..10adf80875 100644 --- a/erts/emulator/sys/unix/erl_unix_sys.h +++ b/erts/emulator/sys/unix/erl_unix_sys.h @@ -264,7 +264,7 @@ erts_os_monotonic_time(void) ERTS_GLB_INLINE void erts_os_times(ErtsMonotonicTime *mtimep, ErtsSystemTime *stimep) { - return (*erts_sys_time_data__.r.o.os_times)(mtimep, stimep); + (*erts_sys_time_data__.r.o.os_times)(mtimep, stimep); } #endif /* ERTS_OS_TIMES_INLINE_FUNC_PTR_CALL__ */ diff --git a/lib/kernel/doc/src/kernel_app.xml b/lib/kernel/doc/src/kernel_app.xml index f96d946a5d..7894600c21 100644 --- a/lib/kernel/doc/src/kernel_app.xml +++ b/lib/kernel/doc/src/kernel_app.xml @@ -122,21 +122,6 @@ application. 
For more information about configuration parameters, see file <seealso marker="app"><c>app(4)</c></seealso>.</p> <taglist> - <tag><c>browser_cmd = string() | {M,F,A}</c></tag> - <item> - <p>When pressing the <em>Help</em> button in a tool such as Debugger, - the help text (an HTML file <c>File</c>) is by default - displayed in a Netscape browser, which is required to be - operational. This parameter can be used to change the command for - how to display the help text if another browser than Netscape - is preferred, or if another platform than Unix or Windows is - used.</p> - <p>If set to a string <c>Command</c>, the command - <c>"Command File"</c> is evaluated using - <seealso marker="os#cmd/1"><c>os:cmd/1</c></seealso>.</p> - <p>If set to a module-function-args tuple, <c>{M,F,A}</c>, - the call <c>apply(M,F,[File|A])</c> is evaluated.</p> - </item> <tag><c>distributed = [Distrib]</c></tag> <item> <p>Specifies which applications that are distributed and on which diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml index 2f7feb5eef..d901454e62 100644 --- a/lib/kernel/doc/src/logger.xml +++ b/lib/kernel/doc/src/logger.xml @@ -67,37 +67,86 @@ <datatype> <name name="metadata"/> <desc> - <p>Metadata associated with the message to be logged.</p> + <p>Metadata for the log event.</p> + <p>Logger adds the following metadata to each log event:</p> + <list> + <item><c>pid => self()</c></item> + <item><c>gl => group_leader()</c></item> + <item><c>time => erlang:monotonic_time(microsecond)</c></item> + </list> + <p>When a log macro is used, Logger also inserts location + information:</p> + <list> + <item><c>mfa => {?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY}</c></item> + <item><c>file => ?FILE</c></item> + <item><c>line => ?LINE</c></item> + </list> + <p>You can add custom metadata, either by specifying a map as + the last parameter to any of the log macros or the API + functions, or by setting process metadata + with <seealso marker="#set_process_metadata-1"> + <c>set_process_metadata/1</c></seealso> + or <seealso marker="#update_process_metadata-1"> + <c>update_process_metadata/1</c></seealso>.</p> + <p>Logger merges all the metadata maps before forwarding the + log event to the handlers. 
If the same keys occur, values + from the log call overwrites process metadata, which in turn + overwrites values set by Logger.</p> </desc> </datatype> <datatype> <name name="config"/> <desc> - <p></p> + <p>Configuration data for the logger part of Logger, or for a handler.</p> + <p>The following default values apply:</p> + <list> + <item><c>level => info</c></item> + <item><c>filter_default => log</c></item> + <item><c>filters => []</c></item> + <item><c>formatter => {logger_formatter,DefaultFormatterConfig</c>}</item> + </list> + <p>See the <seealso marker="logger_formatter#configuration"> + <c>logger_formatter(3)</c></seealso> manual page for + information about the default configuration for this + formatter.</p> </desc> </datatype> <datatype> <name name="handler_id"/> <desc> - <p></p> + <p>A unique identifier for a handler instance.</p> </desc> </datatype> <datatype> <name name="filter_id"/> <desc> - <p></p> + <p>A unique identifier for a filter.</p> </desc> </datatype> <datatype> <name name="filter"/> <desc> - <p></p> + <p>A filter which can be installed for logger or for a handler.</p> + </desc> + </datatype> + <datatype> + <name name="filter_arg"/> + <desc> + <p>The second argument to the filter fun.</p> </desc> </datatype> <datatype> <name name="filter_return"/> <desc> - <p></p> + <p>The return value from the filter fun.</p> + </desc> + </datatype> + <datatype> + <name name="timestamp"/> + <desc> + <p>A timestamp produced + with <seealso marker="erts:erlang#monotonic_time-1"> + <c>erlang:monotonic_time(microsecond)</c></seealso>.</p> </desc> </datatype> </datatypes> @@ -126,14 +175,10 @@ </list> <p>All macros expand to a call to logger, where <c>Level</c> is - taken from the macro name, and the following metadata is added, - or merged with the given <c>Metadata</c>:</p> - - <code> -#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY}, - file=>?FILE, - line=>?LINE} - </code> + taken from the macro name, and location data is added. See the + description of + the <seealso marker="#type-metadata"><c>metadata()</c></seealso> + type for more information about the location data.</p> <p>The call is wrapped in a case statement and will be evaluated only if <c>Level</c> is equal to or below the configured log @@ -267,7 +312,7 @@ <func> <name name="i" arity="0"/> - <fsummary>Get information about all logger configurations</fsummary> + <fsummary>Get all logger configurations</fsummary> <desc> <p>Same as <seealso marker="#i/1"><c>logger:i(term)</c></seealso></p> </desc> @@ -277,26 +322,29 @@ <name name="i" arity="1" clause_i="1"/> <name name="i" arity="1" clause_i="2"/> <name name="i" arity="1" clause_i="3"/> - <fsummary>Get information about all logger configurations</fsummary> + <fsummary>Get all logger configurations</fsummary> <desc> - <p>The <c>logger:i/1</c> function can be used to get all - current logger configuration. The way that the information - is returned depends on the <c><anno>Action</anno></c></p> + <p>Display or return all current logger configuration.</p> <taglist> - <tag>string</tag> - <item>Return the pretty printed current logger configuration - as iodata.</item> - <tag>term</tag> - <item>Return the current logger configuration as a term. The - format of this term may change inbetween releases. 
For a - stable format use <seealso marker="#get_handler_config/1"> + <tag><c><anno>Action</anno> = string</c></tag> + <item> + <p>Return the pretty printed current logger configuration + as iodata.</p> + </item> + <tag><c><anno>Action</anno> = term</c></tag> + <item> + <p>Return the current logger configuration as a term. The + format of this term may change inbetween releases. For a + stable format use <seealso marker="#get_handler_config/1"> <c>logger:get_handler_config/1</c></seealso> - and <seealso marker="#get_logger_config/0"> + and <seealso marker="#get_logger_config/0"> <c>logger:get_logger_config/0</c></seealso>. - The same as calling <c>logger:i()</c>.</item> - <tag>print</tag> - <item>Pretty print all the current logger configuration to - standard out. Example: + The same as calling <c>logger:i()</c>.</p> + </item> + <tag><c><anno>Action</anno> = print</c></tag> + <item> + <p>Pretty print all the current logger configuration to + standard out. Example:</p> <code><![CDATA[1> logger:i(print). Current logger configuration: Level: info @@ -339,6 +387,39 @@ Current logger configuration: <fsummary>Add a filter to the logger.</fsummary> <desc> <p>Add a filter to the logger.</p> + <p>The filter fun is called with the log event as the first + parameter, and the specified <c>filter_args()</c> as the + second parameter.</p> + <p>The return value of the fun specifies if a log event is to + be discarded or forwarded to the handlers:</p> + <taglist> + <tag><c>log()</c></tag> + <item> + <p>The filter <em>passed</em>. The next logger filter, if + any, is applied. If no more logger filters exist, the + log event is forwarded to the handler part of the + logger, where handler filters are applied.</p> + </item> + <tag><c>stop</c></tag> + <item> + <p>The filter <em>did not pass</em>, and the log event is + immediately discarded.</p> + </item> + <tag><c>ignore</c></tag> + <item> + <p>The filter has no knowledge of the log event. The next + logger filter, if any, is applied. If no more logger + filters exist, the value of the <c>filter_default</c> + configuration parameter for the logger specifies if the + log event shall be discarded or forwarded to the handler + part.</p> + </item> + </taglist> + <p>See section <seealso marker="logger_chapter#Filter"> + Filter</seealso> in the User's Guide for more information + about filters.</p> + <p>Some built-in filters exist. These are defined + in <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p> </desc> </func> @@ -347,6 +428,39 @@ Current logger configuration: <fsummary>Add a filter to the specified handler.</fsummary> <desc> <p>Add a filter to the specified handler.</p> + <p>The filter fun is called with the log event as the first + parameter, and the specified <c>filter_args()</c> as the + second parameter.</p> + <p>The return value of the fun specifies if a log event is to + be discarded or forwarded to the handler callback:</p> + <taglist> + <tag><c>log()</c></tag> + <item> + <p>The filter <em>passed</em>. The next handler filter, if + any, is applied. If no more filters exist for this + handler, the log event is forwarded to the handler + callback.</p> + </item> + <tag><c>stop</c></tag> + <item> + <p>The filter <em>did not pass</em>, and the log event is + immediately discarded.</p> + </item> + <tag><c>ignore</c></tag> + <item> + <p>The filter has no knowledge of the log event. The next + handler filter, if any, is applied. 
If no more filters + exist for this handler, the value of + the <c>filter_default</c> configuration parameter for + the handler specifies if the log event shall be + discarded or forwarded to the handler callback.</p> + </item> + </taglist> + <p>See + section <seealso marker="logger_chapter#Filter">Filter</seealso> + in the User's Guide for more information about filters.</p> + <p>Some built-in filters exist. These are defined in + <seealso marker="logger_filters"><c>logger_filters</c></seealso>.</p> </desc> </func> @@ -354,7 +468,8 @@ Current logger configuration: <name name="remove_logger_filter" arity="1"/> <fsummary>Remove a filter from the logger.</fsummary> <desc> - <p>Remove the filter with the specified identity from the logger.</p> + <p>Remove the filter identified + by <c><anno>FilterId</anno></c> from the logger.</p> </desc> </func> @@ -362,7 +477,9 @@ Current logger configuration: <name name="remove_handler_filter" arity="2"/> <fsummary>Remove a filter from the specified handler.</fsummary> <desc> - <p>Remove the filter with the specified identity from the given handler.</p> + <p>Remove the filter identified + by <c><anno>FilterId</anno></c> from the handler identified + by <c><anno>HandlerId</anno></c>.</p> </desc> </func> @@ -371,6 +488,9 @@ Current logger configuration: <fsummary>Add a handler with the given configuration.</fsummary> <desc> <p>Add a handler with the given configuration.</p> + <p><c><anno>HandlerId</anno></c> is a unique identifier which + must be used in all subsequent calls reffering to this + handler.</p> </desc> </func> @@ -378,7 +498,7 @@ Current logger configuration: <name name="remove_handler" arity="1"/> <fsummary>Remove the handler with the specified identity.</fsummary> <desc> - <p>Remove the handler with the specified identity.</p> + <p>Remove the handler identified by <c><anno>HandlerId</anno></c>.</p> </desc> </func> @@ -386,10 +506,37 @@ Current logger configuration: <name name="set_module_level" arity="2"/> <fsummary>Set the log level for the specified module.</fsummary> <desc> - <p>Set the log level for the specified module.</p> - <p>To change the logging level globally, use - <seealso marker="#set_logger_config/2"><c>logger:set_logger_config(level, Level)</c></seealso>. - </p> + <p>Set the log level for the + specified <c><anno>Module</anno></c>.</p> + <p>The log level for a module overrides the global log level + of the logger for log event originating from the module in + question. 
Notice, however, that it does not override the + level configuration for any handler.</p> + <p>For example: Assume that the global log level for the + logger is <c>info</c>, and there is one handler, <c>h1</c>, + with level <c>info</c> and one handler, <c>h2</c>, with + level <c>debug</c>.</p> + <p>With this configuration, no debug messages will be logged, + since they are all stopped by the global log level.</p> + <p>If the level for <c>mymodule</c> is set now set + to <c>debug</c>, then debug events from this module will be + logged by the handler <c>h2</c>, but not by + handler <c>h1</c>.</p> + <p>Debug events from other modules are still not logged.</p> + <p>To change the global log level for the logger, use + <seealso marker="#set_logger_config/2"> + <c>logger:set_logger_config(level,Level)</c></seealso>.</p> + <p>To change the log level for a handler, use + <seealso marker="#set_handler_config/3"> + <c>logger:set_handler_config(HandlerId,level,Level)</c></seealso>.</p> + <note> + <p>The originating module for a log event is only detected + if <c>mfa=>{Module,Function,Arity}</c> exists in the + metadata. When log macros are used, this association is + automatically added to all log events. If the logger API + is called directly, without using a macro, the logging + client must explicitly add this information.</p> + </note> </desc> </func> @@ -414,6 +561,8 @@ Current logger configuration: with <seealso marker="#get_logger_config-0"><c>get_logger_config/0</c> </seealso>, then merge in your added or updated associations before writing it back.</p> + <p>If a key is removed compared to the current configuration, + the default value is used.</p> </desc> </func> @@ -423,7 +572,7 @@ Current logger configuration: <desc> <p>Add or update configuration data for the logger. If the given <c><anno>Key</anno></c> already exists, its associated - value will be set to <c><anno>Value</anno></c>. If it + value will be changed to <c><anno>Value</anno></c>. If it doesn't exist, it will be added.</p> </desc> </func> @@ -440,6 +589,11 @@ Current logger configuration: with <seealso marker="#get_handler_config-1"><c>get_handler_config/1</c> </seealso>, then merge in your added or updated associations before writing it back.</p> + <p>If a key is removed compared to the current configuration, + and the key is know by Logger, the default value is used. If + it is a custom key, then it is up to the handler + implementation if the value is removed or a default value is + inserted.</p> </desc> </func> @@ -449,10 +603,10 @@ Current logger configuration: handler.</fsummary> <desc> <p>Add or update configuration data for the specified - handler. If the - given <c><anno>Key</anno></c> already exists, its associated - value will be set to <c><anno>Value</anno></c>. If it - doesn't exist, it will be added.</p> + handler. If the given <c><anno>Key</anno></c> already + exists, its associated value will be changed + to <c><anno>Value</anno></c>. If it doesn't exist, it will + be added.</p> </desc> </func> @@ -471,30 +625,36 @@ Current logger configuration: <name name="set_process_metadata" arity="1"/> <fsummary>Set metadata to use when logging from current process.</fsummary> <desc> - <p>Set metadata which <c>logger</c> automatically inserts in - all log events produced on the current process. 
Subsequent - calls will overwrite previous data set by this function.</p> - <p>When logging, location data produced by the log macros, - and/or metadata given as argument to the log call (API - function or macro), will be merged with the process - metadata. If the same keys occur, values from the metadata - argument to the log call will overwrite values in the - process metadata, which in turn will overwrite values from - the location data.</p> + <p>Set metadata which Logger shall automatically insert in + all log events produced on the current process.</p> + <p>Location data produced by the log macros, and/or metadata + given as argument to the log call (API function or macro), + are merged with the process metadata. If the same keys + occur, values from the metadata argument to the log call + overwrite values from the process metadata, which in turn + overwrite values from the location data.</p> + <p>Subsequent calls to this function overwrites previous data + set. To update existing data instead of overwriting it, + see <seealso marker="#update_process_metadata-1"> + <c>update_process_metadata/1</c></seealso>.</p> </desc> </func> <func> <name name="update_process_metadata" arity="1"/> - <fsummary>Update metadata to use when logging from current process.</fsummary> + <fsummary>Set or update metadata to use when logging from + current process.</fsummary> <desc> - <p>Update metadata to use when logging from current process</p> - <p>This function behaves as if it was implemented as follows:</p> + <p>Set or update metadata to use when logging from current + process</p> + <p>If process metadata exists for the current process, this + function behaves as if it was implemented as follows:</p> <code type="erl"> logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)) </code> <p>If no process metadata exists, the function behaves as - <seealso marker="#set_process_metadata-1"><c>set_process_metadata/1</c> + <seealso marker="#set_process_metadata-1"> + <c>set_process_metadata/1</c> </seealso>.</p> </desc> </func> @@ -505,7 +665,9 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)) <desc> <p>Retrieve data set with <seealso marker="#set_process_metadata-1"> - <c>set_process_metadata/1</c></seealso>.</p> + <c>set_process_metadata/1</c></seealso> or + <seealso marker="#update_process_metadata-1"> + <c>update_process_metadata/1</c></seealso>.</p> </desc> </func> @@ -515,12 +677,103 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(),Meta)) <desc> <p>Delete data set with <seealso marker="#set_process_metadata-1"> - <c>set_process_metadata/1</c></seealso>.</p> + <c>set_process_metadata/1</c></seealso> or + <seealso marker="#update_process_metadata-1"> + <c>update_process_metadata/1</c></seealso>.</p> + </desc> + </func> + + <func> + <name name="format_report" arity="1"/> + <fsummary>Convert a log message on report form to {Format,Args}.</fsummary> + <desc> + <p>Convert a log message on report form to <c>{Format,Args}</c>.</p> + <p>This is the default report callback used + by <seealso marker="logger_formatter"> + <c>logger_formatter</c></seealso> when no custom report + callback is found.</p> + <p>The function produces lines of <c>Key: Value</c> from + key-value lists. 
Strings are printed with <c>~ts</c> and + other terms with <c>~tp</c>.</p> + <p>If the <c><anno>Report</anno></c> is a map, it is + converted to a key-value list before formatting as such.</p> </desc> </func> </funcs> + <section> + <title>Callback Functions</title> + <p>The following functions are to be exported from a handler + callback module.</p> + </section> + + <funcs> + <func> + <name>Module:adding_handler(HandlerId,Config1) -> {ok,Config2} | {error,Reason}</name> + <fsummary>An instance of this handler is about to be added.</fsummary> + <type> + <v>HandlerId = + <seealso marker="#type-handler_id">handler_id()</seealso></v> + <v>Config1 = Config2 = + <seealso marker="#type-config">config()</seealso></v> + <v>Reason = term()</v> + </type> + <desc> + <p>This callback function is optional.</p> + <p>The function is called when an new handler is about to be + added, and the purpose is to verify the configuration and + initiate all resourced needed by the handler.</p> + <p>If everything succeeds, the callback function can add + possible default values or internal state values to the + configuration, and return the adjusted map + in <c>{ok,Config2}</c>.</p> + <p>If the configuration is faulty, or if the initiation fails, + the callback function must return <c>{error,Reason}</c>.</p> + </desc> + </func> + + <func> + <name>Module:removing_handler(HandlerId,Config) -> ok</name> + <fsummary>The given handler is about to be removed.</fsummary> + <type> + <v>HandlerId = + <seealso marker="#type-handler_id">handler_id()</seealso></v> + <v>Config = + <seealso marker="#type-config">config()</seealso></v> + </type> + <desc> + <p>This callback function is optional.</p> + <p>The function is called when a handler is about to be + removed, and the purpose is to release all resources used by + the handler. The return value is ignored by Logger.</p> + </desc> + </func> + + <func> + <name>Module:changing_config(HandlerId,Config1,Config2) -> {ok,Config3} | {error,Reason}</name> + <fsummary>The configuration for this handler is about to change.</fsummary> + <type> + <v>HandlerId = + <seealso marker="#type-handler_id">handler_id()</seealso></v> + <v>Config1 = Config2 = Config3 = + <seealso marker="#type-config">config()</seealso></v> + <v>Reason = term()</v> + </type> + <desc> + <p>This callback function is optional.</p> + <p>The function is called when the configuration for a handler + is about to change, and the purpose is to verify and act on + the new configuration.</p> + <p><c>Config1</c> is the existing configuration + and <c>Config2</c> is the new configuration.</p> + <p>If everything succeeds, the callback function must return a + possibly adjusted configuration in <c>{ok,Config3}</c>.</p> + <p>If the configuration is faulty, the callback function must + return <c>{error,Reason}</c>.</p> + </desc> + </func> + </funcs> </erlref> diff --git a/lib/kernel/doc/src/logger_filters.xml b/lib/kernel/doc/src/logger_filters.xml index d742391e35..c34ec7d14c 100644 --- a/lib/kernel/doc/src/logger_filters.xml +++ b/lib/kernel/doc/src/logger_filters.xml @@ -33,16 +33,20 @@ <file>logger_filters.xml</file> </header> <module>logger_filters</module> - <modulesummary>Filters to use with logger.</modulesummary> + <modulesummary>Filters to use with Logger.</modulesummary> <description> - <p>Filters to use with logger. All functions exported from this - module can be used as logger or handler + <p>All functions exported from this module can be used as logger + or handler filters. 
See <seealso marker="logger#add_logger_filter-2"> <c>logger:add_logger_filter/2</c></seealso> and <seealso marker="logger#add_handler_filter-3"> - <c>logger:add_handler_filter/3</c></seealso> - for more information about how filters are added.</p> + <c>logger:add_handler_filter/3</c></seealso> for more information + about how filters are added.</p> + <p>Filters are removed with <seealso marker="logger#remove_logger_filter-1"> + <c>logger:remove_logger_filter/1</c></seealso> + and <seealso marker="logger#remove_handler_filter-2"> + <c>logger:remove_handler_filter/2</c></seealso>.</p> </description> <funcs> diff --git a/lib/kernel/doc/src/logger_formatter.xml b/lib/kernel/doc/src/logger_formatter.xml index a0940100ee..7df4c88f40 100644 --- a/lib/kernel/doc/src/logger_formatter.xml +++ b/lib/kernel/doc/src/logger_formatter.xml @@ -33,43 +33,38 @@ <file>logger_formatter.xml</file> </header> <module>logger_formatter</module> - <modulesummary>Default formatter for the Logger application.</modulesummary> + <modulesummary>Default formatter for Logger.</modulesummary> <description> - <p>Default formatter for the Logger application.</p> - + <p>Each log handler has a configured formatter specified as a + module and a configuration term. The purpose of the formatter is + to translate the log events to a final printable string + (<c>unicode:chardata()</c>) which can be written to the output + device of the handler.</p> + <p><c>logger_formatter</c> is the default formatter used by + Logger.</p> </description> <section> <title>Configuration</title> - <p>The following configuration parameters can be set - for <c>logger_formatter</c>:</p> + <p>The configuration term for <c>logger_formatter</c> is a map, + and the following keys can be set as configuration + parameters:</p> <taglist> - <tag><c>single_line = boolean()</c></tag> - <item> - <p>If set to <c>true</c>, all newlines in the message are - replaced with <c>", "</c>, and whitespaces following - directly after newlines are removed. Note that newlines - added by the formatter template are not replaced.</p> - <p>Default is <c>true</c>.</p> - </item> - <tag><c>legacy_header = boolen()</c></tag> - <item> - <p>If set to <c>true</c> a header field is added to - logger_formatter's part of <c>Metadata</c>. The value of - this field is a string similar to the header created by the - old <c>error_logger</c> event handlers. It can be included - in the log event by adding the - tuple <c>{logger_formatter,header}</c> to the - template. See <seealso marker="#default_templates">Default - Templates</seealso> for more information</p> - <p>Default is <c>false</c>.</p> - </item> - <tag><c>report_cb = fun((logger:report()) -> {io:format(),[term()]})</c></tag> + <tag><c>chars_limit = pos_integer() | unlimited</c></tag> <item> - <p>A function with arity 1, - returning <c>{Format,Args}</c>. This function will replace - any <c>report_cb</c> found in metadata.</p> + <p>A positive integer representing the value of the option + with the same name to be used when calling + <seealso marker="stdlib:io_lib#format-3">io_lib:format/3</seealso>. + This value limits the total number of characters printed + for each log event. Notice that this is a soft limit. For a + hard truncation limit, see option <c>max_size</c>.</p> + <p>Default is <c>unlimited</c>.</p> + <note> + <p><c>chars_limit</c> has no effect on log messages on + string form. 
These are expected to be short, but can still + be truncated by the <c>max_size</c> parameter.</p> + </note> </item> <tag><c>depth = pos_integer() | unlimited</c></tag> <item> @@ -82,47 +77,79 @@ <seealso marker="stdlib:io#format-2">io:format/2,3</seealso> in STDLIB.</p> <p>Default is <c>unlimited</c>.</p> - </item> - <tag><c>chars_limit = pos_integer() | unlimited</c></tag> - <item> - <p>A positive integer representing the value of the option - with the same name to be used when calling - <seealso marker="stdlib:io_lib#format-3">io_lib:format/3</seealso>. - This value limits the total number of characters printed - for each log event. Note that this is a soft limit. For a - hard truncation limit, see option <c>max_size</c>.</p> - <p>Default is <c>unlimited</c>.</p> + <note> + <p><c>depth</c> has no effect on log messages on string + form. These are expected to be short, but can still be + truncated by the <c>max_size</c> parameter.</p> + </note> </item> <tag><c>max_size = pos_integer() | unlimited</c></tag> <item> - <p>A positive integer representing the absolute maximum size - a string returned from this formatter can have. If the + <p>A positive integer representing the absolute maximum size a + string returned from this formatter can have. If the formatted string is longer, after possibly being limited - by <c>depth</c> and/or <c>chars_limit</c>, it will be - truncated.</p> + by <c>chars_limit</c> or <c>depth</c>, it is truncated.</p> <p>Default is <c>unlimited</c>.</p> </item> + <tag><c>single_line = boolean()</c></tag> + <item> + <p>If set to <c>true</c>, all newlines in the message are + replaced with <c>", "</c>, and whitespaces following + directly after newlines are removed. Note that newlines + added by the <c>template</c> parameter are not replaced.</p> + <p>Default is <c>true</c>.</p> + </item> + <tag><c>legacy_header = boolen()</c></tag> + <item> + <p>If set to <c>true</c> a header field is added to + logger_formatter's part of <c>Metadata</c>. The value of + this field is a string similar to the header created by the + old <c>error_logger</c> event handlers. It can be included + in the log event by adding the + tuple <c>{logger_formatter,header}</c> to the template. See + section <seealso marker="#default_templates">Default + Templates</seealso> for more information.</p> + <p>Default is <c>false</c>.</p> + </item> + <tag><c>report_cb = fun((</c><seealso marker="logger#type-report"><c>logger:report()</c></seealso><c>) -> {</c><seealso marker="stdlib:io#type-format"><c>io:format()</c></seealso><c>,[term()]})</c></tag> + <item> + <p>A report callback is used by the formatter to transform log + messages on report form to a format string and + arguments. The report callback can be specified in the + metadata for the log event. If no report callback exist in + metadata, <c>logger_formatter</c> will + use <seealso marker="logger#format_report-1"> + <c>logger:format_report/1</c></seealso> as default + callback.</p> + <p>If this configuration parameter is set, it replaces both + the default report callback, and any report callback found + in metadata. That is, all reports are converted by this + configured function.</p> + <p>The value must be a function with arity 1, + returning <c>{Format,Args}</c>, and it will be called with a + report as only argument.</p> + </item> <tag><c>template = </c><seealso marker="#type-template"><c>template()</c></seealso></tag> <item> <p>The template is a list of atoms, tuples and strings. 
The atoms <c>level</c> or <c>msg</c>, are treated as placeholders for the severity level and the log message, repectively. Other atoms or tuples are interpreted as - placeholders for metadata, where atoms are expected to - match top level keys, and tuples represent paths to sub - keys in a nested map. For example the - tuple <c>{key1,key2}</c> will be replaced by the value of + placeholders for metadata, where atoms are expected to match + top level keys, and tuples represent paths to sub keys when + the metadata is a nested map. For example the + tuple <c>{key1,key2}</c> is replaced by the value of the <c>key2</c> field in the nested map below. The - atom <c>key1</c> on its own would be replaced by the - complete value of the <c>key1</c> field. The values are - converted to strings.</p> + atom <c>key1</c> on its own is replaced by the complete + value of the <c>key1</c> field. The values are converted to + strings.</p> <code> #{key1=>#{key2=>my_value, ...} ...}</code> - <p>Strings are printed literally.</p> + <p>Strings in the template are printed literally.</p> <p>The default template differs depending on the values of <c>legacy_header</c> and <c>single_line</c>. See <seealso marker="#default_templates">Default @@ -131,7 +158,8 @@ <tag><c>utc = boolean()</c></tag> <item> <p>If set to <c>true</c>, all dates are displayed in Universal - Coordinated Time. Default is <c>false</c>.</p> + Coordinated Time.</p> + <p>Default is <c>false</c>.</p> </item> </taglist> </section> @@ -139,38 +167,51 @@ <section> <marker id="default_templates"/> <title>Default templates</title> - <p>The default template when <c>legacy_header=true</c> is</p> - - <code>[{logger_formatter,header},"\n",msg,"\n"]</code> - - <p>which will cause log entries like this:</p> - - <code>=ERROR REPORT==== 29-Dec-2017::13:30:51.245123 === - process: <0.74.0> - exit_reason: "Something went wrong"</code> - - <p>Note that all eight levels might occur here, not - only <c>ERROR</c>, <c>WARNING</c> or <c>INFO</c>. And also that - micro seconds are added at the end of the timestamp.</p> - - <p>The default template when <c>single_line=true</c> is</p> - <code>[time," ",level,": ",msg,"\n"]</code> + <p>The default value for the <c>template</c> configuration + parameter depends on the value of <c>single_line</c> + and <c>legacy_header</c> as follows.</p> - <p>which will cause log entries like this:</p> + <p>The log event used in the examples is:</p> + <code> +?LOG_ERROR("name: ~p~nexit_reason: ~p",[my_reg_name,"It crashed"])</code> - <code>2017-12-29 13:31:49.640317 error: process: <0.74.0>, exit_reason: "Something went wrong"</code> + <taglist> + <tag><c>legacy_header=true</c></tag> + <item> + <p>Default template: <c>[{logger_formatter,header},"\n",msg,"\n"]</c></p> + + <p>Example log entry:</p> + <code> +=ERROR REPORT==== 29-Dec-2017::13:30:51.245123 === +name: my_reg_name +exit_reason: "It crashed"</code> + + <p>Notice that all eight levels might occur in the heading, + not only <c>ERROR</c>, <c>WARNING</c> or <c>INFO</c> as the + old <c>error_logger</c> produced. 
And microseconds are + added at the end of the timestamp.</p> + </item> - <p>The default template when both <c>legacy_header</c> and - <c>single_line</c> are set to false is:</p> + <tag><c>single_line=true</c></tag> + <item> + <p>Default template: <c>[time," ",level,": ",msg,"\n"]</c></p> - <code>[time," ",level,":\n",msg,"\n"]</code> + <p>Example log entry:</p> + <code>2017-12-29 13:31:49.640317 error: name: my_reg_name, exit_reason: "It crashed"</code> + </item> - <p>which will cause log entries like this:</p> + <tag><c>legacy_header=false, single_line=false</c></tag> + <item> + <p>Default template: <c>[time," ",level,":\n",msg,"\n"]</c></p> - <code>2017-12-29 13:32:25.191925 error: - process: <0.74.0> - exit_reason: "Something went wrong"</code> + <p>Example log entry:</p> + <code> +2017-12-29 13:32:25.191925 error: +name: my_reg_name +exit_reason: "It crashed"</code> + </item> + </taglist> </section> <datatypes> @@ -186,8 +227,22 @@ <name name="format" arity="2"/> <fsummary>Formats the given message.</fsummary> <desc> - <p>This the callback function to be called from handlers. It - formats the given messages.</p> + <p>This the formatter callback function to be called from + handlers. The log event is processed as follows:</p> + <list> + <item>If the message is on report form, it is converted to + <c>{Format,Args}</c> by calling the report + callback.</item> + <item>The size is limited according to the values of + configuration parameters <c>chars_limit</c> + and <c>depth</c>. Notice that this does not apply to + messages on string form.</item> + <item>The full log entry is composed according to + the <c>template</c>.</item> + <item>If the final string is too long, it is truncated + according to the value of configuration + parameter <c>max_size</c>.</item> + </list> </desc> </func> diff --git a/lib/kernel/src/erl_signal_handler.erl b/lib/kernel/src/erl_signal_handler.erl index 22f235d4e4..b76c2a217a 100644 --- a/lib/kernel/src/erl_signal_handler.erl +++ b/lib/kernel/src/erl_signal_handler.erl @@ -19,12 +19,21 @@ -module(erl_signal_handler). -behaviour(gen_event). --export([init/1, format_status/2, +-export([start/0, init/1, format_status/2, handle_event/2, handle_call/2, handle_info/2, terminate/2, code_change/3]). -record(state,{}). +start() -> + %% add signal handler + case whereis(erl_signal_server) of + %% in case of minimal mode + undefined -> ok; + _ -> + gen_event:add_handler(erl_signal_server, erl_signal_handler, []) + end. + init(_Args) -> {ok, #state{}}. diff --git a/lib/kernel/src/kernel.erl b/lib/kernel/src/kernel.erl index 20aa47f602..ae982c1741 100644 --- a/lib/kernel/src/kernel.erl +++ b/lib/kernel/src/kernel.erl @@ -32,13 +32,7 @@ start(_, []) -> case supervisor:start_link({local, kernel_sup}, kernel, []) of {ok, Pid} -> - %% add signal handler - case whereis(erl_signal_server) of - %% in case of minimal mode - undefined -> ok; - _ -> - ok = gen_event:add_handler(erl_signal_server, erl_signal_handler, []) - end, + ok = erl_signal_handler:start(), %% add error handler case logger:setup_standard_handler() of ok -> {ok, Pid, []}; diff --git a/lib/kernel/src/kernel_config.erl b/lib/kernel/src/kernel_config.erl index 535083ef27..c5ff1887c2 100644 --- a/lib/kernel/src/kernel_config.erl +++ b/lib/kernel/src/kernel_config.erl @@ -30,11 +30,8 @@ %%%----------------------------------------------------------------- %%% This module implements a process that configures the kernel %%% application. 
-%%% Its purpose is that in the init phase add an error_logger -%%% and when it dies (when the kernel application dies) deleting the -%%% previously installed error_logger. -%%% Also, this process waits for other nodes at startup, if -%%% specified. +%%% Its purpose is that in the init phase waits for other nodes at startup, +%%% if specified. %%%----------------------------------------------------------------- start_link() -> gen_server:start_link(kernel_config, [], []). diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl index 5562764204..98a9937111 100644 --- a/lib/kernel/src/logger.erl +++ b/lib/kernel/src/logger.erl @@ -60,27 +60,41 @@ %%%----------------------------------------------------------------- %%% Types --type log() :: #{level=>level(), - msg=>{io:format(),[term()]} | +-type log() :: #{level:=level(), + msg:={io:format(),[term()]} | {report,report()} | {string,unicode:chardata()}, - meta=>metadata()}. + meta:=metadata()}. -type level() :: emergency | alert | critical | error | warning | notice | info | debug. -type report() :: map() | [{atom(),term()}]. -type msg_fun() :: fun((term()) -> {io:format(),[term()]} | report() | unicode:chardata()). --type metadata() :: map(). - +-type metadata() :: #{pid => pid(), + gl => pid(), + time => timestamp(), + mfa => {module(),atom(),non_neg_integer()}, + file => file:filename(), + line => non_neg_integer(), + term() => term()}. +-type location() :: #{mfa := {module(),atom(),non_neg_integer()}, + file := file:filename(), + line := non_neg_integer()}. -type handler_id() :: atom(). -type filter_id() :: atom(). --type filter() :: {fun((log(),term()) -> filter_return()),term()}. +-type filter() :: {fun((log(),filter_arg()) -> filter_return()),filter_arg()}. +-type filter_arg() :: term(). -type filter_return() :: stop | ignore | log(). --type config() :: map(). +-type config() :: #{level => level(), + filter_default => log | stop, + filters => [{filter_id(),filter()}], + formatter => {module(),term()}, + term() => term()}. +-type timestamp() :: integer(). -export_type([log/0,level/0,report/0,msg_fun/0,metadata/0,config/0,handler_id/0, - filter_id/0,filter/0,filter_return/0]). + filter_id/0,filter/0,filter_arg/0,filter_return/0]). %%%----------------------------------------------------------------- %%% API @@ -185,24 +199,24 @@ allow(Level,Module) when ?IS_LEVEL(Level), is_atom(Module) -> -spec macro_log(Location,Level,StringOrReport) -> ok when - Location :: map(), + Location :: location(), Level :: level(), StringOrReport :: unicode:chardata() | report(). macro_log(Location,Level,StringOrReport) -> log_allowed(Location,Level,StringOrReport,#{}). -spec macro_log(Location,Level,StringOrReport,Meta) -> ok when - Location :: map(), + Location :: location(), Level :: level(), StringOrReport :: unicode:chardata() | report(), Meta :: metadata(); (Location,Level,Format,Args) -> ok when - Location :: map(), + Location :: location(), Level :: level(), Format :: io:format(), Args ::[term()]; (Location,Level,Fun,FunArgs) -> ok when - Location :: map(), + Location :: location(), Level :: level(), Fun :: msg_fun(), FunArgs :: term(). @@ -213,13 +227,13 @@ macro_log(Location,Level,FunOrFormat,Args) -> log_allowed(Location,Level,{FunOrFormat,Args},#{}). 
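The filter() and filter_return() types introduced above pair with the logger:add_logger_filter/2 behaviour described earlier in this patch. A minimal sketch of a conforming filter — the module name noisy_mod and the filter id are hypothetical — stops events originating from that module and leaves every other event to the filter_default setting:

    %% Filter fun matching filter(): first element is the fun, second
    %% element is the extra argument passed as the fun's second parameter.
    Filter = {fun(#{meta := #{mfa := {noisy_mod,_,_}}}, _Extra) ->
                      stop;             % discard events from noisy_mod
                 (_LogEvent, _Extra) ->
                      ignore            % let filter_default decide
              end,
              undefined},
    ok = logger:add_logger_filter(noisy_mod_filter, Filter).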
-spec macro_log(Location,Level,Format,Args,Meta) -> ok when - Location :: map(), + Location :: location(), Level :: level(), Format :: io:format(), Args ::[term()], Meta :: metadata(); (Location,Level,Fun,FunArgs,Meta) -> ok when - Location :: map(), + Location :: location(), Level :: level(), Fun :: msg_fun(), FunArgs :: term(), @@ -712,7 +726,7 @@ do_log_1(Level,Msg,Meta) -> end. -spec log_allowed(Location,Level,Msg,Meta) -> ok when - Location :: map(), + Location :: location() | #{}, Level :: level(), Msg :: {msg_fun(),term()} | {io:format(),[term()]} | diff --git a/lib/ssh/doc/src/notes.xml b/lib/ssh/doc/src/notes.xml index 067dcb0bec..d0ed674eee 100644 --- a/lib/ssh/doc/src/notes.xml +++ b/lib/ssh/doc/src/notes.xml @@ -509,6 +509,34 @@ </section> +<section><title>Ssh 4.4.2.4</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + Fix rare spurios shutdowns of ssh servers when receiveing + <c>{'EXIT',_,normal}</c> messages.</p> + <p> + Own Id: OTP-15018</p> + </item> + <item> + <p> + Host key hash erroneously calculated for clients + following draft-00 of RFC 4419, for example PuTTY</p> + <p> + Own Id: OTP-15064</p> + </item> + <item> + <p> + Renegotiation could fail in some states</p> + <p> + Own Id: OTP-15066</p> + </item> + </list> + </section> + +</section> <section><title>Ssh 4.4.2.3</title> <section><title>Fixed Bugs and Malfunctions</title> diff --git a/lib/ssh/doc/src/ssh.xml b/lib/ssh/doc/src/ssh.xml index 6aed525e8b..407956cc6f 100644 --- a/lib/ssh/doc/src/ssh.xml +++ b/lib/ssh/doc/src/ssh.xml @@ -762,6 +762,8 @@ <datatype> <name name="rekey_limit_common_option"/> + <name name="limit_bytes"/> + <name name="limit_time"/> <desc> <p>Sets the limit when rekeying is to be initiated. Both the max time and max amount of data could be configured: @@ -773,6 +775,10 @@ </list> <p>When a rekeying is done, both the timer and the byte counter are restarted. Defaults to one hour and one GByte.</p> + <p>If <c>Minutes</c> is set to <c>infinity</c>, no rekeying will ever occur due to that max time has passed. + Setting <c>Bytes</c> to <c>infinity</c> will inhibit rekeying after a certain amount of data has been transferred. + If the option value is set to <c>{infinity, infinity}</c>, no rekeying will be initiated. Note that rekeying initiated + by the peer will still be performed.</p> </desc> </datatype> diff --git a/lib/ssh/src/ssh.hrl b/lib/ssh/src/ssh.hrl index fc0a3786ac..2efd239aae 100644 --- a/lib/ssh/src/ssh.hrl +++ b/lib/ssh/src/ssh.hrl @@ -191,10 +191,13 @@ -type user_dir_common_option() :: {user_dir, false | string()}. -type profile_common_option() :: {profile, atom() }. -type max_idle_time_common_option() :: {idle_time, timeout()}. --type rekey_limit_common_option() :: {rekey_limit, Bytes::non_neg_integer() | - {Minutes::non_neg_integer(), Bytes::non_neg_integer()} +-type rekey_limit_common_option() :: {rekey_limit, Bytes::limit_bytes() | + {Minutes::limit_time(), Bytes::limit_bytes()} }. +-type limit_bytes() :: non_neg_integer() | infinity . % non_neg_integer due to compatibility +-type limit_time() :: pos_integer() | infinity . + -type key_cb_common_option() :: {key_cb, Module::atom() | {Module::atom(),Opts::[term()]} } . -type disconnectfun_common_option() :: {disconnectfun, fun((Reason::term()) -> void | any()) }. 
diff --git a/lib/ssh/src/ssh_connection_handler.erl b/lib/ssh/src/ssh_connection_handler.erl index dfdad769ed..f1ff3a70e2 100644 --- a/lib/ssh/src/ssh_connection_handler.erl +++ b/lib/ssh/src/ssh_connection_handler.erl @@ -71,7 +71,7 @@ -export([init_connection_handler/3, % proc_lib:spawn needs this init_ssh_record/3, % Export of this internal function % intended for low-level protocol test suites - renegotiate/1, renegotiate_data/1, alg/1 % Export intended for test cases + renegotiate/1, alg/1 % Export intended for test cases ]). -export([dbg_trace/3]). @@ -325,14 +325,7 @@ close(ConnectionHandler, ChannelId) -> ) -> ok. %% . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . renegotiate(ConnectionHandler) -> - cast(ConnectionHandler, renegotiate). - -%%-------------------------------------------------------------------- --spec renegotiate_data(connection_ref() - ) -> ok. -%% . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . -renegotiate_data(ConnectionHandler) -> - cast(ConnectionHandler, data_size). + cast(ConnectionHandler, force_renegotiate). %%-------------------------------------------------------------------- alg(ConnectionHandler) -> @@ -349,11 +342,6 @@ alg(ConnectionHandler) -> connection_state :: #connection{}, latest_channel_id = 0 :: non_neg_integer() | undefined, - idle_timer_ref :: undefined - | infinity - | reference(), - idle_timer_value = infinity :: infinity - | pos_integer(), transport_protocol :: atom() | undefined, % ex: tcp transport_cb :: atom() @@ -429,22 +417,17 @@ init([Role,Socket,Opts]) -> }, D = case Role of client -> - cache_init_idle_timer(D0); + D0; server -> Sups = ?GET_INTERNAL_OPT(supervisors, Opts), - cache_init_idle_timer( - D0#data{connection_state = - C#connection{cli_spec = ?GET_OPT(ssh_cli, Opts, {ssh_cli,[?GET_OPT(shell, Opts)]}), - exec = ?GET_OPT(exec, Opts), - system_supervisor = proplists:get_value(system_sup, Sups), - sub_system_supervisor = proplists:get_value(subsystem_sup, Sups), - connection_supervisor = proplists:get_value(connection_sup, Sups) - }}) + D0#data{connection_state = + C#connection{cli_spec = ?GET_OPT(ssh_cli, Opts, {ssh_cli,[?GET_OPT(shell, Opts)]}), + exec = ?GET_OPT(exec, Opts), + system_supervisor = proplists:get_value(system_sup, Sups), + sub_system_supervisor = proplists:get_value(subsystem_sup, Sups), + connection_supervisor = proplists:get_value(connection_sup, Sups) + }} end, - %% Start the renegotiation timers - {RekeyTimeout,_MaxSent} = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts), - timer:apply_after(RekeyTimeout, gen_statem, cast, [self(), renegotiate]), - timer:apply_after(?REKEY_DATA_TIMOUT, gen_statem, cast, [self(), data_size]), {ok, {hello,Role}, D}; {error,Error} -> @@ -559,10 +542,15 @@ renegotiation(_) -> false. #data{} ) -> gen_statem:event_handler_result(state_name()) . +-define(CONNECTION_MSG(Msg), + [{next_event, internal, prepare_next_packet}, + {next_event,internal,{conn_msg,Msg}}]). + %% . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . callback_mode() -> - handle_event_function. + [handle_event_function, + state_enter]. 
handle_event(_, _Event, {init_error,Error}=StateName, D) ->
@@ -1017,97 +1005,92 @@ handle_event(_, #ssh_msg_debug{} = Msg, _, D) ->
     debug_fun(Msg, D),
     keep_state_and_data;
 
-handle_event(internal, Msg=#ssh_msg_global_request{}, StateName, D) ->
-    handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_request_success{}, StateName, D) ->
-    handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_request_failure{}, StateName, D) ->
-    handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_open{}, StateName, D) ->
-    handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_open_confirmation{}, StateName, D) ->
-    handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_open_failure{}, StateName, D) ->
-    handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_window_adjust{}, StateName, D) ->
-    handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_data{}, StateName, D) ->
-    handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_extended_data{}, StateName, D) ->
-    handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_eof{}, StateName, D) ->
-    handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_close{}, {connected,server} = StateName, D) ->
-    handle_connection_msg(Msg, StateName, cache_request_idle_timer_check(D));
-
-handle_event(internal, Msg=#ssh_msg_channel_close{}, StateName, D) ->
-    handle_connection_msg(Msg, StateName, D);
+handle_event(internal, {conn_msg,Msg}, StateName, #data{starter = User,
+                                                        connection_state = Connection0,
+                                                        event_queue = Qev0} = D0) ->
+    Role = role(StateName),
+    Renegotiation = renegotiation(StateName),
+    try ssh_connection:handle_msg(Msg, Connection0, Role) of
+        {disconnect, Reason0, RepliesConn} ->
+            {Repls, D} = send_replies(RepliesConn, D0),
+            case {Reason0,Role} of
+                {{_, Reason}, client} when ((StateName =/= {connected,client})
+                                            and (not Renegotiation)) ->
+                    User ! {self(), not_connected, Reason};
+                _ ->
+                    ok
+            end,
+            {stop_and_reply, {shutdown,normal}, Repls, D};
 
-handle_event(internal, Msg=#ssh_msg_channel_request{}, StateName, D) ->
-    handle_connection_msg(Msg, StateName, D);
+        {Replies, Connection} when is_list(Replies) ->
+            {Repls, D} =
+                case StateName of
+                    {connected,_} ->
+                        send_replies(Replies, D0#data{connection_state=Connection});
+                    _ ->
+                        {ConnReplies, NonConnReplies} = lists:splitwith(fun not_connected_filter/1, Replies),
+                        send_replies(NonConnReplies, D0#data{event_queue = Qev0 ++ ConnReplies})
+                end,
+            case {Msg, StateName} of
+                {#ssh_msg_channel_close{}, {connected,_}} ->
+                    {keep_state, D, [cond_set_idle_timer(D)|Repls]};
+                {#ssh_msg_channel_success{}, _} ->
+                    update_inet_buffers(D#data.socket),
+                    {keep_state, D, Repls};
+                _ ->
+                    {keep_state, D, Repls}
+            end
 
-handle_event(internal, Msg=#ssh_msg_channel_success{}, StateName, D) ->
-    update_inet_buffers(D#data.socket),
-    handle_connection_msg(Msg, StateName, D);
+    catch
+        Class:Error ->
+            {Repls, D1} = send_replies(ssh_connection:handle_stop(Connection0), D0),
+            {Shutdown, D} = ?send_disconnect(?SSH_DISCONNECT_BY_APPLICATION,
+                                             io_lib:format("Internal error: ~p:~p",[Class,Error]),
+                                             StateName, D1),
+            {stop_and_reply, Shutdown, Repls, D}
+    end;
 
-handle_event(internal, Msg=#ssh_msg_channel_failure{}, StateName, D) ->
-    handle_connection_msg(Msg, StateName, D);
+handle_event(enter, _OldState, {connected,_}=State, D) ->
+    %% Entering the state where re-negotiation is possible
+    init_renegotiate_timers(State, D);
+
+handle_event(enter, _OldState, {ext_info,_,renegotiate}=State, D) ->
+    %% Could be hanging in the ext_info state if nothing else arrives
+    init_renegotiate_timers(State, D);
+
+handle_event(enter, {connected,_}, State, D) ->
+    %% Exiting the state where re-negotiation is possible
+    pause_renegotiate_timers(State, D);
+
+handle_event(cast, force_renegotiate, StateName, D) ->
+    handle_event({timeout,renegotiate}, undefined, StateName, D);
+
+handle_event({timeout,renegotiate}, _, StateName, D0) ->
+    case StateName of
+        {connected,Role} ->
+            start_rekeying(Role, D0);
+        {ext_info,Role,renegotiate} ->
+            start_rekeying(Role, D0);
+        _ ->
+            %% Wrong state for starting a renegotiation; a key exchange is already in progress
+            keep_state_and_data
+    end;
 
-handle_event(cast, renegotiate, {connected,Role}, D) ->
-    {KeyInitMsg, SshPacket, Ssh} = ssh_transport:key_exchange_init_msg(D#data.ssh_params),
-    send_bytes(SshPacket, D),
-    {RekeyTimeout,_MaxSent} = ?GET_OPT(rekey_limit, Ssh#ssh.opts),
-    timer:apply_after(RekeyTimeout, gen_statem, cast, [self(), renegotiate]),
-    {next_state, {kexinit,Role,renegotiate}, D#data{ssh_params = Ssh,
-                                                    key_exchange_init_msg = KeyInitMsg}};
+handle_event({timeout,check_data_size}, _, StateName, D0) ->
+    %% Rekey due to sent data limit reached? (Can't be in {ext_info,...} if data is sent)
+    case StateName of
+        {connected,Role} ->
+            check_data_rekeying(Role, D0);
+        _ ->
+            %% Wrong state for starting a renegotiation; a key exchange is already in progress
+            keep_state_and_data
+    end;
 
 handle_event({call,From}, get_alg, _, D) ->
     #ssh{algorithms=Algs} = D#data.ssh_params,
     {keep_state_and_data, [{reply,From,Algs}]};
 
-handle_event(cast, renegotiate, _, D) ->
-    %% Already in key-exchange so safe to ignore
-    {RekeyTimeout,_MaxSent} = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
-    timer:apply_after(RekeyTimeout, gen_statem, cast, [self(), renegotiate]),
-    keep_state_and_data;
-
-
-%% Rekey due to sent data limit reached?
-handle_event(cast, data_size, {connected,Role}, D) ->
-    {ok, [{send_oct,Sent0}]} = inet:getstat(D#data.socket, [send_oct]),
-    Sent = Sent0 - D#data.last_size_rekey,
-    {_RekeyTimeout,MaxSent} = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
-    timer:apply_after(?REKEY_DATA_TIMOUT, gen_statem, cast, [self(), data_size]),
-    case Sent >= MaxSent of
-        true ->
-            {KeyInitMsg, SshPacket, Ssh} =
-                ssh_transport:key_exchange_init_msg(D#data.ssh_params),
-            send_bytes(SshPacket, D),
-            {next_state, {kexinit,Role,renegotiate}, D#data{ssh_params = Ssh,
-                                                            key_exchange_init_msg = KeyInitMsg,
-                                                            last_size_rekey = Sent0}};
-        _ ->
-            keep_state_and_data
-    end;
-
-handle_event(cast, data_size, _, _) ->
-    %% Already in key-exchange so safe to ignore
-    timer:apply_after(?REKEY_DATA_TIMOUT, gen_statem, cast, [self(), data_size]), % FIXME: not here in original
-    keep_state_and_data;
-
-
-
 handle_event(cast, _, StateName, _) when not ?CONNECTED(StateName) ->
     {keep_state_and_data, [postpone]};
 
@@ -1221,7 +1204,7 @@ handle_event({call,From}, {request, ChannelPid, ChannelId, Type, Data, Timeout},
         D ->
             %% Note reply to channel will happen later when reply is received from peer on the socket
             start_channel_request_timer(ChannelId, From, Timeout),
-            {keep_state, cache_request_idle_timer_check(D)}
+            {keep_state, D, cond_set_idle_timer(D)}
     end;
 
 handle_event({call,From}, {request, ChannelId, Type, Data, Timeout}, StateName, D0)
@@ -1232,7 +1215,7 @@ handle_event({call,From}, {request, ChannelId, Type, Data, Timeout}, StateName,
         D ->
             %% Note reply to channel will happen later when reply is received from peer on the socket
             start_channel_request_timer(ChannelId, From, Timeout),
-            {keep_state, cache_request_idle_timer_check(D)}
+            {keep_state, D, cond_set_idle_timer(D)}
     end;
 
 handle_event({call,From}, {data, ChannelId, Type, Data, Timeout}, StateName, D0)
@@ -1273,7 +1256,7 @@ handle_event({call,From},
                       }),
     D = add_request(true, ChannelId, From, D2),
     start_channel_request_timer(ChannelId, From, Timeout),
-    {keep_state, cache_cancel_idle_timer(D)};
+    {keep_state, D, cond_set_idle_timer(D)};
 
 handle_event({call,From}, {send_window, ChannelId}, StateName, D)
   when ?CONNECTED(StateName) ->
@@ -1303,7 +1286,7 @@ handle_event({call,From}, {close, ChannelId}, StateName, D0)
         #channel{remote_id = Id} = Channel ->
             D1 = send_msg(ssh_connection:channel_close_msg(Id), D0),
             ssh_client_channel:cache_update(cache(D1), Channel#channel{sent_close = true}),
-            {keep_state, cache_request_idle_timer_check(D1), [{reply,From,ok}]};
+            {keep_state, D1, [cond_set_idle_timer(D1), {reply,From,ok}]};
         undefined ->
             {keep_state_and_data, [{reply,From,ok}]}
     end;
@@ -1319,6 +1302,7 @@ handle_event(info, {Proto, Sock, Info}, {hello,_}, #data{socket = Sock,
             {keep_state_and_data, [{next_event, internal, {info_line,Info}}]}
     end;
 
+
 handle_event(info, {Proto, Sock, NewData}, StateName, D0 = #data{socket = Sock,
                                                                  transport_protocol = Proto}) ->
     try ssh_transport:handle_packet_part(
@@ -1336,13 +1320,29 @@ handle_event(info, {Proto, Sock, NewData}, StateName, D0 = #data{socket = Sock,
             try
                 ssh_message:decode(set_kex_overload_prefix(DecryptedBytes,D1))
             of
-                Msg = #ssh_msg_kexinit{} ->
+                #ssh_msg_kexinit{} = Msg ->
                     {keep_state, D1, [{next_event, internal, prepare_next_packet},
                                       {next_event, internal, {Msg,DecryptedBytes}}
                                      ]};
+
+                #ssh_msg_global_request{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+                #ssh_msg_request_success{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+                #ssh_msg_request_failure{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+
#ssh_msg_channel_open{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)}; + #ssh_msg_channel_open_confirmation{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)}; + #ssh_msg_channel_open_failure{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)}; + #ssh_msg_channel_window_adjust{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)}; + #ssh_msg_channel_data{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)}; + #ssh_msg_channel_extended_data{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)}; + #ssh_msg_channel_eof{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)}; + #ssh_msg_channel_close{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)}; + #ssh_msg_channel_request{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)}; + #ssh_msg_channel_failure{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)}; + #ssh_msg_channel_success{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)}; + Msg -> {keep_state, D1, [{next_event, internal, prepare_next_packet}, - {next_event, internal, Msg} + {next_event, internal, Msg} ]} catch C:E -> @@ -1421,8 +1421,20 @@ handle_event(info, {timeout, {_, From} = Request}, _, end; %%% Handle that ssh channels user process goes down -handle_event(info, {'DOWN', _Ref, process, ChannelPid, _Reason}, _, D0) -> - {keep_state, handle_channel_down(ChannelPid, D0)}; +handle_event(info, {'DOWN', _Ref, process, ChannelPid, _Reason}, _, D) -> + Cache = cache(D), + ssh_client_channel:cache_foldl( + fun(#channel{user=U, + local_id=Id}, Acc) when U == ChannelPid -> + ssh_client_channel:cache_delete(Cache, Id), + Acc; + (_,Acc) -> + Acc + end, [], Cache), + {keep_state, D, cond_set_idle_timer(D)}; + +handle_event({timeout,idle_time}, _Data, _StateName, _D) -> + {stop, {shutdown, "Timeout"}}; %%% So that terminate will be run when supervisor is shutdown handle_event(info, {'EXIT', _Sup, Reason}, StateName, _) -> @@ -1442,7 +1454,7 @@ handle_event(info, {'EXIT', _Sup, Reason}, StateName, _) -> end; handle_event(info, check_cache, _, D) -> - {keep_state, cache_check_set_idle_timer(D)}; + {keep_state, D, cond_set_idle_timer(D)}; handle_event(info, UnexpectedMessage, StateName, D = #data{ssh_params = Ssh}) -> case unexpected_fun(UnexpectedMessage, D) of @@ -1489,6 +1501,11 @@ handle_event(internal, {send_disconnect,Code,DetailedText,Module,Line}, StateNam send_disconnect(Code, DetailedText, Module, Line, StateName, D0), {stop, Shutdown, D}; + +handle_event(enter, _OldState, State, D) -> + %% Just skip + {next_state, State, D}; + handle_event(_Type, _Msg, {ext_info,Role,_ReNegFlag}, D) -> %% If something else arrives, goto next state and handle the event in that one {next_state, {connected,Role}, D, [postpone]}; @@ -1746,46 +1763,6 @@ call(FsmPid, Event, Timeout) -> end. -handle_connection_msg(Msg, StateName, D0 = #data{starter = User, - connection_state = Connection0, - event_queue = Qev0}) -> - Renegotiation = renegotiation(StateName), - Role = role(StateName), - try ssh_connection:handle_msg(Msg, Connection0, Role) of - {disconnect, Reason0, RepliesConn} -> - {Repls, D} = send_replies(RepliesConn, D0), - case {Reason0,Role} of - {{_, Reason}, client} when ((StateName =/= {connected,client}) and (not Renegotiation)) -> - User ! 
{self(), not_connected, Reason}; - _ -> - ok - end, - {stop_and_reply, {shutdown,normal}, Repls, D}; - - {[], Connection} -> - {keep_state, D0#data{connection_state = Connection}}; - - {Replies, Connection} when is_list(Replies) -> - {Repls, D} = - case StateName of - {connected,_} -> - send_replies(Replies, D0#data{connection_state=Connection}); - _ -> - {ConnReplies, NonConnReplies} = lists:splitwith(fun not_connected_filter/1, Replies), - send_replies(NonConnReplies, D0#data{event_queue = Qev0 ++ ConnReplies}) - end, - {keep_state, D, Repls} - - catch - Class:Error -> - {Repls, D1} = send_replies(ssh_connection:handle_stop(Connection0), D0), - {Shutdown, D} = ?send_disconnect(?SSH_DISCONNECT_BY_APPLICATION, - io_lib:format("Internal error: ~p:~p",[Class,Error]), - StateName, D1), - {stop_and_reply, Shutdown, Repls, D} - end. - - set_kex_overload_prefix(Msg = <<?BYTE(Op),_/binary>>, #data{ssh_params=SshParams}) when Op == 30; Op == 31 @@ -1891,19 +1868,6 @@ handle_request(ChannelId, Type, Data, WantReply, From, D) -> end. %%%---------------------------------------------------------------- -handle_channel_down(ChannelPid, D) -> - Cache = cache(D), - ssh_client_channel:cache_foldl( - fun(#channel{user=U, - local_id=Id}, Acc) when U == ChannelPid -> - ssh_client_channel:cache_delete(Cache, Id), - Acc; - (_,Acc) -> - Acc - end, [], Cache), - cache_check_set_idle_timer(D). - - update_sys(Cache, Channel, Type, ChannelPid) -> ssh_client_channel:cache_update(Cache, Channel#channel{sys = Type, user = ChannelPid}). @@ -1922,6 +1886,42 @@ new_channel_id(#data{connection_state = #connection{channel_id_seed = Id} = {Id, State#data{connection_state = Connection#connection{channel_id_seed = Id + 1}}}. + +%%%---------------------------------------------------------------- +start_rekeying(Role, D0) -> + {KeyInitMsg, SshPacket, Ssh} = ssh_transport:key_exchange_init_msg(D0#data.ssh_params), + send_bytes(SshPacket, D0), + D = D0#data{ssh_params = Ssh, + key_exchange_init_msg = KeyInitMsg}, + {next_state, {kexinit,Role,renegotiate}, D}. + + +init_renegotiate_timers(State, D) -> + {RekeyTimeout,_MaxSent} = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts), + {next_state, State, D, [{{timeout,renegotiate}, RekeyTimeout, none}, + {{timeout,check_data_size}, ?REKEY_DATA_TIMOUT, none} ]}. + + +pause_renegotiate_timers(State, D) -> + {next_state, State, D, [{{timeout,renegotiate}, infinity, none}, + {{timeout,check_data_size}, infinity, none} ]}. + +check_data_rekeying(Role, D) -> + {ok, [{send_oct,SocketSentTotal}]} = inet:getstat(D#data.socket, [send_oct]), + SentSinceRekey = SocketSentTotal - D#data.last_size_rekey, + {_RekeyTimeout,MaxSent} = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts), + case check_data_rekeying_dbg(SentSinceRekey, MaxSent) of + true -> + start_rekeying(Role, D#data{last_size_rekey = SocketSentTotal}); + _ -> + %% Not enough data sent for a re-negotiation. Restart timer. + {keep_state, D, {{timeout,check_data_size}, ?REKEY_DATA_TIMOUT, none}} + end. + +check_data_rekeying_dbg(SentSinceRekey, MaxSent) -> + %% This function is for the ssh_dbg to trace on. See dbg_trace/3 at the end. + SentSinceRekey >= MaxSent. + %%%---------------------------------------------------------------- %%% This server/client has decided to disconnect via the state machine: %%% The unused arguments are for debugging. @@ -2134,60 +2134,12 @@ retry_fun(User, Reason, #data{ssh_params = #ssh{opts = Opts, %%% Cache idle timer that closes the connection if there are no %%% channels open for a while. 
-cache_init_idle_timer(D) -> - case ?GET_OPT(idle_time, (D#data.ssh_params)#ssh.opts) of - infinity -> - D#data{idle_timer_value = infinity, - idle_timer_ref = infinity % A flag used later... - }; - IdleTime -> - %% We dont want to set the timeout on first connect - D#data{idle_timer_value = IdleTime} - end. - - -cache_check_set_idle_timer(D = #data{idle_timer_ref = undefined, - idle_timer_value = IdleTime}) -> - %% No timer set - shall we set one? +cond_set_idle_timer(D) -> case ssh_client_channel:cache_info(num_entries, cache(D)) of - 0 when IdleTime == infinity -> - %% No. Meaningless to set a timer that fires in an infinite time... - D; - 0 -> - %% Yes, we'll set one since the cache is empty and it should not - %% be that for a specified time - D#data{idle_timer_ref = - erlang:send_after(IdleTime, self(), {'EXIT',[],"Timeout"})}; - _ -> - %% No - there are entries in the cache - D - end; -cache_check_set_idle_timer(D) -> - %% There is already a timer set or the timeout time is infinite - D. - - -cache_cancel_idle_timer(D) -> - case D#data.idle_timer_ref of - infinity -> - %% The timer is not activated - D; - undefined -> - %% The timer is already cancelled - D; - TimerRef -> - %% The timer is active - erlang:cancel_timer(TimerRef), - D#data{idle_timer_ref = undefined} + 0 -> {{timeout,idle_time}, ?GET_OPT(idle_time, (D#data.ssh_params)#ssh.opts), none}; + _ -> {{timeout,idle_time}, infinity, none} end. - -cache_request_idle_timer_check(D = #data{idle_timer_value = infinity}) -> - D; -cache_request_idle_timer_check(D = #data{idle_timer_value = IdleTime}) -> - erlang:send_after(IdleTime, self(), check_cache), - D. - %%%---------------------------------------------------------------- start_channel_request_timer(_,_, infinity) -> ok; @@ -2248,7 +2200,7 @@ update_inet_buffers(Socket) -> %%%# Tracing %%%# -dbg_trace(points, _, _) -> [terminate, disconnect, connections, connection_events]; +dbg_trace(points, _, _) -> [terminate, disconnect, connections, connection_events, renegotiation]; dbg_trace(flags, connections, A) -> [c] ++ dbg_trace(flags, terminate, A); dbg_trace(on, connections, A) -> dbg:tp(?MODULE, init_connection_handler, 3, x), @@ -2291,6 +2243,33 @@ dbg_trace(format, connection_events, {return_from, {?MODULE,handle_event,4}, Ret io_lib:format("~p~n", [event_handler_result(Ret)]) ]; +dbg_trace(flags, renegotiation, _) -> [c]; +dbg_trace(on, renegotiation, _) -> dbg:tpl(?MODULE, init_renegotiate_timers, 2, x), + dbg:tpl(?MODULE, pause_renegotiate_timers, 2, x), + dbg:tpl(?MODULE, check_data_rekeying_dbg, 2, x), + dbg:tpl(?MODULE, start_rekeying, 2, x); +dbg_trace(off, renegotiation, _) -> dbg:ctpl(?MODULE, init_renegotiate_timers, 2), + dbg:ctpl(?MODULE, pause_renegotiate_timers, 2), + dbg:ctpl(?MODULE, check_data_rekeying_dbg, 2), + dbg:ctpl(?MODULE, start_rekeying, 2); +dbg_trace(format, renegotiation, {call, {?MODULE,init_renegotiate_timers,[_State,D]}}) -> + ["Renegotiation init\n", + io_lib:format("rekey_limit: ~p ({ms,bytes})~ncheck_data_size: ~p (ms)~n", + [?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts), + ?REKEY_DATA_TIMOUT]) + ]; +dbg_trace(format, renegotiation, {call, {?MODULE,pause_renegotiate_timers,[_State,_D]}}) -> + ["Renegotiation pause\n"]; +dbg_trace(format, renegotiation, {call, {?MODULE,start_rekeying,[_Role,_D]}}) -> + ["Renegotiation start rekeying\n"]; +dbg_trace(format, renegotiation, {call, {?MODULE,check_data_rekeying_dbg,[SentSinceRekey, MaxSent]}}) -> + ["Renegotiation check data sent\n", + io_lib:format("TotalSentSinceRekey: ~p~nMaxBeforeRekey: 
~p~nStartRekey: ~p~n",
+                  [SentSinceRekey, MaxSent, SentSinceRekey >= MaxSent])
+    ];
+
+
+
 dbg_trace(flags, terminate, _) -> [c];
 dbg_trace(on, terminate, _) -> dbg:tp(?MODULE, terminate, 3, x);
 dbg_trace(off, terminate, _) -> dbg:ctpg(?MODULE, terminate, 3);
diff --git a/lib/ssh/src/ssh_options.erl b/lib/ssh/src/ssh_options.erl
index 73287e464a..fe95d2ac54 100644
--- a/lib/ssh/src/ssh_options.erl
+++ b/lib/ssh/src/ssh_options.erl
@@ -601,14 +601,19 @@ default(common) ->
 
     {rekey_limit, def} =>
         #{default => {3600000, 1024000000},      % {1 hour, 1 GB}
-          chk => fun({TimeMins, SizBytes}) when is_integer(TimeMins) andalso TimeMins>=0,
-                                                is_integer(SizBytes) andalso SizBytes>=0 ->
-                         %% New (>= 21) format
-                         {true, {TimeMins * 60*1000, % To ms
-                                 SizBytes}};
-                    (SizBytes) when is_integer(SizBytes) andalso SizBytes>=0 ->
-                         %% Old (< 21) format
-                         {true, {3600000, SizBytes}};
+          chk => fun({infinity, infinity}) ->
+                         true;
+                    ({Mins, infinity}) when is_integer(Mins), Mins>0 ->
+                         {true, {Mins*60*1000, infinity}};
+                    ({infinity, Bytes}) when is_integer(Bytes), Bytes>=0 ->
+                         true;
+                    ({Mins, Bytes}) when is_integer(Mins), Mins>0,
+                                         is_integer(Bytes), Bytes>=0 ->
+                         {true, {Mins*60*1000, Bytes}};
+                    (infinity) ->
+                         {true, {3600000, infinity}};
+                    (Bytes) when is_integer(Bytes), Bytes>=0 ->
+                         {true, {3600000, Bytes}};
                     (_) ->
                          false
                 end,
diff --git a/lib/ssh/src/ssh_transport.erl b/lib/ssh/src/ssh_transport.erl
index 458bdfc258..631c4d0213 100644
--- a/lib/ssh/src/ssh_transport.erl
+++ b/lib/ssh/src/ssh_transport.erl
@@ -1850,9 +1850,6 @@ public_algo({#'ECPoint'{},{namedCurve,OID}}) ->
     Curve = public_key:oid2ssh_curvename(OID),
     list_to_atom("ecdsa-sha2-" ++ binary_to_list(Curve)).
 
-
-
-
 sha('ssh-rsa') -> sha;
 sha('rsa-sha2-256') -> sha256;
 sha('rsa-sha2-384') -> sha384;
diff --git a/lib/ssh/test/ssh_basic_SUITE.erl b/lib/ssh/test/ssh_basic_SUITE.erl
index 603ac71d4b..807e23ff01 100644
--- a/lib/ssh/test/ssh_basic_SUITE.erl
+++ b/lib/ssh/test/ssh_basic_SUITE.erl
@@ -32,7 +32,7 @@
 
 -define(NEWLINE, <<"\r\n">>).
 
--define(REKEY_DATA_TMO, 65000).
+-define(REKEY_DATA_TMO, 1 * 60000). % Should be a multiple of 60000
 
 %%--------------------------------------------------------------------
 %% Common Test interface functions -----------------------------------
@@ -45,7 +45,6 @@ suite() ->
 all() ->
     [{group, all_tests}].
 
-
 groups() ->
     [{all_tests, [parallel], [{group, ssh_renegotiate_SUITE},
                               {group, ssh_basic_SUITE}
                              ]},
@@ -76,7 +75,11 @@ groups() ->
                                   shell_exit_status
                                  ]},
 
-     {ssh_renegotiate_SUITE, [parallel], [rekey,
+     {ssh_renegotiate_SUITE, [parallel], [rekey0,
+                                          rekey1,
+                                          rekey2,
+                                          rekey3,
+                                          rekey4,
                                           rekey_limit_client,
                                           rekey_limit_daemon,
                                           rekey_time_limit_client,
@@ -1330,28 +1333,36 @@ shell_exit_status(Config) when is_list(Config) ->
     ssh:stop_daemon(Pid).
 
 
+%%----------------------------------------------------------------------------
 %%% Rekeying tests
 
-rekey() -> [{timetrap,{seconds,90}}].
+rekey0() -> [{timetrap,{seconds,90}}].
+rekey1() -> [{timetrap,{seconds,90}}].
+rekey2() -> [{timetrap,{seconds,90}}].
+rekey3() -> [{timetrap,{seconds,90}}].
+rekey4() -> [{timetrap,{seconds,90}}].
 
-rekey(Config) ->
-    {Pid, Host, Port} =
-        ssh_test_lib:std_daemon(Config,
-                                [{rekey_limit, 0}]),
-    ConnectionRef =
-        ssh_test_lib:std_connect(Config, Host, Port,
-                                 [{rekey_limit, 0}]),
+rekey0(Config) -> rekey_chk(Config, 0,                   0).
+rekey1(Config) -> rekey_chk(Config, infinity,            0).
+rekey2(Config) -> rekey_chk(Config, {infinity,infinity}, 0).
+rekey3(Config) -> rekey_chk(Config, 0,                   infinity).
+rekey4(Config) -> rekey_chk(Config, 0,                   {infinity,infinity}).
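%% Editor's note (illustration, not part of the patch): the rewritten
%% rekey_limit check in ssh_options.erl above accepts exactly the value forms
%% exercised by rekey0..rekey4. A minimal sketch of client calls, where Host
%% and Port are placeholders, with the normalized internal value in comments:
%%
%%   {ok,_} = ssh:connect(Host, Port, [{rekey_limit, {30, 1024000000}}]),
%%           %% rekey after 30 minutes or 1 GB, whichever comes first
%%   {ok,_} = ssh:connect(Host, Port, [{rekey_limit, {30, infinity}}]),
%%           %% time limit only
%%   {ok,_} = ssh:connect(Host, Port, [{rekey_limit, {infinity, 1024000000}}]),
%%           %% volume limit only
%%   {ok,_} = ssh:connect(Host, Port, [{rekey_limit, infinity}]),
%%           %% normalized to {3600000, infinity}: no volume limit,
%%           %% time limit keeps the 1-hour default
%%   {ok,_} = ssh:connect(Host, Port, [{rekey_limit, 1024000000}]).
%%           %% old (< 21) format: volume limit only, time defaults to 1 hour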
+ +rekey_chk(Config, RLdaemon, RLclient) -> + {Pid, Host, Port} = ssh_test_lib:std_daemon(Config, [{rekey_limit, RLdaemon}]), + ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{rekey_limit, RLclient}]), Kex1 = ssh_test_lib:get_kex_init(ConnectionRef), - receive - after ?REKEY_DATA_TMO -> - %%By this time rekeying would have been done - Kex2 = ssh_test_lib:get_kex_init(ConnectionRef), - false = (Kex2 == Kex1), - ssh:close(ConnectionRef), - ssh:stop_daemon(Pid) - end. -%%-------------------------------------------------------------------- + %% Make both sides send something: + {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef), + + %% Check rekeying + timer:sleep(?REKEY_DATA_TMO), + ?wait_match(false, Kex1==ssh_test_lib:get_kex_init(ConnectionRef), [], 2000, 10), + ssh:close(ConnectionRef), + ssh:stop_daemon(Pid). + +%%-------------------------------------------------------------------- %%% Test rekeying by data volume rekey_limit_client() -> [{timetrap,{seconds,400}}]. @@ -1359,7 +1370,7 @@ rekey_limit_client(Config) -> Limit = 6000, UserDir = proplists:get_value(priv_dir, Config), DataFile = filename:join(UserDir, "rekey.data"), - + Data = lists:duplicate(Limit+10,1), Algs = proplists:get_value(preferred_algorithms, Config), {Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{max_random_length_padding,0}, {preferred_algorithms,Algs}]), @@ -1368,31 +1379,33 @@ rekey_limit_client(Config) -> {max_random_length_padding,0}]), {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef), + %% Check that it doesn't rekey without data transfer Kex1 = ssh_test_lib:get_kex_init(ConnectionRef), - timer:sleep(?REKEY_DATA_TMO), - Kex1 = ssh_test_lib:get_kex_init(ConnectionRef), + true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)), - Data = lists:duplicate(Limit+10,1), + %% Check that datatransfer triggers rekeying ok = ssh_sftp:write_file(SftpPid, DataFile, Data), - timer:sleep(?REKEY_DATA_TMO), - Kex2 = ssh_test_lib:get_kex_init(ConnectionRef), + ?wait_match(false, Kex1==(Kex2=ssh_test_lib:get_kex_init(ConnectionRef)), Kex2, 2000, 10), - false = (Kex2 == Kex1), + %% Check that datatransfer continues to trigger rekeying + ok = ssh_sftp:write_file(SftpPid, DataFile, Data), + timer:sleep(?REKEY_DATA_TMO), + ?wait_match(false, Kex2==(Kex3=ssh_test_lib:get_kex_init(ConnectionRef)), Kex3, 2000, 10), + %% Check that it doesn't rekey without data transfer timer:sleep(?REKEY_DATA_TMO), - Kex2 = ssh_test_lib:get_kex_init(ConnectionRef), + true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)), + %% Check that it doesn't rekey on a small datatransfer ok = ssh_sftp:write_file(SftpPid, DataFile, "hi\n"), - timer:sleep(?REKEY_DATA_TMO), - Kex2 = ssh_test_lib:get_kex_init(ConnectionRef), - - false = (Kex2 == Kex1), + true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)), + %% Check that it doesn't rekey without data transfer timer:sleep(?REKEY_DATA_TMO), - Kex2 = ssh_test_lib:get_kex_init(ConnectionRef), + true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)), ssh_sftp:stop_channel(SftpPid), ssh:close(ConnectionRef), @@ -1416,32 +1429,40 @@ rekey_limit_daemon(Config) -> ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{max_random_length_padding,0}]), {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef), + %% Check that it doesn't rekey without data transfer Kex1 = ssh_test_lib:get_kex_init(ConnectionRef), timer:sleep(?REKEY_DATA_TMO), Kex1 = ssh_test_lib:get_kex_init(ConnectionRef), + %% Check that datatransfer triggers rekeying {ok,_} = ssh_sftp:read_file(SftpPid, 
DataFile1), + timer:sleep(?REKEY_DATA_TMO), + ?wait_match(false, Kex1==(Kex2=ssh_test_lib:get_kex_init(ConnectionRef)), Kex2, 2000, 10), + %% Check that datatransfer continues to trigger rekeying + {ok,_} = ssh_sftp:read_file(SftpPid, DataFile1), timer:sleep(?REKEY_DATA_TMO), - Kex2 = ssh_test_lib:get_kex_init(ConnectionRef), - false = (Kex2 == Kex1), + ?wait_match(false, Kex2==(Kex3=ssh_test_lib:get_kex_init(ConnectionRef)), Kex3, 2000, 10), + %% Check that it doesn't rekey without data transfer timer:sleep(?REKEY_DATA_TMO), - Kex2 = ssh_test_lib:get_kex_init(ConnectionRef), + true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)), + %% Check that it doesn't rekey on a small datatransfer {ok,_} = ssh_sftp:read_file(SftpPid, DataFile2), - timer:sleep(?REKEY_DATA_TMO), - Kex2 = ssh_test_lib:get_kex_init(ConnectionRef), + true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)), + %% Check that it doesn't rekey without data transfer timer:sleep(?REKEY_DATA_TMO), - Kex2 = ssh_test_lib:get_kex_init(ConnectionRef), + true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)), ssh_sftp:stop_channel(SftpPid), ssh:close(ConnectionRef), ssh:stop_daemon(Pid). +%%-------------------------------------------------------------------- %% Check that datatransfer in the other direction does not trigger re-keying norekey_limit_client() -> [{timetrap,{seconds,400}}]. norekey_limit_client(Config) -> @@ -1460,13 +1481,12 @@ norekey_limit_client(Config) -> Kex1 = ssh_test_lib:get_kex_init(ConnectionRef), timer:sleep(?REKEY_DATA_TMO), - Kex1 = ssh_test_lib:get_kex_init(ConnectionRef), + true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)), {ok,_} = ssh_sftp:read_file(SftpPid, DataFile), timer:sleep(?REKEY_DATA_TMO), - Kex2 = ssh_test_lib:get_kex_init(ConnectionRef), + true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)), - Kex1 = Kex2, ssh_sftp:stop_channel(SftpPid), ssh:close(ConnectionRef), ssh:stop_daemon(Pid). @@ -1488,13 +1508,12 @@ norekey_limit_daemon(Config) -> Kex1 = ssh_test_lib:get_kex_init(ConnectionRef), timer:sleep(?REKEY_DATA_TMO), - Kex1 = ssh_test_lib:get_kex_init(ConnectionRef), + true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)), ok = ssh_sftp:write_file(SftpPid, DataFile, lists:duplicate(Limit+10,1)), timer:sleep(?REKEY_DATA_TMO), - Kex2 = ssh_test_lib:get_kex_init(ConnectionRef), + true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)), - Kex1 = Kex2, ssh_sftp:stop_channel(SftpPid), ssh:close(ConnectionRef), ssh:stop_daemon(Pid). @@ -1504,39 +1523,41 @@ norekey_limit_daemon(Config) -> rekey_time_limit_client() -> [{timetrap,{seconds,400}}]. rekey_time_limit_client(Config) -> - Minutes = 1, + Minutes = ?REKEY_DATA_TMO div 60000, GB = 1024*1000*1000, Algs = proplists:get_value(preferred_algorithms, Config), {Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{max_random_length_padding,0}, {preferred_algorithms,Algs}]), ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{rekey_limit, {Minutes, GB}}, {max_random_length_padding,0}]), - {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef), - rekey_time_limit(Pid, Minutes, ConnectionRef, SftpPid). + rekey_time_limit(Pid, ConnectionRef). rekey_time_limit_daemon() -> [{timetrap,{seconds,400}}]. 
rekey_time_limit_daemon(Config) -> - Minutes = 1, + Minutes = ?REKEY_DATA_TMO div 60000, GB = 1024*1000*1000, Algs = proplists:get_value(preferred_algorithms, Config), {Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{rekey_limit, {Minutes, GB}}, {max_random_length_padding,0}, {preferred_algorithms,Algs}]), ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{max_random_length_padding,0}]), - {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef), - rekey_time_limit(Pid, Minutes, ConnectionRef, SftpPid). + rekey_time_limit(Pid, ConnectionRef). -rekey_time_limit(Pid, Minutes, ConnectionRef, SftpPid) -> +rekey_time_limit(Pid, ConnectionRef) -> + {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef), Kex1 = ssh_test_lib:get_kex_init(ConnectionRef), timer:sleep(5000), - Kex1 = ssh_test_lib:get_kex_init(ConnectionRef), + true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)), - timer:sleep((Minutes*60 + 30) * 1000), - Kex2 = ssh_test_lib:get_kex_init(ConnectionRef), + %% Check that it rekeys when the max time + 30s has passed + timer:sleep(?REKEY_DATA_TMO + 30*1000), + ?wait_match(false, Kex1==(Kex2=ssh_test_lib:get_kex_init(ConnectionRef)), Kex2, 2000, 10), - false = (Kex2 == Kex1), + %% Check that it does not rekey when nothing is transferred + timer:sleep(?REKEY_DATA_TMO + 30*1000), + ?wait_match(false, Kex2==ssh_test_lib:get_kex_init(ConnectionRef), [], 2000, 10), ssh_sftp:stop_channel(SftpPid), ssh:close(ConnectionRef), @@ -1544,7 +1565,7 @@ rekey_time_limit(Pid, Minutes, ConnectionRef, SftpPid) -> %%-------------------------------------------------------------------- -%%% Test rekeying with simulataneous send request +%%% Test rekeying with simultaneous send request renegotiate1(Config) -> UserDir = proplists:get_value(priv_dir, Config), diff --git a/lib/ssh/test/ssh_test_lib.erl b/lib/ssh/test/ssh_test_lib.erl index 57ae2dbac2..65970535f4 100644 --- a/lib/ssh/test/ssh_test_lib.erl +++ b/lib/ssh/test/ssh_test_lib.erl @@ -926,7 +926,7 @@ get_kex_init(Conn, Ref, TRef) -> end; false -> - ct:log("Not in 'connected' state: ~p",[State]), + ct:log("~p:~p Not in 'connected' state: ~p",[?MODULE,?LINE,State]), receive {reneg_timeout,Ref} -> ct:log("S = ~p", [S]), diff --git a/lib/ssh/vsn.mk b/lib/ssh/vsn.mk index 538490ef96..f10e7aa96a 100644 --- a/lib/ssh/vsn.mk +++ b/lib/ssh/vsn.mk @@ -1,4 +1,5 @@ #-*-makefile-*- ; force emacs to enter makefile-mode SSH_VSN = 4.6.9 + APP_VSN = "ssh-$(SSH_VSN)" diff --git a/lib/tools/src/xref.erl b/lib/tools/src/xref.erl index 32efa36fa2..466ec7d331 100644 --- a/lib/tools/src/xref.erl +++ b/lib/tools/src/xref.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2000-2016. All Rights Reserved. +%% Copyright Ericsson AB 2000-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -182,7 +182,9 @@ split_args(Opts) -> end. stop(Name) -> - gen_server:call(Name, stop, infinity). + try gen_server:call(Name, stop, infinity) + after catch unregister(Name) % ensure the name is gone + end. add_release(Name, Dir) -> gen_server:call(Name, {add_release, Dir}, infinity). 
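%% Editor's note (illustration, not part of the patch): the new xref:stop/1
%% above uses try ... after to return (or re-raise) the result of the
%% gen_server call while always running the cleanup. The inner catch matters
%% because unregister/1 exits with badarg when the name is not registered,
%% for example when the terminating server has already dropped it. A
%% standalone sketch of the same pattern, with a hypothetical function name:
%%
%%   stop_and_forget(Name) ->
%%       try gen_server:call(Name, stop, infinity)
%%       after
%%           catch unregister(Name)  % a badarg exit from an already
%%                                   % unregistered name is swallowed
%%       end.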
diff --git a/otp_versions.table b/otp_versions.table index 7b6f38dc23..c66a390f21 100644 --- a/otp_versions.table +++ b/otp_versions.table @@ -25,6 +25,7 @@ OTP-20.0.3 : asn1-5.0.2 compiler-7.1.1 erts-9.0.3 ssh-4.5.1 # common_test-1.15.1 OTP-20.0.2 : asn1-5.0.1 erts-9.0.2 kernel-5.3.1 # common_test-1.15.1 compiler-7.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.2 cosTime-1.2.2 cosTransactions-1.3.2 crypto-4.0 debugger-4.2.2 dialyzer-3.2 diameter-2.0 edoc-0.9 eldap-1.2.2 erl_docgen-0.7 erl_interface-3.10 et-1.6 eunit-2.3.3 hipe-3.16 ic-4.4.2 inets-6.4 jinterface-1.8 megaco-3.18.2 mnesia-4.15 observer-2.4 odbc-2.12 orber-3.8.3 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.5 public_key-1.4.1 reltool-0.7.4 runtime_tools-1.12.1 sasl-3.0.4 snmp-5.2.6 ssh-4.5 ssl-8.2 stdlib-3.4.1 syntax_tools-2.1.2 tools-2.10.1 wx-1.8.1 xmerl-1.3.15 : OTP-20.0.1 : common_test-1.15.1 erts-9.0.1 runtime_tools-1.12.1 stdlib-3.4.1 tools-2.10.1 # asn1-5.0 compiler-7.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.2 cosTime-1.2.2 cosTransactions-1.3.2 crypto-4.0 debugger-4.2.2 dialyzer-3.2 diameter-2.0 edoc-0.9 eldap-1.2.2 erl_docgen-0.7 erl_interface-3.10 et-1.6 eunit-2.3.3 hipe-3.16 ic-4.4.2 inets-6.4 jinterface-1.8 kernel-5.3 megaco-3.18.2 mnesia-4.15 observer-2.4 odbc-2.12 orber-3.8.3 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.5 public_key-1.4.1 reltool-0.7.4 sasl-3.0.4 snmp-5.2.6 ssh-4.5 ssl-8.2 syntax_tools-2.1.2 wx-1.8.1 xmerl-1.3.15 : OTP-20.0 : asn1-5.0 common_test-1.15 compiler-7.1 cosProperty-1.2.2 crypto-4.0 debugger-4.2.2 dialyzer-3.2 diameter-2.0 edoc-0.9 erl_docgen-0.7 erl_interface-3.10 erts-9.0 eunit-2.3.3 hipe-3.16 inets-6.4 jinterface-1.8 kernel-5.3 megaco-3.18.2 mnesia-4.15 observer-2.4 orber-3.8.3 parsetools-2.1.5 public_key-1.4.1 reltool-0.7.4 runtime_tools-1.12 sasl-3.0.4 snmp-5.2.6 ssh-4.5 ssl-8.2 stdlib-3.4 syntax_tools-2.1.2 tools-2.10 wx-1.8.1 xmerl-1.3.15 # cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosTime-1.2.2 cosTransactions-1.3.2 eldap-1.2.2 et-1.6 ic-4.4.2 odbc-2.12 os_mon-2.4.2 otp_mibs-1.1.1 : +OTP-19.3.6.9 : ssh-4.4.2.4 # asn1-4.0.4 common_test-1.14 compiler-7.0.4.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7.4 debugger-4.2.1 dialyzer-3.1.1 diameter-1.12.2 edoc-0.8.1 eldap-1.2.2 erl_docgen-0.6.1 erl_interface-3.9.3 erts-8.3.5.4 et-1.6 eunit-2.3.2 gs-1.6.2 hipe-3.15.4 ic-4.4.2 inets-6.3.9 jinterface-1.7.1 kernel-5.2.0.1 megaco-3.18.1 mnesia-4.14.3.1 observer-2.3.1 odbc-2.12 orber-3.8.2 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.4 percept-0.9 public_key-1.4 reltool-0.7.3 runtime_tools-1.11.1 sasl-3.0.3 snmp-5.2.5 ssl-8.1.3.1.1 stdlib-3.3 syntax_tools-2.1.1 tools-2.9.1 typer-0.9.12 wx-1.8 xmerl-1.3.14 : OTP-19.3.6.8 : ssh-4.4.2.3 # asn1-4.0.4 common_test-1.14 compiler-7.0.4.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7.4 debugger-4.2.1 dialyzer-3.1.1 diameter-1.12.2 edoc-0.8.1 eldap-1.2.2 erl_docgen-0.6.1 erl_interface-3.9.3 erts-8.3.5.4 et-1.6 eunit-2.3.2 gs-1.6.2 hipe-3.15.4 ic-4.4.2 inets-6.3.9 jinterface-1.7.1 kernel-5.2.0.1 megaco-3.18.1 mnesia-4.14.3.1 observer-2.3.1 odbc-2.12 orber-3.8.2 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.4 percept-0.9 public_key-1.4 reltool-0.7.3 runtime_tools-1.11.1 sasl-3.0.3 snmp-5.2.5 ssl-8.1.3.1.1 stdlib-3.3 
syntax_tools-2.1.1 tools-2.9.1 typer-0.9.12 wx-1.8 xmerl-1.3.14 : OTP-19.3.6.7 : kernel-5.2.0.1 # asn1-4.0.4 common_test-1.14 compiler-7.0.4.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7.4 debugger-4.2.1 dialyzer-3.1.1 diameter-1.12.2 edoc-0.8.1 eldap-1.2.2 erl_docgen-0.6.1 erl_interface-3.9.3 erts-8.3.5.4 et-1.6 eunit-2.3.2 gs-1.6.2 hipe-3.15.4 ic-4.4.2 inets-6.3.9 jinterface-1.7.1 megaco-3.18.1 mnesia-4.14.3.1 observer-2.3.1 odbc-2.12 orber-3.8.2 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.4 percept-0.9 public_key-1.4 reltool-0.7.3 runtime_tools-1.11.1 sasl-3.0.3 snmp-5.2.5 ssh-4.4.2.2 ssl-8.1.3.1.1 stdlib-3.3 syntax_tools-2.1.1 tools-2.9.1 typer-0.9.12 wx-1.8 xmerl-1.3.14 : OTP-19.3.6.6 : ssh-4.4.2.2 ssl-8.1.3.1.1 # asn1-4.0.4 common_test-1.14 compiler-7.0.4.1 cosEvent-2.2.1 cosEventDomain-1.2.1 cosFileTransfer-1.2.1 cosNotification-1.2.2 cosProperty-1.2.1 cosTime-1.2.2 cosTransactions-1.3.2 crypto-3.7.4 debugger-4.2.1 dialyzer-3.1.1 diameter-1.12.2 edoc-0.8.1 eldap-1.2.2 erl_docgen-0.6.1 erl_interface-3.9.3 erts-8.3.5.4 et-1.6 eunit-2.3.2 gs-1.6.2 hipe-3.15.4 ic-4.4.2 inets-6.3.9 jinterface-1.7.1 kernel-5.2 megaco-3.18.1 mnesia-4.14.3.1 observer-2.3.1 odbc-2.12 orber-3.8.2 os_mon-2.4.2 otp_mibs-1.1.1 parsetools-2.1.4 percept-0.9 public_key-1.4 reltool-0.7.3 runtime_tools-1.11.1 sasl-3.0.3 snmp-5.2.5 stdlib-3.3 syntax_tools-2.1.1 tools-2.9.1 typer-0.9.12 wx-1.8 xmerl-1.3.14 : diff --git a/system/doc/efficiency_guide/binaryhandling.xml b/system/doc/efficiency_guide/binaryhandling.xml index 19f40c9abe..d07ff1325f 100644 --- a/system/doc/efficiency_guide/binaryhandling.xml +++ b/system/doc/efficiency_guide/binaryhandling.xml @@ -357,25 +357,8 @@ all_but_zeroes_to_list(<<Byte,T/binary>>, Acc, Remaining) -> <c>Buffer</c> from a match context to a sub binary (or do nothing if <c>Buffer</c> is a binary already).</p> - <p>Before you begin to think that the compiler can optimize any binary - patterns, the following function cannot be optimized by the compiler - (currently, at least):</p> - - <code type="erl"><![CDATA[ -non_opt_eq([H|T1], <<H,T2/binary>>) -> - non_opt_eq(T1, T2); -non_opt_eq([_|_], <<_,_/binary>>) -> - false; -non_opt_eq([], <<>>) -> - true.]]></code> - - <p>It was mentioned earlier that the compiler can only delay creation of - sub binaries if it knows that the binary will not be shared. In this case, - the compiler cannot know.</p> - - <p>Soon it is shown how to rewrite <c>non_opt_eq/2</c> so that the delayed - sub binary optimization can be applied, and more importantly, it is shown - how you can find out whether your code can be optimized.</p> + <p>But in more complicated code, how can one know whether the + optimization is applied or not?</p> <section> <marker id="bin_opt_info"></marker> @@ -422,67 +405,6 @@ after_zero(<<>>) -> binary cannot be delayed, because it will be returned. 
The warning for the second clause says that a sub binary will not be
      created (yet).</p>
-
-    <p>Let us revisit the earlier example of the code that could not
-    be optimized and find out why:</p>
-
-    <code type="erl"><![CDATA[
-non_opt_eq([H|T1], <<H,T2/binary>>) ->
-    %% INFO: matching anything else but a plain variable to
-    %% the left of binary pattern will prevent delayed
-    %% sub binary optimization;
-    %% SUGGEST changing argument order
-    %% NOT OPTIMIZED: called function non_opt_eq/2 does not
-    %% begin with a suitable binary matching instruction
-    non_opt_eq(T1, T2);
-non_opt_eq([_|_], <<_,_/binary>>) ->
-    false;
-non_opt_eq([], <<>>) ->
-    true.]]></code>
-
-    <p>The compiler emitted two warnings. The <c>INFO</c> warning refers
-    to the function <c>non_opt_eq/2</c> as a callee, indicating that any
-    function that call <c>non_opt_eq/2</c> cannot make delayed sub binary
-    optimization. There is also a suggestion to change argument order.
-    The second warning (that happens to refer to the same line) refers to
-    the construction of the sub binary itself.</p>
-
-    <p>Soon another example will show the difference between the
-    <c>INFO</c> and <c>NOT OPTIMIZED</c> warnings somewhat clearer, but
-    let us first follow the suggestion to change argument order:</p>
-
-    <code type="erl"><![CDATA[
-opt_eq(<<H,T1/binary>>, [H|T2]) ->
-    %% OPTIMIZED: creation of sub binary delayed
-    opt_eq(T1, T2);
-opt_eq(<<_,_/binary>>, [_|_]) ->
-    false;
-opt_eq(<<>>, []) ->
-    true.]]></code>
-
-    <p>The compiler gives a warning for the following code fragment:</p>
-
-    <code type="erl"><![CDATA[
-match_body([0|_], <<H,_/binary>>) ->
-    %% INFO: matching anything else but a plain variable to
-    %% the left of binary pattern will prevent delayed
-    %% sub binary optimization;
-    %% SUGGEST changing argument order
-    done;
-...]]></code>
-
-    <p>The warning means that <em>if</em> there is a call to <c>match_body/2</c>
-    (from another clause in <c>match_body/2</c> or another function), the
-    delayed sub binary optimization will not be possible. More warnings will
-    occur for any place where a sub binary is matched out at the end of and
-    passed as the second argument to <c>match_body/2</c>, for example:</p>
-
-    <code type="erl"><![CDATA[
-match_head(List, <<_:10,Data/binary>>) ->
-    %% NOT OPTIMIZED: called function match_body/2 does not
-    %% begin with a suitable binary matching instruction
-    match_body(List, Data).]]></code>
-
   </section>
 
   <section>
diff --git a/system/doc/efficiency_guide/efficiency_guide.erl b/system/doc/efficiency_guide/efficiency_guide.erl
index e982bdae65..c57785aaa3 100644
--- a/system/doc/efficiency_guide/efficiency_guide.erl
+++ b/system/doc/efficiency_guide/efficiency_guide.erl
@@ -1,5 +1,5 @@
 -module(efficiency_guide).
--compile(export_all).
+-compile([export_all,nowarn_export_all]).
 
 %% DO NOT
 naive_reverse([H|T]) ->
@@ -71,28 +71,6 @@ all_but_zeroes_to_list(<<0,T/binary>>, Acc, Remaining) ->
 all_but_zeroes_to_list(<<Byte,T/binary>>, Acc, Remaining) ->
     all_but_zeroes_to_list(T, [Byte|Acc], Remaining-1).
 
-non_opt_eq([H|T1], <<H,T2/binary>>) ->
-    non_opt_eq(T1, T2);
-non_opt_eq([_|_], <<_,_/binary>>) ->
-    false;
-non_opt_eq([], <<>>) ->
-    true.
-
-opt_eq(<<H,T1/binary>>, [H|T2]) ->
-    opt_eq(T1, T2);
-opt_eq(<<_,_/binary>>, [_|_]) ->
-    false;
-opt_eq(<<>>, []) ->
-    true.
-
-match_head(List, <<_:10,Data/binary>>) ->
-    match_body(List, Data).
-
-match_body([0|_], <<H,_/binary>>) ->
-    done;
-match_body([H|T1], <<H,T2/binary>>) ->
-    {T1,T2}.
-
 count1(<<_,T/binary>>, Count) -> count1(T, Count+1);
 count1(<<>>, Count) -> Count.
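Editor's note (illustration, not part of the patch): the INFO and NOT OPTIMIZED
messages quoted in the binaryhandling.xml text above come from the compiler's
bin_opt_info option. Assuming the example module compiles in the current
directory, the warnings can be reproduced with

    erlc +bin_opt_info efficiency_guide.erl

or from the Erlang shell with

    c(efficiency_guide, [bin_opt_info]).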