Diffstat (limited to 'lib')
50 files changed, 1752 insertions, 431 deletions
diff --git a/lib/compiler/src/beam_utils.erl b/lib/compiler/src/beam_utils.erl index 5580d2f123..6e23003fc7 100644 --- a/lib/compiler/src/beam_utils.erl +++ b/lib/compiler/src/beam_utils.erl @@ -1115,6 +1115,10 @@ defs([{bs_init,{f,L},_,Live,_,Dst}=I|Is], Regs0, D) -> end, Regs = def_regs([Dst], Regs1), [I|defs(Is, Regs, update_regs(L, Regs, D))]; +defs([{test,bs_start_match2,{f,L},Live,_,Dst}=I|Is], _Regs, D) -> + Regs0 = init_def_regs(Live), + Regs = def_regs([Dst], Regs0), + [I|defs(Is, Regs, update_regs(L, Regs0, D))]; defs([{bs_put,{f,L},_,_}=I|Is], Regs, D) -> [I|defs(Is, Regs, update_regs(L, Regs, D))]; defs([build_stacktrace=I|Is], _Regs, D) -> diff --git a/lib/compiler/test/bs_match_SUITE.erl b/lib/compiler/test/bs_match_SUITE.erl index e97dbac8a6..a751f6fda5 100644 --- a/lib/compiler/test/bs_match_SUITE.erl +++ b/lib/compiler/test/bs_match_SUITE.erl @@ -40,7 +40,8 @@ map_and_binary/1,unsafe_branch_caching/1, bad_literals/1,good_literals/1,constant_propagation/1, parse_xml/1,get_payload/1,escape/1,num_slots_different/1, - beam_bsm/1,guard/1,is_ascii/1,non_opt_eq/1,erl_689/1]). + beam_bsm/1,guard/1,is_ascii/1,non_opt_eq/1,erl_689/1, + bs_start_match2_defs/1]). -export([coverage_id/1,coverage_external_ignore/2]). @@ -72,7 +73,8 @@ groups() -> map_and_binary,unsafe_branch_caching, bad_literals,good_literals,constant_propagation,parse_xml, get_payload,escape,num_slots_different, - beam_bsm,guard,is_ascii,non_opt_eq,erl_689]}]. + beam_bsm,guard,is_ascii,non_opt_eq,erl_689, + bs_start_match2_defs]}]. init_per_suite(Config) -> @@ -1749,6 +1751,19 @@ do_erl_689_2b(_, <<Length, Data/binary>>) -> {{Y, M, D}, Rest} end. +%% ERL-753 + +bs_start_match2_defs(_Config) -> + {<<"http://127.0.0.1:1234/vsaas/hello">>} = api_url(<<"hello">>, dummy), + {"https://127.0.0.1:4321/vsaas/hello"} = api_url({https, "hello"}, dummy). + +api_url(URL, Auth) -> + Header = [], + case URL of + <<_/binary>> -> {<<"http://127.0.0.1:1234/vsaas/",URL/binary>>}; + {https, [_|_] = URL1} -> {"https://127.0.0.1:4321/vsaas/"++URL1} + end. + check(F, R) -> R = F(). 
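The beam_utils change above (ERL-753) teaches defs/3 that a bs_start_match2 instruction only guarantees the registers given by its Live operand, plus its destination on the success path. A minimal standalone sketch of the code shape that hit the bug, using a hypothetical module name erl753_repro; it mirrors the api_url/2 helper in the new bs_start_match2_defs/1 test case:

    -module(erl753_repro).
    -export([api_url/1]).

    %% One case clause matches the argument as a binary, the other as a
    %% tuple; compiling this exercises the bs_start_match2 liveness handling.
    api_url(URL) ->
        case URL of
            <<_/binary>> ->
                {<<"http://127.0.0.1:1234/vsaas/", URL/binary>>};
            {https, [_|_] = URL1} ->
                {"https://127.0.0.1:4321/vsaas/" ++ URL1}
        end.

Compiling the module (for example with erlc erl753_repro.erl) is enough to push it through the affected optimization pass.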
diff --git a/lib/crypto/src/crypto.erl b/lib/crypto/src/crypto.erl index 2db73c4af0..c2ab88417e 100644 --- a/lib/crypto/src/crypto.erl +++ b/lib/crypto/src/crypto.erl @@ -2026,7 +2026,7 @@ check_otp_test_engine(LibDir) -> case filelib:wildcard("otp_test_engine*", LibDir) of [] -> {error, notexist}; - [LibName] -> + [LibName|_] -> % In case of Valgrind there could be more than one LibPath = filename:join(LibDir,LibName), case filelib:is_file(LibPath) of true -> diff --git a/lib/crypto/test/engine_SUITE.erl b/lib/crypto/test/engine_SUITE.erl index b083b30d70..0427923941 100644 --- a/lib/crypto/test/engine_SUITE.erl +++ b/lib/crypto/test/engine_SUITE.erl @@ -75,11 +75,18 @@ groups() -> init_per_suite(Config) -> - case crypto:info_lib() of - [{_,_, <<"OpenSSL 1.0.1s-freebsd 1 Mar 2016">>}] -> + case {os:type(), crypto:info_lib()} of + {_, [{_,_, <<"OpenSSL 1.0.1s-freebsd 1 Mar 2016">>}]} -> {skip, "Problem with engine on OpenSSL 1.0.1s-freebsd"}; - Res -> - ct:log("crypto:info_lib() -> ~p\n", [Res]), + + {{unix,darwin}, _} -> + {skip, "Engine unsupported on Darwin"}; + + {{win32,_}, _} -> + {skip, "Engine unsupported on Windows"}; + + {OS, Res} -> + ct:log("crypto:info_lib() -> ~p\nos:type() -> ~p", [Res,OS]), try crypto:start() of ok -> Config; diff --git a/lib/eldap/doc/src/notes.xml b/lib/eldap/doc/src/notes.xml index b390e0c047..bf9358c4d1 100644 --- a/lib/eldap/doc/src/notes.xml +++ b/lib/eldap/doc/src/notes.xml @@ -77,6 +77,22 @@ </section> +<section><title>Eldap 1.2.3.1</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + A race condition at close could cause the eldap client to + exit with a badarg message as cause.</p> + <p> + Own Id: OTP-15342 Aux Id: ERIERL-242 </p> + </item> + </list> + </section> + +</section> + <section><title>Eldap 1.2.3</title> <section><title>Fixed Bugs and Malfunctions</title> @@ -92,6 +108,22 @@ </section> +<section><title>Eldap 1.2.2.1</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + A race condition at close could cause the eldap client to + exit with a badarg message as cause.</p> + <p> + Own Id: OTP-15342 Aux Id: ERIERL-242 </p> + </item> + </list> + </section> + +</section> + <section><title>Eldap 1.2.2</title> <section><title>Fixed Bugs and Malfunctions</title> @@ -368,4 +400,3 @@ <p>New application. </p> </section> </chapter> - diff --git a/lib/hipe/main/hipe.erl b/lib/hipe/main/hipe.erl index ac2e6c1e3b..e2cb9c0f0b 100644 --- a/lib/hipe/main/hipe.erl +++ b/lib/hipe/main/hipe.erl @@ -542,7 +542,7 @@ file(File) -> | {'error', term()} when Mod :: mod(). 
file(File, Options) when is_atom(File) -> - case beam_lib:info(File) of + case beam_lib:info(atom_to_list(File)) of L when is_list(L) -> {module, Mod} = lists:keyfind(module, 1, L), case compile(Mod, File, Options) of diff --git a/lib/kernel/doc/src/inet.xml b/lib/kernel/doc/src/inet.xml index 127c110df4..87b08e4e36 100644 --- a/lib/kernel/doc/src/inet.xml +++ b/lib/kernel/doc/src/inet.xml @@ -1147,6 +1147,18 @@ setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp</code> is turned on for the socket, which means that also small amounts of data are sent immediately.</p> </item> + <tag><c>{nopush, Boolean}</c>(TCP/IP sockets)</tag> + <item> + <p>This translates to <c>TCP_NOPUSH</c> on BSD and + to <c>TCP_CORK</c> on Linux.</p> + <p>If <c>Boolean == true</c>, the corresponding option + is turned on for the socket, which means that small + amounts of data are accumulated until a full MSS-worth + of data is available or this option is turned off.</p> + <p>Note that while <c>TCP_NOPUSH</c> socket option is available on OSX, its semantics + is very different (e.g., unsetting it does not cause immediate send + of accumulated data). Hence, <c>nopush</c> option is intentionally ignored on OSX.</p> + </item> <tag><c>{packet, PacketType}</c>(TCP/IP sockets)</tag> <item> <p><marker id="packet"/>Defines the type of packets to use for a socket. diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml index 464c65ba76..2bcf137299 100644 --- a/lib/kernel/doc/src/logger.xml +++ b/lib/kernel/doc/src/logger.xml @@ -748,6 +748,14 @@ start(_, []) -> exists, its associated value will be changed to the given value. If it does not exist, it will be added.</p> + <p>If the value is incomplete, which for example can be the + case for the <c>config</c> key, it is up to the handler + implementation how the unspecified parts are set. For all + handlers in the Kernel application, unspecified data for + the <c>config</c> key is set to default values. To update + only specified data, and keep the existing configuration for + the rest, use <seealso marker="#update_handler_config-3"> + <c>update_handler_config/3</c></seealso>.</p> <p>See the definition of the <seealso marker="#type-handler_config"> <c>handler_config()</c></seealso> type for more @@ -933,6 +941,42 @@ logger:set_handler_config(HandlerId, maps:merge(Old, Config)). </func> <func> + <name name="update_handler_config" arity="3" clause_i="1"/> + <name name="update_handler_config" arity="3" clause_i="2"/> + <name name="update_handler_config" arity="3" clause_i="3"/> + <name name="update_handler_config" arity="3" clause_i="4"/> + <name name="update_handler_config" arity="3" clause_i="5"/> + <fsummary>Add or update configuration data for the specified + handler.</fsummary> + <type variable="HandlerId"/> + <type variable="Level" name_i="1"/> + <type variable="FilterDefault" name_i="2"/> + <type variable="Filters" name_i="3"/> + <type variable="Formatter" name_i="4"/> + <type variable="Config" name_i="5"/> + <type variable="Return"/> + <desc> + <p>Add or update configuration data for the specified + handler. If the given <c><anno>Key</anno></c> already + exists, its associated value will be changed + to the given value. If it does not exist, it will + be added.</p> + <p>If the value is incomplete, which for example can be the + case for the <c>config</c> key, it is up to the handler + implementation how the unspecified parts are set. 
For all + handlers in the Kernel application, unspecified data for + the <c>config</c> key is not changed. To reset unspecified + data to default values, + use <seealso marker="#set_handler_config-3"> + <c>set_handler_config/3</c></seealso>.</p> + <p>See the definition of + the <seealso marker="#type-handler_config"> + <c>handler_config()</c></seealso> type for more + information about the different parameters.</p> + </desc> + </func> + + <func> <name name="update_primary_config" arity="1"/> <fsummary>Update primary configuration data for Logger.</fsummary> <desc> @@ -1041,10 +1085,11 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)). </func> <func> - <name>HModule:changing_config(Config1, Config2) -> {ok, Config3} | {error, Reason}</name> + <name>HModule:changing_config(SetOrUpdate, OldConfig, NewConfig) -> {ok, Config} | {error, Reason}</name> <fsummary>The configuration for this handler is about to change.</fsummary> <type> - <v>Config1 = Config2 = Config3 = + <v>SetOrUpdate = set | update</v> + <v>OldConfig = NewConfig = Config = <seealso marker="#type-handler_config">handler_config()</seealso></v> <v>Reason = term()</v> </type> @@ -1053,18 +1098,51 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)). <p>The function is called on a temporary process when the configuration for a handler is about to change. The purpose is to verify and act on the new configuration.</p> - <p><c>Config1</c> is the existing configuration - and <c>Config2</c> is the new configuration.</p> + <p><c>OldConfig</c> is the existing configuration + and <c>NewConfig</c> is the new configuration.</p> <p>The handler identity is associated with the <c>id</c> key - in <c>Config1</c>.</p> + in <c>OldConfig</c>.</p> + <p><c>SetOrUpdate</c> has the value <c>set</c> if the + configuration change originates from a call to + <seealso marker="#set_handler_config-2"> + <c>set_handler_config/2,3</c></seealso>, and <c>update</c> + if it originates from <seealso marker="#update_handler_config-2"> + <c>update_handler_config/2,3</c></seealso>. The handler can + use this parameteter to decide how to update the value of + the <c>config</c> field, that is, the handler specific + configuration data. Typically, if <c>SetOrUpdate</c> + equals <c>set</c>, values that are not specified must be + given their default values. 
If <c>SetOrUpdate</c> + equals <c>update</c>, the values found in <c>OldConfig</c> + must be used instead.</p> <p>If everything succeeds, the callback function must return a - possibly adjusted configuration in <c>{ok,Config3}</c>.</p> + possibly adjusted configuration in <c>{ok,Config}</c>.</p> <p>If the configuration is faulty, the callback function must return <c>{error,Reason}</c>.</p> </desc> </func> <func> + <name>HModule:filter_config(Config) -> FilteredConfig</name> + <fsummary>Remove internal data from configuration.</fsummary> + <type> + <v>Config = FilteredConfig = + <seealso marker="#type-handler_config">handler_config()</seealso></v> + </type> + <desc> + <p>This callback function is optional.</p> + <p>The function is called when one of the Logger API functions + for fetching the handler configuration is called, for + example + <seealso marker="#get_handler_config-1"> + <c>logger:get_handler_config/1</c></seealso>.</p> + <p>It allows the handler to remove internal data fields from + its configuration data before it is returned to the + caller.</p> + </desc> + </func> + + <func> <name>HModule:log(LogEvent, Config) -> void()</name> <fsummary>Log the given log event.</fsummary> <type> @@ -1136,7 +1214,7 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)). <item><seealso marker="logger#set_handler_config-2"> <c>logger:set_handler_config/2,3</c></seealso></item> <item><seealso marker="logger#update_handler_config-2"> - <c>logger:updata_handler_config/2</c></seealso></item> + <c>logger:updata_handler_config/2,3</c></seealso></item> <item><seealso marker="logger#update_formatter_config-2"> <c>logger:update_formatter_config/2</c></seealso></item> </list> diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml index 4a81cfa34a..1870d2ab79 100644 --- a/lib/kernel/doc/src/logger_chapter.xml +++ b/lib/kernel/doc/src/logger_chapter.xml @@ -384,8 +384,8 @@ logger:debug(#{got => connection_request, id => Id, state => State}, <p>In addition to the mandatory callback function <c>log/2</c>, a handler module can export the optional callback - functions <c>adding_handler/1</c>, <c>changing_config/2</c> - and <c>removing_handler/1</c>. See + functions <c>adding_handler/1</c>, <c>changing_config/3</c>, + <c>filter_config/1</c>, and <c>removing_handler/1</c>. 
See section <seealso marker="logger#handler_callback_functions">Handler Callback Functions</seealso> in the logger(3) manual page for more information about these function.</p> @@ -555,7 +555,7 @@ logger:debug(#{got => connection_request, id => Id, state => State}, <item><seealso marker="logger#set_handler_config-2"> <c>set_handler_config/2,3</c></seealso></item> <item><seealso marker="logger#update_handler_config-2"> - <c>update_handler_config/2</c></seealso></item> + <c>update_handler_config/2,3</c></seealso></item> <item><seealso marker="logger#add_handler_filter-3"> <c>add_handler_filter/3</c></seealso></item> <item><seealso marker="logger#remove_handler_filter-2"> @@ -704,9 +704,13 @@ logger:debug(#{got => connection_request, id => Id, state => State}, <item> <p>If <c>HandlerId</c> is <c>default</c>, then this entry modifies the default handler, equivalent to calling</p> - <pre><seealso marker="logger#set_handler_config-2"> - logger:set_handler_config(default, Module, HandlerConfig) - </seealso></pre> + <pre><seealso marker="logger#remove_handler-1"> + logger:remove_handler(default) + </seealso></pre> + <p>followed by</p> + <pre><seealso marker="logger#add_handler-3"> + logger:add_handler(default, Module, HandlerConfig) + </seealso></pre> <p>For all other values of <c>HandlerId</c>, this entry adds a new handler, equivalent to calling</p> <pre><seealso marker="logger:add_handler/3"> @@ -1024,7 +1028,8 @@ ok</pre> <list> <item><c>adding_handler(Config)</c></item> <item><c>removing_handler(Config)</c></item> - <item><c>changing_config(OldConfig, NewConfig)</c></item> + <item><c>changing_config(SetOrUpdate, OldConfig, NewConfig)</c></item> + <item><c>filter_config(Config)</c></item> </list> <p>When a handler is added, by for example a call to <seealso marker="logger#add_handler-3"> @@ -1043,11 +1048,18 @@ ok</pre> <p>When <seealso marker="logger#set_handler_config-2"> <c>logger:set_handler_config/2,3</c></seealso> or <seealso marker="logger#update_handler_config/2"> - <c>logger:update_handler_config/2</c></seealso> is called, + <c>logger:update_handler_config/2,3</c></seealso> is called, Logger - calls <c>HModule:changing_config(OldConfig, NewConfig)</c>. If + calls <c>HModule:changing_config(SetOrUpdate, OldConfig, NewConfig)</c>. If this function returns <c>{ok,NewConfig1}</c>, Logger writes <c>NewConfig1</c> to the configuration database.</p> + <p>When <seealso marker="logger#get_config-0"> + <c>logger:get_config/0</c></seealso> or + <seealso marker="logger#get_handler_config-0"> + <c>logger:get_handler_config/0,1</c></seealso> is called, + Logger calls <c>HModule:filter_config(Config)</c>. 
This function + must return the handler configuration where internal data is + removed.</p> <p>A simple handler that prints to the terminal can be implemented as follows:</p> diff --git a/lib/kernel/doc/src/logger_disk_log_h.xml b/lib/kernel/doc/src/logger_disk_log_h.xml index dfe2ab3275..d9b941a0a9 100644 --- a/lib/kernel/doc/src/logger_disk_log_h.xml +++ b/lib/kernel/doc/src/logger_disk_log_h.xml @@ -66,6 +66,10 @@ corresponds to the <c>name</c> property in the <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso> datatype.</p> + <p>The value is set when the handler is added, and it can not + be changed in runtime.</p> + <p>Defaults to the same name as the handler identity, in the + current directory.</p> </item> <tag><c>type</c></tag> <item> @@ -73,6 +77,8 @@ corresponds to the <c>type</c> property in the <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso> datatype.</p> + <p>The value is set when the handler is added, and it can not + be changed in runtime.</p> <p>Defaults to <c>wrap</c>.</p> </item> <tag><c>max_no_files</c></tag> @@ -82,6 +88,8 @@ corresponds to the <c>MaxNoFiles</c> element in the <c>size</c> property in the <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso> datatype.</p> + <p>The value is set when the handler is added, and it can not + be changed in runtime.</p> <p>Defaults to <c>10</c>.</p> <p>The setting has no effect on a halt log.</p> </item> @@ -93,6 +101,8 @@ corresponds to the <c>MaxNoBytes</c> element in the <c>size</c> property in the <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso> datatype.</p> + <p>The value is set when the handler is added, and it can not + be changed in runtime.</p> <p>Defaults to <c>1048576</c> bytes for a wrap log, and <c>infinity</c> for a halt log.</p> </item> diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml index fcd8189bae..e156f5719b 100644 --- a/lib/kernel/doc/src/logger_std_h.xml +++ b/lib/kernel/doc/src/logger_std_h.xml @@ -74,7 +74,9 @@ circular logging. Use the disk_log handler, <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>, for this.</p> - <p> Defaults to <c>standard_io</c>.</p> + <p>The value is set when the handler is added, and it can not + be changed in runtime.</p> + <p>Defaults to <c>standard_io</c>.</p> </item> <tag><c>filesync_repeat_interval</c></tag> <item> diff --git a/lib/kernel/src/inet_int.hrl b/lib/kernel/src/inet_int.hrl index c8e09d18ad..f6525d7261 100644 --- a/lib/kernel/src/inet_int.hrl +++ b/lib/kernel/src/inet_int.hrl @@ -162,6 +162,7 @@ -define(INET_OPT_PKTOPTIONS, 45). -define(INET_OPT_TTL, 46). -define(INET_OPT_RECVTTL, 47). +-define(TCP_OPT_NOPUSH, 48). % Specific SCTP options: separate range: -define(SCTP_OPT_RTOINFO, 100). -define(SCTP_OPT_ASSOCINFO, 101). diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl index 752dd8d493..6762998d4f 100644 --- a/lib/kernel/src/logger.erl +++ b/lib/kernel/src/logger.erl @@ -43,7 +43,8 @@ get_module_level/0, get_module_level/1, set_primary_config/1, set_primary_config/2, set_handler_config/2, set_handler_config/3, - update_primary_config/1, update_handler_config/2, + update_primary_config/1, + update_handler_config/2, update_handler_config/3, update_formatter_config/2, update_formatter_config/3, get_primary_config/0, get_handler_config/1, get_handler_config/0, get_handler_ids/0, get_config/0, @@ -423,6 +424,29 @@ set_handler_config(HandlerId,Config) -> update_primary_config(Config) -> logger_server:update_config(primary,Config). 
+-spec update_handler_config(HandlerId,level,Level) -> Return when + HandlerId :: handler_id(), + Level :: level() | all | none, + Return :: ok | {error,term()}; + (HandlerId,filter_default,FilterDefault) -> Return when + HandlerId :: handler_id(), + FilterDefault :: log | stop, + Return :: ok | {error,term()}; + (HandlerId,filters,Filters) -> Return when + HandlerId :: handler_id(), + Filters :: [{filter_id(),filter()}], + Return :: ok | {error,term()}; + (HandlerId,formatter,Formatter) -> Return when + HandlerId :: handler_id(), + Formatter :: {module(), formatter_config()}, + Return :: ok | {error,term()}; + (HandlerId,config,Config) -> Return when + HandlerId :: handler_id(), + Config :: term(), + Return :: ok | {error,term()}. +update_handler_config(HandlerId,Key,Value) -> + logger_server:update_config(HandlerId,Key,Value). + -spec update_handler_config(HandlerId,Config) -> ok | {error,term()} when HandlerId :: handler_id(), Config :: handler_config(). @@ -439,7 +463,14 @@ get_primary_config() -> HandlerId :: handler_id(), Config :: handler_config(). get_handler_config(HandlerId) -> - logger_config:get(?LOGGER_TABLE,HandlerId). + case logger_config:get(?LOGGER_TABLE,HandlerId) of + {ok,#{module:=Module}=Config} -> + {ok,try Module:filter_config(Config) + catch _:_ -> Config + end}; + Error -> + Error + end. -spec get_handler_config() -> [Config] when Config :: handler_config(). diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl index a8f141f135..2a81458ec8 100644 --- a/lib/kernel/src/logger_disk_log_h.erl +++ b/lib/kernel/src/logger_disk_log_h.erl @@ -33,7 +33,8 @@ terminate/2, code_change/3]). %% logger callbacks --export([log/2, adding_handler/1, removing_handler/1, changing_config/2]). +-export([log/2, adding_handler/1, removing_handler/1, changing_config/3, + filter_config/1]). %% handler internal -export([log_handler_info/4]). @@ -114,9 +115,8 @@ reset(Name) -> %%% Handler being added adding_handler(#{id:=Name}=Config) -> case check_config(adding, Config) of - {ok, Config1} -> + {ok, #{config:=HConfig}=Config1} -> %% create initial handler state by merging defaults with config - HConfig = maps:get(config, Config1, #{}), HState = maps:merge(get_init_state(), HConfig), case logger_h_common:overload_levels_ok(HState) of true -> @@ -133,32 +133,40 @@ adding_handler(#{id:=Name}=Config) -> %%%----------------------------------------------------------------- %%% Updating handler config -changing_config(OldConfig = #{id:=Name, config:=OldHConfig}, - NewConfig = #{id:=Name, config:=NewHConfig}) -> - #{type:=Type, file:=File, max_no_files:=MaxFs, - max_no_bytes:=MaxBytes} = OldHConfig, - case NewHConfig of - #{type:=Type, file:=File, max_no_files:=MaxFs, - max_no_bytes:=MaxBytes} -> - changing_config1(OldConfig, NewConfig); - _ -> - {error,{illegal_config_change,OldConfig,NewConfig}} - end; -changing_config(OldConfig, NewConfig) -> - {error,{illegal_config_change,OldConfig,NewConfig}}. 
+changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) -> + WriteOnce = maps:with([type,file,max_no_files,max_no_bytes],OldHConfig), + ReadOnly = maps:with([handler_pid,mode_tab],OldHConfig), + NewHConfig0 = maps:get(config, NewConfig, #{}), + Default = + case SetOrUpdate of + set -> + %% Do not reset write-once fields to defaults + maps:merge(get_default_config(),WriteOnce); + update -> + OldHConfig + end, -changing_config1(OldConfig=#{config:=OldHConfig}, NewConfig) -> + %% Allow (accidentially) included read-only fields - just overwrite them + NewHConfig = maps:merge(maps:merge(Default,NewHConfig0),ReadOnly), + + %% But fail if write-once fields are changed + case maps:with([type,file,max_no_files,max_no_bytes],NewHConfig) of + WriteOnce -> + changing_config1(maps:get(handler_pid,OldHConfig), + OldConfig, + NewConfig#{config=>NewHConfig}); + Other -> + {Old,New} = logger_server:diff_maps(WriteOnce,Other), + {error,{illegal_config_change,#{config=>Old},#{config=>New}}} + end. + +changing_config1(HPid, OldConfig, NewConfig) -> case check_config(changing, NewConfig) of - {ok,NewConfig1 = #{config:=NewHConfig}} -> - #{handler_pid:=HPid, - mode_tab:=ModeTab} = OldHConfig, - NewHConfig1 = NewHConfig#{handler_pid=>HPid, - mode_tab=>ModeTab}, - NewConfig2 = NewConfig1#{config=>NewHConfig1}, - try gen_server:call(HPid, {change_config,OldConfig,NewConfig2}, + Result = {ok,NewConfig1} -> + try gen_server:call(HPid, {change_config,OldConfig,NewConfig1}, ?DEFAULT_CALL_TIMEOUT) of - ok -> {ok,NewConfig2}; - HError -> HError + ok -> Result; + Error -> Error catch _:{timeout,_} -> {error,handler_busy} end; @@ -168,10 +176,12 @@ changing_config1(OldConfig=#{config:=OldHConfig}, NewConfig) -> check_config(adding, #{id:=Name}=Config) -> %% merge handler specific config data - HConfig = merge_default_logopts(Name, maps:get(config, Config, #{})), - case check_h_config(maps:to_list(HConfig)) of + HConfig1 = maps:get(config, Config, #{}), + HConfig2 = maps:merge(get_default_config(), HConfig1), + HConfig3 = merge_default_logopts(Name, HConfig2), + case check_h_config(maps:to_list(HConfig3)) of ok -> - {ok,Config#{config=>HConfig}}; + {ok,Config#{config=>HConfig3}}; Error -> Error end; @@ -238,6 +248,11 @@ log(LogEvent, Config = #{id := Name, Bin = logger_h_common:log_to_binary(LogEvent, Config), logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin). +%%%----------------------------------------------------------------- +%%% Remove internal fields from configuration +filter_config(#{config:=HConfig}=Config) -> + Config#{config=>maps:without([handler_pid,mode_tab],HConfig)}. + %%%=================================================================== %%% gen_server callbacks %%%=================================================================== @@ -438,7 +453,7 @@ code_change(_OldVsn, State, _Extra) -> %%%----------------------------------------------------------------- %%% -get_init_state() -> +get_default_config() -> #{sync_mode_qlen => ?SYNC_MODE_QLEN, drop_mode_qlen => ?DROP_MODE_QLEN, flush_qlen => ?FLUSH_QLEN, @@ -449,10 +464,12 @@ get_init_state() -> overload_kill_qlen => ?OVERLOAD_KILL_QLEN, overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE, overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER, - dl_sync_int => ?CONTROLLER_SYNC_INTERVAL, - filesync_ok_qlen => ?FILESYNC_OK_QLEN, filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}. +get_init_state() -> + #{dl_sync_int => ?CONTROLLER_SYNC_INTERVAL, + filesync_ok_qlen => ?FILESYNC_OK_QLEN}. 
+ %%%----------------------------------------------------------------- %%% Add a disk_log handler to the logger. %%% This starts a dedicated handler process which should always diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl index 38ac7d8ffc..94c640cb92 100644 --- a/lib/kernel/src/logger_h_common.erl +++ b/lib/kernel/src/logger_h_common.erl @@ -306,8 +306,11 @@ stop_or_restart(Name, {shutdown,Reason={overloaded,_Name,_QLen,_Mem}}, exit(HandlerPid, kill) end, case ConfigResult of - {ok,#{module:=HMod}=HConfig} when is_integer(RestartAfter) -> + {ok,#{module:=HMod}=HConfig0} when is_integer(RestartAfter) -> _ = logger:remove_handler(Name), + HConfig = try HMod:filter_config(HConfig0) + catch _:_ -> HConfig0 + end, _ = timer:apply_after(RestartAfter, logger, add_handler, [Name,HMod,HConfig]); {ok,_} -> diff --git a/lib/kernel/src/logger_server.erl b/lib/kernel/src/logger_server.erl index a1d40f1123..b7735dbcf7 100644 --- a/lib/kernel/src/logger_server.erl +++ b/lib/kernel/src/logger_server.erl @@ -27,9 +27,13 @@ add_filter/2, remove_filter/2, set_module_level/2, unset_module_level/0, unset_module_level/1, cache_module_level/1, - set_config/2, set_config/3, update_config/2, + set_config/2, set_config/3, + update_config/2, update_config/3, update_formatter_config/2]). +%% Helper +-export([diff_maps/2]). + %% gen_server callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, terminate/2]). @@ -105,12 +109,25 @@ cache_module_level(Module) -> gen_server:cast(?SERVER,{cache_module_level,Module}). set_config(Owner,Key,Value) -> - update_config(Owner,#{Key=>Value}). + case sanity_check(Owner,Key,Value) of + ok -> + call({change_config,set,Owner,Key,Value}); + Error -> + Error + end. set_config(Owner,Config) -> case sanity_check(Owner,Config) of ok -> - call({set_config,Owner,Config}); + call({change_config,set,Owner,Config}); + Error -> + Error + end. + +update_config(Owner,Key,Value) -> + case sanity_check(Owner,Key,Value) of + ok -> + call({change_config,update,Owner,Key,Value}); Error -> Error end. @@ -118,7 +135,7 @@ set_config(Owner,Config) -> update_config(Owner, Config) -> case sanity_check(Owner,Config) of ok -> - call({update_config,Owner,Config}); + call({change_config,update,Owner,Config}); Error -> Error end. 
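The logger_server changes above funnel both set_config and the new update_config calls into a single change_config request tagged set or update, which decides whether unspecified handler data is reset to defaults or kept. A hedged caller-side sketch, written as it would appear inside a function body and assuming the standard default handler is installed:

    %% Merge one handler-specific value, keeping everything else as-is:
    ok = logger:update_handler_config(default, config,
                                      #{filesync_repeat_interval => 2000}),
    %% Change a single top-level key:
    ok = logger:update_handler_config(default, level, notice),
    %% With set_handler_config/3 and the config key, handler-specific data
    %% that is not mentioned is reset to the handler's default values:
    ok = logger:set_handler_config(default, config, #{sync_mode_qlen => 1}).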
@@ -204,46 +221,72 @@ handle_call({add_filter,Id,Filter}, _From,#state{tid=Tid}=State) -> handle_call({remove_filter,Id,FilterId}, _From, #state{tid=Tid}=State) -> Reply = do_remove_filter(Tid,Id,FilterId), {reply,Reply,State}; -handle_call({update_config,primary,NewConfig}, _From, #state{tid=Tid}=State) -> +handle_call({change_config,SetOrUpd,primary,Config0}, _From, + #state{tid=Tid}=State) -> + {ok,#{handlers:=Handlers}=OldConfig} = logger_config:get(Tid,primary), + Default = + case SetOrUpd of + set -> default_config(primary); + update -> OldConfig + end, + Config = maps:merge(Default,Config0), + Reply = logger_config:set(Tid,primary,Config#{handlers=>Handlers}), + {reply,Reply,State}; +handle_call({change_config,_SetOrUpd,primary,Key,Value}, _From, + #state{tid=Tid}=State) -> {ok,OldConfig} = logger_config:get(Tid,primary), - Config = maps:merge(OldConfig,NewConfig), - {reply,logger_config:set(Tid,primary,Config),State}; -handle_call({update_config,HandlerId,NewConfig}, From, #state{tid=Tid}=State) -> + Reply = logger_config:set(Tid,primary,OldConfig#{Key=>Value}), + {reply,Reply,State}; +handle_call({change_config,SetOrUpd,HandlerId,Config0}, From, + #state{tid=Tid}=State) -> case logger_config:get(Tid,HandlerId) of {ok,#{module:=Module}=OldConfig} -> - Config = maps:merge(OldConfig,NewConfig), - call_h_async( - fun() -> - call_h(Module,changing_config,[OldConfig,Config], - {ok,Config}) - end, - fun({ok,Config1}) -> - logger_config:set(Tid,HandlerId,Config1); - (Error) -> - Error - end,From,State); - Error -> - {reply,Error,State} + Default = + case SetOrUpd of + set -> default_config(HandlerId,Module); + update -> OldConfig + end, + Config = maps:merge(Default,Config0), + case check_config_change(OldConfig,Config) of + ok -> + call_h_async( + fun() -> + call_h(Module,changing_config, + [SetOrUpd,OldConfig,Config], + {ok,Config}) + end, + fun({ok,Config1}) -> + logger_config:set(Tid,HandlerId,Config1); + (Error) -> + Error + end,From,State); + Error -> + {reply,Error,State} + end; + _ -> + {reply,{error,{not_found,HandlerId}},State} end; -handle_call({set_config,primary,Config0}, _From, #state{tid=Tid}=State) -> - Config = maps:merge(default_config(primary),Config0), - {ok,#{handlers:=Handlers}} = logger_config:get(Tid,primary), - Reply = logger_config:set(Tid,primary,Config#{handlers=>Handlers}), - {reply,Reply,State}; -handle_call({set_config,HandlerId,Config0}, From, #state{tid=Tid}=State) -> +handle_call({change_config,SetOrUpd,HandlerId,Key,Value}, From, + #state{tid=Tid}=State) -> case logger_config:get(Tid,HandlerId) of {ok,#{module:=Module}=OldConfig} -> - Config = maps:merge(default_config(HandlerId,Module),Config0), - call_h_async( - fun() -> - call_h(Module,changing_config,[OldConfig,Config], - {ok,Config}) - end, - fun({ok,Config1}) -> - logger_config:set(Tid,HandlerId,Config1); - (Error) -> - Error - end,From,State); + Config = OldConfig#{Key=>Value}, + case check_config_change(OldConfig,Config) of + ok -> + call_h_async( + fun() -> + call_h(Module,changing_config, + [SetOrUpd,OldConfig,Config], + {ok,Config}) + end, + fun({ok,Config1}) -> + logger_config:set(Tid,HandlerId,Config1); + (Error) -> + Error + end,From,State); + Error -> + {reply,Error,State} + end; _ -> {reply,{error,{not_found,HandlerId}},State} end; @@ -320,7 +363,7 @@ call(Request) -> true when Action == add_handler; Action == remove_handler; Action == add_filter; Action == remove_filter; - Action == update_config; Action == set_config -> + Action == change_config -> 
{error,{attempting_syncronous_call_to_self,Request}}; _ -> gen_server:call(?SERVER,Request,?DEFAULT_LOGGER_CALL_TIMEOUT) @@ -458,6 +501,15 @@ check_formatter({Mod,Config}) -> check_formatter(Formatter) -> throw({invalid_formatter,Formatter}). +%% When changing configuration for a handler, the id and module fields +%% can not be changed. +check_config_change(#{id:=Id,module:=Module},#{id:=Id,module:=Module}) -> + ok; +check_config_change(OldConfig,NewConfig) -> + {Old,New} = logger_server:diff_maps(maps:with([id,module],OldConfig), + maps:with([id,module],NewConfig)), + {error,{illegal_config_change,Old,New}}. + call_h(Module, Function, Args, DefRet) -> %% Not calling code:ensure_loaded + erlang:function_exported here, %% since in some rare terminal cases, the code_server might not @@ -466,6 +518,11 @@ call_h(Module, Function, Args, DefRet) -> catch C:R:S -> case {C,R,S} of + {error,undef,[{Module,Function=changing_config,Args,_}|_]} + when length(Args)=:=3 -> + %% Backwards compatible call, if changing_config/3 + %% did not exist. + call_h(Module, Function, tl(Args), DefRet); {error,undef,[{Module,Function,Args,_}|_]} -> DefRet; _ -> @@ -525,3 +582,14 @@ call_h_reply(Unexpected,State) -> {process,?SERVER}, {message,Unexpected}]), {noreply,State}. + +%% Return two maps containing only the fields that differ. +diff_maps(M1,M2) -> + diffs(lists:sort(maps:to_list(M1)),lists:sort(maps:to_list(M2)),#{},#{}). + +diffs([H|T1],[H|T2],D1,D2) -> + diffs(T1,T2,D1,D2); +diffs([{K,V1}|T1],[{K,V2}|T2],D1,D2) -> + diffs(T1,T2,D1#{K=>V1},D2#{K=>V2}); +diffs([],[],D1,D2) -> + {D1,D2}. diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl index 66fa6b6ab6..42e0f5caf4 100644 --- a/lib/kernel/src/logger_std_h.erl +++ b/lib/kernel/src/logger_std_h.erl @@ -35,7 +35,8 @@ terminate/2, code_change/3]). %% logger callbacks --export([log/2, adding_handler/1, removing_handler/1, changing_config/2]). +-export([log/2, adding_handler/1, removing_handler/1, changing_config/3, + filter_config/1]). %% handler internal -export([log_handler_info/4]). @@ -116,9 +117,8 @@ reset(Name) -> %%% Handler being added adding_handler(#{id:=Name}=Config) -> case check_config(adding, Config) of - {ok, Config1} -> + {ok, #{config:=HConfig}=Config1} -> %% create initial handler state by merging defaults with config - HConfig = maps:get(config, Config1, #{}), HState = maps:merge(get_init_state(), HConfig), case logger_h_common:overload_levels_ok(HState) of true -> @@ -135,22 +135,31 @@ adding_handler(#{id:=Name}=Config) -> %%%----------------------------------------------------------------- %%% Updating handler config -changing_config(OldConfig=#{id:=Name, config:=OldHConfig}, - NewConfig=#{id:=Name}) -> - #{type:=Type, handler_pid:=HPid, mode_tab:=ModeTab} = OldHConfig, - NewHConfig = maps:get(config, NewConfig, #{}), - case maps:get(type, NewHConfig, Type) of - Type -> - NewHConfig1 = NewHConfig#{type=>Type, - handler_pid=>HPid, - mode_tab=>ModeTab}, - changing_config1(HPid, OldConfig, - NewConfig#{config=>NewHConfig1}); - _ -> - {error,{illegal_config_change,OldConfig,NewConfig}} - end; -changing_config(OldConfig, NewConfig) -> - {error,{illegal_config_change,OldConfig,NewConfig}}. 
+changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) -> + WriteOnce = maps:with([type],OldHConfig), + ReadOnly = maps:with([handler_pid,mode_tab],OldHConfig), + NewHConfig0 = maps:get(config, NewConfig, #{}), + Default = + case SetOrUpdate of + set -> + %% Do not reset write-once fields to defaults + maps:merge(get_default_config(),WriteOnce); + update -> + OldHConfig + end, + + %% Allow (accidentially) included read-only fields - just overwrite them + NewHConfig = maps:merge(maps:merge(Default, NewHConfig0),ReadOnly), + + %% But fail if write-once fields are changed + case maps:with([type],NewHConfig) of + WriteOnce -> + changing_config1(maps:get(handler_pid,OldHConfig), + OldConfig, + NewConfig#{config=>NewHConfig}); + Other -> + {error,{illegal_config_change,#{config=>WriteOnce},#{config=>Other}}} + end. changing_config1(HPid, OldConfig, NewConfig) -> case check_config(changing, NewConfig) of @@ -169,8 +178,7 @@ changing_config1(HPid, OldConfig, NewConfig) -> check_config(adding, Config) -> %% Merge in defaults on handler level HConfig0 = maps:get(config, Config, #{}), - HConfig = maps:merge(#{type => standard_io}, - HConfig0), + HConfig = maps:merge(get_default_config(),HConfig0), case check_h_config(maps:to_list(HConfig)) of ok -> {ok,Config#{config=>HConfig}}; @@ -223,6 +231,11 @@ log(LogEvent, Config = #{id := Name, Bin = logger_h_common:log_to_binary(LogEvent, Config), logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin). +%%%----------------------------------------------------------------- +%%% Remove internal fields from configuration +filter_config(#{config:=HConfig}=Config) -> + Config#{config=>maps:without([handler_pid,mode_tab],HConfig)}. + %%%=================================================================== %%% gen_server callbacks %%%=================================================================== @@ -428,8 +441,9 @@ code_change(_OldVsn, State, _Extra) -> %%%----------------------------------------------------------------- %%% -get_init_state() -> - #{sync_mode_qlen => ?SYNC_MODE_QLEN, +get_default_config() -> + #{type => standard_io, + sync_mode_qlen => ?SYNC_MODE_QLEN, drop_mode_qlen => ?DROP_MODE_QLEN, flush_qlen => ?FLUSH_QLEN, burst_limit_enable => ?BURST_LIMIT_ENABLE, @@ -439,10 +453,12 @@ get_init_state() -> overload_kill_qlen => ?OVERLOAD_KILL_QLEN, overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE, overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER, - file_ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL, - filesync_ok_qlen => ?FILESYNC_OK_QLEN, filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}. +get_init_state() -> + #{file_ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL, + filesync_ok_qlen => ?FILESYNC_OK_QLEN}. + %%%----------------------------------------------------------------- %%% Add a standard handler to the logger. %%% This starts a dedicated handler process which should always @@ -825,4 +841,3 @@ sync_dev(Fd, DevName, PrevSyncResult, HandlerName) -> logger_h_common:error_notify({HandlerName,filesync,DevName,Error}), Error end. - diff --git a/lib/kernel/test/gen_tcp_misc_SUITE.erl b/lib/kernel/test/gen_tcp_misc_SUITE.erl index 194522c009..04c0c48e3a 100644 --- a/lib/kernel/test/gen_tcp_misc_SUITE.erl +++ b/lib/kernel/test/gen_tcp_misc_SUITE.erl @@ -52,7 +52,8 @@ several_accepts_in_one_go/1, accept_system_limit/1, active_once_closed/1, send_timeout/1, send_timeout_active/1, otp_7731/1, zombie_sockets/1, otp_7816/1, otp_8102/1, - wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1]). 
+ wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1, + otp_12242/1]). %% Internal exports. -export([sender/3, not_owner/1, passive_sockets_server/2, priority_server/1, @@ -95,7 +96,8 @@ all() -> killing_multi_acceptors2, several_accepts_in_one_go, accept_system_limit, active_once_closed, send_timeout, send_timeout_active, otp_7731, wrapping_oct, - zombie_sockets, otp_7816, otp_8102, otp_9389]. + zombie_sockets, otp_7816, otp_8102, otp_9389, + otp_12242]. groups() -> []. @@ -3284,3 +3286,143 @@ otp_13939(Config) when is_list(Config) -> exit(Pid, normal), ct:fail("Server process blocked on send.") end. + +otp_12242(Config) when is_list(Config) -> + case os:type() of + {win32,_} -> + %% Even if we set sndbuf and recbuf to small sizes + %% Windows either happily accepts to send GBytes of data + %% in no time, so the second send below that is supposed + %% to time out just succedes, or the first send that + %% is supposed to fill the inet_drv I/O queue and + %% start waiting for when more data can be sent + %% instead sends all data but suffers a send + %% failure that closes the socket + {skipped,backpressure_broken_on_win32}; + _ -> + %% Find the IPv4 address of an up and running interface + %% that is not loopback nor pointtopoint + {ok,IFList} = inet:getifaddrs(), + ct:pal("IFList ~p~n", [IFList]), + case + lists:flatten( + [lists:filtermap( + fun ({addr,Addr}) when tuple_size(Addr) =:= 4 -> + {true,Addr}; + (_) -> + false + end, Opts) + || {_,Opts} <- IFList, + case lists:keyfind(flags, 1, Opts) of + {_,Flags} -> + lists:member(up, Flags) + andalso + lists:member(running, Flags) + andalso + not lists:member(loopback, Flags) + andalso + not lists:member(pointtopoint, Flags); + false -> + false + end]) + of + [Addr|_] -> + otp_12242(Addr); + Other -> + {skipped,{no_external_address,Other}} + end + end; +%% +otp_12242(Addr) when tuple_size(Addr) =:= 4 -> + ct:timetrap(30000), + ct:pal("Using address ~p~n", [Addr]), + Bufsize = 16 * 1024, + Datasize = 128 * 1024 * 1024, % At least 1 s on GBit interface + Blob = binary:copy(<<$x>>, Datasize), + LOpts = + [{backlog,4},{reuseaddr,true},{ip,Addr}, + binary,{active,false}, + {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}], + COpts = + [binary,{active,false},{ip,Addr}, + {linger,{true,1}}, % 1 s + {send_timeout,500}, + {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}], + Dir = filename:dirname(code:which(?MODULE)), + {ok,ListenerNode} = + test_server:start_node( + ?UNIQ_NODE_NAME, slave, [{args,"-pa " ++ Dir}]), + Tester = self(), + Listener = + spawn( + ListenerNode, + fun () -> + {ok,L} = gen_tcp:listen(0, LOpts), + {ok,LPort} = inet:port(L), + Tester ! {self(),port,LPort}, + {ok,A} = gen_tcp:accept(L), + ok = gen_tcp:close(L), + receive + {Tester,stop} -> + ok = gen_tcp:close(A) + end + end), + ListenerMref = monitor(process, Listener), + LPort = receive {Listener,port,P} -> P end, + {ok,C} = gen_tcp:connect(Addr, LPort, COpts, infinity), + {ok,ReadCOpts} = inet:getopts(C, [recbuf,sndbuf,buffer]), + ct:pal("ReadCOpts ~p~n", [ReadCOpts]), + %% + %% Fill the buffers + ct:pal("Sending ~p bytes~n", [Datasize]), + ok = gen_tcp:send(C, Blob), + ct:pal("Sent ~p bytes~n", [Datasize]), + %% Spawn the Closer, + %% try to ensure that the close call is in progress + %% before the owner proceeds with sending + Owner = self(), + {_Closer,CloserMref} = + spawn_opt( + fun () -> + Owner ! 
{tref, erlang:start_timer(50, Owner, closing)}, + ct:pal("Calling gen_tcp:close(C)~n"), + try gen_tcp:close(C) of + Result -> + ct:pal("gen_tcp:close(C) -> ~p~n", [Result]), + ok = Result + catch + Class:Reason:Stacktrace -> + ct:pal( + "gen_tcp:close(C) >< ~p:~p~n ~p~n", + [Class,Reason,Stacktrace]), + erlang:raise(Class, Reason, Stacktrace) + end + end, [link,monitor]), + receive + {tref,Tref} -> + receive {timeout,Tref,_} -> ok end, + ct:pal("Sending ~p bytes again~n", [Datasize]), + %% Now should the close be in progress... + %% All buffers are full, remote end is not reading, + %% and the send timeout is 1 s so this will timeout: + {error,timeout} = gen_tcp:send(C, Blob), + ct:pal("Sending ~p bytes again timed out~n", [Datasize]), + ok = inet:setopts(C, [{send_timeout,10000}]), + %% There is a hidden timeout here. Port close is sampled + %% every 5 s by prim_inet:send_recv_reply. + %% Linger is 3 s so the Closer will finish this send: + ct:pal("Sending ~p bytes with 10 s timeout~n", [Datasize]), + {error,closed} = gen_tcp:send(C, Blob), + ct:pal("Sending ~p bytes with 10 s timeout was closed~n", + [Datasize]), + normal = wait(CloserMref), + ct:pal("The Closer has exited~n"), + Listener ! {Tester,stop}, + receive {'DOWN',ListenerMref,_,_,_} -> ok end, + ct:pal("The Listener has exited~n"), + test_server:stop_node(ListenerNode), + ok + end. + +wait(Mref) -> + receive {'DOWN',Mref,_,_,Reason} -> Reason end. diff --git a/lib/kernel/test/inet_sockopt_SUITE.erl b/lib/kernel/test/inet_sockopt_SUITE.erl index ada9c2689c..27ff74e309 100644 --- a/lib/kernel/test/inet_sockopt_SUITE.erl +++ b/lib/kernel/test/inet_sockopt_SUITE.erl @@ -110,9 +110,14 @@ simple(Config) when is_list(Config) -> {S1,S2} = create_socketpair(Opt, Opt), {ok,Opt} = inet:getopts(S1,OptTags), {ok,Opt} = inet:getopts(S2,OptTags), - COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- Opt], + NoPushOpt = case os:type() of + {unix, Osname} when Osname =:= linux; Osname =:= freebsd -> {nopush, true}; + {_,_} -> {nopush, false} + end, + COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- [NoPushOpt|Opt]], + COptTags = [X || {X,_} <- COpt], inet:setopts(S1,COpt), - {ok,COpt} = inet:getopts(S1,OptTags), + {ok,COpt} = inet:getopts(S1,COptTags), {ok,Opt} = inet:getopts(S2,OptTags), gen_tcp:close(S1), gen_tcp:close(S2), diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl index b7ccba8e70..d831d0d108 100644 --- a/lib/kernel/test/logger_SUITE.erl +++ b/lib/kernel/test/logger_SUITE.erl @@ -246,6 +246,18 @@ change_config(_Config) -> {ok,C4} = logger:get_handler_config(h1), C4 = C3#{custom:=new_custom}, + %% Change handler config: Id and module can not be changed + {error,{illegal_config_change,Old,New}} = + logger:set_handler_config(h1,id,newid), + %% Check that only the faulty field is included in return + [{id,h1}] = maps:to_list(Old), + [{id,newid}] = maps:to_list(New), + %% Check that both fields are included when both are changed + {error,{illegal_config_change, + #{id:=h1,module:=?MODULE}, + #{id:=newid,module:=newmodule}}} = + logger:set_handler_config(h1,#{id=>newid,module=>newmodule}), + %% Change primary config: Single key PConfig0 = logger:get_primary_config(), ok = logger:set_primary_config(level,warning), diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl index a815db14e9..905c2c52c5 100644 --- a/lib/kernel/test/logger_disk_log_h_SUITE.erl +++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl @@ -92,6 +92,7 @@ all() -> 
disk_log_opts, default_formatter, logging, + filter_config, errors, formatter_fail, config_fail, @@ -302,6 +303,20 @@ logging(cleanup, _Config) -> Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), remove_and_stop(Name). +filter_config(_Config) -> + ok = logger:add_handler(?MODULE,logger_disk_log_h,#{}), + {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE), + HConfig = maps:without([handler_pid,mode_tab],HConfig), + + FakeFullHConfig = HConfig#{handler_pid=>self(),mode_tab=>erlang:make_ref()}, + #{config:=HConfig} = + logger_disk_log_h:filter_config(Config#{config=>FakeFullHConfig}), + ok. + +filter_config(cleanup,_Config) -> + logger:remove_handler(?MODULE), + ok. + errors(Config) -> PrivDir = ?config(priv_dir,Config), Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), @@ -316,13 +331,29 @@ errors(Config) -> %%! TODO: %%! Check how bad log_opts are handled! - {error,{illegal_config_change,_,_}} = - logger:set_handler_config(Name1, - config, - #{file=>LogFile1, - type=>halt}), - {error,{illegal_config_change,_,_}} = - logger:set_handler_config(Name1,id,new), + {error,{illegal_config_change, + #{config:=#{type:=wrap}}, + #{config:=#{type:=halt}}}} = + logger:update_handler_config(Name1, + config, + #{type=>halt, + file=>LogFile1}), + + {error,{illegal_config_change, + #{config:=#{file:=LogFile1}}, + #{config:=#{file:="newfilename"}}}} = + logger:update_handler_config(Name1, + config, + #{file=>"newfilename"}), + + %% Read-only fields may (accidentially) be included in the change, + %% but it won't take effect + {ok,C} = logger:get_handler_config(Name1), + ok = logger:set_handler_config(Name1,config, + #{handler_pid=>self(), + mode_tab=>erlang:make_ref()}), + {ok,C} = logger:get_handler_config(Name1), + ok = logger:remove_handler(Name1), {error,{not_found,Name1}} = logger:remove_handler(Name1), @@ -403,21 +434,21 @@ config_fail(_Config) -> formatter=>{?MODULE,self()}}), %% can't change the disk log options for a log already in use {error,{illegal_config_change,_,_}} = - logger:set_handler_config(?MODULE,config, - #{max_no_files=>2}), + logger:update_handler_config(?MODULE,config, + #{max_no_files=>2}), %% can't change name of an existing handler {error,{illegal_config_change,_,_}} = - logger:set_handler_config(?MODULE,id,bad), + logger:update_handler_config(?MODULE,id,bad), %% incorrect values of OP params {ok,#{config := HConfig}} = logger:get_handler_config(?MODULE), {error,{invalid_levels,_}} = - logger:set_handler_config(?MODULE,config, - HConfig#{sync_mode_qlen=>100, - flush_qlen=>99}), + logger:update_handler_config(?MODULE,config, + HConfig#{sync_mode_qlen=>100, + flush_qlen=>99}), %% invalid name of config parameter {error,{invalid_config,logger_disk_log_h,{filesync_rep_int,2000}}} = - logger:set_handler_config(?MODULE, config, - HConfig#{filesync_rep_int => 2000}), + logger:update_handler_config(?MODULE, config, + HConfig#{filesync_rep_int => 2000}), ok. config_fail(cleanup,_Config) -> logger:remove_handler(?MODULE). 
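The updated errors/1 and config_fail/1 cases above separate write-once disk_log fields from parameters that may change at runtime. A hedged sketch of the same behaviour outside the test suite, using a hypothetical handler id my_disk_log added with default options:

    check_write_once() ->
        ok = logger:add_handler(my_disk_log, logger_disk_log_h, #{}),
        %% type, file, max_no_files and max_no_bytes are fixed when the
        %% handler is added, so a runtime change is rejected:
        {error, {illegal_config_change, _, _}} =
            logger:update_handler_config(my_disk_log, config, #{type => halt}),
        %% Overload-protection parameters remain changeable:
        ok = logger:update_handler_config(my_disk_log, config,
                                          #{sync_mode_qlen => 1}),
        ok = logger:remove_handler(my_disk_log).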
@@ -459,10 +490,26 @@ reconfig(Config) -> log_opts := #{type := ?DISK_LOG_TYPE, max_no_files := ?DISK_LOG_MAX_NO_FILES, max_no_bytes := ?DISK_LOG_MAX_NO_BYTES, - file := _DiskLogFile}} = + file := DiskLogFile}} = logger_disk_log_h:info(?MODULE), + {ok,#{config := + #{sync_mode_qlen := ?SYNC_MODE_QLEN, + drop_mode_qlen := ?DROP_MODE_QLEN, + flush_qlen := ?FLUSH_QLEN, + burst_limit_enable := ?BURST_LIMIT_ENABLE, + burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT, + burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME, + overload_kill_enable := ?OVERLOAD_KILL_ENABLE, + overload_kill_qlen := ?OVERLOAD_KILL_QLEN, + overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE, + overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER, + filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL, + file := DiskLogFile, + max_no_files := ?DISK_LOG_MAX_NO_FILES, + max_no_bytes := ?DISK_LOG_MAX_NO_BYTES, + type := wrap} = HConfig0}} = + logger:get_handler_config(?MODULE), - {ok,#{config := HConfig0}} = logger:get_handler_config(?MODULE), HConfig1 = HConfig0#{sync_mode_qlen => 1, drop_mode_qlen => 2, flush_qlen => 3, @@ -488,6 +535,29 @@ reconfig(Config) -> overload_kill_restart_after := infinity, filesync_repeat_interval := no_repeat} = logger_disk_log_h:info(?MODULE), + {ok,#{config:=HConfig1}} = logger:get_handler_config(?MODULE), + + ok = logger:update_handler_config(?MODULE, config, + #{flush_qlen => ?FLUSH_QLEN}), + {ok,#{config:=C1}} = logger:get_handler_config(?MODULE), + ct:log("C1: ~p",[C1]), + C1 = HConfig1#{flush_qlen => ?FLUSH_QLEN}, + + ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}), + {ok,#{config:=C2}} = logger:get_handler_config(?MODULE), + ct:log("C2: ~p",[C2]), + C2 = HConfig0#{sync_mode_qlen => 1}, + + ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}), + {ok,#{config:=C3}} = logger:get_handler_config(?MODULE), + ct:log("C3: ~p",[C3]), + C3 = HConfig0#{drop_mode_qlen => 100}, + + ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}), + {ok,#{config:=C4}} = logger:get_handler_config(?MODULE), + ct:log("C4: ~p",[C4]), + C4 = HConfig0#{sync_mode_qlen => 1, + drop_mode_qlen => 100}, ok = logger:remove_handler(?MODULE), @@ -507,6 +577,43 @@ reconfig(Config) -> max_no_bytes := 1024, file := File}} = logger_disk_log_h:info(?MODULE), + {ok,#{config := + #{type := halt, + max_no_files := 1, + max_no_bytes := 1024, + file := File}=HaltHConfig} = Config2} = + logger:get_handler_config(?MODULE), + + ok = logger:update_handler_config(?MODULE, level, notice), + {ok,C5} = logger:get_handler_config(?MODULE), + ct:log("C5: ~p",[C5]), + C5 = Config2#{level => notice}, + + ok = logger:set_handler_config(?MODULE, level, info), + {ok,C6} = logger:get_handler_config(?MODULE), + ct:log("C6: ~p",[C6]), + C6 = Config2#{level => info}, + + %% You are not allowed to actively set the write once fields + %% (type, max_no_files, max_no_bytes, file) in runtime. + {error, {illegal_config_change,_,_}} = + logger:set_handler_config(?MODULE,config,#{type=>wrap}), + {error, {illegal_config_change,_,_}} = + logger:set_handler_config(?MODULE,config,#{max_no_files=>2}), + {error, {illegal_config_change,_,_}} = + logger:set_handler_config(?MODULE,config,#{max_no_bytes=>2048}), + {error, {illegal_config_change,_,_}} = + logger:set_handler_config(?MODULE,config,#{file=>"otherfile.log"}), + {ok,C7} = logger:get_handler_config(?MODULE), + ct:log("C7: ~p",[C7]), + C7 = C6, + + %% ... 
but if you don't specify the write once fields, then + %% set_handler_config shall NOT reset them to their default value + ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}), + {ok,#{config:=C8}} = logger:get_handler_config(?MODULE), + ct:log("C8: ~p",[C8]), + C8 = HaltHConfig#{sync_mode_qlen=>1}, ok. reconfig(cleanup, _Config) -> @@ -536,7 +643,7 @@ sync(Config) -> %% a disk_log_sync is still performed when handler goes idle {ok,#{config := HConfig}} = logger:get_handler_config(?MODULE), HConfig1 = HConfig#{filesync_repeat_interval => no_repeat}, - ok = logger:set_handler_config(?MODULE, config, HConfig1), + ok = logger:update_handler_config(?MODULE, config, HConfig1), no_repeat = maps:get(filesync_repeat_interval, logger_disk_log_h:info(?MODULE)), @@ -569,13 +676,13 @@ sync(Config) -> [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]), HConfig2 = HConfig#{filesync_repeat_interval => SyncInt}, - ok = logger:set_handler_config(?MODULE, config, HConfig2), + ok = logger:update_handler_config(?MODULE, config, HConfig2), SyncInt = maps:get(filesync_repeat_interval, logger_disk_log_h:info(?MODULE)), timer:sleep(WaitT), HConfig3 = HConfig#{filesync_repeat_interval => no_repeat}, - ok = logger:set_handler_config(?MODULE, config, HConfig3), + ok = logger:update_handler_config(?MODULE, config, HConfig3), check_tracer(100), ok. sync(cleanup,_Config) -> @@ -797,7 +904,7 @@ sync_failure(Config) -> LogOpts = maps:get(log_opts, HState), SyncInt = 500, - ok = rpc:call(Node, logger, set_handler_config, + ok = rpc:call(Node, logger, update_handler_config, [?STANDARD_HANDLER, config, #{filesync_repeat_interval => SyncInt}]), Info = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]), @@ -872,7 +979,7 @@ op_switch_to_sync(Config) -> drop_mode_qlen => NumOfReqs+1, flush_qlen => 2*NumOfReqs, burst_limit_enable => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), send_burst({n,NumOfReqs}, seq, {chars,79}, notice), Lines = count_lines(Log), NumOfReqs = Lines, @@ -897,7 +1004,7 @@ op_switch_to_drop(Config) -> drop_mode_qlen => 2, flush_qlen => Procs*NumOfReqs*Bursts, burst_limit_enable => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), %% It sometimes happens that the handler either gets %% the requests in a slow enough pace so that dropping %% never occurs. 
Therefore, lets generate a number of @@ -943,7 +1050,7 @@ op_switch_to_flush(Config) -> drop_mode_qlen => 300, flush_qlen => 300, burst_limit_enable => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), NumOfReqs = 1500, Procs = 10, Bursts = 10, @@ -985,7 +1092,7 @@ limit_burst_disabled(Config) -> burst_limit_window_time => 2000, drop_mode_qlen => 200, flush_qlen => 300}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), NumOfReqs = 100, send_burst({n,NumOfReqs}, seq, {chars,79}, notice), Logged = count_lines(Log), @@ -1005,7 +1112,7 @@ limit_burst_enabled_one(Config) -> burst_limit_window_time => 2000, drop_mode_qlen => 200, flush_qlen => 300}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), NumOfReqs = 100, send_burst({n,NumOfReqs}, seq, {chars,79}, notice), Logged = count_lines(Log), @@ -1026,7 +1133,7 @@ limit_burst_enabled_period(Config) -> burst_limit_window_time => BurstTWin, drop_mode_qlen => 20000, flush_qlen => 20001}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), Windows = 3, Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice), @@ -1046,7 +1153,7 @@ kill_disabled(Config) -> HConfig#{config=>DLHConfig#{overload_kill_enable=>false, overload_kill_qlen=>10, overload_kill_mem_size=>100}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), NumOfReqs = 100, send_burst({n,NumOfReqs}, seq, {chars,79}, notice), Logged = count_lines(Log), @@ -1068,7 +1175,7 @@ qlen_kill_new(Config) -> overload_kill_qlen=>10, overload_kill_mem_size=>Mem0+50000, overload_kill_restart_after=>RestartAfter}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), MRef = erlang:monitor(process, Pid0), NumOfReqs = 100, Procs = 4, @@ -1105,7 +1212,7 @@ mem_kill_new(Config) -> overload_kill_qlen=>50000, overload_kill_mem_size=>Mem0+500, overload_kill_restart_after=>RestartAfter}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), MRef = erlang:monitor(process, Pid0), NumOfReqs = 100, Procs = 4, @@ -1139,7 +1246,7 @@ restart_after(Config) -> HConfig#{config=>DLHConfig#{overload_kill_enable=>true, overload_kill_qlen=>10, overload_kill_restart_after=>infinity}}, - ok = logger:set_handler_config(?MODULE, NewHConfig1), + ok = logger:update_handler_config(?MODULE, NewHConfig1), MRef1 = erlang:monitor(process, whereis(h_proc_name())), %% kill handler send_burst({n,100}, {spawn,4,0}, {chars,79}, notice), @@ -1161,7 +1268,7 @@ restart_after(Config) -> HConfig#{config=>DLHConfig#{overload_kill_enable=>true, overload_kill_qlen=>10, overload_kill_restart_after=>RestartAfter}}, - ok = logger:set_handler_config(?MODULE, NewHConfig2), + ok = logger:update_handler_config(?MODULE, NewHConfig2), Pid0 = whereis(h_proc_name()), MRef2 = erlang:monitor(process, Pid0), %% kill handler @@ -1194,7 +1301,7 @@ handler_requests_under_load(Config) -> drop_mode_qlen => 1000, flush_qlen => 2000, burst_limit_enable => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]}, {info,[]}, {reset,[]}, @@ -1227,9 +1334,9 @@ send_requests(HName, TO, Reqs = 
[{Req,Res}|Rs]) -> Result = case Req of change_config -> - logger:set_handler_config(HName, logger_disk_log_h, - #{overload_kill_enable => - false}); + logger:update_handler_config(HName, logger_disk_log_h, + #{overload_kill_enable => + false}); Func -> logger_disk_log_h:Func(HName) end, diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl index 3426567bbf..b6a09f4980 100644 --- a/lib/kernel/test/logger_std_h_SUITE.erl +++ b/lib/kernel/test/logger_std_h_SUITE.erl @@ -108,6 +108,7 @@ all() -> add_remove_instance_file1, add_remove_instance_file2, default_formatter, + filter_config, errors, formatter_fail, config_fail, @@ -204,6 +205,20 @@ default_formatter(_Config) -> match = re:run(Msg,"=NOTICE REPORT====.*\n"++M1,[{capture,none}]), ok. +filter_config(_Config) -> + ok = logger:add_handler(?MODULE,logger_std_h,#{}), + {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE), + HConfig = maps:without([handler_pid,mode_tab],HConfig), + + FakeFullHConfig = HConfig#{handler_pid=>self(),mode_tab=>erlang:make_ref()}, + #{config:=HConfig} = + logger_std_h:filter_config(Config#{config=>FakeFullHConfig}), + ok. + +filter_config(cleanup,_Config) -> + logger:remove_handler(?MODULE), + ok. + errors(Config) -> Dir = ?config(priv_dir,Config), Log = filename:join(Dir,?FUNCTION_NAME), @@ -319,11 +334,10 @@ config_fail(_Config) -> ok = logger:add_handler(?MODULE,logger_std_h, #{filter_default=>log, formatter=>{?MODULE,self()}}), - {error,{illegal_config_change,_,_}} = + {error,{illegal_config_change,#{config:=#{type:=_}},#{config:=#{type:=_}}}} = logger:set_handler_config(?MODULE,config, #{type=>{file,"file"}}), - {error,{illegal_config_change,_,_}} = - logger:set_handler_config(?MODULE,id,bad), + {error,{invalid_levels,_}} = logger:set_handler_config(?MODULE,config, #{sync_mode_qlen=>100, @@ -331,6 +345,15 @@ config_fail(_Config) -> {error,{invalid_config,logger_std_h,{filesync_rep_int,2000}}} = logger:set_handler_config(?MODULE, config, #{filesync_rep_int => 2000}), + + %% Read-only fields may (accidentially) be included in the change, + %% but it won't take effect + {ok,C} = logger:get_handler_config(?MODULE), + ok = logger:set_handler_config(?MODULE,config, + #{handler_pid=>self(), + mode_tab=>erlang:make_ref()}), + {ok,C} = logger:get_handler_config(?MODULE), + ok. 
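
The logger test changes around here hinge on the difference between logger:set_handler_config/3, which resets every writable config sub-key that is left out to its default value (while keeping write-once fields such as type and read-only fields such as handler_pid), and logger:update_handler_config/3, which merges the given map into the existing configuration. A minimal standalone sketch of that difference, assuming an OTP release where both functions accept the config key; the handler id demo_h and all values are made up for illustration and are not taken from the patch:

    %% Sketch only, not part of the patch: contrast set vs update semantics
    %% for a logger_std_h handler.
    demo_set_vs_update() ->
        ok = logger:add_handler(demo_h, logger_std_h, #{}),

        %% update_handler_config/3 merges: only sync_mode_qlen changes.
        ok = logger:update_handler_config(demo_h, config,
                                          #{sync_mode_qlen => 1}),
        {ok, #{config := #{sync_mode_qlen := 1}}} =
            logger:get_handler_config(demo_h),

        %% set_handler_config/3 replaces: sync_mode_qlen falls back to its
        %% default, only drop_mode_qlen keeps the explicitly given value.
        ok = logger:set_handler_config(demo_h, config,
                                       #{drop_mode_qlen => 100}),
        {ok, #{config := #{drop_mode_qlen := 100}}} =
            logger:get_handler_config(demo_h),

        ok = logger:remove_handler(demo_h).
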
config_fail(cleanup,_Config) -> @@ -457,9 +480,26 @@ reconfig(Config) -> overload_kill_qlen := ?OVERLOAD_KILL_QLEN, overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE, overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER, - filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} = + filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} = DefaultInfo = logger_std_h:info(?MODULE), + {ok, + #{config:= + #{type := standard_io, + sync_mode_qlen := ?SYNC_MODE_QLEN, + drop_mode_qlen := ?DROP_MODE_QLEN, + flush_qlen := ?FLUSH_QLEN, + burst_limit_enable := ?BURST_LIMIT_ENABLE, + burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT, + burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME, + overload_kill_enable := ?OVERLOAD_KILL_ENABLE, + overload_kill_qlen := ?OVERLOAD_KILL_QLEN, + overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE, + overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER, + filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} = + DefaultHConf}} + = logger:get_handler_config(?MODULE), + ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1, drop_mode_qlen => 2, @@ -485,7 +525,77 @@ reconfig(Config) -> overload_kill_qlen := 100000, overload_kill_mem_size := 10000000, overload_kill_restart_after := infinity, - filesync_repeat_interval := no_repeat} = logger_std_h:info(?MODULE), + filesync_repeat_interval := no_repeat} = Info = logger_std_h:info(?MODULE), + + {ok,#{config := + #{type := standard_io, + sync_mode_qlen := 1, + drop_mode_qlen := 2, + flush_qlen := 3, + burst_limit_enable := false, + burst_limit_max_count := 10, + burst_limit_window_time := 10, + overload_kill_enable := true, + overload_kill_qlen := 100000, + overload_kill_mem_size := 10000000, + overload_kill_restart_after := infinity, + filesync_repeat_interval := no_repeat} = HConf}} = + logger:get_handler_config(?MODULE), + + ok = logger:update_handler_config(?MODULE, config, + #{flush_qlen => ?FLUSH_QLEN}), + {ok,#{config:=C1}} = logger:get_handler_config(?MODULE), + ct:log("C1: ~p",[C1]), + C1 = HConf#{flush_qlen => ?FLUSH_QLEN}, + + ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}), + {ok,#{config:=C2}} = logger:get_handler_config(?MODULE), + ct:log("C2: ~p",[C2]), + C2 = DefaultHConf#{sync_mode_qlen => 1}, + + ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}), + {ok,#{config:=C3}} = logger:get_handler_config(?MODULE), + ct:log("C3: ~p",[C3]), + C3 = DefaultHConf#{drop_mode_qlen => 100}, + + ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}), + {ok,#{config:=C4}} = logger:get_handler_config(?MODULE), + ct:log("C4: ~p",[C4]), + C4 = DefaultHConf#{sync_mode_qlen => 1, + drop_mode_qlen => 100}, + + ok = logger:remove_handler(?MODULE), + + File = filename:join(Dir,lists:concat([?FUNCTION_NAME,".log"])), + ok = logger:add_handler(?MODULE, + logger_std_h, + #{config => #{type => {file,File}}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + + {ok,#{config:=#{filesync_repeat_interval:=FSI}=FileHConfig}} = + logger:get_handler_config(?MODULE), + ok = logger:update_handler_config(?MODULE,config, + #{filesync_repeat_interval=>FSI+2000}), + {ok,#{config:=C5}} = logger:get_handler_config(?MODULE), + ct:log("C5: ~p",[C5]), + C5 = FileHConfig#{filesync_repeat_interval=>FSI+2000}, + + %% You are not allowed to actively set 'type' in runtime, since + %% this is a write once field. 
+ {error, {illegal_config_change,_,_}} = + logger:set_handler_config(?MODULE,config,#{type=>standard_io}), + {ok,#{config:=C6}} = logger:get_handler_config(?MODULE), + ct:log("C6: ~p",[C6]), + C6 = C5, + + %% ... but if you don't specify 'type', then set_handler_config shall + %% NOT reset it to its default value + ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}), + {ok,#{config:=C7}} = logger:get_handler_config(?MODULE), + ct:log("C7: ~p",[C7]), + C7 = FileHConfig#{sync_mode_qlen=>1}, ok. reconfig(cleanup, _Config) -> @@ -561,8 +671,8 @@ sync(Config) -> %% check that if there's no repeated filesync active, %% a filesync is still performed when handler goes idle - logger:set_handler_config(?MODULE, config, - #{filesync_repeat_interval => no_repeat}), + ok = logger:update_handler_config(?MODULE, config, + #{filesync_repeat_interval => no_repeat}), no_repeat = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)), %% The following timer is to make sure the time from last log %% ("second") to next ("third") is long enough, so the a flush is @@ -592,12 +702,12 @@ sync(Config) -> start_tracer([{logger_std_h,handle_cast,2}], [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]), - logger:set_handler_config(?MODULE, config, - #{filesync_repeat_interval => SyncInt}), + ok = logger:update_handler_config(?MODULE, config, + #{filesync_repeat_interval => SyncInt}), SyncInt = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)), timer:sleep(WaitT), - logger:set_handler_config(?MODULE, config, - #{filesync_repeat_interval => no_repeat}), + ok = logger:update_handler_config(?MODULE, config, + #{filesync_repeat_interval => no_repeat}), check_tracer(100), ok. sync(cleanup, _Config) -> @@ -652,7 +762,7 @@ sync_failure(Config) -> rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]), SyncInt = 500, - ok = rpc:call(Node, logger, set_handler_config, + ok = rpc:call(Node, logger, update_handler_config, [?STANDARD_HANDLER, config, #{filesync_repeat_interval => SyncInt}]), Info = rpc:call(Node, logger_std_h, info, [?STANDARD_HANDLER]), @@ -718,7 +828,7 @@ op_switch_to_sync_file(Config) -> drop_mode_qlen => NumOfReqs+1, flush_qlen => 2*NumOfReqs, burst_limit_enable => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), %% TRecvPid = start_op_trace(), send_burst({n,NumOfReqs}, seq, {chars,79}, notice), Lines = count_lines(Log), @@ -747,7 +857,7 @@ op_switch_to_sync_tty(Config) -> drop_mode_qlen => NumOfReqs+1, flush_qlen => 2*NumOfReqs, burst_limit_enable => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), send_burst({n,NumOfReqs}, seq, {chars,79}, notice), ok. op_switch_to_sync_tty(cleanup, _Config) -> @@ -770,7 +880,7 @@ op_switch_to_drop_file(Config) -> flush_qlen => Procs*NumOfReqs*Bursts, burst_limit_enable => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), %% It sometimes happens that the handler gets the %% requests in a slow enough pace so that dropping %% never occurs. Therefore, lets generate a number of @@ -807,7 +917,7 @@ op_switch_to_drop_tty(Config) -> flush_qlen => Procs*NumOfReqs+1, burst_limit_enable => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice), ok. 
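
The op_switch_to_* cases above exercise the handler's overload states by tuning sync_mode_qlen, drop_mode_qlen and flush_qlen and then sending bursts. Outside a test suite the same knobs can be applied to the default standard handler; a hedged sketch with illustrative threshold values, not values taken from the patch:

    %% Illustration: make the 'default' logger_std_h handler go synchronous
    %% at 10 queued requests, start dropping at 200 and flush at 1000.
    tune_overload_protection() ->
        ok = logger:update_handler_config(default, config,
                                          #{sync_mode_qlen     => 10,
                                            drop_mode_qlen     => 200,
                                            flush_qlen         => 1000,
                                            burst_limit_enable => false}).
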
op_switch_to_drop_tty(cleanup, _Config) -> @@ -832,7 +942,7 @@ op_switch_to_flush_file(Config) -> drop_mode_qlen => 300, flush_qlen => 300, burst_limit_enable => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), NumOfReqs = 1500, Procs = 10, Bursts = 10, @@ -879,7 +989,7 @@ op_switch_to_flush_tty(Config) -> drop_mode_qlen => 100, flush_qlen => 100, burst_limit_enable => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), NumOfReqs = 1000, Procs = 100, send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice), @@ -895,7 +1005,7 @@ limit_burst_disabled(Config) -> burst_limit_window_time => 2000, drop_mode_qlen => 200, flush_qlen => 300}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), NumOfReqs = 100, send_burst({n,NumOfReqs}, seq, {chars,79}, notice), Logged = count_lines(Log), @@ -915,7 +1025,7 @@ limit_burst_enabled_one(Config) -> burst_limit_window_time => 2000, drop_mode_qlen => 200, flush_qlen => 300}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), NumOfReqs = 100, send_burst({n,NumOfReqs}, seq, {chars,79}, notice), Logged = count_lines(Log), @@ -936,7 +1046,7 @@ limit_burst_enabled_period(Config) -> burst_limit_window_time => BurstTWin, drop_mode_qlen => 20000, flush_qlen => 20001}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), Windows = 3, Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice), @@ -956,7 +1066,7 @@ kill_disabled(Config) -> HConfig#{config=>StdHConfig#{overload_kill_enable=>false, overload_kill_qlen=>10, overload_kill_mem_size=>100}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), NumOfReqs = 100, send_burst({n,NumOfReqs}, seq, {chars,79}, notice), Logged = count_lines(Log), @@ -977,7 +1087,7 @@ qlen_kill_new(Config) -> overload_kill_qlen=>10, overload_kill_mem_size=>Mem0+50000, overload_kill_restart_after=>RestartAfter}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), MRef = erlang:monitor(process, Pid0), NumOfReqs = 100, Procs = 4, @@ -1011,7 +1121,7 @@ qlen_kill_std(_Config) -> %% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), %% Log = filename:join(Dir, File), %% Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log), - %% ok = rpc:call(Node, logger, set_handler_config, + %% ok = rpc:call(Node, logger, update_handler_config, %% [?STANDARD_HANDLER, config, %% #{overload_kill_enable=>true, %% overload_kill_qlen=>10, @@ -1028,7 +1138,7 @@ mem_kill_new(Config) -> overload_kill_qlen=>50000, overload_kill_mem_size=>Mem0+500, overload_kill_restart_after=>RestartAfter}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), MRef = erlang:monitor(process, Pid0), NumOfReqs = 100, Procs = 4, @@ -1067,7 +1177,7 @@ restart_after(Config) -> HConfig#{config=>StdHConfig#{overload_kill_enable=>true, overload_kill_qlen=>10, overload_kill_restart_after=>infinity}}, - ok = logger:set_handler_config(?MODULE, NewHConfig1), + ok = logger:update_handler_config(?MODULE, NewHConfig1), MRef1 = erlang:monitor(process, whereis(h_proc_name())), %% kill handler send_burst({n,100}, {spawn,4,0}, {chars,79}, notice), @@ -1082,14 +1192,15 @@ 
restart_after(Config) -> ct:pal("Handler state = ~p", [Info1]), ct:fail("Handler not dead! It should not have survived this!") end, - + {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config), RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER, + NewHConfig2 = HConfig#{config=>StdHConfig#{overload_kill_enable=>true, overload_kill_qlen=>10, overload_kill_restart_after=>RestartAfter}}, - ok = logger:set_handler_config(?MODULE, NewHConfig2), + ok = logger:update_handler_config(?MODULE, NewHConfig2), Pid0 = whereis(h_proc_name()), MRef2 = erlang:monitor(process, Pid0), %% kill handler @@ -1123,7 +1234,7 @@ handler_requests_under_load(Config) -> drop_mode_qlen => 1000, flush_qlen => 2000, burst_limit_enable => false}}, - ok = logger:set_handler_config(?MODULE, NewHConfig), + ok = logger:update_handler_config(?MODULE, NewHConfig), Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]}, {info,[]}, {reset,[]}, @@ -1155,9 +1266,9 @@ send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) -> Result = case Req of change_config -> - logger:set_handler_config(HName, config, - #{overload_kill_enable => - false}); + logger:update_handler_config(HName, config, + #{overload_kill_enable => + false}); Func -> logger_std_h:Func(HName) end, diff --git a/lib/observer/src/crashdump_viewer.erl b/lib/observer/src/crashdump_viewer.erl index 14b086ff58..276b209ce5 100644 --- a/lib/observer/src/crashdump_viewer.erl +++ b/lib/observer/src/crashdump_viewer.erl @@ -1594,31 +1594,92 @@ read_heap(Fd,Pid,DecodeOpts,Dict0) -> Dict0 end. -read_heap(DecodeOpts,Dict0) -> - %% This function is never called if the dump is truncated in {?proc_heap,Pid} - case get(fd) of - end_of_heap -> +read_heap(DecodeOpts, Dict0) -> + %% This function is never called if the dump is truncated in + %% {?proc_heap,Pid}. + %% + %% It is not always possible to reconstruct the heap terms + %% in a single pass, especially if maps are involved. + %% See crashdump_helper:literal_map/0 for an example. + %% + %% Therefore, we need two passes. In the first pass + %% we collect all lines without parsing them, and in the + %% second pass we parse them. + %% + %% The first pass follows. + + Lines0 = read_heap_lines(), + + %% Save a map of all unprocessed lines so that deref_ptr() can + %% access any line when there are references to terms not yet + %% built. + + LineMap = maps:from_list(Lines0), + put(line_map, LineMap), + + %% Refc binaries (tag "Yc") must be processed before any sub + %% binaries (tag "Ys") referencing them, so we make sure to + %% process all the refc binaries first. + %% + %% The other lines can be processed in any order, but processing + %% them in the reverse order compared to how they are printed in + %% the crash dump seems to minimize the number of references to + %% terms that have not yet been built. That happens to be the + %% order of the line list as returned by read_heap_lines/0. + + RefcBins = [Refc || {_,<<"Yc",_/binary>>}=Refc <- Lines0], + Lines = RefcBins ++ Lines0, + + %% Second pass. + + init_progress("Processing terms", map_size(LineMap)), + Dict = parse_heap_terms(Lines, DecodeOpts, Dict0), + erase(line_map), + end_progress(), + Dict. + +read_heap_lines() -> + read_heap_lines_1(get(fd), []). 
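
The rewritten read_heap above no longer parses the =proc_heap section in one sequential sweep: the first pass only collects the raw "Addr:Line" entries, and the second pass builds the terms, so a term may refer to an address that occurs later in the dump (as the literal map example in crashdump_helper does). A self-contained sketch of that two-pass idea on an invented line format, not the module's real parser, assuming OTP 21 or later for the is_map_key/2 guard:

    %% Hypothetical two-pass reconstruction: a line is either
    %% "Addr:ref OtherAddr" or "Addr:Data", and references may point to
    %% addresses that appear later in the input.
    -module(two_pass_demo).
    -export([build/1]).

    build(Lines) ->
        %% Pass 1: index every raw line by its address, no parsing yet.
        LineMap = maps:from_list([split(L) || L <- Lines]),
        %% Pass 2: build the terms, recursing through the line map when a
        %% reference points at something not yet built.
        maps:fold(fun(Addr, _Raw, Acc) -> parse(Addr, LineMap, Acc) end,
                  #{}, LineMap).

    split(Line) ->
        [Addr, Data] = string:split(Line, ":"),
        {Addr, Data}.

    parse(Addr, _LineMap, Built) when is_map_key(Addr, Built) ->
        Built;                                  % already built via a reference
    parse(Addr, LineMap, Built0) ->
        case maps:get(Addr, LineMap) of
            "ref " ++ Other ->                  % forward or backward reference
                Built = parse(Other, LineMap, Built0),
                Built#{Addr => {ref, maps:get(Other, Built)}};
            Data ->
                Built0#{Addr => {leaf, Data}}
        end.

For example, build(["a:ref b", "b:hello"]) resolves "a" to the term built from "b" even though "b" is printed after the line that references it.
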
+ +read_heap_lines_1(Fd, Acc) -> + case bytes(Fd) of + "=" ++ _next_tag -> end_progress(), - Dict0; - Fd -> - case bytes(Fd) of - "=" ++ _next_tag -> - end_progress(), - put(fd, end_of_heap), - Dict0; - Line -> - update_progress(length(Line)+1), - Dict = parse(Line,DecodeOpts,Dict0), - read_heap(DecodeOpts,Dict) - end + put(fd, end_of_heap), + Acc; + Line0 -> + update_progress(length(Line0)+1), + {Addr,":"++Line1} = get_hex(Line0), + + %% Reduce the memory consumption by converting the + %% line to a binary. Measurements show that it may also + %% be benefical for performance, too, because it makes the + %% garbage collections cheaper. + + Line = list_to_binary(Line1), + read_heap_lines_1(Fd, [{Addr,Line}|Acc]) end. -parse(Line0, DecodeOpts, Dict0) -> - {Addr,":"++Line1} = get_hex(Line0), - {_Term,Line,Dict} = parse_heap_term(Line1, Addr, DecodeOpts, Dict0), - [] = skip_blanks(Line), +parse_heap_terms([{Addr,Line0}|T], DecodeOpts, Dict0) -> + case gb_trees:is_defined(Addr, Dict0) of + true -> + %% Already parsed (by a recursive call from do_deref_ptr() + %% to parse_line()). Nothing to do. + parse_heap_terms(T, DecodeOpts, Dict0); + false -> + %% Parse this previously unparsed term. + Dict = parse_line(Addr, Line0, DecodeOpts, Dict0), + parse_heap_terms(T, DecodeOpts, Dict) + end; +parse_heap_terms([], _DecodeOpts, Dict) -> Dict. +parse_line(Addr, Line0, DecodeOpts, Dict0) -> + update_progress(1), + Line1 = binary_to_list(Line0), + {_Term,Line,Dict} = parse_heap_term(Line1, Addr, DecodeOpts, Dict0), + [] = skip_blanks(Line), %Assertion. + Dict. %%----------------------------------------------------------------- %% Page with one port @@ -2871,16 +2932,18 @@ parse_atom_translation_table(N, Line0, As) -> deref_ptr(Ptr, Line, DecodeOpts, D) -> - Lookup = fun(D0) -> - gb_trees:lookup(Ptr, D0) - end, + Lookup0 = fun(D0) -> + gb_trees:lookup(Ptr, D0) + end, + Lookup = wrap_line_map(Ptr, Lookup0), do_deref_ptr(Lookup, Line, DecodeOpts, D). deref_bin(Binp0, Offset, Sz, Line, DecodeOpts, D) -> Binp = Binp0 bor DecodeOpts#dec_opts.bin_addr_adj, - Lookup = fun(D0) -> - lookup_binary(Binp, Offset, Sz, D0) - end, + Lookup0 = fun(D0) -> + lookup_binary(Binp, Offset, Sz, D0) + end, + Lookup = wrap_line_map(Binp, Lookup0), do_deref_ptr(Lookup, Line, DecodeOpts, D). lookup_binary(Binp, Offset, Sz, D) -> @@ -2899,26 +2962,36 @@ lookup_binary(Binp, Offset, Sz, D) -> end end. +wrap_line_map(Ptr, Lookup) -> + wrap_line_map_1(get(line_map), Ptr, Lookup). + +wrap_line_map_1(#{}=LineMap, Ptr, Lookup) -> + fun(D) -> + case Lookup(D) of + {value,_}=Res -> + Res; + none -> + case LineMap of + #{Ptr:=Line} -> + {line,Ptr,Line}; + #{} -> + none + end + end + end; +wrap_line_map_1(undefined, _Ptr, Lookup) -> + Lookup. + do_deref_ptr(Lookup, Line, DecodeOpts, D0) -> case Lookup(D0) of {value,Term} -> {Term,Line,D0}; none -> - case get(fd) of - end_of_heap -> - put(incomplete_heap,true), - {['#CDVIncompleteHeap'],Line,D0}; - Fd -> - case bytes(Fd) of - "="++_ -> - put(fd, end_of_heap), - do_deref_ptr(Lookup, Line, DecodeOpts, D0); - L -> - update_progress(length(L)+1), - D = parse(L, DecodeOpts, D0), - do_deref_ptr(Lookup, Line, DecodeOpts, D) - end - end + put(incomplete_heap, true), + {['#CDVIncompleteHeap'],Line,D0}; + {line,Addr,NewLine} -> + D = parse_line(Addr, NewLine, DecodeOpts, D0), + do_deref_ptr(Lookup, Line, DecodeOpts, D) end. 
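
Similarly, the deref_ptr/deref_bin changes just above wrap the dictionary lookup in a fun that falls back to the saved line map when a pointer has not been built yet, instead of reading further from the file descriptor. The same pattern in a generic, hypothetical form (the names below are illustrative, not crashdump_viewer internals):

    %% Built holds finished terms, LineMap the raw unparsed lines, and
    %% ParseFun turns a raw line into a term. Only when neither map knows
    %% the address is the heap reported as incomplete.
    lookup_or_parse(Addr, Built, LineMap, ParseFun) ->
        case Built of
            #{Addr := Term} ->
                {Term, Built};                        % already reconstructed
            #{} ->
                case LineMap of
                    #{Addr := RawLine} ->
                        Term = ParseFun(RawLine),
                        {Term, Built#{Addr => Term}}; % build on demand
                    #{} ->
                        {incomplete_heap, Built}      % truly missing
                end
        end.
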
get_hex(L) -> diff --git a/lib/observer/test/crashdump_helper.erl b/lib/observer/test/crashdump_helper.erl index 145ff56b71..d8f4e046ae 100644 --- a/lib/observer/test/crashdump_helper.erl +++ b/lib/observer/test/crashdump_helper.erl @@ -142,4 +142,23 @@ create_maps() -> Map3 = lists:foldl(fun(I, A) -> A#{I=>I*I} end, Map2, lists:seq(-10, 0)), - #{a=>Map0,b=>Map1,c=>Map2,d=>Map3,e=>#{}}. + #{a=>Map0,b=>Map1,c=>Map2,d=>Map3,e=>#{},literal=>literal_map()}. + +literal_map() -> + %% A literal map such as the one below will produce a heap dump + %% like this: + %% + %% Address1:t4:H<Address3>,H<Address4>,H<Address5>,H<Address6> + %% Address2:Mf4:H<Adress1>:I1,I2,I3,I4 + %% Address3: ... % "one" + %% Address4: ... % "two" + %% Address5: ... % "three" + %% Address6: ... % "four" + %% + %% The map cannot be reconstructed in a single sequential pass. + %% + %% To reconstruct the map, first the string keys "one" + %% through "four" must be reconstructed, then the tuple at + %% Adress1, then the map at Address2. + + #{"one"=>1,"two"=>2,"three"=>3,"four"=>4}. diff --git a/lib/os_mon/c_src/cpu_sup.c b/lib/os_mon/c_src/cpu_sup.c index 17ef48c26e..c96a5c9f7c 100644 --- a/lib/os_mon/c_src/cpu_sup.c +++ b/lib/os_mon/c_src/cpu_sup.c @@ -152,6 +152,8 @@ static void util_measure(unsigned int **result_vec, int *result_sz); #if defined(__sun__) static unsigned int misc_measure(char* name); +#elif defined(__linux__) +static unsigned int misc_measure(char cmd); #endif static void sendi(unsigned int data); static void sendv(unsigned int data[], int ints); @@ -231,6 +233,11 @@ int main(int argc, char** argv) { case AVG1: sendi(misc_measure("avenrun_1min")); break; case AVG5: sendi(misc_measure("avenrun_5min")); break; case AVG15: sendi(misc_measure("avenrun_15min")); break; +#elif defined(__linux__) + case NPROCS: + case AVG1: + case AVG5: + case AVG15: sendi(misc_measure(cmd)); break; #elif defined(__OpenBSD__) || (defined(__APPLE__) && defined(__MACH__)) || defined(__FreeBSD__) || defined(__DragonFly__) case NPROCS: bsd_count_procs(); break; case AVG1: bsd_loadavg(0); break; @@ -238,7 +245,7 @@ int main(int argc, char** argv) { case AVG15: bsd_loadavg(2); break; #endif #if defined(__sun__) || defined(__linux__) || (defined(__APPLE__) && defined(__MACH__)) || defined(__FreeBSD__) - case UTIL: util_measure(&rv,&sz); sendv(rv, sz); break; + case UTIL: util_measure(&rv,&sz); sendv(rv, sz); break; #endif case QUIT: free((void*)rv); return 0; default: error("Bad command"); break; @@ -329,6 +336,22 @@ static void bsd_count_procs(void) { #if defined(__linux__) +static unsigned int misc_measure(char cmd) { + struct sysinfo info; + + if (sysinfo(&info)) + error(strerror(errno)); + + switch (cmd) { + case AVG1: return (unsigned int)(info.loads[0] / 256); + case AVG5: return (unsigned int)(info.loads[1] / 256); + case AVG15: return (unsigned int)(info.loads[2] / 256); + case NPROCS: return info.procs; + } + + return -1; +} + static cpu_t *read_procstat(FILE *fp, cpu_t *cpu) { char buffer[BUFFERSIZE]; @@ -357,8 +380,24 @@ static void util_measure(unsigned int **result_vec, int *result_sz) { FILE *fp; unsigned int *rv = NULL; cpu_t cpu; - + + rv = *result_vec; + rv[0] = no_of_cpus; + if ( (fp = fopen(PROCSTAT,"r")) == NULL) { + if (errno == EACCES) { /* SELinux */ + rv[1] = 1; /* just the cpu id */ + ++rv; /* first value is number of cpus */ + ++rv; /* second value is number of entries */ + for (i = 0; i < no_of_cpus; ++i) { + rv[0] = CU_CPU_ID; + rv[1] = i; + rv += 1*2; + } + *result_sz = 2 + 2*1 * no_of_cpus; + return; + } + 
/* Check if procfs is mounted, * otherwise: * try and try again, bad procsfs. @@ -367,20 +406,19 @@ static void util_measure(unsigned int **result_vec, int *result_sz) { return; } - /*ignore read*/ + /*ignore read*/ if (fgets(buffer, BUFFERSIZE, fp) == NULL) { *result_sz = 0; return; } - rv = *result_vec; - rv[0] = no_of_cpus; + rv[1] = CU_VALUES; ++rv; /* first value is number of cpus */ ++rv; /* second value is number of entries */ for (i = 0; i < no_of_cpus; ++i) { read_procstat(fp, &cpu); - + rv[ 0] = CU_CPU_ID; rv[ 1] = cpu.id; rv[ 2] = CU_USER; rv[ 3] = cpu.user; rv[ 4] = CU_NICE_USER; rv[ 5] = cpu.nice_user; diff --git a/lib/os_mon/src/cpu_sup.erl b/lib/os_mon/src/cpu_sup.erl index 81e049ef22..ba2d89313e 100644 --- a/lib/os_mon/src/cpu_sup.erl +++ b/lib/os_mon/src/cpu_sup.erl @@ -220,17 +220,21 @@ code_change(_OldVsn, State, _Extra) -> %% internal functions %%---------------------------------------------------------------------- -get_uint32_measurement(Request, #internal{os_type = {unix, linux}}) -> - {ok,F} = file:open("/proc/loadavg",[read,raw]), - {ok,D} = file:read_line(F), - ok = file:close(F), - {ok,[Load1,Load5,Load15,_PRun,PTotal],_} = io_lib:fread("~f ~f ~f ~d/~d", D), - case Request of - ?avg1 -> sunify(Load1); - ?avg5 -> sunify(Load5); - ?avg15 -> sunify(Load15); - ?ping -> 4711; - ?nprocs -> PTotal +get_uint32_measurement(Request, #internal{port = P, os_type = {unix, linux}}) -> + case file:open("/proc/loadavg",[read,raw]) of + {ok,F} -> + {ok,D} = file:read_line(F), + ok = file:close(F), + {ok,[Load1,Load5,Load15,_PRun,PTotal],_} = io_lib:fread("~f ~f ~f ~d/~d", D), + case Request of + ?avg1 -> sunify(Load1); + ?avg5 -> sunify(Load5); + ?avg15 -> sunify(Load15); + ?ping -> 4711; + ?nprocs -> PTotal + end; + {error,_} -> + port_server_call(P, Request) end; get_uint32_measurement(Request, #internal{port = P, os_type = {unix, Sys}}) when Sys == sunos; diff --git a/lib/public_key/asn1/OTP-PKIX.asn1 b/lib/public_key/asn1/OTP-PKIX.asn1 index 37196bb9bf..10a83555af 100644 --- a/lib/public_key/asn1/OTP-PKIX.asn1 +++ b/lib/public_key/asn1/OTP-PKIX.asn1 @@ -368,6 +368,13 @@ SupportedPublicKeyAlgorithms PUBLIC-KEY-ALGORITHM-CLASS ::= { ID id-dsaWithSHA1 TYPE DSAParams } + id-dsa-with-sha224 OBJECT IDENTIFIER ::= { + joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101) + csor(3) algorithms(4) id-dsa-with-sha2(3) 1 } + + id-dsa-with-sha256 OBJECT IDENTIFIER ::= { + joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101) + csor(3) algorithms(4) id-dsa-with-sha2(3) 2 } -- -- RSA Keys and Signatures -- diff --git a/lib/public_key/src/public_key.erl b/lib/public_key/src/public_key.erl index 3f609ce6c6..dca1e0766e 100644 --- a/lib/public_key/src/public_key.erl +++ b/lib/public_key/src/public_key.erl @@ -609,6 +609,10 @@ pkix_sign_types(?'id-dsa-with-sha1') -> {sha, dsa}; pkix_sign_types(?'id-dsaWithSHA1') -> {sha, dsa}; +pkix_sign_types(?'id-dsa-with-sha224') -> + {sha224, dsa}; +pkix_sign_types(?'id-dsa-with-sha256') -> + {sha256, dsa}; pkix_sign_types(?'ecdsa-with-SHA1') -> {sha, ecdsa}; pkix_sign_types(?'ecdsa-with-SHA256') -> diff --git a/lib/public_key/test/public_key_SUITE.erl b/lib/public_key/test/public_key_SUITE.erl index 1955e9e119..878489eb0f 100644 --- a/lib/public_key/test/public_key_SUITE.erl +++ b/lib/public_key/test/public_key_SUITE.erl @@ -44,7 +44,9 @@ all() -> encrypt_decrypt, {group, sign_verify}, pkix, pkix_countryname, pkix_emailaddress, pkix_path_validation, - pkix_iso_rsa_oid, pkix_iso_dsa_oid, pkix_crl, general_name, + pkix_iso_rsa_oid, 
pkix_iso_dsa_oid, + pkix_dsa_sha2_oid, + pkix_crl, general_name, pkix_verify_hostname_cn, pkix_verify_hostname_subjAltName, pkix_verify_hostname_subjAltName_IP, @@ -1114,6 +1116,13 @@ pkix_iso_dsa_oid(Config) when is_list(Config) -> {_, dsa} = public_key:pkix_sign_types(SigAlg#'SignatureAlgorithm'.algorithm). %%-------------------------------------------------------------------- +pkix_dsa_sha2_oid() -> + [{doc, "Test support dsa_sha2 oid"}]. +pkix_dsa_sha2_oid(Config) when is_list(Config) -> + {sha224, dsa} = public_key:pkix_sign_types(?'id-dsa-with-sha224'), + {sha256, dsa} = public_key:pkix_sign_types(?'id-dsa-with-sha256'). + +%%-------------------------------------------------------------------- pkix_crl() -> [{doc, "test pkix_crl_* functions"}]. diff --git a/lib/ssh/doc/src/Makefile b/lib/ssh/doc/src/Makefile index 77fa356092..4e32dd9976 100644 --- a/lib/ssh/doc/src/Makefile +++ b/lib/ssh/doc/src/Makefile @@ -45,6 +45,7 @@ XML_REF3_FILES = \ ssh_connection.xml \ ssh_server_channel.xml \ ssh_server_key_api.xml \ + ssh_file.xml \ ssh_sftp.xml \ ssh_sftpd.xml \ @@ -56,8 +57,8 @@ XML_CHAPTER_FILES = \ notes.xml \ introduction.xml \ using_ssh.xml \ + terminology.xml \ configure_algos.xml -# ssh_protocol.xml \ BOOK_FILES = book.xml diff --git a/lib/ssh/doc/src/ref_man.xml b/lib/ssh/doc/src/ref_man.xml index df37b0244f..60572b985b 100644 --- a/lib/ssh/doc/src/ref_man.xml +++ b/lib/ssh/doc/src/ref_man.xml @@ -40,6 +40,7 @@ <xi:include href="ssh_connection.xml"/> <xi:include href="ssh_client_key_api.xml"/> <xi:include href="ssh_server_key_api.xml"/> + <xi:include href="ssh_file.xml"/> <xi:include href="ssh_sftp.xml"/> <xi:include href="ssh_sftpd.xml"/> </application> diff --git a/lib/ssh/doc/src/specs.xml b/lib/ssh/doc/src/specs.xml index acdbe2ddfd..a6517f3660 100644 --- a/lib/ssh/doc/src/specs.xml +++ b/lib/ssh/doc/src/specs.xml @@ -6,6 +6,7 @@ <xi:include href="../specs/specs_ssh_connection.xml"/> <xi:include href="../specs/specs_ssh_server_channel.xml"/> <xi:include href="../specs/specs_ssh_server_key_api.xml"/> + <xi:include href="../specs/specs_ssh_file.xml"/> <xi:include href="../specs/specs_ssh_sftp.xml"/> <xi:include href="../specs/specs_ssh_sftpd.xml"/> </specs> diff --git a/lib/ssh/doc/src/ssh.xml b/lib/ssh/doc/src/ssh.xml index f238bf2ca8..b75b4a33c2 100644 --- a/lib/ssh/doc/src/ssh.xml +++ b/lib/ssh/doc/src/ssh.xml @@ -99,8 +99,8 @@ </p> <p>The paths could easily be changed by options: - <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso> and - <seealso marker="#type-system_dir_daemon_option"><c>system_dir</c></seealso>. + <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> and + <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso>. </p> <p>A completly different storage could be interfaced by writing call-back modules using the behaviours @@ -123,12 +123,12 @@ <item><c>ssh_host_ecdsa_key</c> and <c>ssh_host_ecdsa_key.pub</c></item> </list> <p>The host keys directory could be changed with the option - <seealso marker="#type-system_dir_daemon_option"><c>system_dir</c></seealso>.</p> + <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso>.</p> </item> <item>Optional: one or more <i>User's public key</i> in case of <c>publickey</c> authorization. Default is to store them concatenated in the file <c>.ssh/authorized_keys</c> in the user's home directory. 
<p>The user keys directory could be changed with the option - <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>.</p> + <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>.</p> </item> </list> </section> @@ -138,7 +138,7 @@ <p>The keys and some other data are by default stored in files in the directory <c>.ssh</c> in the user's home directory.</p> <p>The directory could be changed with the option - <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>. + <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>. </p> <list> <item>Optional: a list of <i>Host public key(s)</i> for previously connected hosts. This list @@ -192,22 +192,13 @@ <p>If there is no public key of a specified type available, the corresponding entry is ignored. Note that the available set is dependent on the underlying cryptolib and current user's public keys. </p> - <p>See also the option <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso> + <p>See also the option <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> for specifying the path to the user's keys. </p> </desc> </datatype> <datatype> - <name name="pubkey_passphrase_client_options"/> - <desc> - <p>If the user's DSA, RSA or ECDSA key is protected by a passphrase, it can be - supplied with thoose options. - </p> - </desc> - </datatype> - - <datatype> <name name="host_accepting_client_options"/> <name name="accept_hosts"/> <name name="fp_digest_alg"/> @@ -220,7 +211,7 @@ <p>This option guides the <c>connect</c> function on how to act when the connected server presents a Host Key that the client has not seen before. The default is to ask the user with a question on stdio of whether to accept or reject the new Host Key. - See the option <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso> + See the option <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> for specifying the path to the file <c>known_hosts</c> where previously accepted Host Keys are recorded. See also the option <seealso marker="#type-key_cb_common_option">key_cb</seealso> @@ -276,7 +267,7 @@ accept question the next time the same host is connected. If the option <seealso marker="#type-key_cb_common_option"><c>key_cb</c></seealso> is not present, the key is saved in the file "known_hosts". See option - <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso> for + <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> for the location of that file. </p> <p>If <c>false</c>, the key is not saved and the key will still be unknown @@ -478,18 +469,6 @@ <name name="pwdfun_4"/> <desc> <taglist> - <tag><marker id="type-system_dir_daemon_option"/><c>system_dir</c></tag> - <item> - <p>Sets the system directory, containing the host key files - that identify the host keys for <c>ssh</c>. Defaults to - <c>/etc/ssh</c>.</p> - <p>For security reasons, this directory is normally accessible only to the root user.</p> - <p>See also the option - <seealso marker="#type-key_cb_common_option">key_cb</seealso> - for the general way to handle keys. 
- </p> - </item> - <tag><c>auth_method_kb_interactive_data</c></tag> <item> <p>Sets the text strings that the daemon sends to the client for presentation to the user when @@ -502,7 +481,7 @@ </p> </item> - <tag><c>user_passwords</c></tag> + <tag><marker id="option-user_passwords"/><c>user_passwords</c></tag> <item> <p>Provides passwords for password authentication. The passwords are used when someone tries to connect to the server and public key user-authentication fails. The option provides @@ -510,7 +489,7 @@ </p> </item> - <tag><c>password</c></tag> + <tag><marker id="option-password"/><c>password</c></tag> <item> <p>Provides a global password that authenticates any user.</p> <warning> @@ -519,7 +498,9 @@ </warning> </item> - <tag><c>pwdfun</c> with <c>pwdfun_4()</c></tag> + <tag><marker id="option-pwdfun"/><c>pwdfun</c> with + <seealso marker="#type-pwdfun_4"><c>pwdfun_4()</c></seealso> + </tag> <item> <p>Provides a function for password validation. This could used for calling an external system or handeling passwords stored as hash values. @@ -546,7 +527,9 @@ can be used for this. The return value <c>disconnect</c> is useful for this.</p> </item> - <tag><c>pwdfun</c> with <c>pwdfun_2()</c></tag> + <tag><c>pwdfun</c> with + <seealso marker="#type-pwdfun_2"><c>pwdfun_2()</c></seealso> + </tag> <item> <p>Provides a function for password validation. This function is called with user and password as strings, and returns:</p> @@ -725,21 +708,6 @@ </datatype> <datatype> - <name name="user_dir_common_option"/> - <desc> - <p>Sets the user directory. That is, the directory containing <c>ssh</c> configuration - files for the user, such as - <c>known_hosts</c>, <c>id_rsa</c>, <c>id_dsa</c>>, <c>id_ecdsa</c> and <c>authorized_key</c>. - Defaults to the directory normally referred to as <c>~/.ssh</c>. - </p> - <p>See also the option - <seealso marker="#type-key_cb_common_option">key_cb</seealso> - for the general way to handle keys. - </p> - </desc> - </datatype> - - <datatype> <name name="profile_common_option"/> <desc> <p>Used together with <c>ip-address</c> and <c>port</c> to @@ -795,7 +763,8 @@ </p> <p>The <c>Opts</c> defaults to <c>[]</c> when only the <c>Module</c> is specified. </p> - <p>The default value of this option is <c>{ssh_file, []}</c>. + <p>The default value of this option is <c>{ssh_file, []}</c>. See also the manpage of + <seealso marker="ssh:ssh_file">ssh_file</seealso>. </p> <p>A call to the call-back function <c>F</c> will be</p> <code> @@ -804,7 +773,10 @@ <p>where <c>...</c> are arguments to <c>F</c> as in <seealso marker="ssh_client_key_api">ssh_client_key_api</seealso> and/or <seealso marker="ssh_server_key_api">ssh_server_key_api</seealso>. - The <c>UserOptions</c> are the options given to <c>ssh:connect</c>, <c>ssh:shell</c> or <c>ssh:daemon</c>. + The <c>UserOptions</c> are the options given to + <seealso marker="ssh:ssh#connect-3">ssh:connect</seealso>, + <seealso marker="ssh:ssh#shell-1">ssh:shell</seealso> or + <seealso marker="ssh:ssh#daemon-2">ssh:daemon</seealso>. </p> </desc> diff --git a/lib/ssh/doc/src/ssh_app.xml b/lib/ssh/doc/src/ssh_app.xml index e80bb1853d..eb804e67dc 100644 --- a/lib/ssh/doc/src/ssh_app.xml +++ b/lib/ssh/doc/src/ssh_app.xml @@ -74,13 +74,18 @@ <c>id_ecdsa_key</c>, <c>known_hosts</c>, and <c>authorized_keys</c> in ~/.ssh, and for the host key files in <c>/etc/ssh</c>. These locations can be changed - by the options <c>user_dir</c> and <c>system_dir</c>. 
+ by the options + <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> and + <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso>. </p> <p>Public key handling can also be customized through a callback module that implements the behaviors <seealso marker="ssh_client_key_api">ssh_client_key_api</seealso> and <seealso marker="ssh_server_key_api">ssh_server_key_api</seealso>. </p> + <p>See also the default callback module documentation in + <seealso marker="ssh_file">ssh_file</seealso>. + </p> </section> <section> diff --git a/lib/ssh/doc/src/ssh_file.xml b/lib/ssh/doc/src/ssh_file.xml new file mode 100644 index 0000000000..ae6ba2e1d9 --- /dev/null +++ b/lib/ssh/doc/src/ssh_file.xml @@ -0,0 +1,275 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE erlref SYSTEM "erlref.dtd"> + +<erlref> + <header> + <copyright> + <year>2018</year><year>2018</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + </legalnotice> + + <title>ssh_file</title> + <prepared></prepared> + <docno></docno> + <date></date> + <rev></rev> + </header> + <module>ssh_file</module> + <modulesummary>Default callback module for the client's and server's database operations in the ssh application</modulesummary> + <description> + <p>This module is the default callback handler for the client's and the server's user and host "database" operations. + All data, for instance key pairs, are stored in files in the normal file system. This page documents the files, where they + are stored and configuration options for this callback module. + </p> + <p>The intention is to be compatible with the + <url href="http://www.openssh.com">OpenSSH</url> + storage in files. Therefore it mimics directories and filenames of + <url href="http://www.openssh.com">OpenSSH</url>. + </p> + + <p>Ssh_file implements the <seealso marker="ssh:ssh_server_key_api">ssh_server_key_api</seealso> and + the <seealso marker="ssh:ssh_client_key_api">ssh_client_key_api</seealso>. + This enables the user to make an own interface using for example a database handler. + </p> + <p>Such another callback module could be used by setting the option + <seealso marker="ssh:ssh#type-key_cb_common_option"><c>key_cb</c></seealso> + when starting a client or a server (with for example + <seealso marker="ssh:ssh#connect-3">ssh:connect</seealso>, + <seealso marker="ssh:ssh#daemon-2">ssh:daemon</seealso> of + <seealso marker="ssh:ssh#shell-1">ssh:shell</seealso> + ). + </p> + + <note> + <p>The functions are <i>Callbacks</i> for the SSH app. They are not intended to be called from the user's code! + </p> + </note> + </description> + + <section> + <title>Files, directories and who uses them</title> + <section> + <title>Daemons</title> + <p>Daemons uses all files stored in the <seealso marker="#SYSDIR">SYSDIR</seealso> directory. 
+ </p> + <p>Optionaly, in case of <c>publickey</c> authorization, one or more of the remote user's public keys + in the <seealso marker="#USERDIR">USERDIR</seealso> directory are used. + See the files + <seealso marker="#USERDIR-authorized_keys"><c>USERDIR/authorized_keys</c></seealso> and + <seealso marker="#USERDIR-authorized_keys2"><c>USERDIR/authorized_keys2</c></seealso>. + </p> + </section> + + <section> + <title>Clients</title> + <p>Clients uses all files stored in the <seealso marker="#USERDIR">USERDIR</seealso> directory. + </p> + </section> + + <section> + <title>Directory contents</title> + <taglist> + <tag><marker id="LOCALUSER"/>LOCALUSER</tag> + <item><p>The user name of the OS process running the Erlang virtual machine (emulator).</p> + </item> + + <tag><marker id="SYSDIR"/>SYSDIR</tag> + <item><p>This is the directory holding the server's files:</p> + <list> + <item><marker id="SYSDIR-ssh_host_dsa_key"/><c>ssh_host_dsa_key</c> - private dss host key (optional)</item> + <item><marker id="SYSDIR-ssh_host_rsa_key"/><c>ssh_host_rsa_key</c> - private rsa host key (optional)</item> + <item><marker id="SYSDIR-ssh_host_ecdsa_key"/><c>ssh_host_ecdsa_key</c> - private ecdsa host key (optional)</item> + </list> + <p>At least one host key must be defined. The default value of SYSDIR is <marker id="#/etc/ssh"/><c>/etc/ssh</c>. + </p> + <p>For security reasons, this directory is normally accessible only to the root user. + </p> + <p>To change the SYSDIR, see the <seealso marker="#type-system_dir_daemon_option">system_dir</seealso> option. + </p> + </item> + + <tag><marker id="USERDIR"/>USERDIR</tag> + <item><p>This is the directory holding the files:</p> + <list> + <item><marker id="USERDIR-authorized_keys"/><c>authorized_keys</c> + and, as second alternative + <marker id="USERDIR-authorized_keys2"/><c>authorized_keys2</c> - + the user's public keys are stored concatenated in one of those files. + </item> + <item><marker id="USERDIR-known_hosts"/><c>known_hosts</c> - host keys from hosts visited + concatenated. The file is created and used by the client.</item> + <item><marker id="USERDIR-id_dsa"/><c>id_dsa</c> - private dss user key (optional)</item> + <item><marker id="USERDIR-id_rsa"/><c>id_rsa</c> - private rsa user key (optional)</item> + <item><marker id="USERDIR-id_ecdsa"/><c>id_ecdsa</c> - private ecdsa user key (optional)</item> + </list> + <p>The default value of USERDIR is <c>/home/</c><seealso marker="#LOCALUSER"><c>LOCALUSER</c></seealso><c>/.ssh</c>. + </p> + <p>To change the USERDIR, see the <seealso marker="#type-user_dir_common_option">user_dir</seealso> option + </p> + </item> + </taglist> + </section> + </section> + + <datatypes> + <datatype_title>Options for the default ssh_file callback module</datatype_title> + <datatype> + <name name="user_dir_common_option"/> + <desc> + <p>Sets the <seealso marker="#USERDIR">user directory</seealso>.</p> + </desc> + </datatype> + + <datatype> + <name name="user_dir_fun_common_option"/> + <name name="user2dir"/> + <desc> + <p>Sets the <seealso marker="#USERDIR">user directory</seealso> dynamically + by evaluating the <c>user2dir</c> function. + </p> + </desc> + </datatype> + + <datatype> + <name name="system_dir_daemon_option"/> + <desc> + <p>Sets the <seealso marker="#SYSDIR">system directory</seealso>.</p> + </desc> + </datatype> + + <datatype> + <name name="pubkey_passphrase_client_options"/> + <desc> + <p>If the user's DSA, RSA or ECDSA key is protected by a passphrase, it can be + supplied with thoose options. 
+ </p> + </desc> + </datatype> + + </datatypes> + + <funcs> + <func> + <name>host_key(Algorithm, DaemonOptions) -> {ok, Key} | {error, Reason}</name> + <fsummary></fsummary> + <desc> + <p><strong>Types and description</strong></p> + <p>See the api description in + <seealso marker="ssh:ssh_server_key_api#Module:host_key-2">ssh_server_key_api, Module:host_key/2</seealso>. + </p> + <p><strong>Options</strong></p> + <list> + <item><seealso marker="#type-system_dir_daemon_option">system_dir</seealso></item> + <!-- item>dsa_pass_phrase</item --> + <!-- item>rsa_pass_phrase</item --> + <!-- item>ecdsa_pass_phrase</item --> + </list> + <p><strong>Files</strong></p> + <list> + <item><seealso marker="#SYSDIR-ssh_host_rsa_key"><c>SYSDIR/ssh_host_rsa_key</c></seealso></item> + <item><seealso marker="#SYSDIR-ssh_host_dsa_key"><c>SYSDIR/ssh_host_dsa_key</c></seealso></item> + <item><seealso marker="#SYSDIR-ssh_host_ecdsa_key"><c>SYSDIR/ssh_host_ecdsa_key</c></seealso></item> + </list> + </desc> + </func> + + <func> + <name>is_auth_key(PublicUserKey, User, DaemonOptions) -> Result</name> + <fsummary></fsummary> + <desc> + <p><strong>Types and description</strong></p> + <p>See the api description in + <seealso marker="ssh:ssh_server_key_api#Module:is_auth_key-3">ssh_server_key_api: Module:is_auth_key/3</seealso>. + </p> + <p><strong>Options</strong></p> + <list> + <item><seealso marker="#type-user_dir_fun_common_option">user_dir_fun</seealso></item> + <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item> + </list> + <p><strong>Files</strong></p> + <list> + <item><seealso marker="#USERDIR-authorized_keys"><c>USERDIR/authorized_keys</c></seealso></item> + <item><seealso marker="#USERDIR-authorized_keys2"><c>USERDIR/authorized_keys2</c></seealso></item> + </list> + </desc> + </func> + + <func> + <name>add_host_key(HostNames, PublicHostKey, ConnectOptions) -> ok | {error, Reason}</name> + <fsummary></fsummary> + <desc> + <p><strong>Types and description</strong></p> + <p>See the api description in + <seealso marker="ssh:ssh_client_key_api#Module:add_host_key-3">ssh_client_key_api, Module:add_host_key/3</seealso>. + </p> + <p><strong>Option</strong></p> + <list> + <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item> + </list> + <p><strong>File</strong></p> + <list> + <item><seealso marker="#USERDIR-known_hosts"><c>USERDIR/known_hosts</c></seealso></item> + </list> + </desc> + </func> + + <func> + <name>is_host_key(Key, Host, Algorithm, ConnectOptions) -> Result</name> + <fsummary></fsummary> + <desc> + <p><strong>Types and description</strong></p> + <p>See the api description in + <seealso marker="ssh:ssh_client_key_api#Module:is_host_key-4">ssh_client_key_api, Module:is_host_key/4</seealso>. + </p> + <p><strong>Option</strong></p> + <list> + <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item> + </list> + <p><strong>File</strong></p> + <list> + <item><seealso marker="#USERDIR-known_hosts"><c>USERDIR/known_hosts</c></seealso></item> + </list> + </desc> + </func> + + <func> + <name>user_key(Algorithm, ConnectOptions) -> {ok, PrivateKey} | {error, Reason}</name> + <fsummary></fsummary> + <desc> + <p><strong>Types and description</strong></p> + <p>See the api description in + <seealso marker="ssh:ssh_client_key_api#Module:user_key-2">ssh_client_key_api, Module:user_key/2</seealso>. 
+ </p> + <p><strong>Options</strong></p> + <list> + <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item> + <item><seealso marker="#type-pubkey_passphrase_client_options">dsa_pass_phrase</seealso></item> + <item><seealso marker="#type-pubkey_passphrase_client_options">rsa_pass_phrase</seealso></item> + <item><seealso marker="#type-pubkey_passphrase_client_options">ecdsa_pass_phrase</seealso></item> + </list> + <p><strong>Files</strong></p> + <list> + <item><seealso marker="#USERDIR-id_dsa"><c>USERDIR/id_dsa</c></seealso></item> + <item><seealso marker="#USERDIR-id_rsa"><c>USERDIR/id_rsa</c></seealso></item> + <item><seealso marker="#USERDIR-id_ecdsa"><c>USERDIR/id_ecdsa</c></seealso></item> + </list> + </desc> + </func> + + </funcs> + +</erlref> diff --git a/lib/ssh/doc/src/terminology.xml b/lib/ssh/doc/src/terminology.xml new file mode 100644 index 0000000000..db1e08970d --- /dev/null +++ b/lib/ssh/doc/src/terminology.xml @@ -0,0 +1,185 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE chapter SYSTEM "chapter.dtd"> + +<chapter> + <header> + <copyright> + <year>2018</year> + <year>2018</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + </legalnotice> + + <title>Terminology</title> + <prepared></prepared> + <docno></docno> + <approved></approved> + <date></date> + <rev></rev> + <file>terminology.xml</file> + </header> + + <section> + <title>General Information</title> + <p>In the following terms that may cause confusion are explained. + </p> + </section> + + <section> + <title>The term "user"</title> + <p>A "user" is a term that everyone understands intuitively. However, the understandings may differ which can + cause confusion. + </p> + <p>The term is used differently in <url href="http://www.openssh.com">OpenSSH</url> and SSH in Erlang/OTP. + The reason is the different environments and use cases that are not immediatly obvious. + </p> + <p>This chapter aims at explaining the differences and giving a rationale for why Erlang/OTP handles "user" as + it does. + </p> + + <section> + <title>In OpenSSH</title> + <p>Many have been in contact with the command 'ssh' on a Linux machine (or similar) to remotly log in on + another machine. One types + </p> + <code>ssh host</code> + <p>to log in on the machine named <c>host</c>. The command prompts for your password on the remote <c>host</c> and + then you can read, write and execute as your <i>user name</i> has rights on the remote <c>host</c>. There are + stronger variants with pre-distributed keys or certificates, but that are for now just details in the + authentication process. + </p> + <p>You could log in as the user <c>anotheruser</c> with + </p> + <code>ssh anotheruser@host</code> + <p>and you will then be enabled to act as <c>anotheruser</c> on the <c>host</c> if authorized correctly. + </p> + <p>So what does <i>"your user name has rights"</i> mean? 
In a UNIX/Linux/etc context it is exactly as that context: + The <i>user</i> could read, write and execute programs according to the OS rules. + In addition, the user has a home directory (<c>$HOME</c>) and there is a <c>$HOME/.ssh/</c> directory + with ssh-specific files. + </p> + <section> + <title>SSH password authentication</title> + <p>When SSH tries to log in to a host, the ssh protocol communicates the user name (as a string) and a password. + The remote ssh server checks that there is such a user defined and that the provided password is acceptable. + </p> + <p>If so, the user is authorized. + </p> + </section> + <section> + <title>SSH public key authentication</title> + <p>This is a stronger method where the ssh protocol brings the user name, the user's public key and some + cryptographic information which we could ignore here. + </p> + <p>The ssh server on the remote host checks: + </p> + <list> + <item>That the <i>user</i> has a home directory,</item> + <item>that home directory contains a .ssh/ directory and</item> + <item>the .ssh/ directory contains the public key just received in the <c>authorized_keys</c> file</item> + </list> + <p>if so, the user is authorized. + </p> + </section> + <section> + <title>The SSH server on UNIX/Linux/etc after a succesful authentication</title> + <p>After a succesful incoming authentication, a new process runs as the just authenticated user.</p> + <p>Next step is to start a service according to the ssh request. In case of a request of a shell, + a new one is started which handles the OS-commands that arrives from the client (that's "you"). + </p> + <p>In case of a sftp request, an sftp server is started in with the user's rights. So it could read, write or delete + files if allowed for that user. + </p> + </section> + </section> + + <section> + <title>In Erlang/OTP SSH</title> + <p>For the Erlang/OTP SSH server the situation is different. The server executes in an Erlang process + in the Erlang emulator which in turn executes in an OS process. The emulator does not try to change its + user when authenticated over the SSH protocol. + So the remote user name is only for authentication purposes in the Erlang/OTP SSH application. + </p> + <section> + <title>Password authentication in Erlang SSH</title> + <p>The Erlang/OTP SSH server checks the user name and password in the following order: + </p> + <list type="ordered"> + <item>If a + <seealso marker="ssh:ssh#option-pwdfun"><c>pwdfun</c></seealso> + is defined, that one is called and the returned boolean is the authentication result. + </item> + <item>Else, if the + <seealso marker="ssh:ssh#option-user_passwords"><c>user_passwords</c></seealso> + option is defined and the username and the password matches, the authentication is a success. + </item> + <item>Else, if the option + <seealso marker="ssh:ssh#option-password"><c>password</c></seealso> + is defined and matches the password the authentication is a success. + Note that the use of this option is not recommended in non-test code. + </item> + </list> + </section> + <section> + <title>Public key authentication in Erlang SSH</title> + <p>The user name, public key and cryptographic data (a signature) that is sent by the client, are used as follows + (some steps left out for clearity): + </p> + <list type="ordered"> + <item>A callback module is selected using the options + <seealso marker="ssh:ssh#type-key_cb_common_option"><c>key_cb</c></seealso>. 
+ </item> + <item>The callback module is used to check that the provided public key is one of the user's pre-stored. + In case of the default callback module, the files <c>authorized_keys</c> and <c>authorized_keys2</c> + are searched in a directory found in the following order: + <list> + <item>If the option + <seealso marker="ssh:ssh_file#type-user_dir_fun_common_option"><c>user_dir_fun</c></seealso> + is defined, that fun is called and the returned directory is used, + </item> + <item>Else, If the option + <seealso marker="ssh:ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> + is defined, that directory is used, + </item> + <item>Else the subdirectory <c>.ssh</c> in the home directory of the user executing + the OS process of the Erlang emulator is used. + </item> + </list> + If the provided public key is not found, the authentication fails. + </item> + <item>Finally, if the provided public key is found, the signature provided by the client is checked with + the public key. + </item> + </list> + </section> + <section> + <title>The Erlang/OTP SSH server after a succesful authentication</title> + <p>After a successful authentication an <i>Erlang process</i> is handling the service request from the remote + ssh client. The rights of that process are those of the user of the OS process running the Erlang emulator. + </p> + <p>If a shell service request arrives to the server, an <i>Erlang shell</i> is opened in the server's emulator. + The rights in that shell is independent of the just authenticated user. + </p> + <p>In case of an sftp request, an sftp server is started with the rights of the user of the Erlang emulator's OS + process. So with sftp the authenticated user does not influence the rights. + </p> + <p>So after an authentication, the user name is not used anymore and has no influence. + </p> + </section> + </section> + </section> +</chapter> + diff --git a/lib/ssh/doc/src/usersguide.xml b/lib/ssh/doc/src/usersguide.xml index 38ffa48cde..8a4df208d8 100644 --- a/lib/ssh/doc/src/usersguide.xml +++ b/lib/ssh/doc/src/usersguide.xml @@ -36,5 +36,6 @@ </description> <xi:include href="introduction.xml"/> <xi:include href="using_ssh.xml"/> + <xi:include href="terminology.xml"/> <xi:include href="configure_algos.xml"/> </part> diff --git a/lib/ssh/doc/src/using_ssh.xml b/lib/ssh/doc/src/using_ssh.xml index 80662e9a70..4455d5ecc5 100644 --- a/lib/ssh/doc/src/using_ssh.xml +++ b/lib/ssh/doc/src/using_ssh.xml @@ -74,16 +74,17 @@ <marker id="Running an Erlang ssh Daemon"></marker> <title>Running an Erlang ssh Daemon</title> - <p>The <c>system_dir</c> option must be a directory containing a host - key file and it defaults to <c>/etc/ssh</c>. For details, see Section - Configuration Files in <seealso - marker="SSH_app">ssh(6)</seealso>. + <p>The + <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso> + option must be a directory containing a host key file and it defaults to <c>/etc/ssh</c>. + For details, see Section Configuration Files in <seealso marker="SSH_app">ssh(6)</seealso>. 
</p> <note><p>Normally, the <c>/etc/ssh</c> directory is only readable by root.</p> </note> - <p>The option <c>user_dir</c> defaults to directory <c>users ~/.ssh</c>.</p> + <p>The option <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> + defaults to directory <c>users ~/.ssh</c>.</p> <p><em>Step 1.</em> To run the example without root privileges, generate new keys and host keys:</p> diff --git a/lib/ssh/src/ssh.hrl b/lib/ssh/src/ssh.hrl index 94b9f3a196..3ac74c4925 100644 --- a/lib/ssh/src/ssh.hrl +++ b/lib/ssh/src/ssh.hrl @@ -173,7 +173,7 @@ -type common_options() :: [ common_option() ]. -type common_option() :: - user_dir_common_option() + ssh_file:user_dir_common_option() | profile_common_option() | max_idle_time_common_option() | key_cb_common_option() @@ -191,8 +191,6 @@ -define(COMMON_OPTION, common_option()). - --type user_dir_common_option() :: {user_dir, false | string()}. -type profile_common_option() :: {profile, atom() }. -type max_idle_time_common_option() :: {idle_time, timeout()}. -type rekey_limit_common_option() :: {rekey_limit, Bytes::limit_bytes() | @@ -223,14 +221,14 @@ {transport, {atom(),atom(),atom()} } | {vsn, {non_neg_integer(),non_neg_integer()} } | {tstflg, list(term())} - | {user_dir_fun, fun()} + | ssh_file:user_dir_fun_common_option() | {max_random_length_padding, non_neg_integer()} . -type client_option() :: pref_public_key_algs_client_option() - | pubkey_passphrase_client_options() + | ssh_file:pubkey_passphrase_client_options() | host_accepting_client_options() | authentication_client_options() | diffie_hellman_group_exchange_client_option() @@ -246,10 +244,6 @@ -type pref_public_key_algs_client_option() :: {pref_public_key_algs, [pubkey_alg()] } . --type pubkey_passphrase_client_options() :: {dsa_pass_phrase, string()} - | {rsa_pass_phrase, string()} - | {ecdsa_pass_phrase, string()} . - -type host_accepting_client_options() :: {silently_accept_hosts, accept_hosts()} | {user_interaction, boolean()} @@ -311,7 +305,7 @@ -type send_ext_info_daemon_option() :: {send_ext_info, boolean()} . -type authentication_daemon_options() :: - {system_dir, string()} + ssh_file:system_dir_daemon_option() | {auth_method_kb_interactive_data, prompt_texts() } | {user_passwords, [{UserName::string(),Pwd::string()}]} | {password, string()} diff --git a/lib/ssh/src/ssh_file.erl b/lib/ssh/src/ssh_file.erl index 832952ed52..669b0f9be2 100644 --- a/lib/ssh/src/ssh_file.erl +++ b/lib/ssh/src/ssh_file.erl @@ -39,6 +39,23 @@ is_auth_key/3]). +-export_type([system_dir_daemon_option/0, + user_dir_common_option/0, + user_dir_fun_common_option/0, + pubkey_passphrase_client_options/0 + ]). + +-type system_dir_daemon_option() :: {system_dir, string()}. +-type user_dir_common_option() :: {user_dir, string()}. +-type user_dir_fun_common_option() :: {user_dir_fun, user2dir()}. +-type user2dir() :: fun((RemoteUserName::string()) -> UserDir :: string()) . + +-type pubkey_passphrase_client_options() :: {dsa_pass_phrase, string()} + | {rsa_pass_phrase, string()} + | {ecdsa_pass_phrase, string()} . + + + -define(PERM_700, 8#700). -define(PERM_644, 8#644). diff --git a/lib/ssl/src/dtls_record.erl b/lib/ssl/src/dtls_record.erl index 9eb0d8e2d7..b7346d3ec8 100644 --- a/lib/ssl/src/dtls_record.erl +++ b/lib/ssl/src/dtls_record.erl @@ -499,23 +499,22 @@ encode_dtls_cipher_text(Type, {MajVer, MinVer}, Fragment, WriteState#{sequence_number => Seq + 1}}. 
encode_plain_text(Type, Version, Data, #{compression_state := CompS0, + cipher_state := CipherS0, epoch := Epoch, sequence_number := Seq, - cipher_state := CipherS0, security_parameters := #security_parameters{ cipher_type = ?AEAD, - bulk_cipher_algorithm = - BulkCipherAlgo, + bulk_cipher_algorithm = BCAlg, compression_algorithm = CompAlg} } = WriteState0) -> {Comp, CompS1} = ssl_record:compress(CompAlg, Data, CompS0), - AAD = calc_aad(Type, Version, Epoch, Seq), + AAD = start_additional_data(Type, Version, Epoch, Seq), + CipherS = ssl_record:nonce_seed(BCAlg, <<?UINT16(Epoch), ?UINT48(Seq)>>, CipherS0), + WriteState = WriteState0#{compression_state => CompS1, + cipher_state => CipherS}, TLSVersion = dtls_v1:corresponding_tls_version(Version), - {CipherFragment, CipherS1} = - ssl_cipher:cipher_aead(BulkCipherAlgo, CipherS0, Seq, AAD, Comp, TLSVersion), - {CipherFragment, WriteState0#{compression_state => CompS1, - cipher_state => CipherS1}}; + ssl_record:cipher_aead(TLSVersion, Comp, WriteState, AAD); encode_plain_text(Type, Version, Fragment, #{compression_state := CompS0, epoch := Epoch, sequence_number := Seq, @@ -547,9 +546,10 @@ decode_cipher_text(#ssl_tls{type = Type, version = Version, BulkCipherAlgo, compression_algorithm = CompAlg}} = ReadState0, ConnnectionStates0) -> - AAD = calc_aad(Type, Version, Epoch, Seq), + AAD = start_additional_data(Type, Version, Epoch, Seq), + CipherS1 = ssl_record:nonce_seed(BulkCipherAlgo, <<?UINT16(Epoch), ?UINT48(Seq)>>, CipherS0), TLSVersion = dtls_v1:corresponding_tls_version(Version), - case ssl_cipher:decipher_aead(BulkCipherAlgo, CipherS0, Seq, AAD, CipherFragment, TLSVersion) of + case ssl_record:decipher_aead(BulkCipherAlgo, CipherS1, AAD, CipherFragment, TLSVersion) of {PlainFragment, CipherState} -> {Plain, CompressionS1} = ssl_record:uncompress(CompAlg, PlainFragment, CompressionS0), @@ -600,7 +600,7 @@ mac_hash({Major, Minor}, MacAlg, MacSecret, Epoch, SeqNo, Type, Length, Fragment Fragment], dtls_v1:hmac_hash(MacAlg, MacSecret, Value). -calc_aad(Type, {MajVer, MinVer}, Epoch, SeqNo) -> +start_additional_data(Type, {MajVer, MinVer}, Epoch, SeqNo) -> <<?UINT16(Epoch), ?UINT48(SeqNo), ?BYTE(Type), ?BYTE(MajVer), ?BYTE(MinVer)>>. %%-------------------------------------------------------------------- diff --git a/lib/ssl/src/ssl_cipher.erl b/lib/ssl/src/ssl_cipher.erl index b23129dcdd..0e22d63b4b 100644 --- a/lib/ssl/src/ssl_cipher.erl +++ b/lib/ssl/src/ssl_cipher.erl @@ -34,7 +34,7 @@ -include_lib("public_key/include/public_key.hrl"). -export([security_parameters/2, security_parameters/3, - cipher_init/3, decipher/6, cipher/5, decipher_aead/6, cipher_aead/6, + cipher_init/3, nonce_seed/2, decipher/6, cipher/5, aead_encrypt/5, aead_decrypt/6, suites/1, all_suites/1, crypto_support_filters/0, chacha_suites/1, anonymous_suites/1, psk_suites/1, psk_suites_anon/1, srp_suites/0, srp_suites_anon/0, @@ -48,6 +48,8 @@ -type cipher_enum() :: integer(). +-export_type([cipher_enum/0]). + %%-------------------------------------------------------------------- -spec security_parameters(ssl_cipher_format:cipher_suite(), #security_parameters{}) -> #security_parameters{}. 
@@ -91,10 +93,15 @@ cipher_init(?RC4, IV, Key) -> #cipher_state{iv = IV, key = Key, state = State}; cipher_init(?AES_GCM, IV, Key) -> <<Nonce:64>> = random_bytes(8), - #cipher_state{iv = IV, key = Key, nonce = Nonce}; + #cipher_state{iv = IV, key = Key, nonce = Nonce, tag_len = 16}; +cipher_init(?CHACHA20_POLY1305, IV, Key) -> + #cipher_state{iv = IV, key = Key, tag_len = 16}; cipher_init(_BCA, IV, Key) -> #cipher_state{iv = IV, key = Key}. +nonce_seed(Seed, CipherState) -> + CipherState#cipher_state{nonce = Seed}. + %%-------------------------------------------------------------------- -spec cipher(cipher_enum(), #cipher_state{}, binary(), iodata(), ssl_record:ssl_version()) -> {binary(), #cipher_state{}}. @@ -126,32 +133,16 @@ cipher(?AES_CBC, CipherState, Mac, Fragment, Version) -> crypto:block_encrypt(aes_cbc256, Key, IV, T) end, block_size(aes_128_cbc), CipherState, Mac, Fragment, Version). -%%-------------------------------------------------------------------- --spec cipher_aead(cipher_enum(), #cipher_state{}, integer(), binary(), iodata(), ssl_record:ssl_version()) -> - {binary(), #cipher_state{}}. -%% -%% Description: Encrypts the data and protects associated data (AAD) using chipher -%% described by cipher_enum() and updating the cipher state -%% Use for suites that use authenticated encryption with associated data (AEAD) -%%------------------------------------------------------------------- -cipher_aead(?AES_GCM, CipherState, SeqNo, AAD, Fragment, Version) -> - aead_cipher(aes_gcm, CipherState, SeqNo, AAD, Fragment, Version); -cipher_aead(?CHACHA20_POLY1305, CipherState, SeqNo, AAD, Fragment, Version) -> - aead_cipher(chacha20_poly1305, CipherState, SeqNo, AAD, Fragment, Version). - -aead_cipher(chacha20_poly1305, #cipher_state{key=Key} = CipherState, SeqNo, AAD0, Fragment, _Version) -> - CipherLen = erlang:iolist_size(Fragment), - AAD = <<AAD0/binary, ?UINT16(CipherLen)>>, - Nonce = ?uint64(SeqNo), - {Content, CipherTag} = crypto:block_encrypt(chacha20_poly1305, Key, Nonce, {AAD, Fragment}), - {<<Content/binary, CipherTag/binary>>, CipherState}; -aead_cipher(Type, #cipher_state{key=Key, iv = IV0, nonce = Nonce} = CipherState, _SeqNo, AAD0, Fragment, _Version) -> - CipherLen = erlang:iolist_size(Fragment), - AAD = <<AAD0/binary, ?UINT16(CipherLen)>>, - <<Salt:4/bytes, _/binary>> = IV0, - IV = <<Salt/binary, Nonce:64/integer>>, - {Content, CipherTag} = crypto:block_encrypt(Type, Key, IV, {AAD, Fragment}), - {<<Nonce:64/integer, Content/binary, CipherTag/binary>>, CipherState#cipher_state{nonce = Nonce + 1}}. +aead_encrypt(Type, Key, Nonce, Fragment, AdditionalData) -> + crypto:block_encrypt(aead_type(Type), Key, Nonce, {AdditionalData, Fragment}). + +aead_decrypt(Type, Key, Nonce, CipherText, CipherTag, AdditionalData) -> + crypto:block_decrypt(aead_type(Type), Key, Nonce, {AdditionalData, CipherText, CipherTag}). + +aead_type(?AES_GCM) -> + aes_gcm; +aead_type(?CHACHA20_POLY1305) -> + chacha20_poly1305. build_cipher_block(BlockSz, Mac, Fragment) -> TotSz = byte_size(Mac) + erlang:iolist_size(Fragment) + 1, @@ -218,19 +209,6 @@ decipher(?AES_CBC, HashSz, CipherState, Fragment, Version, PaddingCheck) -> crypto:block_decrypt(aes_cbc256, Key, IV, T) end, CipherState, HashSz, Fragment, Version, PaddingCheck). -%%-------------------------------------------------------------------- --spec decipher_aead(cipher_enum(), #cipher_state{}, integer(), binary(), binary(), ssl_record:ssl_version()) -> - {binary(), #cipher_state{}} | #alert{}. 
-%% -%% Description: Decrypts the data and checks the associated data (AAD) MAC using -%% cipher described by cipher_enum() and updating the cipher state. -%% Use for suites that use authenticated encryption with associated data (AEAD) -%%------------------------------------------------------------------- -decipher_aead(?AES_GCM, CipherState, SeqNo, AAD, Fragment, Version) -> - aead_decipher(aes_gcm, CipherState, SeqNo, AAD, Fragment, Version); -decipher_aead(?CHACHA20_POLY1305, CipherState, SeqNo, AAD, Fragment, Version) -> - aead_decipher(chacha20_poly1305, CipherState, SeqNo, AAD, Fragment, Version). - block_decipher(Fun, #cipher_state{key=Key, iv=IV} = CipherState0, HashSz, Fragment, Version, PaddingCheck) -> try @@ -261,34 +239,6 @@ block_decipher(Fun, #cipher_state{key=Key, iv=IV} = CipherState0, ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed) end. -aead_ciphertext_to_state(chacha20_poly1305, SeqNo, _IV, AAD0, Fragment, _Version) -> - CipherLen = size(Fragment) - 16, - <<CipherText:CipherLen/bytes, CipherTag:16/bytes>> = Fragment, - AAD = <<AAD0/binary, ?UINT16(CipherLen)>>, - Nonce = ?uint64(SeqNo), - {Nonce, AAD, CipherText, CipherTag}; -aead_ciphertext_to_state(_, _SeqNo, <<Salt:4/bytes, _/binary>>, AAD0, Fragment, _Version) -> - CipherLen = size(Fragment) - 24, - <<ExplicitNonce:8/bytes, CipherText:CipherLen/bytes, CipherTag:16/bytes>> = Fragment, - AAD = <<AAD0/binary, ?UINT16(CipherLen)>>, - Nonce = <<Salt/binary, ExplicitNonce/binary>>, - {Nonce, AAD, CipherText, CipherTag}. - -aead_decipher(Type, #cipher_state{key = Key, iv = IV} = CipherState, - SeqNo, AAD0, Fragment, Version) -> - try - {Nonce, AAD, CipherText, CipherTag} = aead_ciphertext_to_state(Type, SeqNo, IV, AAD0, Fragment, Version), - case crypto:block_decrypt(Type, Key, Nonce, {AAD, CipherText, CipherTag}) of - Content when is_binary(Content) -> - {Content, CipherState}; - _ -> - ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed) - end - catch - _:_ -> - ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed) - end. - %%-------------------------------------------------------------------- -spec suites(ssl_record:ssl_version()) -> [ssl_cipher_format:cipher_suite()]. %% diff --git a/lib/ssl/src/ssl_cipher.hrl b/lib/ssl/src/ssl_cipher.hrl index ba6a98b92a..2371e8bd32 100644 --- a/lib/ssl/src/ssl_cipher.hrl +++ b/lib/ssl/src/ssl_cipher.hrl @@ -48,7 +48,8 @@ iv, key, state, - nonce + nonce, + tag_len }). %%% TLS_NULL_WITH_NULL_NULL is specified and is the initial state of a diff --git a/lib/ssl/src/ssl_record.erl b/lib/ssl/src/ssl_record.erl index 659e1485ac..b9d1320ef3 100644 --- a/lib/ssl/src/ssl_record.erl +++ b/lib/ssl/src/ssl_record.erl @@ -45,7 +45,7 @@ -export([compress/3, uncompress/3, compressions/0]). %% Payload encryption/decryption --export([cipher/4, decipher/4, cipher_aead/4, is_correct_mac/2]). +-export([cipher/4, decipher/4, cipher_aead/4, decipher_aead/5, is_correct_mac/2, nonce_seed/3]). -export_type([ssl_version/0, ssl_atom_version/0, connection_states/0, connection_state/0]). @@ -306,22 +306,20 @@ cipher(Version, Fragment, {CipherFragment, CipherS1} = ssl_cipher:cipher(BulkCipherAlgo, CipherS0, MacHash, Fragment, Version), {CipherFragment, WriteState0#{cipher_state => CipherS1}}. -%% %%-------------------------------------------------------------------- -%% -spec cipher_aead(ssl_version(), iodata(), connection_state(), MacHash::binary()) -> -%% {CipherFragment::binary(), connection_state()}. 
-%% %% -%% %% Description: Payload encryption +%%-------------------------------------------------------------------- +-spec cipher_aead(ssl_version(), iodata(), connection_state(), AAD::binary()) -> + {CipherFragment::binary(), connection_state()}. + +%% Description: Payload encryption %% %%-------------------------------------------------------------------- cipher_aead(Version, Fragment, #{cipher_state := CipherS0, - sequence_number := SeqNo, security_parameters := #security_parameters{bulk_cipher_algorithm = BulkCipherAlgo} } = WriteState0, AAD) -> - {CipherFragment, CipherS1} = - ssl_cipher:cipher_aead(BulkCipherAlgo, CipherS0, SeqNo, AAD, Fragment, Version), + cipher_aead(BulkCipherAlgo, CipherS0, AAD, Fragment, Version), {CipherFragment, WriteState0#{cipher_state => CipherS1}}. %%-------------------------------------------------------------------- @@ -344,10 +342,39 @@ decipher(Version, CipherFragment, #alert{} = Alert -> Alert end. +%%-------------------------------------------------------------------- +-spec decipher_aead(ssl_cipher:cipher_enum(), #cipher_state{}, + binary(), binary(), ssl_record:ssl_version()) -> + {binary(), #cipher_state{}} | #alert{}. +%% +%% Description: Decrypts the data and checks the associated data (AAD) MAC using +%% cipher described by cipher_enum() and updating the cipher state. +%% Use for suites that use authenticated encryption with associated data (AEAD) +%%------------------------------------------------------------------- +decipher_aead(Type, #cipher_state{key = Key} = CipherState, AAD0, CipherFragment, _) -> + try + Nonce = decrypt_nonce(Type, CipherState, CipherFragment), + {AAD, CipherText, CipherTag} = aead_ciphertext_split(Type, CipherState, CipherFragment, AAD0), + case ssl_cipher:aead_decrypt(Type, Key, Nonce, CipherText, CipherTag, AAD) of + Content when is_binary(Content) -> + {Content, CipherState}; + _ -> + ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed) + end + catch + _:_ -> + ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed) + end. + +nonce_seed(?CHACHA20_POLY1305, Seed, CipherState) -> + ssl_cipher:nonce_seed(Seed, CipherState); +nonce_seed(_,_, CipherState) -> + CipherState. %%-------------------------------------------------------------------- %%% Internal functions %%-------------------------------------------------------------------- + empty_connection_state(ConnectionEnd, BeastMitigation) -> SecParams = empty_security_params(ConnectionEnd), #{security_parameters => SecParams, @@ -400,3 +427,37 @@ initial_security_params(ConnectionEnd) -> compression_algorithm = ?NULL}, ssl_cipher:security_parameters(?TLS_NULL_WITH_NULL_NULL, SecParams). +cipher_aead(?CHACHA20_POLY1305 = Type, #cipher_state{key=Key} = CipherState, AAD0, Fragment, _Version) -> + AAD = end_additional_data(AAD0, erlang:iolist_size(Fragment)), + Nonce = encrypt_nonce(Type, CipherState), + {Content, CipherTag} = ssl_cipher:aead_encrypt(Type, Key, Nonce, Fragment, AAD), + {<<Content/binary, CipherTag/binary>>, CipherState}; +cipher_aead(Type, #cipher_state{key=Key, nonce = ExplicitNonce} = CipherState, AAD0, Fragment, _Version) -> + AAD = end_additional_data(AAD0, erlang:iolist_size(Fragment)), + Nonce = encrypt_nonce(Type, CipherState), + {Content, CipherTag} = ssl_cipher:aead_encrypt(Type, Key, Nonce, Fragment, AAD), + {<<ExplicitNonce:64/integer, Content/binary, CipherTag/binary>>, CipherState#cipher_state{nonce = ExplicitNonce + 1}}. 
+ +encrypt_nonce(?CHACHA20_POLY1305, #cipher_state{nonce = Nonce, iv = IV}) -> + crypto:exor(<<?UINT32(0), Nonce/binary>>, IV); +encrypt_nonce(?AES_GCM, #cipher_state{iv = IV, nonce = ExplicitNonce}) -> + <<Salt:4/bytes, _/binary>> = IV, + <<Salt/binary, ExplicitNonce:64/integer>>. + +decrypt_nonce(?CHACHA20_POLY1305, #cipher_state{nonce = Nonce, iv = IV}, _) -> + crypto:exor(<<Nonce:96/unsigned-big-integer>>, IV); +decrypt_nonce(?AES_GCM, #cipher_state{iv = <<Salt:4/bytes, _/binary>>}, <<ExplicitNonce:8/bytes, _/binary>>) -> + <<Salt/binary, ExplicitNonce/binary>>. + +aead_ciphertext_split(?CHACHA20_POLY1305, #cipher_state{tag_len = Len}, CipherTextFragment, AAD) -> + CipherLen = size(CipherTextFragment) - Len, + <<CipherText:CipherLen/bytes, CipherTag:Len/bytes>> = CipherTextFragment, + {end_additional_data(AAD, CipherLen), CipherText, CipherTag}; +aead_ciphertext_split(?AES_GCM, #cipher_state{tag_len = Len}, CipherTextFragment, AAD) -> + CipherLen = size(CipherTextFragment) - (Len + 8), %% 8 is length of explicit Nonce + << _:8/bytes, CipherText:CipherLen/bytes, CipherTag:Len/bytes>> = CipherTextFragment, + {end_additional_data(AAD, CipherLen), CipherText, CipherTag}. + +end_additional_data(AAD, Len) -> + <<AAD/binary, ?UINT16(Len)>>. + diff --git a/lib/ssl/src/tls_record.erl b/lib/ssl/src/tls_record.erl index f1aca8c801..ce7edc9dcd 100644 --- a/lib/ssl/src/tls_record.erl +++ b/lib/ssl/src/tls_record.erl @@ -176,14 +176,15 @@ decode_cipher_text(#ssl_tls{type = Type, version = Version, BulkCipherAlgo, compression_algorithm = CompAlg} } = ReadState0} = ConnnectionStates0, _) -> - AAD = calc_aad(Type, Version, ReadState0), - case ssl_cipher:decipher_aead(BulkCipherAlgo, CipherS0, Seq, AAD, CipherFragment, Version) of - {PlainFragment, CipherS1} -> + AAD = start_additional_data(Type, Version, ReadState0), + CipherS1 = ssl_record:nonce_seed(BulkCipherAlgo, <<?UINT64(Seq)>>, CipherS0), + case ssl_record:decipher_aead(BulkCipherAlgo, CipherS1, AAD, CipherFragment, Version) of + {PlainFragment, CipherState} -> {Plain, CompressionS1} = ssl_record:uncompress(CompAlg, PlainFragment, CompressionS0), ConnnectionStates = ConnnectionStates0#{ current_read => ReadState0#{ - cipher_state => CipherS1, + cipher_state => CipherState, sequence_number => Seq + 1, compression_state => CompressionS1}}, {CipherText#ssl_tls{fragment = Plain}, ConnnectionStates}; @@ -453,15 +454,20 @@ encode_iolist(Type, Data, Version, ConnectionStates0) -> {lists:reverse(EncodedMsg), ConnectionStates}. 
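For context on the nonce handling centralized in encrypt_nonce/2 and decrypt_nonce/3 above: AES-GCM records use the 4-byte salt from the IV followed by a 64-bit explicit nonce that also travels in front of the ciphertext, while ChaCha20-Poly1305 XORs the padded sequence number into the static IV. The sketch below is an editorial illustration, not part of the patch; it assumes 12-byte IVs and the values are invented.

    %% Illustrative only: the two nonce constructions side by side.
    GcmNonce = fun(<<Salt:4/bytes, _/binary>>, ExplicitNonce) ->
                       %% salt taken from the IV ++ 64-bit explicit nonce
                       <<Salt/binary, ExplicitNonce:64/integer>>
               end,
    ChaChaNonce = fun(IV, SeqBin) ->
                          %% 8-byte sequence number, left-padded to 12 bytes,
                          %% XORed with the static IV
                          crypto:exor(<<0:32, SeqBin/binary>>, IV)
                  end,
    <<0,0,0,0,0,0,0,0,0,0,0,1>> = GcmNonce(<<0:96>>, 1),
    <<0,0,0,0,0,0,0,0,0,0,0,1>> = ChaChaNonce(<<0:96>>, <<1:64>>).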
%%-------------------------------------------------------------------- do_encode_plain_text(Type, Version, Data, #{compression_state := CompS0, - security_parameters := + cipher_state := CipherS0, + sequence_number := Seq, + security_parameters := #security_parameters{ cipher_type = ?AEAD, + bulk_cipher_algorithm = BCAlg, compression_algorithm = CompAlg} } = WriteState0) -> {Comp, CompS1} = ssl_record:compress(CompAlg, Data, CompS0), - WriteState1 = WriteState0#{compression_state => CompS1}, - AAD = calc_aad(Type, Version, WriteState1), - ssl_record:cipher_aead(Version, Comp, WriteState1, AAD); + CipherS = ssl_record:nonce_seed(BCAlg, <<?UINT64(Seq)>>, CipherS0), + WriteState = WriteState0#{compression_state => CompS1, + cipher_state => CipherS}, + AAD = start_additional_data(Type, Version, WriteState), + ssl_record:cipher_aead(Version, Comp, WriteState, AAD); do_encode_plain_text(Type, Version, Data, #{compression_state := CompS0, security_parameters := #security_parameters{compression_algorithm = CompAlg} @@ -473,7 +479,7 @@ do_encode_plain_text(Type, Version, Data, #{compression_state := CompS0, do_encode_plain_text(_,_,_,CS) -> exit({cs, CS}). %%-------------------------------------------------------------------- -calc_aad(Type, {MajVer, MinVer}, +start_additional_data(Type, {MajVer, MinVer}, #{sequence_number := SeqNo}) -> <<?UINT64(SeqNo), ?BYTE(Type), ?BYTE(MajVer), ?BYTE(MinVer)>>. diff --git a/lib/stdlib/doc/src/assert_hrl.xml b/lib/stdlib/doc/src/assert_hrl.xml index 4dc7299609..fb27954235 100644 --- a/lib/stdlib/doc/src/assert_hrl.xml +++ b/lib/stdlib/doc/src/assert_hrl.xml @@ -46,7 +46,7 @@ is the macro name, for example, <c>assertEqual</c>. <c>Info</c> is a list of tagged values, such as <c>[{module, M}, {line, L}, ...]</c>, which gives more information about the location and cause of the exception. All - entries in the <c>Info</c> list are optional; do not rely programatically + entries in the <c>Info</c> list are optional; do not rely programmatically on any of them being present.</p> <p>Each assert macro has a corresponding version with an extra argument, diff --git a/lib/stdlib/doc/src/beam_lib.xml b/lib/stdlib/doc/src/beam_lib.xml index 26d0724aaf..213170df7f 100644 --- a/lib/stdlib/doc/src/beam_lib.xml +++ b/lib/stdlib/doc/src/beam_lib.xml @@ -180,8 +180,8 @@ io:fwrite("~s~n", [erl_prettypr:format(erl_syntax:form_list(AC))]).</code> <name name="beam"/> <desc> <p>Each of the functions described below accept either the - module name, the filename, or a binary containing the BEAM - module.</p> + filename (as a string) or a binary containing the BEAM + module.</p> </desc> </datatype> <datatype> diff --git a/lib/stdlib/src/beam_lib.erl b/lib/stdlib/src/beam_lib.erl index 01181b1097..3386cfcbe6 100644 --- a/lib/stdlib/src/beam_lib.erl +++ b/lib/stdlib/src/beam_lib.erl @@ -53,7 +53,7 @@ %%------------------------------------------------------------------------- --type beam() :: module() | file:filename() | binary(). +-type beam() :: file:filename() | binary(). -type debug_info() :: {DbgiVersion :: atom(), Backend :: module(), Data :: term()} | 'no_debug_info'. -type forms() :: [erl_parse:abstract_form() | erl_parse:form_info()]. 
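Related to the beam_lib documentation and type change above, where beam() is narrowed to a filename or a binary, a short illustrative call site may help; the path and module name are invented, and beam_lib:chunks/2 is the existing API.

    %% Illustrative only: both remaining forms of beam() after the change.
    File = "/tmp/example/foo.beam",                               % invented path
    {ok, {foo, [{exports, _Exports}]}} = beam_lib:chunks(File, [exports]),
    {ok, Bin} = file:read_file(File),
    {ok, {foo, [{attributes, _Attrs}]}} = beam_lib:chunks(Bin, [attributes]).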
diff --git a/lib/stdlib/test/gen_fsm_SUITE.erl b/lib/stdlib/test/gen_fsm_SUITE.erl index 41ee3246f5..a8264e5a84 100644 --- a/lib/stdlib/test/gen_fsm_SUITE.erl +++ b/lib/stdlib/test/gen_fsm_SUITE.erl @@ -124,8 +124,10 @@ start2(Config) when is_list(Config) -> {ok, Pid0} = gen_fsm:start(gen_fsm_SUITE, [], []), ok = do_func_test(Pid0), ok = do_sync_func_test(Pid0), + MRef = monitor(process,Pid0), shutdown_stopped = gen_fsm:sync_send_all_state_event(Pid0, stop_shutdown), + receive {'DOWN',MRef,_,_,shutdown} -> ok end, {'EXIT', {noproc,_}} = (catch gen_fsm:sync_send_event(Pid0, hej)), diff --git a/lib/stdlib/test/sys_SUITE.erl b/lib/stdlib/test/sys_SUITE.erl index 3278eb0eb0..fcc4419569 100644 --- a/lib/stdlib/test/sys_SUITE.erl +++ b/lib/stdlib/test/sys_SUITE.erl @@ -219,7 +219,7 @@ spec_proc(Mod) -> {Mod,system_get_state},{throw,fail}},_}} -> ok end, - ok = sys:terminate(Mod, normal), + ok = sync_terminate(Mod), {ok,_} = Mod:start_link(4), ok = case catch sys:replace_state(Mod, fun(_) -> {} end) of {} -> @@ -228,7 +228,7 @@ spec_proc(Mod) -> {Mod,system_replace_state},{throw,fail}},_}} -> ok end, - ok = sys:terminate(Mod, normal), + ok = sync_terminate(Mod), {ok,_} = Mod:start_link(4), StateFun = fun(_) -> error(fail) end, ok = case catch sys:replace_state(Mod, StateFun) of @@ -240,7 +240,18 @@ spec_proc(Mod) -> {'EXIT',{{callback_failed,StateFun,{error,fail}},_}} -> ok end, - ok = sys:terminate(Mod, normal). + ok = sync_terminate(Mod). + +sync_terminate(Mod) -> + P = whereis(Mod), + MRef = erlang:monitor(process,P), + ok = sys:terminate(Mod, normal), + receive + {'DOWN',MRef,_,_,normal} -> + ok + end, + undefined = whereis(Mod), + ok. %%%%%%%%%%%%%%%%%%%% %% Dummy server |
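The gen_fsm_SUITE and sys_SUITE changes above both avoid a race by monitoring the process before stopping it and waiting for the 'DOWN' message. A generic sketch of that pattern, with RegName as a placeholder for a locally registered process under sys control:

    %% Monitor first, then terminate, then wait for 'DOWN': only after the
    %% 'DOWN' message arrives is it safe to assume the process, and its
    %% registered name, are really gone.
    stop_synchronously(RegName) ->
        Pid = whereis(RegName),
        MRef = erlang:monitor(process, Pid),
        ok = sys:terminate(RegName, normal),
        receive
            {'DOWN', MRef, process, Pid, normal} -> ok
        after 5000 ->
            error(timeout)
        end.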