-rw-r--r--  erts/doc/src/erl_driver.xml                       |   6
-rw-r--r--  lib/ssh/doc/src/ssh.xml                           |   6
-rw-r--r--  lib/ssh/src/ssh.hrl                               |   7
-rw-r--r--  lib/ssh/src/ssh_connection_handler.erl            | 425
-rw-r--r--  lib/ssh/src/ssh_options.erl                       |  21
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE.erl                  | 135
-rw-r--r--  lib/ssh/test/ssh_test_lib.erl                     |   2
-rw-r--r--  lib/tools/src/xref.erl                            |   6
-rw-r--r--  system/doc/efficiency_guide/binaryhandling.xml    |  82
-rw-r--r--  system/doc/efficiency_guide/efficiency_guide.erl  |  24
10 files changed, 315 insertions, 399 deletions
diff --git a/erts/doc/src/erl_driver.xml b/erts/doc/src/erl_driver.xml
index c790872fe4..e6c9905039 100644
--- a/erts/doc/src/erl_driver.xml
+++ b/erts/doc/src/erl_driver.xml
@@ -429,7 +429,7 @@
<taglist>
<tag>Return types for driver callbacks</tag>
<item>
- <p>Rrewrite driver callback
+ <p>Rewrite driver callback
<seealso marker="driver_entry#control"><c>control</c></seealso>
to use return type <c>ErlDrvSSizeT</c> instead of <c>int</c>.</p>
<p>Rewrite driver callback
@@ -841,7 +841,7 @@ int suggested_stack_size;</code>
<p>Thread options structure passed to
<seealso marker="#erl_drv_thread_create">
<c>erl_drv_thread_create</c></seealso>.
- The following fields exists:</p>
+ The following field exists:</p>
<taglist>
<tag><c>suggested_stack_size</c></tag>
<item>A suggestion, in kilowords, on how large a stack to use.
@@ -3220,6 +3220,6 @@ erl_drv_output_term(driver_mk_port(drvport), spec, sizeof(spec) / sizeof(spec[0]
<seealso marker="erlang"><c>erlang(3)</c></seealso>,
<seealso marker="kernel:erl_ddll"><c>erl_ddll(3)</c></seealso>,
section <seealso marker="alt_dist">How to Implement an Alternative
- Carrier for the Erlang Distribution></seealso> in the User's Guide</p>
+ Carrier for the Erlang Distribution</seealso> in the User's Guide</p>
</section>
</cref>
diff --git a/lib/ssh/doc/src/ssh.xml b/lib/ssh/doc/src/ssh.xml
index 6aed525e8b..407956cc6f 100644
--- a/lib/ssh/doc/src/ssh.xml
+++ b/lib/ssh/doc/src/ssh.xml
@@ -762,6 +762,8 @@
<datatype>
<name name="rekey_limit_common_option"/>
+ <name name="limit_bytes"/>
+ <name name="limit_time"/>
<desc>
<p>Sets the limit when rekeying is to be initiated. Both the max time and max amount of data
could be configured:
@@ -773,6 +775,10 @@
</list>
<p>When a rekeying is done, both the timer and the byte counter are restarted.
Defaults to one hour and one GByte.</p>
+ <p>If <c>Minutes</c> is set to <c>infinity</c>, no rekeying is ever initiated because of elapsed time.
+ Setting <c>Bytes</c> to <c>infinity</c> disables rekeying triggered by the amount of transferred data.
+ If the option value is set to <c>{infinity, infinity}</c>, no rekeying will be initiated at all. Note that rekeying initiated
+ by the peer will still be performed.</p>
</desc>
</datatype>
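
A minimal usage sketch of the option forms documented above (the module name and the 1024000000 byte limit are illustrative values, not taken from the patch; ssh:connect/3 is the existing API):

    -module(rekey_opts_example).
    -export([connect_no_time_rekey/2, connect_no_rekey/2]).

    connect_no_time_rekey(Host, Port) ->
        %% Volume-based rekeying only: elapsed time never triggers a rekey.
        ssh:connect(Host, Port, [{rekey_limit, {infinity, 1024000000}}]).

    connect_no_rekey(Host, Port) ->
        %% No rekeying initiated by this side; a peer-initiated rekey is still honoured.
        ssh:connect(Host, Port, [{rekey_limit, {infinity, infinity}}]).
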
diff --git a/lib/ssh/src/ssh.hrl b/lib/ssh/src/ssh.hrl
index fc0a3786ac..2efd239aae 100644
--- a/lib/ssh/src/ssh.hrl
+++ b/lib/ssh/src/ssh.hrl
@@ -191,10 +191,13 @@
-type user_dir_common_option() :: {user_dir, false | string()}.
-type profile_common_option() :: {profile, atom() }.
-type max_idle_time_common_option() :: {idle_time, timeout()}.
--type rekey_limit_common_option() :: {rekey_limit, Bytes::non_neg_integer() |
- {Minutes::non_neg_integer(), Bytes::non_neg_integer()}
+-type rekey_limit_common_option() :: {rekey_limit, Bytes::limit_bytes() |
+ {Minutes::limit_time(), Bytes::limit_bytes()}
}.
+-type limit_bytes() :: non_neg_integer() | infinity . % non_neg_integer due to compatibility
+-type limit_time() :: pos_integer() | infinity .
+
-type key_cb_common_option() :: {key_cb, Module::atom() | {Module::atom(),Opts::[term()]} } .
-type disconnectfun_common_option() ::
{disconnectfun, fun((Reason::term()) -> void | any()) }.
diff --git a/lib/ssh/src/ssh_connection_handler.erl b/lib/ssh/src/ssh_connection_handler.erl
index dfdad769ed..f1ff3a70e2 100644
--- a/lib/ssh/src/ssh_connection_handler.erl
+++ b/lib/ssh/src/ssh_connection_handler.erl
@@ -71,7 +71,7 @@
-export([init_connection_handler/3, % proc_lib:spawn needs this
init_ssh_record/3, % Export of this internal function
% intended for low-level protocol test suites
- renegotiate/1, renegotiate_data/1, alg/1 % Export intended for test cases
+ renegotiate/1, alg/1 % Export intended for test cases
]).
-export([dbg_trace/3]).
@@ -325,14 +325,7 @@ close(ConnectionHandler, ChannelId) ->
) -> ok.
%% . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
renegotiate(ConnectionHandler) ->
- cast(ConnectionHandler, renegotiate).
-
-%%--------------------------------------------------------------------
--spec renegotiate_data(connection_ref()
- ) -> ok.
-%% . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
-renegotiate_data(ConnectionHandler) ->
- cast(ConnectionHandler, data_size).
+ cast(ConnectionHandler, force_renegotiate).
%%--------------------------------------------------------------------
alg(ConnectionHandler) ->
@@ -349,11 +342,6 @@ alg(ConnectionHandler) ->
connection_state :: #connection{},
latest_channel_id = 0 :: non_neg_integer()
| undefined,
- idle_timer_ref :: undefined
- | infinity
- | reference(),
- idle_timer_value = infinity :: infinity
- | pos_integer(),
transport_protocol :: atom()
| undefined, % ex: tcp
transport_cb :: atom()
@@ -429,22 +417,17 @@ init([Role,Socket,Opts]) ->
},
D = case Role of
client ->
- cache_init_idle_timer(D0);
+ D0;
server ->
Sups = ?GET_INTERNAL_OPT(supervisors, Opts),
- cache_init_idle_timer(
- D0#data{connection_state =
- C#connection{cli_spec = ?GET_OPT(ssh_cli, Opts, {ssh_cli,[?GET_OPT(shell, Opts)]}),
- exec = ?GET_OPT(exec, Opts),
- system_supervisor = proplists:get_value(system_sup, Sups),
- sub_system_supervisor = proplists:get_value(subsystem_sup, Sups),
- connection_supervisor = proplists:get_value(connection_sup, Sups)
- }})
+ D0#data{connection_state =
+ C#connection{cli_spec = ?GET_OPT(ssh_cli, Opts, {ssh_cli,[?GET_OPT(shell, Opts)]}),
+ exec = ?GET_OPT(exec, Opts),
+ system_supervisor = proplists:get_value(system_sup, Sups),
+ sub_system_supervisor = proplists:get_value(subsystem_sup, Sups),
+ connection_supervisor = proplists:get_value(connection_sup, Sups)
+ }}
end,
- %% Start the renegotiation timers
- {RekeyTimeout,_MaxSent} = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
- timer:apply_after(RekeyTimeout, gen_statem, cast, [self(), renegotiate]),
- timer:apply_after(?REKEY_DATA_TIMOUT, gen_statem, cast, [self(), data_size]),
{ok, {hello,Role}, D};
{error,Error} ->
@@ -559,10 +542,15 @@ renegotiation(_) -> false.
#data{}
) -> gen_statem:event_handler_result(state_name()) .
+-define(CONNECTION_MSG(Msg),
+ [{next_event, internal, prepare_next_packet},
+ {next_event,internal,{conn_msg,Msg}}]).
+
%% . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
callback_mode() ->
- handle_event_function.
+ [handle_event_function,
+ state_enter].
handle_event(_, _Event, {init_error,Error}=StateName, D) ->
@@ -1017,97 +1005,92 @@ handle_event(_, #ssh_msg_debug{} = Msg, _, D) ->
debug_fun(Msg, D),
keep_state_and_data;
-handle_event(internal, Msg=#ssh_msg_global_request{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_request_success{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_request_failure{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_open{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_open_confirmation{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_open_failure{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_window_adjust{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_data{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_extended_data{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_eof{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
-
-handle_event(internal, Msg=#ssh_msg_channel_close{}, {connected,server} = StateName, D) ->
- handle_connection_msg(Msg, StateName, cache_request_idle_timer_check(D));
-
-handle_event(internal, Msg=#ssh_msg_channel_close{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
+handle_event(internal, {conn_msg,Msg}, StateName, #data{starter = User,
+ connection_state = Connection0,
+ event_queue = Qev0} = D0) ->
+ Role = role(StateName),
+ Rengotation = renegotiation(StateName),
+ try ssh_connection:handle_msg(Msg, Connection0, Role) of
+ {disconnect, Reason0, RepliesConn} ->
+ {Repls, D} = send_replies(RepliesConn, D0),
+ case {Reason0,Role} of
+ {{_, Reason}, client} when ((StateName =/= {connected,client})
+ and (not Rengotation)) ->
+ User ! {self(), not_connected, Reason};
+ _ ->
+ ok
+ end,
+ {stop_and_reply, {shutdown,normal}, Repls, D};
-handle_event(internal, Msg=#ssh_msg_channel_request{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
+ {Replies, Connection} when is_list(Replies) ->
+ {Repls, D} =
+ case StateName of
+ {connected,_} ->
+ send_replies(Replies, D0#data{connection_state=Connection});
+ _ ->
+ {ConnReplies, NonConnReplies} = lists:splitwith(fun not_connected_filter/1, Replies),
+ send_replies(NonConnReplies, D0#data{event_queue = Qev0 ++ ConnReplies})
+ end,
+ case {Msg, StateName} of
+ {#ssh_msg_channel_close{}, {connected,_}} ->
+ {keep_state, D, [cond_set_idle_timer(D)|Repls]};
+ {#ssh_msg_channel_success{}, _} ->
+ update_inet_buffers(D#data.socket),
+ {keep_state, D, Repls};
+ _ ->
+ {keep_state, D, Repls}
+ end
-handle_event(internal, Msg=#ssh_msg_channel_success{}, StateName, D) ->
- update_inet_buffers(D#data.socket),
- handle_connection_msg(Msg, StateName, D);
+ catch
+ Class:Error ->
+ {Repls, D1} = send_replies(ssh_connection:handle_stop(Connection0), D0),
+ {Shutdown, D} = ?send_disconnect(?SSH_DISCONNECT_BY_APPLICATION,
+ io_lib:format("Internal error: ~p:~p",[Class,Error]),
+ StateName, D1),
+ {stop_and_reply, Shutdown, Repls, D}
+ end;
-handle_event(internal, Msg=#ssh_msg_channel_failure{}, StateName, D) ->
- handle_connection_msg(Msg, StateName, D);
+handle_event(enter, _OldState, {connected,_}=State, D) ->
+ %% Entering the state where re-negotiation is possible
+ init_renegotiate_timers(State, D);
+
+handle_event(enter, _OldState, {ext_info,_,renegotiate}=State, D) ->
+ %% Could be hanging in exit_info state if nothing else arrives
+ init_renegotiate_timers(State, D);
+
+handle_event(enter, {connected,_}, State, D) ->
+ %% Exiting the state where re-negotiation is possible
+ pause_renegotiate_timers(State, D);
+
+handle_event(cast, force_renegotiate, StateName, D) ->
+ handle_event({timeout,renegotiate}, undefined, StateName, D);
+
+handle_event({timeout,renegotiate}, _, StateName, D0) ->
+ case StateName of
+ {connected,Role} ->
+ start_rekeying(Role, D0);
+ {ext_info,Role,renegotiate} ->
+ start_rekeying(Role, D0);
+ _ ->
+ %% Wrong state for starting a renegotiation; we must already be re-negotiating
+ keep_state_and_data
+ end;
-handle_event(cast, renegotiate, {connected,Role}, D) ->
- {KeyInitMsg, SshPacket, Ssh} = ssh_transport:key_exchange_init_msg(D#data.ssh_params),
- send_bytes(SshPacket, D),
- {RekeyTimeout,_MaxSent} = ?GET_OPT(rekey_limit, Ssh#ssh.opts),
- timer:apply_after(RekeyTimeout, gen_statem, cast, [self(), renegotiate]),
- {next_state, {kexinit,Role,renegotiate}, D#data{ssh_params = Ssh,
- key_exchange_init_msg = KeyInitMsg}};
+handle_event({timeout,check_data_size}, _, StateName, D0) ->
+ %% Rekey due to sent data limit reached? (Can't be in {ext_info,...} if data is sent)
+ case StateName of
+ {connected,Role} ->
+ check_data_rekeying(Role, D0);
+ _ ->
+ %% Wrong state for starting a renegotiation; we must already be re-negotiating
+ keep_state_and_data
+ end;
handle_event({call,From}, get_alg, _, D) ->
#ssh{algorithms=Algs} = D#data.ssh_params,
{keep_state_and_data, [{reply,From,Algs}]};
-handle_event(cast, renegotiate, _, D) ->
- %% Already in key-exchange so safe to ignore
- {RekeyTimeout,_MaxSent} = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
- timer:apply_after(RekeyTimeout, gen_statem, cast, [self(), renegotiate]),
- keep_state_and_data;
-
-
-%% Rekey due to sent data limit reached?
-handle_event(cast, data_size, {connected,Role}, D) ->
- {ok, [{send_oct,Sent0}]} = inet:getstat(D#data.socket, [send_oct]),
- Sent = Sent0 - D#data.last_size_rekey,
- {_RekeyTimeout,MaxSent} = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
- timer:apply_after(?REKEY_DATA_TIMOUT, gen_statem, cast, [self(), data_size]),
- case Sent >= MaxSent of
- true ->
- {KeyInitMsg, SshPacket, Ssh} =
- ssh_transport:key_exchange_init_msg(D#data.ssh_params),
- send_bytes(SshPacket, D),
- {next_state, {kexinit,Role,renegotiate}, D#data{ssh_params = Ssh,
- key_exchange_init_msg = KeyInitMsg,
- last_size_rekey = Sent0}};
- _ ->
- keep_state_and_data
- end;
-
-handle_event(cast, data_size, _, _) ->
- %% Already in key-exchange so safe to ignore
- timer:apply_after(?REKEY_DATA_TIMOUT, gen_statem, cast, [self(), data_size]), % FIXME: not here in original
- keep_state_and_data;
-
-
-
handle_event(cast, _, StateName, _) when not ?CONNECTED(StateName) ->
{keep_state_and_data, [postpone]};
@@ -1221,7 +1204,7 @@ handle_event({call,From}, {request, ChannelPid, ChannelId, Type, Data, Timeout},
D ->
%% Note reply to channel will happen later when reply is received from peer on the socket
start_channel_request_timer(ChannelId, From, Timeout),
- {keep_state, cache_request_idle_timer_check(D)}
+ {keep_state, D, cond_set_idle_timer(D)}
end;
handle_event({call,From}, {request, ChannelId, Type, Data, Timeout}, StateName, D0)
@@ -1232,7 +1215,7 @@ handle_event({call,From}, {request, ChannelId, Type, Data, Timeout}, StateName,
D ->
%% Note reply to channel will happen later when reply is received from peer on the socket
start_channel_request_timer(ChannelId, From, Timeout),
- {keep_state, cache_request_idle_timer_check(D)}
+ {keep_state, D, cond_set_idle_timer(D)}
end;
handle_event({call,From}, {data, ChannelId, Type, Data, Timeout}, StateName, D0)
@@ -1273,7 +1256,7 @@ handle_event({call,From},
}),
D = add_request(true, ChannelId, From, D2),
start_channel_request_timer(ChannelId, From, Timeout),
- {keep_state, cache_cancel_idle_timer(D)};
+ {keep_state, D, cond_set_idle_timer(D)};
handle_event({call,From}, {send_window, ChannelId}, StateName, D)
when ?CONNECTED(StateName) ->
@@ -1303,7 +1286,7 @@ handle_event({call,From}, {close, ChannelId}, StateName, D0)
#channel{remote_id = Id} = Channel ->
D1 = send_msg(ssh_connection:channel_close_msg(Id), D0),
ssh_client_channel:cache_update(cache(D1), Channel#channel{sent_close = true}),
- {keep_state, cache_request_idle_timer_check(D1), [{reply,From,ok}]};
+ {keep_state, D1, [cond_set_idle_timer(D1), {reply,From,ok}]};
undefined ->
{keep_state_and_data, [{reply,From,ok}]}
end;
@@ -1319,6 +1302,7 @@ handle_event(info, {Proto, Sock, Info}, {hello,_}, #data{socket = Sock,
{keep_state_and_data, [{next_event, internal, {info_line,Info}}]}
end;
+
handle_event(info, {Proto, Sock, NewData}, StateName, D0 = #data{socket = Sock,
transport_protocol = Proto}) ->
try ssh_transport:handle_packet_part(
@@ -1336,13 +1320,29 @@ handle_event(info, {Proto, Sock, NewData}, StateName, D0 = #data{socket = Sock,
try
ssh_message:decode(set_kex_overload_prefix(DecryptedBytes,D1))
of
- Msg = #ssh_msg_kexinit{} ->
+ #ssh_msg_kexinit{} = Msg ->
{keep_state, D1, [{next_event, internal, prepare_next_packet},
{next_event, internal, {Msg,DecryptedBytes}}
]};
+
+ #ssh_msg_global_request{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_request_success{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_request_failure{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_open{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_open_confirmation{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_open_failure{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_window_adjust{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_data{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_extended_data{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_eof{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_close{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_request{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_failure{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+ #ssh_msg_channel_success{} = Msg -> {keep_state, D1, ?CONNECTION_MSG(Msg)};
+
Msg ->
{keep_state, D1, [{next_event, internal, prepare_next_packet},
- {next_event, internal, Msg}
+ {next_event, internal, Msg}
]}
catch
C:E ->
@@ -1421,8 +1421,20 @@ handle_event(info, {timeout, {_, From} = Request}, _,
end;
%%% Handle that ssh channels user process goes down
-handle_event(info, {'DOWN', _Ref, process, ChannelPid, _Reason}, _, D0) ->
- {keep_state, handle_channel_down(ChannelPid, D0)};
+handle_event(info, {'DOWN', _Ref, process, ChannelPid, _Reason}, _, D) ->
+ Cache = cache(D),
+ ssh_client_channel:cache_foldl(
+ fun(#channel{user=U,
+ local_id=Id}, Acc) when U == ChannelPid ->
+ ssh_client_channel:cache_delete(Cache, Id),
+ Acc;
+ (_,Acc) ->
+ Acc
+ end, [], Cache),
+ {keep_state, D, cond_set_idle_timer(D)};
+
+handle_event({timeout,idle_time}, _Data, _StateName, _D) ->
+ {stop, {shutdown, "Timeout"}};
%%% So that terminate will be run when supervisor is shutdown
handle_event(info, {'EXIT', _Sup, Reason}, StateName, _) ->
@@ -1442,7 +1454,7 @@ handle_event(info, {'EXIT', _Sup, Reason}, StateName, _) ->
end;
handle_event(info, check_cache, _, D) ->
- {keep_state, cache_check_set_idle_timer(D)};
+ {keep_state, D, cond_set_idle_timer(D)};
handle_event(info, UnexpectedMessage, StateName, D = #data{ssh_params = Ssh}) ->
case unexpected_fun(UnexpectedMessage, D) of
@@ -1489,6 +1501,11 @@ handle_event(internal, {send_disconnect,Code,DetailedText,Module,Line}, StateNam
send_disconnect(Code, DetailedText, Module, Line, StateName, D0),
{stop, Shutdown, D};
+
+handle_event(enter, _OldState, State, D) ->
+ %% Just skip
+ {next_state, State, D};
+
handle_event(_Type, _Msg, {ext_info,Role,_ReNegFlag}, D) ->
%% If something else arrives, goto next state and handle the event in that one
{next_state, {connected,Role}, D, [postpone]};
@@ -1746,46 +1763,6 @@ call(FsmPid, Event, Timeout) ->
end.
-handle_connection_msg(Msg, StateName, D0 = #data{starter = User,
- connection_state = Connection0,
- event_queue = Qev0}) ->
- Renegotiation = renegotiation(StateName),
- Role = role(StateName),
- try ssh_connection:handle_msg(Msg, Connection0, Role) of
- {disconnect, Reason0, RepliesConn} ->
- {Repls, D} = send_replies(RepliesConn, D0),
- case {Reason0,Role} of
- {{_, Reason}, client} when ((StateName =/= {connected,client}) and (not Renegotiation)) ->
- User ! {self(), not_connected, Reason};
- _ ->
- ok
- end,
- {stop_and_reply, {shutdown,normal}, Repls, D};
-
- {[], Connection} ->
- {keep_state, D0#data{connection_state = Connection}};
-
- {Replies, Connection} when is_list(Replies) ->
- {Repls, D} =
- case StateName of
- {connected,_} ->
- send_replies(Replies, D0#data{connection_state=Connection});
- _ ->
- {ConnReplies, NonConnReplies} = lists:splitwith(fun not_connected_filter/1, Replies),
- send_replies(NonConnReplies, D0#data{event_queue = Qev0 ++ ConnReplies})
- end,
- {keep_state, D, Repls}
-
- catch
- Class:Error ->
- {Repls, D1} = send_replies(ssh_connection:handle_stop(Connection0), D0),
- {Shutdown, D} = ?send_disconnect(?SSH_DISCONNECT_BY_APPLICATION,
- io_lib:format("Internal error: ~p:~p",[Class,Error]),
- StateName, D1),
- {stop_and_reply, Shutdown, Repls, D}
- end.
-
-
set_kex_overload_prefix(Msg = <<?BYTE(Op),_/binary>>, #data{ssh_params=SshParams})
when Op == 30;
Op == 31
@@ -1891,19 +1868,6 @@ handle_request(ChannelId, Type, Data, WantReply, From, D) ->
end.
%%%----------------------------------------------------------------
-handle_channel_down(ChannelPid, D) ->
- Cache = cache(D),
- ssh_client_channel:cache_foldl(
- fun(#channel{user=U,
- local_id=Id}, Acc) when U == ChannelPid ->
- ssh_client_channel:cache_delete(Cache, Id),
- Acc;
- (_,Acc) ->
- Acc
- end, [], Cache),
- cache_check_set_idle_timer(D).
-
-
update_sys(Cache, Channel, Type, ChannelPid) ->
ssh_client_channel:cache_update(Cache,
Channel#channel{sys = Type, user = ChannelPid}).
@@ -1922,6 +1886,42 @@ new_channel_id(#data{connection_state = #connection{channel_id_seed = Id} =
{Id, State#data{connection_state =
Connection#connection{channel_id_seed = Id + 1}}}.
+
+%%%----------------------------------------------------------------
+start_rekeying(Role, D0) ->
+ {KeyInitMsg, SshPacket, Ssh} = ssh_transport:key_exchange_init_msg(D0#data.ssh_params),
+ send_bytes(SshPacket, D0),
+ D = D0#data{ssh_params = Ssh,
+ key_exchange_init_msg = KeyInitMsg},
+ {next_state, {kexinit,Role,renegotiate}, D}.
+
+
+init_renegotiate_timers(State, D) ->
+ {RekeyTimeout,_MaxSent} = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
+ {next_state, State, D, [{{timeout,renegotiate}, RekeyTimeout, none},
+ {{timeout,check_data_size}, ?REKEY_DATA_TIMOUT, none} ]}.
+
+
+pause_renegotiate_timers(State, D) ->
+ {next_state, State, D, [{{timeout,renegotiate}, infinity, none},
+ {{timeout,check_data_size}, infinity, none} ]}.
+
+check_data_rekeying(Role, D) ->
+ {ok, [{send_oct,SocketSentTotal}]} = inet:getstat(D#data.socket, [send_oct]),
+ SentSinceRekey = SocketSentTotal - D#data.last_size_rekey,
+ {_RekeyTimeout,MaxSent} = ?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
+ case check_data_rekeying_dbg(SentSinceRekey, MaxSent) of
+ true ->
+ start_rekeying(Role, D#data{last_size_rekey = SocketSentTotal});
+ _ ->
+ %% Not enough data sent for a re-negotiation. Restart timer.
+ {keep_state, D, {{timeout,check_data_size}, ?REKEY_DATA_TIMOUT, none}}
+ end.
+
+check_data_rekeying_dbg(SentSinceRekey, MaxSent) ->
+ %% This function is for the ssh_dbg to trace on. See dbg_trace/3 at the end.
+ SentSinceRekey >= MaxSent.
+
%%%----------------------------------------------------------------
%%% This server/client has decided to disconnect via the state machine:
%%% The unused arguments are for debugging.
@@ -2134,60 +2134,12 @@ retry_fun(User, Reason, #data{ssh_params = #ssh{opts = Opts,
%%% Cache idle timer that closes the connection if there are no
%%% channels open for a while.
-cache_init_idle_timer(D) ->
- case ?GET_OPT(idle_time, (D#data.ssh_params)#ssh.opts) of
- infinity ->
- D#data{idle_timer_value = infinity,
- idle_timer_ref = infinity % A flag used later...
- };
- IdleTime ->
- %% We dont want to set the timeout on first connect
- D#data{idle_timer_value = IdleTime}
- end.
-
-
-cache_check_set_idle_timer(D = #data{idle_timer_ref = undefined,
- idle_timer_value = IdleTime}) ->
- %% No timer set - shall we set one?
+cond_set_idle_timer(D) ->
case ssh_client_channel:cache_info(num_entries, cache(D)) of
- 0 when IdleTime == infinity ->
- %% No. Meaningless to set a timer that fires in an infinite time...
- D;
- 0 ->
- %% Yes, we'll set one since the cache is empty and it should not
- %% be that for a specified time
- D#data{idle_timer_ref =
- erlang:send_after(IdleTime, self(), {'EXIT',[],"Timeout"})};
- _ ->
- %% No - there are entries in the cache
- D
- end;
-cache_check_set_idle_timer(D) ->
- %% There is already a timer set or the timeout time is infinite
- D.
-
-
-cache_cancel_idle_timer(D) ->
- case D#data.idle_timer_ref of
- infinity ->
- %% The timer is not activated
- D;
- undefined ->
- %% The timer is already cancelled
- D;
- TimerRef ->
- %% The timer is active
- erlang:cancel_timer(TimerRef),
- D#data{idle_timer_ref = undefined}
+ 0 -> {{timeout,idle_time}, ?GET_OPT(idle_time, (D#data.ssh_params)#ssh.opts), none};
+ _ -> {{timeout,idle_time}, infinity, none}
end.
-
-cache_request_idle_timer_check(D = #data{idle_timer_value = infinity}) ->
- D;
-cache_request_idle_timer_check(D = #data{idle_timer_value = IdleTime}) ->
- erlang:send_after(IdleTime, self(), check_cache),
- D.
-
%%%----------------------------------------------------------------
start_channel_request_timer(_,_, infinity) ->
ok;
@@ -2248,7 +2200,7 @@ update_inet_buffers(Socket) ->
%%%# Tracing
%%%#
-dbg_trace(points, _, _) -> [terminate, disconnect, connections, connection_events];
+dbg_trace(points, _, _) -> [terminate, disconnect, connections, connection_events, renegotiation];
dbg_trace(flags, connections, A) -> [c] ++ dbg_trace(flags, terminate, A);
dbg_trace(on, connections, A) -> dbg:tp(?MODULE, init_connection_handler, 3, x),
@@ -2291,6 +2243,33 @@ dbg_trace(format, connection_events, {return_from, {?MODULE,handle_event,4}, Ret
io_lib:format("~p~n", [event_handler_result(Ret)])
];
+dbg_trace(flags, renegotiation, _) -> [c];
+dbg_trace(on, renegotiation, _) -> dbg:tpl(?MODULE, init_renegotiate_timers, 2, x),
+ dbg:tpl(?MODULE, pause_renegotiate_timers, 2, x),
+ dbg:tpl(?MODULE, check_data_rekeying_dbg, 2, x),
+ dbg:tpl(?MODULE, start_rekeying, 2, x);
+dbg_trace(off, renegotiation, _) -> dbg:ctpl(?MODULE, init_renegotiate_timers, 2),
+ dbg:ctpl(?MODULE, pause_renegotiate_timers, 2),
+ dbg:ctpl(?MODULE, check_data_rekeying_dbg, 2),
+ dbg:ctpl(?MODULE, start_rekeying, 2);
+dbg_trace(format, renegotiation, {call, {?MODULE,init_renegotiate_timers,[_State,D]}}) ->
+ ["Renegotiation init\n",
+ io_lib:format("rekey_limit: ~p ({ms,bytes})~ncheck_data_size: ~p (ms)~n",
+ [?GET_OPT(rekey_limit, (D#data.ssh_params)#ssh.opts),
+ ?REKEY_DATA_TIMOUT])
+ ];
+dbg_trace(format, renegotiation, {call, {?MODULE,pause_renegotiate_timers,[_State,_D]}}) ->
+ ["Renegotiation pause\n"];
+dbg_trace(format, renegotiation, {call, {?MODULE,start_rekeying,[_Role,_D]}}) ->
+ ["Renegotiation start rekeying\n"];
+dbg_trace(format, renegotiation, {call, {?MODULE,check_data_rekeying_dbg,[SentSinceRekey, MaxSent]}}) ->
+ ["Renegotiation check data sent\n",
+ io_lib:format("TotalSentSinceRekey: ~p~nMaxBeforeRekey: ~p~nStartRekey: ~p~n",
+ [SentSinceRekey, MaxSent, SentSinceRekey >= MaxSent])
+ ];
+
+
+
dbg_trace(flags, terminate, _) -> [c];
dbg_trace(on, terminate, _) -> dbg:tp(?MODULE, terminate, 3, x);
dbg_trace(off, terminate, _) -> dbg:ctpg(?MODULE, terminate, 3);
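
The rewrite above replaces the timer:apply_after-based renegotiation scheduling with gen_statem state-enter calls and named generic timeouts. A minimal standalone sketch of those two primitives (module name, state names and the 3600000 ms value are illustrative, not taken from the patch):

    -module(renego_timer_sketch).
    -behaviour(gen_statem).
    -export([start_link/0, connect/1]).
    -export([init/1, callback_mode/0, handle_event/4]).

    start_link() -> gen_statem:start_link(?MODULE, [], []).
    connect(Pid) -> gen_statem:cast(Pid, connect).

    init([]) -> {ok, idle, #{}}.

    callback_mode() ->
        [handle_event_function, state_enter].   % enter call on every state change

    %% Entering 'connected' (re)arms the named timeout; leaving it pauses the
    %% timeout by setting it to infinity (compare init_renegotiate_timers/2 and
    %% pause_renegotiate_timers/2 above).
    handle_event(enter, _Old, connected, D) ->
        {keep_state, D, [{{timeout,renegotiate}, 3600000, none}]};
    handle_event(enter, connected, _New, D) ->
        {keep_state, D, [{{timeout,renegotiate}, infinity, none}]};
    handle_event(enter, _Old, _New, D) ->
        {keep_state, D};

    handle_event(cast, connect, idle, D) ->
        {next_state, connected, D};

    handle_event({timeout,renegotiate}, _, connected, D) ->
        %% Time to rekey. repeat_state triggers a new enter call, which re-arms
        %% the timeout (the real handler re-enters {connected,_} after key exchange).
        {repeat_state, D};

    handle_event(_Type, _Msg, _State, D) ->
        {keep_state, D}.
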
diff --git a/lib/ssh/src/ssh_options.erl b/lib/ssh/src/ssh_options.erl
index 73287e464a..fe95d2ac54 100644
--- a/lib/ssh/src/ssh_options.erl
+++ b/lib/ssh/src/ssh_options.erl
@@ -601,14 +601,19 @@ default(common) ->
{rekey_limit, def} =>
#{default => {3600000, 1024000000}, % {1 hour, 1 GB}
- chk => fun({TimeMins, SizBytes}) when is_integer(TimeMins) andalso TimeMins>=0,
- is_integer(SizBytes) andalso SizBytes>=0 ->
- %% New (>= 21) format
- {true, {TimeMins * 60*1000, % To ms
- SizBytes}};
- (SizBytes) when is_integer(SizBytes) andalso SizBytes>=0 ->
- %% Old (< 21) format
- {true, {3600000, SizBytes}};
+ chk => fun({infinity, infinity}) ->
+ true;
+ ({Mins, infinity}) when is_integer(Mins), Mins>0 ->
+ {true, {Mins*60*1000, infinity}};
+ ({infinity, Bytes}) when is_integer(Bytes), Bytes>=0 ->
+ true;
+ ({Mins, Bytes}) when is_integer(Mins), Mins>0,
+ is_integer(Bytes), Bytes>=0 ->
+ {true, {Mins*60*1000, Bytes}};
+ (infinity) ->
+ {true, {3600000, infinity}};
+ (Bytes) when is_integer(Bytes), Bytes>=0 ->
+ {true, {3600000, Bytes}};
(_) ->
false
end,
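
Assuming the usual ssh_options convention that a check fun returning true keeps the value as given while {true, NewValue} replaces it, the clauses above normalize the accepted rekey_limit forms roughly as follows (minutes converted to milliseconds; the byte counts are just example values):

    {rekey_limit, 1024000000}              => {3600000, 1024000000}
    {rekey_limit, infinity}                => {3600000, infinity}
    {rekey_limit, {30, 1024000000}}        => {1800000, 1024000000}
    {rekey_limit, {30, infinity}}          => {1800000, infinity}
    {rekey_limit, {infinity, 1024000000}}  => {infinity, 1024000000}   (kept as given)
    {rekey_limit, {infinity, infinity}}    => {infinity, infinity}     (kept as given)
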
diff --git a/lib/ssh/test/ssh_basic_SUITE.erl b/lib/ssh/test/ssh_basic_SUITE.erl
index 603ac71d4b..807e23ff01 100644
--- a/lib/ssh/test/ssh_basic_SUITE.erl
+++ b/lib/ssh/test/ssh_basic_SUITE.erl
@@ -32,7 +32,7 @@
-define(NEWLINE, <<"\r\n">>).
--define(REKEY_DATA_TMO, 65000).
+-define(REKEY_DATA_TMO, 1 * 60000). % Should be a multiple of 60000
%%--------------------------------------------------------------------
%% Common Test interface functions -----------------------------------
@@ -45,7 +45,6 @@ suite() ->
all() ->
[{group, all_tests}].
-
groups() ->
[{all_tests, [parallel], [{group, ssh_renegotiate_SUITE},
{group, ssh_basic_SUITE}
@@ -76,7 +75,11 @@ groups() ->
shell_exit_status
]},
- {ssh_renegotiate_SUITE, [parallel], [rekey,
+ {ssh_renegotiate_SUITE, [parallel], [rekey0,
+ rekey1,
+ rekey2,
+ rekey3,
+ rekey4,
rekey_limit_client,
rekey_limit_daemon,
rekey_time_limit_client,
@@ -1330,28 +1333,36 @@ shell_exit_status(Config) when is_list(Config) ->
ssh:stop_daemon(Pid).
+%%----------------------------------------------------------------------------
%%% Idle timeout test
-rekey() -> [{timetrap,{seconds,90}}].
+rekey0() -> [{timetrap,{seconds,90}}].
+rekey1() -> [{timetrap,{seconds,90}}].
+rekey2() -> [{timetrap,{seconds,90}}].
+rekey3() -> [{timetrap,{seconds,90}}].
+rekey4() -> [{timetrap,{seconds,90}}].
-rekey(Config) ->
- {Pid, Host, Port} =
- ssh_test_lib:std_daemon(Config,
- [{rekey_limit, 0}]),
- ConnectionRef =
- ssh_test_lib:std_connect(Config, Host, Port,
- [{rekey_limit, 0}]),
+rekey0(Config) -> rekey_chk(Config, 0, 0).
+rekey1(Config) -> rekey_chk(Config, infinity, 0).
+rekey2(Config) -> rekey_chk(Config, {infinity,infinity}, 0).
+rekey3(Config) -> rekey_chk(Config, 0, infinity).
+rekey4(Config) -> rekey_chk(Config, 0, {infinity,infinity}).
+
+rekey_chk(Config, RLdaemon, RLclient) ->
+ {Pid, Host, Port} = ssh_test_lib:std_daemon(Config, [{rekey_limit, RLdaemon}]),
+ ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{rekey_limit, RLclient}]),
Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
- receive
- after ?REKEY_DATA_TMO ->
- %%By this time rekeying would have been done
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
- false = (Kex2 == Kex1),
- ssh:close(ConnectionRef),
- ssh:stop_daemon(Pid)
- end.
-%%--------------------------------------------------------------------
+ %% Make both sides send something:
+ {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
+
+ %% Check rekeying
+ timer:sleep(?REKEY_DATA_TMO),
+ ?wait_match(false, Kex1==ssh_test_lib:get_kex_init(ConnectionRef), [], 2000, 10),
+ ssh:close(ConnectionRef),
+ ssh:stop_daemon(Pid).
+
+%%--------------------------------------------------------------------
%%% Test rekeying by data volume
rekey_limit_client() -> [{timetrap,{seconds,400}}].
@@ -1359,7 +1370,7 @@ rekey_limit_client(Config) ->
Limit = 6000,
UserDir = proplists:get_value(priv_dir, Config),
DataFile = filename:join(UserDir, "rekey.data"),
-
+ Data = lists:duplicate(Limit+10,1),
Algs = proplists:get_value(preferred_algorithms, Config),
{Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{max_random_length_padding,0},
{preferred_algorithms,Algs}]),
@@ -1368,31 +1379,33 @@ rekey_limit_client(Config) ->
{max_random_length_padding,0}]),
{ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
+ %% Check that it doesn't rekey without data transfer
Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
-
timer:sleep(?REKEY_DATA_TMO),
- Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
- Data = lists:duplicate(Limit+10,1),
+ %% Check that data transfer triggers rekeying
ok = ssh_sftp:write_file(SftpPid, DataFile, Data),
-
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ ?wait_match(false, Kex1==(Kex2=ssh_test_lib:get_kex_init(ConnectionRef)), Kex2, 2000, 10),
- false = (Kex2 == Kex1),
+ %% Check that data transfer continues to trigger rekeying
+ ok = ssh_sftp:write_file(SftpPid, DataFile, Data),
+ timer:sleep(?REKEY_DATA_TMO),
+ ?wait_match(false, Kex2==(Kex3=ssh_test_lib:get_kex_init(ConnectionRef)), Kex3, 2000, 10),
+ %% Check that it doesn't rekey without data transfer
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
+ %% Check that it doesn't rekey on a small data transfer
ok = ssh_sftp:write_file(SftpPid, DataFile, "hi\n"),
-
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
-
- false = (Kex2 == Kex1),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
+ %% Check that it doesn't rekey without data transfer
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
ssh_sftp:stop_channel(SftpPid),
ssh:close(ConnectionRef),
@@ -1416,32 +1429,40 @@ rekey_limit_daemon(Config) ->
ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{max_random_length_padding,0}]),
{ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
+ %% Check that it doesn't rekey without data transfer
Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
timer:sleep(?REKEY_DATA_TMO),
Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
+ %% Check that data transfer triggers rekeying
{ok,_} = ssh_sftp:read_file(SftpPid, DataFile1),
+ timer:sleep(?REKEY_DATA_TMO),
+ ?wait_match(false, Kex1==(Kex2=ssh_test_lib:get_kex_init(ConnectionRef)), Kex2, 2000, 10),
+ %% Check that data transfer continues to trigger rekeying
+ {ok,_} = ssh_sftp:read_file(SftpPid, DataFile1),
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
- false = (Kex2 == Kex1),
+ ?wait_match(false, Kex2==(Kex3=ssh_test_lib:get_kex_init(ConnectionRef)), Kex3, 2000, 10),
+ %% Check that it doesn't rekey without data transfer
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
+ %% Check that it doesn't rekey on a small data transfer
{ok,_} = ssh_sftp:read_file(SftpPid, DataFile2),
-
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
+ %% Check that it doesn't rekey without data transfer
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ true = (Kex3 == ssh_test_lib:get_kex_init(ConnectionRef)),
ssh_sftp:stop_channel(SftpPid),
ssh:close(ConnectionRef),
ssh:stop_daemon(Pid).
+%%--------------------------------------------------------------------
%% Check that data transfer in the other direction does not trigger re-keying
norekey_limit_client() -> [{timetrap,{seconds,400}}].
norekey_limit_client(Config) ->
@@ -1460,13 +1481,12 @@ norekey_limit_client(Config) ->
Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
timer:sleep(?REKEY_DATA_TMO),
- Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
{ok,_} = ssh_sftp:read_file(SftpPid, DataFile),
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
- Kex1 = Kex2,
ssh_sftp:stop_channel(SftpPid),
ssh:close(ConnectionRef),
ssh:stop_daemon(Pid).
@@ -1488,13 +1508,12 @@ norekey_limit_daemon(Config) ->
Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
timer:sleep(?REKEY_DATA_TMO),
- Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
ok = ssh_sftp:write_file(SftpPid, DataFile, lists:duplicate(Limit+10,1)),
timer:sleep(?REKEY_DATA_TMO),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
- Kex1 = Kex2,
ssh_sftp:stop_channel(SftpPid),
ssh:close(ConnectionRef),
ssh:stop_daemon(Pid).
@@ -1504,39 +1523,41 @@ norekey_limit_daemon(Config) ->
rekey_time_limit_client() -> [{timetrap,{seconds,400}}].
rekey_time_limit_client(Config) ->
- Minutes = 1,
+ Minutes = ?REKEY_DATA_TMO div 60000,
GB = 1024*1000*1000,
Algs = proplists:get_value(preferred_algorithms, Config),
{Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{max_random_length_padding,0},
{preferred_algorithms,Algs}]),
ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{rekey_limit, {Minutes, GB}},
{max_random_length_padding,0}]),
- {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
- rekey_time_limit(Pid, Minutes, ConnectionRef, SftpPid).
+ rekey_time_limit(Pid, ConnectionRef).
rekey_time_limit_daemon() -> [{timetrap,{seconds,400}}].
rekey_time_limit_daemon(Config) ->
- Minutes = 1,
+ Minutes = ?REKEY_DATA_TMO div 60000,
GB = 1024*1000*1000,
Algs = proplists:get_value(preferred_algorithms, Config),
{Pid, Host, Port} = ssh_test_lib:std_daemon(Config,[{rekey_limit, {Minutes, GB}},
{max_random_length_padding,0},
{preferred_algorithms,Algs}]),
ConnectionRef = ssh_test_lib:std_connect(Config, Host, Port, [{max_random_length_padding,0}]),
- {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
- rekey_time_limit(Pid, Minutes, ConnectionRef, SftpPid).
+ rekey_time_limit(Pid, ConnectionRef).
-rekey_time_limit(Pid, Minutes, ConnectionRef, SftpPid) ->
+rekey_time_limit(Pid, ConnectionRef) ->
+ {ok, SftpPid} = ssh_sftp:start_channel(ConnectionRef),
Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
timer:sleep(5000),
- Kex1 = ssh_test_lib:get_kex_init(ConnectionRef),
+ true = (Kex1 == ssh_test_lib:get_kex_init(ConnectionRef)),
- timer:sleep((Minutes*60 + 30) * 1000),
- Kex2 = ssh_test_lib:get_kex_init(ConnectionRef),
+ %% Check that it rekeys when the max time + 30s has passed
+ timer:sleep(?REKEY_DATA_TMO + 30*1000),
+ ?wait_match(false, Kex1==(Kex2=ssh_test_lib:get_kex_init(ConnectionRef)), Kex2, 2000, 10),
- false = (Kex2 == Kex1),
+ %% Check that it rekeys again after the next time period, even though nothing is transferred
+ timer:sleep(?REKEY_DATA_TMO + 30*1000),
+ ?wait_match(false, Kex2==ssh_test_lib:get_kex_init(ConnectionRef), [], 2000, 10),
ssh_sftp:stop_channel(SftpPid),
ssh:close(ConnectionRef),
@@ -1544,7 +1565,7 @@ rekey_time_limit(Pid, Minutes, ConnectionRef, SftpPid) ->
%%--------------------------------------------------------------------
-%%% Test rekeying with simulataneous send request
+%%% Test rekeying with simultaneous send request
renegotiate1(Config) ->
UserDir = proplists:get_value(priv_dir, Config),
diff --git a/lib/ssh/test/ssh_test_lib.erl b/lib/ssh/test/ssh_test_lib.erl
index 57ae2dbac2..65970535f4 100644
--- a/lib/ssh/test/ssh_test_lib.erl
+++ b/lib/ssh/test/ssh_test_lib.erl
@@ -926,7 +926,7 @@ get_kex_init(Conn, Ref, TRef) ->
end;
false ->
- ct:log("Not in 'connected' state: ~p",[State]),
+ ct:log("~p:~p Not in 'connected' state: ~p",[?MODULE,?LINE,State]),
receive
{reneg_timeout,Ref} ->
ct:log("S = ~p", [S]),
diff --git a/lib/tools/src/xref.erl b/lib/tools/src/xref.erl
index 32efa36fa2..466ec7d331 100644
--- a/lib/tools/src/xref.erl
+++ b/lib/tools/src/xref.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -182,7 +182,9 @@ split_args(Opts) ->
end.
stop(Name) ->
- gen_server:call(Name, stop, infinity).
+ try gen_server:call(Name, stop, infinity)
+ after catch unregister(Name) % ensure the name is gone
+ end.
add_release(Name, Dir) ->
gen_server:call(Name, {add_release, Dir}, infinity).
diff --git a/system/doc/efficiency_guide/binaryhandling.xml b/system/doc/efficiency_guide/binaryhandling.xml
index 19f40c9abe..d07ff1325f 100644
--- a/system/doc/efficiency_guide/binaryhandling.xml
+++ b/system/doc/efficiency_guide/binaryhandling.xml
@@ -357,25 +357,8 @@ all_but_zeroes_to_list(<<Byte,T/binary>>, Acc, Remaining) ->
<c>Buffer</c> from a match context to a sub binary (or do nothing if
<c>Buffer</c> is a binary already).</p>
- <p>Before you begin to think that the compiler can optimize any binary
- patterns, the following function cannot be optimized by the compiler
- (currently, at least):</p>
-
- <code type="erl"><![CDATA[
-non_opt_eq([H|T1], <<H,T2/binary>>) ->
- non_opt_eq(T1, T2);
-non_opt_eq([_|_], <<_,_/binary>>) ->
- false;
-non_opt_eq([], <<>>) ->
- true.]]></code>
-
- <p>It was mentioned earlier that the compiler can only delay creation of
- sub binaries if it knows that the binary will not be shared. In this case,
- the compiler cannot know.</p>
-
- <p>Soon it is shown how to rewrite <c>non_opt_eq/2</c> so that the delayed
- sub binary optimization can be applied, and more importantly, it is shown
- how you can find out whether your code can be optimized.</p>
+ <p>But in more complicated code, how can one know whether the
+ optimization is applied or not?</p>
<section>
<marker id="bin_opt_info"></marker>
@@ -422,67 +405,6 @@ after_zero(<<>>) ->
binary cannot be delayed, because it will be returned.
The warning for the second clause says that a sub binary will not be
created (yet).</p>
-
- <p>Let us revisit the earlier example of the code that could not
- be optimized and find out why:</p>
-
- <code type="erl"><![CDATA[
-non_opt_eq([H|T1], <<H,T2/binary>>) ->
- %% INFO: matching anything else but a plain variable to
- %% the left of binary pattern will prevent delayed
- %% sub binary optimization;
- %% SUGGEST changing argument order
- %% NOT OPTIMIZED: called function non_opt_eq/2 does not
- %% begin with a suitable binary matching instruction
- non_opt_eq(T1, T2);
-non_opt_eq([_|_], <<_,_/binary>>) ->
- false;
-non_opt_eq([], <<>>) ->
- true.]]></code>
-
- <p>The compiler emitted two warnings. The <c>INFO</c> warning refers
- to the function <c>non_opt_eq/2</c> as a callee, indicating that any
- function that call <c>non_opt_eq/2</c> cannot make delayed sub binary
- optimization. There is also a suggestion to change argument order.
- The second warning (that happens to refer to the same line) refers to
- the construction of the sub binary itself.</p>
-
- <p>Soon another example will show the difference between the
- <c>INFO</c> and <c>NOT OPTIMIZED</c> warnings somewhat clearer, but
- let us first follow the suggestion to change argument order:</p>
-
- <code type="erl"><![CDATA[
-opt_eq(<<H,T1/binary>>, [H|T2]) ->
- %% OPTIMIZED: creation of sub binary delayed
- opt_eq(T1, T2);
-opt_eq(<<_,_/binary>>, [_|_]) ->
- false;
-opt_eq(<<>>, []) ->
- true.]]></code>
-
- <p>The compiler gives a warning for the following code fragment:</p>
-
- <code type="erl"><![CDATA[
-match_body([0|_], <<H,_/binary>>) ->
- %% INFO: matching anything else but a plain variable to
- %% the left of binary pattern will prevent delayed
- %% sub binary optimization;
- %% SUGGEST changing argument order
- done;
-...]]></code>
-
- <p>The warning means that <em>if</em> there is a call to <c>match_body/2</c>
- (from another clause in <c>match_body/2</c> or another function), the
- delayed sub binary optimization will not be possible. More warnings will
- occur for any place where a sub binary is matched out at the end of and
- passed as the second argument to <c>match_body/2</c>, for example:</p>
-
- <code type="erl"><![CDATA[
-match_head(List, <<_:10,Data/binary>>) ->
- %% NOT OPTIMIZED: called function match_body/2 does not
- %% begin with a suitable binary matching instruction
- match_body(List, Data).]]></code>
-
</section>
<section>
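
The question added above is answered by the bin_opt_info option described in the subsection that follows. As a reminder of how to request those diagnostics, here is a minimal sketch (the module name is hypothetical; the same warnings can also be requested with erlc +bin_opt_info or via the ERL_COMPILER_OPTIONS environment variable):

    -module(mymodule).
    -compile(bin_opt_info).   % ask the compiler to report on binary match optimizations
    -export([after_zero/1]).

    %% Same shape as the after_zero/1 example in this chapter; compiling this
    %% module emits the kind of diagnostics quoted in the text for its clauses.
    after_zero(<<0,T/binary>>) ->
        T;
    after_zero(<<_,T/binary>>) ->
        after_zero(T);
    after_zero(<<>>) ->
        <<>>.
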
diff --git a/system/doc/efficiency_guide/efficiency_guide.erl b/system/doc/efficiency_guide/efficiency_guide.erl
index e982bdae65..c57785aaa3 100644
--- a/system/doc/efficiency_guide/efficiency_guide.erl
+++ b/system/doc/efficiency_guide/efficiency_guide.erl
@@ -1,5 +1,5 @@
-module(efficiency_guide).
--compile(export_all).
+-compile([export_all,nowarn_export_all]).
%% DO NOT
naive_reverse([H|T]) ->
@@ -71,28 +71,6 @@ all_but_zeroes_to_list(<<0,T/binary>>, Acc, Remaining) ->
all_but_zeroes_to_list(<<Byte,T/binary>>, Acc, Remaining) ->
all_but_zeroes_to_list(T, [Byte|Acc], Remaining-1).
-non_opt_eq([H|T1], <<H,T2/binary>>) ->
- non_opt_eq(T1, T2);
-non_opt_eq([_|_], <<_,_/binary>>) ->
- false;
-non_opt_eq([], <<>>) ->
- true.
-
-opt_eq(<<H,T1/binary>>, [H|T2]) ->
- opt_eq(T1, T2);
-opt_eq(<<_,_/binary>>, [_|_]) ->
- false;
-opt_eq(<<>>, []) ->
- true.
-
-match_head(List, <<_:10,Data/binary>>) ->
- match_body(List, Data).
-
-match_body([0|_], <<H,_/binary>>) ->
- done;
-match_body([H|T1], <<H,T2/binary>>) ->
- {T1,T2}.
-
count1(<<_,T/binary>>, Count) -> count1(T, Count+1);
count1(<<>>, Count) -> Count.