From 34a186689a318ebc6fe2afaff396364d832bb21b Mon Sep 17 00:00:00 2001
From: Anders Svensson
Date: Sun, 6 Nov 2016 22:20:35 +0100
Subject: Don't assume nodes are eternally connected when sharing transport

The service configuration options share_peers and use_shared_peers are
used to share peer connections with other connected nodes having a
service of the same name: a service process asks its neighbours about
existing connections when it starts, and pushes new connections as
they're established.

The problem is that the mechanics assume that nodes() doesn't change.
In particular, if a neighbour isn't connected when a service starts
then it never receives the request to share connections.

Solve this by having each service process monitor nodes, a nodeup
notification causing it to request connections from its neighbours.
Nodes going down is already handled, since remote connections are
monitored in diameter_service.
---
 lib/diameter/src/base/diameter_service.erl | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)

diff --git a/lib/diameter/src/base/diameter_service.erl b/lib/diameter/src/base/diameter_service.erl
index a976a8b998..788f697627 100644
--- a/lib/diameter/src/base/diameter_service.erl
+++ b/lib/diameter/src/base/diameter_service.erl
@@ -514,6 +514,13 @@ transition({tc_timeout, T}, S) ->
     tc_timeout(T, S),
     ok;
 
+transition({nodeup, Node, _}, S) ->
+    nodeup(Node, S),
+    ok;
+
+transition({nodedown, _Node, _}, _) ->
+    ok;
+
 transition(Req, S) ->
     unexpected(handle_info, [Req], S),
     ok.
@@ -709,6 +716,8 @@ mref(P) ->
 
 init_shared(#state{options = #{use_shared_peers := T},
                    service_name = Svc}) ->
+    T == false orelse net_kernel:monitor_nodes(true, [{node_type, visible},
+                                                      nodedown_reason]),
     notify(T, Svc, {service, self()}).
 
 init_mod(#diameter_app{alias = Alias,
@@ -728,6 +737,11 @@ notify(Share, SvcName, T) ->
 %% Test for the empty list for upgrade reasons: there's no
 %% diameter_peer:notify/3 in old code.
 
+nodeup(Node, #state{options = #{share_peers := SP},
+                    service_name = SvcName}) ->
+    lists:member(Node, remotes(SP))
+        andalso diameter_peer:notify([Node], SvcName, {service, self()}).
+
 remotes(false) ->
     [];
 
@@ -1400,9 +1414,15 @@ is_remote(Pid, T) ->
 %% # remote_peer_up/4
 %% ---------------------------------------------------------------------------
 
-remote_peer_up(TPid, Aliases, Caps, #state{options = #{use_shared_peers := T}}
+remote_peer_up(TPid, Aliases, Caps, #state{options = #{use_shared_peers := T},
+                                           remote = {PeerT, _, _}}
                = S) ->
-    is_remote(TPid, T) andalso rpu(TPid, Aliases, Caps, S).
+    is_remote(TPid, T)
+        andalso not ets:member(PeerT, TPid)
+        andalso rpu(TPid, Aliases, Caps, S).
+
+%% Notification can be duplicate since remote nodes push and the local
+%% node pulls.
 
 rpu(TPid, Aliases, Caps, #state{service = Svc, remote = RT}) ->
     #diameter_service{applications = Apps} = Svc,
@@ -1412,6 +1432,7 @@ rpu(TPid, Aliases, Caps, #state{service = Svc, remote = RT}) ->
 
 rpu(_, [] = No, _, _) ->
     No;
+
 rpu(TPid, Aliases, Caps, {PeerT, _, _} = RT) ->
     monitor(process, TPid),
     ets:insert(PeerT, #peer{pid = TPid,
--
cgit v1.2.3
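
A minimal standalone sketch of the monitoring pattern the patch adopts,
outside of diameter. Module and function names here are invented for
illustration; the real handling lives in diameter_service:transition/2
and nodeup/2 above, where diameter_peer:notify/3 does the work that
io:format/2 stands in for below.

    -module(nodeup_sketch).
    -behaviour(gen_server).

    -export([start_link/0]).
    -export([init/1, handle_call/3, handle_cast/2, handle_info/2]).

    start_link() ->
        gen_server:start_link(?MODULE, [], []).

    init([]) ->
        %% Subscribe to {nodeup, Node, Info} and {nodedown, Node, Info}
        %% messages for visible nodes, as init_shared/1 now does when
        %% use_shared_peers is configured.
        ok = net_kernel:monitor_nodes(true, [{node_type, visible},
                                             nodedown_reason]),
        {ok, #{}}.

    handle_info({nodeup, Node, _Info}, State) ->
        %% A newly connected node may be running a service of the same
        %% name: this is the point at which to ask it about its existing
        %% peer connections.
        io:format("node up: ~p~n", [Node]),
        {noreply, State};
    handle_info({nodedown, _Node, _Info}, State) ->
        %% Nothing to do: remote connections are monitored individually,
        %% so their loss is detected without nodedown.
        {noreply, State};
    handle_info(_Msg, State) ->
        {noreply, State}.

    handle_call(_Req, _From, State) ->
        {reply, ok, State}.

    handle_cast(_Msg, State) ->
        {noreply, State}.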
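
A note on the guard added to remote_peer_up/4: a notification can
arrive twice for the same transport process, since a remote node pushes
new connections while the local node also pulls them on nodeup. The
hypothetical helper below isolates the ets:member/2 test that the patch
inlines to make the handling idempotent.

    %% Apply Fun only on the first notification for Key; later
    %% duplicates find the key already present in Tab and are ignored.
    once_per_key(Tab, Key, Fun) ->
        not ets:member(Tab, Key) andalso Fun().

In the patch itself the table is PeerT, the key is the transport pid
TPid, and the work is rpu/4, which inserts the key as a side effect.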