author    Anders Svensson <[email protected]> 2016-11-06 22:20:35 +0100
committer Anders Svensson <[email protected]> 2017-06-21 16:40:32 +0200
commit    34a186689a318ebc6fe2afaff396364d832bb21b (patch)
tree      7a56031c857a42586d610c45e073e46e3d02e366
parent    040bdce67f88d833bfb59adae130a4ffb4c180f0 (diff)
Don't assume nodes are eternally connected when sharing transport
The service configuration options share_peers and use_shared_peers are used to share peer connections with other connected nodes having a service of the same name: a service process asks its neighbours about existing connections when it starts, and pushes new connections as they're established.

The problem is that the mechanics assume that nodes() doesn't change. In particular, if a neighbour isn't connected when a service starts then it never receives the request to share its connections.

Solve this by having each service process monitor nodes: a nodeup notification causes it to request the connections of its neighbours. Nodes going down is already handled, by remote connections being monitored in diameter_service.
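For context, a minimal sketch of a service configured with both options. The service name, capabilities, and callback module below are hypothetical illustrations, not taken from the commit; see diameter(3) for the full option set.

    %% Hedged sketch: svc_cb stands in for any module implementing the
    %% diameter_app callbacks.
    SvcName = shared_svc,
    Opts = [{'Origin-Host', "svc.example.com"},
            {'Origin-Realm', "example.com"},
            {'Vendor-Id', 0},
            {'Product-Name', "SharedSvc"},
            {application, [{alias, common},
                           {dictionary, diameter_gen_base_rfc6733},
                           {module, svc_cb}]},
            {share_peers, true},        %% push own connections to other nodes
            {use_shared_peers, true}],  %% adopt connections shared by others
    ok = diameter:start_service(SvcName, Opts).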
 lib/diameter/src/base/diameter_service.erl | 25 +++++++++++++++++++++++--
 1 file changed, 23 insertions(+), 2 deletions(-)
diff --git a/lib/diameter/src/base/diameter_service.erl b/lib/diameter/src/base/diameter_service.erl
index a976a8b998..788f697627 100644
--- a/lib/diameter/src/base/diameter_service.erl
+++ b/lib/diameter/src/base/diameter_service.erl
@@ -514,6 +514,13 @@ transition({tc_timeout, T}, S) ->
     tc_timeout(T, S),
     ok;
 
+transition({nodeup, Node, _}, S) ->
+    nodeup(Node, S),
+    ok;
+
+transition({nodedown, _Node, _}, _) ->
+    ok;
+
 transition(Req, S) ->
     unexpected(handle_info, [Req], S),
     ok.
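The two new clauses handle the messages that the net_kernel:monitor_nodes/2 subscription added below delivers to the service process. Their shape, per the net_kernel documentation:

    %% With net_kernel:monitor_nodes(true, [{node_type, visible},
    %% nodedown_reason]) the subscriber receives 3-tuples rather than
    %% the bare {nodeup, Node} / {nodedown, Node} pairs:
    %%
    %%   {nodeup,   Node, [{node_type, visible}]}
    %%   {nodedown, Node, [{node_type, visible}, {nodedown_reason, Reason}]}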
@@ -709,6 +716,8 @@ mref(P) ->
 
 init_shared(#state{options = #{use_shared_peers := T},
                    service_name = Svc}) ->
+    T == false orelse net_kernel:monitor_nodes(true, [{node_type, visible},
+                                                      nodedown_reason]),
     notify(T, Svc, {service, self()}).
 
 init_mod(#diameter_app{alias = Alias,
@@ -728,6 +737,11 @@ notify(Share, SvcName, T) ->
 %% Test for the empty list for upgrade reasons: there's no
 %% diameter_peer:notify/3 in old code.
 
+nodeup(Node, #state{options = #{share_peers := SP},
+                    service_name = SvcName}) ->
+    lists:member(Node, remotes(SP))
+        andalso diameter_peer:notify([Node], SvcName, {service, self()}).
+
 remotes(false) ->
     [];
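Only the first clause of remotes/1 is visible in this hunk. The sharing options are documented to accept false | true | [node()] | evaluable, so a plausible sketch of the normalization it performs is the following; this is a guess at the shape, not the module's actual remaining clauses:

    %% Sketch only: normalize an option value to a list of nodes.
    remotes(false) -> [];                  %% sharing disabled
    remotes(true)  -> nodes();             %% every connected visible node
    remotes(Nodes) when Nodes == [];
                        is_atom(hd(Nodes)) ->
        Nodes;                             %% explicit node list
    remotes(F) ->                          %% evaluable returning a node list
        case catch diameter_lib:eval(F) of
            L when is_list(L) -> L;
            _ -> []
        end.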
@@ -1400,9 +1414,15 @@ is_remote(Pid, T) ->
 %% # remote_peer_up/4
 %% ---------------------------------------------------------------------------
 
-remote_peer_up(TPid, Aliases, Caps, #state{options = #{use_shared_peers := T}}
+remote_peer_up(TPid, Aliases, Caps, #state{options = #{use_shared_peers := T},
+                                           remote = {PeerT, _, _}}
                = S) ->
-    is_remote(TPid, T) andalso rpu(TPid, Aliases, Caps, S).
+    is_remote(TPid, T)
+        andalso not ets:member(PeerT, TPid)
+        andalso rpu(TPid, Aliases, Caps, S).
+
+%% Notification can be duplicate since remote nodes push and the local
+%% node pulls.
 
 rpu(TPid, Aliases, Caps, #state{service = Svc, remote = RT}) ->
     #diameter_service{applications = Apps} = Svc,
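The new ets:member/2 guard is what makes the push/pull combination safe, as the added comment notes: a remote node pushes its connections at the same time as the local node pulls them, so the same transport process can be reported twice. A standalone sketch of the insert-once idiom, with hypothetical names:

    %% Without the membership test a duplicate notification would insert
    %% the peer a second time and set up a second monitor.
    maybe_remote_up(PeerT, TPid, Rec) ->
        case ets:member(PeerT, TPid) of
            true  -> false;                   %% already known; ignore
            false -> monitor(process, TPid),  %% detect the peer's death
                     ets:insert(PeerT, Rec)   %% record the shared connection
        end.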
@@ -1412,6 +1432,7 @@ rpu(TPid, Aliases, Caps, #state{service = Svc, remote = RT}) ->
 
 rpu(_, [] = No, _, _) ->
     No;
+
 rpu(TPid, Aliases, Caps, {PeerT, _, _} = RT) ->
     monitor(process, TPid),
     ets:insert(PeerT, #peer{pid = TPid,
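
Taken together, a hypothetical two-node sequence showing the behaviour the commit fixes; node, service, and option names are invented for illustration:

    %% On n1@host, started before n2@host is connected:
    %%   (n1@host)1> ok = diameter:start_service(svc, Opts1).  %% use_shared_peers
    %%
    %% On n2@host, which already has established peer connections:
    %%   (n2@host)1> ok = diameter:start_service(svc, Opts2).  %% share_peers
    %%   (n2@host)2> pong = net_adm:ping(n1@host).             %% connect late
    %%
    %% Previously n1 never learned of n2's connections. With this commit
    %% n1's service process receives {nodeup, 'n2@host', _}, nodeup/2 sends
    %% {service, self()} to n2 via diameter_peer:notify/3, and n2's reply
    %% arrives as remote_peer_up/4, inserted once thanks to the
    %% ets:member/2 guard.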