From 88b055b4de08e7358f4b49076fc842a124743f52 Mon Sep 17 00:00:00 2001
From: Anders Svensson
Date: Mon, 21 Dec 2015 17:12:46 +0100
Subject: Update appup for 17.5.6.8
OTP-13164 more efficient peer lists
One module. Downgrade not supported.
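For the record, the single live-upgrade instruction added here,
{update, diameter_service, {advanced, []}}, makes the release handler
suspend processes running diameter_service, load the new code, and call
the module's code_change/3 with [] as the extra argument. A minimal
sketch of the shape of such a callback (not the actual diameter_service
implementation, which a later commit in this series contains):

  code_change(_OldVsn, State, _Extra = []) ->
      %% Transform the old state term into the new representation here.
      {ok, State}.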
---
lib/diameter/src/diameter.appup.src | 68 +++++--------------------------------
1 file changed, 8 insertions(+), 60 deletions(-)
diff --git a/lib/diameter/src/diameter.appup.src b/lib/diameter/src/diameter.appup.src
index 7d66557162..e030b3c72b 100644
--- a/lib/diameter/src/diameter.appup.src
+++ b/lib/diameter/src/diameter.appup.src
@@ -35,41 +35,15 @@
{"1.4.3", [{restart_application, diameter}]}, %% R16B02
{"1.4.4", [{restart_application, diameter}]},
{"1.5", [{restart_application, diameter}]}, %% R16B03
+ {"1.5.1", [{restart_application, diameter}]},
{"1.6", [{restart_application, diameter}]}, %% 17.0
{<<"^1\\.(7(\\.1)?|8)$">>, %% 17.[134]
[{restart_application, diameter}]},
{<<"^1.9(\\.1)?$">>, %% 17.5(.3)?
[{restart_application, diameter}]},
- {"1.9.2", [{load_module, diameter_peer_fsm}, %% 17.5.5
- {load_module, diameter_watchdog},
- {load_module, diameter_stats},
- {load_module, diameter_codec},
- {load_module, diameter_lib},
- {load_module, diameter_peer},
- {load_module, diameter_reg},
- {load_module, diameter_service},
- {load_module, diameter_session},
- {load_module, diameter_sync},
- {load_module, diameter_traffic},
- {load_module, diameter_sctp},
- {load_module, diameter_gen_base_rfc6733},
- {load_module, diameter_gen_acct_rfc6733},
- {load_module, diameter_gen_base_rfc3588},
- {load_module, diameter_gen_base_accounting},
- {load_module, diameter_gen_relay},
- {load_module, diameter},
- {load_module, diameter_config}]},
- {"1.9.2.1", [{load_module, diameter_watchdog}, %% 17.5.6.3
- {load_module, diameter_codec},
- {load_module, diameter_traffic},
- {load_module, diameter_service},
- {load_module, diameter_gen_base_rfc6733},
- {load_module, diameter_gen_acct_rfc6733},
- {load_module, diameter_gen_base_rfc3588},
- {load_module, diameter_gen_base_accounting},
- {load_module, diameter_gen_relay},
- {load_module, diameter,
- {load_module, diameter_config}}]}
+ {"1.9.2", [{restart_application, diameter}]}, %% 17.5.5
+ {"1.9.2.1", [{restart_application, diameter}]}, %% 17.5.6.3
+ {"1.9.2.2", [{update, diameter_service, {advanced, []}}]} %% 17.5.6.7
],
[
{"0.9", [{restart_application, diameter}]},
@@ -87,40 +61,14 @@
{"1.4.3", [{restart_application, diameter}]},
{"1.4.4", [{restart_application, diameter}]},
{"1.5", [{restart_application, diameter}]},
+ {"1.5.1", [{restart_application, diameter}]},
{"1.6", [{restart_application, diameter}]},
{<<"^1\\.(7(\\.1)?|8)$">>,
[{restart_application, diameter}]},
{<<"^1.9(\\.1)?$">>,
[{restart_application, diameter}]},
- {"1.9.2", [{load_module, diameter_config},
- {load_module, diameter},
- {load_module, diameter_gen_relay},
- {load_module, diameter_gen_base_accounting},
- {load_module, diameter_gen_base_rfc3588},
- {load_module, diameter_gen_acct_rfc6733},
- {load_module, diameter_gen_base_rfc6733},
- {load_module, diameter_sctp},
- {load_module, diameter_traffic},
- {load_module, diameter_sync},
- {load_module, diameter_session},
- {load_module, diameter_service},
- {load_module, diameter_reg},
- {load_module, diameter_peer},
- {load_module, diameter_lib},
- {load_module, diameter_codec},
- {load_module, diameter_stats},
- {load_module, diameter_watchdog},
- {load_module, diameter_peer_fsm}]},
- {"1.9.2.1", [{load_module, diameter_config},
- {load_module, diameter},
- {load_module, diameter_gen_relay},
- {load_module, diameter_gen_base_accounting},
- {load_module, diameter_gen_base_rfc3588},
- {load_module, diameter_gen_acct_rfc6733},
- {load_module, diameter_gen_base_rfc6733},
- {load_module, diameter_service},
- {load_module, diameter_traffic},
- {load_module, diameter_codec},
- {load_module, diameter_watchdog}]}
+ {"1.9.2", [{restart_application, diameter}]},
+ {"1.9.2.1", [{restart_application, diameter}]},
+ {"1.9.2.2", [{restart_application, diameter}]}
]
}.
--
cgit v1.2.3
From 0967c22be52ee5c7dace7adb102b9b8635b2913b Mon Sep 17 00:00:00 2001
From: Anders Svensson
Date: Mon, 21 Dec 2015 17:13:08 +0100
Subject: vsn -> 1.9.2.3
---
lib/diameter/vsn.mk | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/lib/diameter/vsn.mk b/lib/diameter/vsn.mk
index 18b77e243f..851ae5c3cf 100644
--- a/lib/diameter/vsn.mk
+++ b/lib/diameter/vsn.mk
@@ -16,5 +16,5 @@
# %CopyrightEnd%
APPLICATION = diameter
-DIAMETER_VSN = 1.9.2.2
+DIAMETER_VSN = 1.9.2.3
APP_VSN = $(APPLICATION)-$(DIAMETER_VSN)$(PRE_VSN)
--
cgit v1.2.3
From 09a4ef659657525b7530416697f92db8a360a28f Mon Sep 17 00:00:00 2001
From: Anders Svensson
Date: Mon, 7 Dec 2015 13:11:07 +0100
Subject: Remove unnecessary erlang:monitor/2 qualification
See commit 862af31d.
---
lib/diameter/src/base/diameter_service.erl | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/lib/diameter/src/base/diameter_service.erl b/lib/diameter/src/base/diameter_service.erl
index 87bc9516e0..c5eed539ab 100644
--- a/lib/diameter/src/base/diameter_service.erl
+++ b/lib/diameter/src/base/diameter_service.erl
@@ -180,7 +180,7 @@ stop(SvcName) ->
end.
stop(ok, Pid) ->
- MRef = erlang:monitor(process, Pid),
+ MRef = monitor(process, Pid),
receive {'DOWN', MRef, process, _, _} -> ok end;
stop(No, _) ->
No.
@@ -710,7 +710,7 @@ service_options(Opts) ->
mref(false = No) ->
No;
mref(P) ->
- erlang:monitor(process, P).
+ monitor(process, P).
init_shared(#state{options = [_, _, {_,T} | _],
service_name = Svc}) ->
@@ -1402,7 +1402,7 @@ rpu(Pid, Aliases, Caps, #state{service = Svc, shared_peers = PDict}) ->
rpu(_, [] = No, _, _) ->
No;
rpu(Pid, Aliases, Caps, PDict) ->
- erlang:monitor(process, Pid),
+ monitor(process, Pid),
T = {Pid, Caps},
lists:foreach(fun(A) -> ?Dict:append(A, T, PDict) end, Aliases).
--
cgit v1.2.3
From 8fd4e5f47efd13a6bad7811239438539ae383966 Mon Sep 17 00:00:00 2001
From: Anders Svensson
Date: Mon, 7 Dec 2015 17:06:38 +0100
Subject: Make peer handling more efficient
Each service process maintains a dictionary of peers, mapping an
application alias to a {pid(), #diameter_caps{}} list of connected
peers. These lists are potentially large, peers were appended to the end
of the list for no particular reason, and these long lists were
constructed/deconstructed when filtering them for pick_peer callbacks.
Many simultaneous outgoing requests could then slow the VM to a crawl,
with many scheduled processes mired in list manipulation.
The pseudo-dicts are now replaced by plain ets tables. The reason for
them was (once upon a time) to have an interface interchangeable with a
plain dict for debugging purposes, but strict swappability hasn't been the
case for some time now, and in practice a swap has never taken place.
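In outline, the replacement works as follows (a condensed sketch of the
tables the patch introduces below; variable names and match specs are
illustrative only):

  AppT   = ets:new(apps,   [bag]),   %% {Alias, TPid}
  IdentT = ets:new(idents, [bag]),   %% {{host, OH} | {realm, OR} | {OR, OH},
                                     %%  Alias,
                                     %%  TPid}

  %% Candidate peers for a realm filter then come from a lookup instead
  %% of a traversal of every peer supporting the application:
  TPids = ets:select(IdentT, [{{{realm, OR}, Alias, '$1'},
                               [],
                               ['$1']}]).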
Additional tables mapping Origin-Host/Realm have also been introduced,
to minimize the size of the peer lists when peers are filtered on
host/realm. For example, a filter like
{any, [{all, [realm, host]}, realm]}
is probably a very common case: preferring a Destination-Realm/Host
match before falling back on Destination-Realm alone. This is now more
efficiently (but not equivalently) expressed as
{first, [{all, [realm, host]}, realm]}
which stops the search when the best match is found, and which extracts
peers from the host/realm tables instead of searching through the list of
all peers supporting the application in question. The code that tries to
begin with a table lookup isn't exhaustive, and the 'any' filter is still
as inefficient as before.
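A request wanting this behaviour passes the filter as a call option, for
example (SvcName, nas and Request stand for whatever the application
actually uses):

  diameter:call(SvcName, nas, Request,
                [{filter, {first, [{all, [realm, host]}, realm]}}]).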
---
lib/diameter/doc/src/diameter.xml | 22 +-
lib/diameter/src/base/diameter_service.erl | 494 ++++++++++++++++++++-------
lib/diameter/test/diameter_traffic_SUITE.erl | 3 +-
3 files changed, 377 insertions(+), 142 deletions(-)
diff --git a/lib/diameter/doc/src/diameter.xml b/lib/diameter/doc/src/diameter.xml
index b0cff32c9a..dbbbb01138 100644
--- a/lib/diameter/doc/src/diameter.xml
+++ b/lib/diameter/doc/src/diameter.xml
@@ -466,7 +466,7 @@ Matches only those peers whose Origin-Host has the
specified value, or all peers if the atom any.
-{realm, any|&dict_DiameterIdentity;
+{realm, any|&dict_DiameterIdentity;}
-
Matches only those peers whose Origin-Realm has the
@@ -499,18 +499,22 @@ Matches only those peers matched by each filter in the specified list.
-
Matches only those peers matched by at least one filter in the
-specified list.
+specified list.
+The resulting list will be in match order, peers matching the
+first filter of the list sorting before those matched by the second,
+and so on.
+
+{first, [&peer_filter;]}
+-
-The resulting peer list will be in match order, peers matching the
-first filter of the list sorting before those matched by the second,
-and so on.
-For example, the following filter causes peers matching both the host
-and realm filters to be presented before those matching only the realm
-filter.
+Like any, but stops at the first filter for which there are
+matches, which can be much more efficient when there are many peers.
+For example, the following filter causes only peers best matching
+both the host and realm filters to be presented.
-{any, [{all, [host, realm]}, realm]}
+{first, [{all, [host, realm]}, realm]}
diff --git a/lib/diameter/src/base/diameter_service.erl b/lib/diameter/src/base/diameter_service.erl
index c5eed539ab..d83ed9e56b 100644
--- a/lib/diameter/src/base/diameter_service.erl
+++ b/lib/diameter/src/base/diameter_service.erl
@@ -82,16 +82,8 @@
-define(DEFAULT_TC, 30000). %% RFC 3588 ch 2.1
-define(RESTART_TC, 1000). %% if restart was this recent
-%% Used to be able to swap this with anything else dict-like but now
-%% rely on the fact that a service's #state{} record does not change
-%% in storing in it ?STATE table and not always going through the
-%% service process. In particular, rely on the fact that operations on
-%% a ?Dict don't change the handle to it.
--define(Dict, diameter_dict).
-
-%% Maintains state in a table. In contrast to previously, a service's
-%% stat is not constant and is accessed outside of the service
-%% process.
+%% Maintain state in a table since a service's state is accessed
+%% outside of the service process.
-define(STATE_TABLE, ?MODULE).
%% The default sequence mask.
@@ -116,12 +108,11 @@
service :: #diameter_service{},
watchdogT = ets_new(watchdogs) %% #watchdog{} at start
:: ets:tid(),
- peerT = ets_new(peers) %% #peer{pid = TPid} at okay/reopen
- :: ets:tid(),
- shared_peers = ?Dict:new() %% Alias -> [{TPid, Caps}, ...]
- :: ets:tid(),
- local_peers = ?Dict:new() %% Alias -> [{TPid, Caps}, ...]
- :: ets:tid(),
+ peerT, %% undefined in new code, but remain for upgrade
+ shared_peers, %% reasons. Replaced by local/remote.
+ local_peers, %%
+ local :: {ets:tid(), ets:tid(), ets:tid()},
+ remote :: {ets:tid(), ets:tid(), ets:tid()},
monitor = false :: false | pid(), %% process to die with
options
:: [{sequence, diameter:sequence()} %% sequence mask
@@ -149,10 +140,12 @@
%% diameter_peer_fsm.
-record(peer,
{pid :: pid(),
- apps :: [{0..16#FFFFFFFF, diameter:app_alias()}], %% {Id, Alias}
+ apps :: [{0..16#FFFFFFFF, diameter:app_alias()}] %% {Id, Alias}
+ | [diameter:app_alias()], %% remote
caps :: #diameter_caps{},
- started = now(), %% at process start
- watchdog :: pid()}). %% key into watchdogT
+ started = now(), %% at process start or sharing
+ watchdog :: pid() %% key into watchdogT
+ | undefined}). %% undefined if remote
%% ---------------------------------------------------------------------------
%% # start/1
@@ -513,7 +506,7 @@ transition(Req, S) ->
%% # terminate/2
%% ---------------------------------------------------------------------------
-terminate(Reason, #state{service_name = Name, peerT = PeerT} = S) ->
+terminate(Reason, #state{service_name = Name, local = {PeerT, _, _}} = S) ->
send_event(Name, stop),
ets:delete(?STATE_TABLE, Name),
@@ -535,23 +528,81 @@ terminate(Reason, #state{service_name = Name, peerT = PeerT} = S) ->
%% # code_change/3
%% ---------------------------------------------------------------------------
-code_change(FromVsn,
- #state{service_name = SvcName,
- service = #diameter_service{applications = Apps}}
- = S,
- Extra) ->
- lists:foreach(fun(A) ->
- code_change(FromVsn, SvcName, Extra, A)
- end,
- Apps),
+code_change(_FromVsn, #state{} = S, _Extra) ->
+ {ok, S};
+
+%% Don't support downgrade since we won't in appup.
+code_change({down = T, _}, _, _Extra) ->
+ {error, T};
+
+%% Upgrade local/shared peers dicts populated in old code. Don't
+code_change(_FromVsn, S0, _Extra) ->
+ {state, Id, SvcName, Svc, WT, PeerT, SDict, LDict, Monitor, Opts}
+ = S0,
+
+ init_peers(LT = setelement(1, {PT, _, _} = init_peers(), PeerT),
+ fun({_,A}) -> A end),
+ init_peers(init_peers(RT = init_peers(), SDict),
+ fun(A) -> A end),
+
+ S = #state{id = Id,
+ service_name = SvcName,
+ service = Svc,
+ watchdogT = WT,
+ peerT = PT, %% empty
+ shared_peers = SDict,
+ local_peers = LDict,
+ local = LT,
+ remote = RT,
+ monitor = Monitor,
+ options = Opts},
+
+ %% Replacing the table entry and deleting the old shared tables
+ %% can make outgoing requests return {error, no_connection} until
+ %% everyone is running new code. Don't delete the tables to avoid
+ %% crashing request processes.
+ ets:delete_all_objects(SDict),
+ ets:delete_all_objects(LDict),
+ ets:insert(?STATE_TABLE, S),
{ok, S}.
-code_change(FromVsn, SvcName, Extra, #diameter_app{alias = Alias} = A) ->
- {ok, S} = cb(A, code_change, [FromVsn,
- mod_state(Alias),
- Extra,
- SvcName]),
- mod_state(Alias, S).
+%% init_peers/2
+
+%% Populate app and identity bags from a new-style #peer{} sets.
+init_peers({PeerT, _, _} = T, F)
+ when is_function(F) ->
+ ets:foldl(fun(#peer{pid = P, apps = As, caps = C}, N) ->
+ insert_peer(P, lists:map(F, As), C, T),
+ N+1
+ end,
+ 0,
+ PeerT);
+
+%% Populate #peer{} table given a shared peers dict.
+init_peers({PeerT, _, _}, SDict)
+ when is_integer(SDict) ->
+ dict:fold(fun(P, As, N) ->
+ ets:update_element(PeerT, P, {#peer.apps, As}),
+ N+1
+ end,
+ 0,
+ diameter_dict:fold(fun(A, Ps, D) ->
+ init_peers(A, Ps, PeerT, D)
+ end,
+ dict:new(),
+ SDict)).
+
+%% init_peers/4
+
+init_peers(App, TCs, PeerT, Dict) ->
+ lists:foldl(fun({P,C}, D) ->
+ ets:insert(PeerT, #peer{pid = P,
+ apps = [],
+ caps = C}),
+ dict:append(P, App, D)
+ end,
+ Dict,
+ TCs).
%% ===========================================================================
%% ===========================================================================
@@ -559,9 +610,6 @@ code_change(FromVsn, SvcName, Extra, #diameter_app{alias = Alias} = A) ->
unexpected(F, A, #state{service_name = Name}) ->
?UNEXPECTED(F, A ++ [Name]).
-cb(#diameter_app{module = [_|_] = M}, F, A) ->
- eval(M, F, A).
-
eval([M|X], F, A) ->
apply(M, F, A ++ X).
@@ -581,10 +629,6 @@ choose(false, _, X) -> X.
ets_new(Tbl) ->
ets:new(Tbl, [{keypos, 2}]).
-insert(Tbl, Rec) ->
- ets:insert(Tbl, Rec),
- Rec.
-
%% Using the process dictionary for the callback state was initially
%% just a way to make what was horrendous trace (big state record and
%% much else everywhere) somewhat more readable. There's not as much
@@ -685,6 +729,8 @@ cfg_acc({SvcName, #diameter_service{applications = Apps} = Rec, Opts},
lists:foreach(fun init_mod/1, Apps),
S = #state{service_name = SvcName,
service = Rec#diameter_service{pid = self()},
+ local = init_peers(),
+ remote = init_peers(),
monitor = mref(get_value(monitor, Opts)),
options = service_options(Opts)},
{S, Acc};
@@ -694,6 +740,13 @@ cfg_acc({_Ref, Type, _Opts} = T, {S, Acc})
Type == listen ->
{S, [T | Acc]}.
+init_peers() ->
+ {ets_new(caps), %% #peer{}
+ ets:new(apps, [bag]), %% {Alias, TPid}
+ ets:new(idents, [bag])}. %% {{host, OH} | {realm, OR} | {OR, OH},
+ %% Alias,
+ %% TPid}
+
service_options(Opts) ->
[{sequence, proplists:get_value(sequence, Opts, ?NOMASK)},
{share_peers, get_value(share_peers, Opts)},
@@ -809,7 +862,7 @@ start(Ref, Type, Opts, State) ->
%% start/5
start(Ref, Type, Opts, N, #state{watchdogT = WatchdogT,
- peerT = PeerT,
+ local = {PeerT, _, _},
options = SvcOpts,
service_name = SvcName,
service = Svc0})
@@ -840,7 +893,7 @@ binary_caps(#diameter_service{capabilities = Caps} = Svc, false) ->
wd(Type, Ref, T, WatchdogT, Rec) ->
Pid = start_watchdog(Type, Ref, T),
- insert(WatchdogT, Rec#watchdog{pid = Pid}),
+ ets:insert(WatchdogT, Rec#watchdog{pid = Pid}),
Pid.
%% Note that the service record passed into the watchdog is the merged
@@ -897,8 +950,8 @@ accepted(Pid, _TPid, #state{watchdogT = WatchdogT} = S) ->
#watchdog{ref = Ref, type = accept = T, peer = false, options = Opts}
= Wd
= fetch(WatchdogT, Pid),
- insert(WatchdogT, Wd#watchdog{peer = true}),%% mark replacement as started
- start(Ref, T, Opts, S). %% start new watchdog
+ ets:insert(WatchdogT, Wd#watchdog{peer = true}),%% mark replacement started
+ start(Ref, T, Opts, S). %% start new watchdog
fetch(Tid, Key) ->
[T] = ets:lookup(Tid, Key),
@@ -924,13 +977,14 @@ watchdog(TPid, [], ?WD_SUSPECT, ?WD_OKAY, Wd, State) ->
#watchdog{peer = TPid} = Wd, %% assert
connection_up(Wd, State);
-%% Watchdog has an unresponsive connection.
+%% Watchdog has an unresponsive connection. Note that the peer table
+%% entry isn't removed until DOWN.
watchdog(TPid, [], ?WD_OKAY, ?WD_SUSPECT = To, Wd, State) ->
#watchdog{peer = TPid} = Wd, %% assert
watchdog_down(Wd, To, State);
%% Watchdog has lost its connection.
-watchdog(TPid, [], _, ?WD_DOWN = To, Wd, #state{peerT = PeerT} = S) ->
+watchdog(TPid, [], _, ?WD_DOWN = To, Wd, #state{local = {PeerT, _, _}} = S) ->
close(Wd),
watchdog_down(Wd, To, S),
ets:delete(PeerT, TPid);
@@ -939,7 +993,7 @@ watchdog(_, [], _, _, _, _) ->
ok.
watchdog_down(Wd, To, #state{watchdogT = WatchdogT} = S) ->
- insert(WatchdogT, Wd#watchdog{state = To}),
+ ets:insert(WatchdogT, Wd#watchdog{state = To}),
connection_down(Wd, To, S).
%% ---------------------------------------------------------------------------
@@ -951,14 +1005,14 @@ watchdog_down(Wd, To, #state{watchdogT = WatchdogT} = S) ->
connection_up({TPid, {Caps, SupportedApps, Pkt}},
#watchdog{pid = Pid}
= Wd,
- #state{peerT = PeerT}
+ #state{local = {PeerT, _, _}}
= S) ->
- Pr = #peer{pid = TPid,
- apps = SupportedApps,
- caps = Caps,
- watchdog = Pid},
- insert(PeerT, Pr),
- connection_up([Pkt], Wd#watchdog{peer = TPid}, Pr, S).
+ Rec = #peer{pid = TPid,
+ apps = SupportedApps,
+ caps = Caps,
+ watchdog = Pid},
+ ets:insert(PeerT, Rec),
+ connection_up([Pkt], Wd#watchdog{peer = TPid}, Rec, S).
%% ---------------------------------------------------------------------------
%% # reopen/3
@@ -968,22 +1022,23 @@ reopen({TPid, {Caps, SupportedApps, _Pkt}},
#watchdog{pid = Pid}
= Wd,
#state{watchdogT = WatchdogT,
- peerT = PeerT}) ->
- insert(PeerT, #peer{pid = TPid,
- apps = SupportedApps,
- caps = Caps,
- watchdog = Pid}),
- insert(WatchdogT, Wd#watchdog{state = ?WD_REOPEN,
- peer = TPid}).
+ local = {PeerT, _, _}}) ->
+ ets:insert(PeerT, #peer{pid = TPid,
+ apps = SupportedApps,
+ caps = Caps,
+ watchdog = Pid}),
+ ets:insert(WatchdogT, Wd#watchdog{state = ?WD_REOPEN,
+ peer = TPid}).
%% ---------------------------------------------------------------------------
%% # connection_up/2
%% ---------------------------------------------------------------------------
-%% Watchdog has recovered as suspect connection. Note that there has
+%% Watchdog has recovered a suspect connection. Note that there has
%% been no new capabilties exchange in this case.
-connection_up(#watchdog{peer = TPid} = Wd, #state{peerT = PeerT} = S) ->
+connection_up(#watchdog{peer = TPid} = Wd, #state{local = {PeerT, _, _}}
+ = S) ->
connection_up([], Wd, fetch(PeerT, TPid), S).
%% connection_up/4
@@ -994,23 +1049,23 @@ connection_up(Extra,
#peer{apps = SApps, caps = Caps}
= Pr,
#state{watchdogT = WatchdogT,
- local_peers = LDict,
+ local = LT,
service_name = SvcName,
service = #diameter_service{applications = Apps}}
= S) ->
- insert(WatchdogT, Wd#watchdog{state = ?WD_OKAY}),
+ ets:insert(WatchdogT, Wd#watchdog{state = ?WD_OKAY}),
diameter_traffic:peer_up(TPid),
- insert_local_peer(SApps, {{TPid, Caps}, {SvcName, Apps}}, LDict),
+ local_peer_up(SApps, {TPid, Caps}, {SvcName, Apps}, LT),
report_status(up, Wd, Pr, S, Extra).
-insert_local_peer(SApps, T, LDict) ->
- lists:foldl(fun(A,D) -> ilp(A, T, D) end, LDict, SApps).
+local_peer_up(SApps, {TPid, Caps} = TC, SA, LT) ->
+ insert_peer(TPid, [A || {_,A} <- SApps], Caps, LT),
+ lists:foreach(fun(A) -> peer_up(A, TC, SA) end, SApps).
-ilp({Id, Alias}, {TC, SA}, LDict) ->
- init_conn(Id, Alias, TC, SA),
- ?Dict:append(Alias, TC, LDict).
+peer_up({Id, Alias}, TC, SA) ->
+ peer_up(Id, Alias, TC, SA).
-init_conn(Id, Alias, {TPid, _} = TC, {SvcName, Apps}) ->
+peer_up(Id, Alias, {TPid, _} = TC, {SvcName, Apps}) ->
#diameter_app{id = Id} %% assert
= App
= find_app(Alias, Apps),
@@ -1108,17 +1163,17 @@ connection_down(#watchdog{state = ?WD_OKAY,
= Pr,
#state{service_name = SvcName,
service = #diameter_service{applications = Apps},
- local_peers = LDict}
+ local = LT}
= S) ->
report_status(down, Wd, Pr, S, []),
- remove_local_peer(SApps, {{TPid, Caps}, {SvcName, Apps}}, LDict),
+ local_peer_down(SApps, {TPid, Caps}, {SvcName, Apps}, LT),
diameter_traffic:peer_down(TPid);
connection_down(#watchdog{state = ?WD_OKAY,
peer = TPid}
= Wd,
To,
- #state{peerT = PeerT}
+ #state{local = {PeerT, _, _}}
= S)
when is_atom(To) ->
connection_down(Wd, #peer{} = fetch(PeerT, TPid), S);
@@ -1126,15 +1181,14 @@ connection_down(#watchdog{state = ?WD_OKAY,
connection_down(#watchdog{}, _, _) ->
ok.
-remove_local_peer(SApps, T, LDict) ->
- lists:foldl(fun(A,D) -> rlp(A, T, D) end, LDict, SApps).
+local_peer_down(SApps, {TPid, _Caps} = TC, SA, LT) ->
+ delete_peer(TPid, LT),
+ lists:foreach(fun(A) -> peer_down(A, TC, SA) end, SApps).
-rlp({Id, Alias}, {TC, SA}, LDict) ->
- L = ?Dict:fetch(Alias, LDict),
- down_conn(Id, Alias, TC, SA),
- ?Dict:store(Alias, lists:delete(TC, L), LDict).
+peer_down({Id, Alias}, TC, SA) ->
+ peer_down(Id, Alias, TC, SA).
-down_conn(Id, Alias, TC, {SvcName, Apps}) ->
+peer_down(Id, Alias, TC, {SvcName, Apps}) ->
#diameter_app{id = Id} %% assert
= App
= find_app(Alias, Apps),
@@ -1159,7 +1213,7 @@ wd_down(#watchdog{peer = B}, _)
ok;
%% ... or maybe it has.
-wd_down(#watchdog{peer = TPid} = Wd, #state{peerT = PeerT} = S) ->
+wd_down(#watchdog{peer = TPid} = Wd, #state{local = {PeerT, _, _}} = S) ->
connection_down(Wd, ?WD_DOWN, S),
ets:delete(PeerT, TPid).
@@ -1374,12 +1428,29 @@ share_peer(_, _, _, _, _) ->
%% # share_peers/2
%% ---------------------------------------------------------------------------
-share_peers(Pid, #state{options = [_, {_,T} | _], local_peers = PDict}) ->
- is_remote(Pid, T)
- andalso ?Dict:fold(fun(A,Ps,ok) -> sp(Pid, A, Ps), ok end, ok, PDict).
-
-sp(Pid, Alias, Peers) ->
- lists:foreach(fun({P,C}) -> Pid ! {peer, P, [Alias], C} end, Peers).
+share_peers(Pid, #state{options = [_, {_,SP} | _],
+ local = {PeerT, AppT, _}}) ->
+ is_remote(Pid, SP)
+ andalso ets:foldl(fun(T, N) -> N + sp(Pid, AppT, T) end,
+ 0,
+ PeerT).
+
+%% An entry in the peer table doesn't mean the watchdog state is OKAY,
+%% an entry in the app table does.
+
+sp(Pid, AppT, #peer{pid = TPid,
+ apps = [{_, Alias} | _] = Apps,
+ caps = Caps}) ->
+ Spec = [{{'$1', TPid},
+ [{'==', '$1', {const, Alias}}],
+ ['$_']}],
+ case ets:select(AppT, Spec, 1) of
+ '$end_of_table' ->
+ 0;
+ _ ->
+ Pid ! {peer, TPid, [A || {_,A} <- Apps], Caps},
+ 1
+ end.
is_remote(Pid, T) ->
Node = node(Pid),
@@ -1389,32 +1460,49 @@ is_remote(Pid, T) ->
%% # remote_peer_up/4
%% ---------------------------------------------------------------------------
-remote_peer_up(Pid, Aliases, Caps, #state{options = [_, _, {_,T} | _]} = S) ->
- is_remote(Pid, T)
- andalso rpu(Pid, Aliases, Caps, S).
+remote_peer_up(TPid, Aliases, Caps, #state{options = [_, _, {_,T} | _]} = S) ->
+ is_remote(TPid, T) andalso rpu(TPid, Aliases, Caps, S).
-rpu(Pid, Aliases, Caps, #state{service = Svc, shared_peers = PDict}) ->
+rpu(TPid, Aliases, Caps, #state{service = Svc, remote = RT}) ->
#diameter_service{applications = Apps} = Svc,
Key = #diameter_app.alias,
F = fun(A) -> lists:keymember(A, Key, Apps) end,
- rpu(Pid, lists:filter(F, Aliases), Caps, PDict);
+ rpu(TPid, lists:filter(F, Aliases), Caps, RT);
rpu(_, [] = No, _, _) ->
No;
-rpu(Pid, Aliases, Caps, PDict) ->
- monitor(process, Pid),
- T = {Pid, Caps},
- lists:foreach(fun(A) -> ?Dict:append(A, T, PDict) end, Aliases).
+rpu(TPid, Aliases, Caps, {PeerT, _, _} = RT) ->
+ monitor(process, TPid),
+ ets:insert(PeerT, #peer{pid = TPid,
+ apps = Aliases,
+ caps = Caps}),
+ insert_peer(TPid, Aliases, Caps, RT).
+
+%% insert_peer/4
+
+insert_peer(TPid, Aliases, Caps, {_PeerT, AppT, IdentT}) ->
+ #diameter_caps{origin_host = {_, OH},
+ origin_realm = {_, OR}}
+ = Caps,
+ ets:insert(AppT, [{A, TPid} || A <- Aliases]),
+ H = iolist_to_binary(OH),
+ R = iolist_to_binary(OR),
+ ets:insert(IdentT, [{T, A, TPid} || T <- [{host, H}, {realm, R}, {R, H}],
+ A <- Aliases]).
%% ---------------------------------------------------------------------------
%% # remote_peer_down/2
%% ---------------------------------------------------------------------------
-remote_peer_down(Pid, #state{shared_peers = PDict}) ->
- lists:foreach(fun(A) -> rpd(Pid, A, PDict) end, ?Dict:fetch_keys(PDict)).
+remote_peer_down(TPid, #state{remote = {PeerT, _, _} = RT}) ->
+ ets:delete(PeerT, TPid),
+ delete_peer(TPid, RT).
+
+%% delete_peer/2
-rpd(Pid, Alias, PDict) ->
- ?Dict:update(Alias, fun(Ps) -> lists:keydelete(Pid, 1, Ps) end, PDict).
+delete_peer(TPid, {_PeerT, AppT, IdentT}) ->
+ ets:select_delete(AppT, [{{'_', TPid}, [], [true]}]),
+ ets:select_delete(IdentT, [{{'_', '_', TPid}, [], [true]}]).
%% ---------------------------------------------------------------------------
%% pick_peer/4
@@ -1424,12 +1512,12 @@ pick_peer(#diameter_app{alias = Alias}
= App,
RealmAndHost,
Filter,
- #state{local_peers = L,
- shared_peers = S,
+ #state{local = LT,
+ remote = RT,
service_name = SvcName,
service = #diameter_service{pid = Pid}}) ->
- pick_peer(peers(Alias, RealmAndHost, Filter, L),
- peers(Alias, RealmAndHost, Filter, S),
+ pick_peer(peers(Alias, RealmAndHost, Filter, LT),
+ peers(Alias, RealmAndHost, Filter, RT),
Pid,
SvcName,
App).
@@ -1494,54 +1582,196 @@ pick_peer(Local,
%% peers/4
-peers(Alias, RH, Filter, Peers) ->
- case ?Dict:find(Alias, Peers) of
- {ok, L} ->
- filter(L, RH, Filter);
- error ->
+peers(Alias, RH, Filter, T) ->
+ filter(Alias, RH, Filter, T, true).
+
+%% filter/5
+%%
+%% Try to limit the peers list by starting with a host/realm lookup.
+
+filter(Alias, RH, {neg, F}, T, B) ->
+ filter(Alias, RH, F, T, not B);
+
+filter(_, _, none, _, false) ->
+ [];
+
+filter(Alias, _, none, T, true) ->
+ all_peers(Alias, T);
+
+filter(Alias, [DR,DH] = RH, K, T, B)
+ when K == realm, DR == undefined;
+ K == host, DH == undefined ->
+ filter(Alias, RH, none, T, B);
+
+filter(Alias, [DR,_] = RH, realm = K, T, B) ->
+ filter(Alias, RH, {K, DR}, T, B);
+
+filter(Alias, [_,DH] = RH, host = K, T, B) ->
+ filter(Alias, RH, {K, DH}, T, B);
+
+filter(Alias, _, {K, D}, {PeerT, _AppT, IdentT}, true)
+ when K == host;
+ K == realm ->
+ try iolist_to_binary(D) of
+ B ->
+ caps(PeerT, ets:select(IdentT, [{{{K, B}, '$1', '$2'},
+ [{'==', '$1', {const, Alias}}],
+ ['$2']}]))
+ catch
+ error:_ ->
[]
- end.
+ end;
-%% filter/3
+filter(Alias, RH, {all, Filters}, T, B)
+ when is_list(Filters) ->
+ fltr_all(Alias, RH, Filters, T, B);
+
+filter(Alias, RH, {first, Filters}, T, B)
+ when is_list(Filters) ->
+ fltr_first(Alias, RH, Filters, T, B);
+
+filter(Alias, RH, Filter, T, B) ->
+ {Ts, Fs} = filter(all_peers(Alias, T), RH, Filter),
+ choose(B, Ts, Fs).
+
+%% fltr_all/5
+
+fltr_all(Alias, RH, [{K, any} | Filters], T, B)
+ when K == host;
+ K == realm ->
+ fltr_all(Alias, RH, Filters, T, B);
+
+fltr_all(Alias, RH, [{host, _} = H, {realm, _} = R | Filters], T, B) ->
+ fltr_all(Alias, RH, [R, H | Filters], T, B);
+
+fltr_all(Alias, RH, [{realm, _} = R, {host, any} | Filters], T, B) ->
+ fltr_all(Alias, RH, [R | Filters], T, B);
+
+fltr_all(Alias, RH, [{realm, OR}, {host, OH} | Filters], T, true) ->
+ {PeerT, _AppT, IdentT} = T,
+ try {iolist_to_binary(OR), iolist_to_binary(OH)} of
+ BT ->
+ Peers = caps(PeerT,
+ ets:select(IdentT, [{{BT, '$1', '$2'},
+ [{'==', '$1', {const, Alias}}],
+ ['$2']}])),
+ {Ts, _} = filter(Peers, RH, {all, Filters}),
+ Ts
+ catch
+ error:_ ->
+ []
+ end;
+
+fltr_all(Alias, [undefined,_] = RH, [realm | Filters], T, B) ->
+ fltr_all(Alias, RH, Filters, T, B);
+
+fltr_all(Alias, [DR,_] = RH, [realm | Filters], T, B) ->
+ fltr_all(Alias, RH, [{realm, DR} | Filters], T, B);
+
+fltr_all(Alias, [_,undefined] = RH, [host | Filters], T, B) ->
+ fltr_all(Alias, RH, Filters, T, B);
+
+fltr_all(Alias, [_,DH] = RH, [host | Filters], T, B) ->
+ fltr_all(Alias, RH, [{host, DH} | Filters], T, B);
+
+fltr_all(Alias, RH, [{K, _} = KT, KA | Filters], T, B)
+ when K == host, KA == realm;
+ K == realm, KA == host ->
+ fltr_all(Alias, RH, [KA, KT | Filters], T, B);
+
+fltr_all(Alias, RH, [F | Filters], T, B) ->
+ {Ts, Fs} = filter(filter(Alias, RH, F, T, B), RH, {all, Filters}),
+ choose(B, Ts, Fs);
+
+fltr_all(Alias, RH, [], T, B) ->
+ filter(Alias, RH, none, T, B).
+
+%% fltr_first/5
%%
-%% Return peers in match order.
+%% Like any, but stop at the first filter with any matches.
+
+fltr_first(Alias, RH, [F | Filters], T, B) ->
+ case filter(Alias, RH, F, T, B) of
+ [] ->
+ fltr_first(Alias, RH, Filters, T, B);
+ [_|_] = Ts ->
+ Ts
+ end;
+
+fltr_first(Alias, RH, [], T, B) ->
+ filter(Alias, RH, none, T, not B).
+
+%% all_peers/2
+
+all_peers(Alias, {PeerT, AppT, _}) ->
+ ets:select(PeerT, [{#peer{pid = P, caps = '$1', _ = '_'},
+ [],
+ [{{P, '$1'}}]}
+ || {_,P} <- ets:lookup(AppT, Alias)]).
+
+%% caps/2
-filter(Peers, RH, Filter) ->
- {Ts, _} = fltr(Peers, RH, Filter),
- Ts.
+caps(PeerT, Pids) ->
+ ets:select(PeerT, [{#peer{pid = P, caps = '$1', _ = '_'},
+ [],
+ [{{P, '$1'}}]}
+ || P <- Pids]).
-%% fltr/4
+%% filter/3
+%%
+%% Return peers in match order.
-fltr(Peers, _, none) ->
+filter(Peers, _, none) ->
{Peers, []};
-fltr(Peers, RH, {neg, F}) ->
- {Ts, Fs} = fltr(Peers, RH, F),
+filter(Peers, RH, {neg, F}) ->
+ {Ts, Fs} = filter(Peers, RH, F),
{Fs, Ts};
-fltr(Peers, RH, {all, L})
+filter(Peers, RH, {all, L})
when is_list(L) ->
lists:foldl(fun(F,A) -> fltr_all(F, A, RH) end,
{Peers, []},
L);
-fltr(Peers, RH, {any, L})
+filter(Peers, RH, {any, L})
when is_list(L) ->
lists:foldl(fun(F,A) -> fltr_any(F, A, RH) end,
{[], Peers},
L);
-fltr(Peers, RH, F) ->
+filter(Peers, RH, {first, L})
+ when is_list(L) ->
+ fltr_first(Peers, RH, L);
+
+filter(Peers, RH, F) ->
lists:partition(fun({_,C}) -> caps_filter(C, RH, F) end, Peers).
+%% fltr_all/3
+
fltr_all(F, {Ts0, Fs0}, RH) ->
- {Ts1, Fs1} = fltr(Ts0, RH, F),
+ {Ts1, Fs1} = filter(Ts0, RH, F),
{Ts1, Fs0 ++ Fs1}.
+%% fltr_any/3
+
fltr_any(F, {Ts0, Fs0}, RH) ->
- {Ts1, Fs1} = fltr(Fs0, RH, F),
+ {Ts1, Fs1} = filter(Fs0, RH, F),
{Ts0 ++ Ts1, Fs1}.
+%% fltr_first/3
+
+fltr_first(Peers, _, []) ->
+ {[], Peers};
+
+fltr_first(Peers, RH, [F | Filters]) ->
+ case filter(Peers, RH, F) of
+ {[], _} ->
+ fltr_first(Peers, RH, Filters);
+ {_, _} = T ->
+ T
+ end.
+
%% caps_filter/3
caps_filter(#diameter_caps{origin_host = {_,OH}}, [_,DH], host) ->
@@ -1585,8 +1815,8 @@ eq(Any, Id, PeerId) ->
transports(#state{watchdogT = WatchdogT}) ->
ets:select(WatchdogT, [{#watchdog{peer = '$1', _ = '_'},
- [{'is_pid', '$1'}],
- ['$1']}]).
+ [{'is_pid', '$1'}],
+ ['$1']}]).
%% ---------------------------------------------------------------------------
%% # service_info/2
@@ -1639,7 +1869,7 @@ tagged_info(Item, S)
undefined
end;
-tagged_info(TPid, #state{watchdogT = WatchdogT, peerT = PeerT})
+tagged_info(TPid, #state{watchdogT = WatchdogT, local = {PeerT, _, _}})
when is_pid(TPid) ->
try
[#peer{watchdog = Pid}] = ets:lookup(PeerT, TPid),
@@ -1789,7 +2019,7 @@ keys(connect = T, Opts) ->
keys(_, _) ->
[{listen, accept}].
-peer_dict(#state{watchdogT = WatchdogT, peerT = PeerT}, Dict0) ->
+peer_dict(#state{watchdogT = WatchdogT, local = {PeerT, _, _}}, Dict0) ->
try ets:tab2list(WatchdogT) of
L -> lists:foldl(fun(T,A) -> peer_acc(PeerT, A, T) end, Dict0, L)
catch
diff --git a/lib/diameter/test/diameter_traffic_SUITE.erl b/lib/diameter/test/diameter_traffic_SUITE.erl
index fe6dd7b617..76e939f272 100644
--- a/lib/diameter/test/diameter_traffic_SUITE.erl
+++ b/lib/diameter/test/diameter_traffic_SUITE.erl
@@ -724,7 +724,8 @@ send_any_2(Config) ->
Req = ['STR', {'Termination-Cause', ?LOGOUT},
{'Destination-Host', [?HOST(SN, "unknown.org")]}],
?answer_message(?UNABLE_TO_DELIVER)
- = call(Config, Req, [{filter, {any, [host, realm]}}]).
+ = call(Config, Req, [{filter, {first, [{all, [host, realm]},
+ realm]}}]).
%% Send with a conjunctive filter.
send_all_1(Config) ->
--
cgit v1.2.3
From 32b045c47a1cbba3e89a29d669af826d3ed639c1 Mon Sep 17 00:00:00 2001
From: Erlang/OTP
Date: Tue, 26 Jan 2016 10:58:26 +0100
Subject: Update release notes
---
lib/diameter/doc/src/notes.xml | 21 +++++++++++++++++++++
1 file changed, 21 insertions(+)
diff --git a/lib/diameter/doc/src/notes.xml b/lib/diameter/doc/src/notes.xml
index 838b1ad7dc..2fc30db324 100644
--- a/lib/diameter/doc/src/notes.xml
+++ b/lib/diameter/doc/src/notes.xml
@@ -42,6 +42,27 @@ first.
+diameter 1.9.2.3
+
+ Fixed Bugs and Malfunctions
+
+ -
+
+ Make peer handling more efficient.
+
+ Inefficient lookup and manipulation of peer lists could
+ result in poor performance when many outgoing requests
+ were sent simultaneously, or when many peers connected
+ simultaneously. Filtering peer lists on realm/host is now
+ also more efficient in many cases.
+
+ Own Id: OTP-13164
+
+
+
+
+
+
diameter 1.9.2.2
Fixed Bugs and Malfunctions
--
cgit v1.2.3