author     Anders Svensson <[email protected]>  2015-01-23 20:02:25 +0100
committer  Anders Svensson <[email protected]>  2015-02-20 01:51:34 +0100
commit     4b691d8d8a5eae2b0b34e1e0ec00fa5116384d69
tree       a4dfa83aa227291c76922417f10379c5ca299bcc /lib/diameter/src/base/diameter_service.erl
parent     af87b1c3d4897840d8247589a88d3611106ecedc
Add transport_opt() pool_size
Transport processes are started by diameter one at a time. In the
listening case, a transport process accepts a connection, tells the
peer_fsm process, which tells its watchdog process, which tells its
service process, which then starts a new watchdog, which starts a new
peer_fsm, which starts a new transport process, which (finally) goes
about accepting another connection. In other words, not particularly
aggressive in accepting new connections. This behaviour doesn't do
particularly well with a large number of concurrent connections: with
TCP and 250 connecting peers we see connections being refused.

This commit adds the possibility of configuring a pool of accepting
processes, by way of a new transport option, pool_size. Instead of
diameter:add_transport/2 starting just a single process, it now starts
the configured number, so that instead of a single process waiting for
a connection there's now a pool.

The option is even available for connecting processes, which provides
an alternative to adding multiple transports when multiple connections
to the same peer are required. In practice this also means configuring
{restrict_connections, false}: this is not implicit.

For backwards compatibility, the form of
diameter:service_info(_, transport) differs in the connecting case,
depending on whether or not pool_size is configured.

Note that transport processes for the same transport_ref() can be
started concurrently when pool_size > 1. This places additional
requirements on diameter_{tcp,sctp}, which will be dealt with in a
subsequent commit.
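For illustration, a minimal sketch of how the new option might be
configured on a listening transport (the service name and the
transport_config values below are hypothetical, not taken from this
commit):

    %% Start a pool of 10 accepting transport processes instead of one.
    TOpts = [{transport_module, diameter_tcp},
             {transport_config, [{reuseaddr, true},
                                 {ip, {127,0,0,1}},
                                 {port, 3868}]},
             {pool_size, 10}],
    {ok, _Ref} = diameter:add_transport(my_service, {listen, TOpts}).

With {connect, TOpts} the same option would instead establish multiple
connections to the peer, in which case {restrict_connections, false}
must also be configured on the service, as noted above.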
Diffstat (limited to 'lib/diameter/src/base/diameter_service.erl')
-rw-r--r--  lib/diameter/src/base/diameter_service.erl | 85
1 file changed, 54 insertions(+), 31 deletions(-)
diff --git a/lib/diameter/src/base/diameter_service.erl b/lib/diameter/src/base/diameter_service.erl
index 76b05a2ad4..96897c71eb 100644
--- a/lib/diameter/src/base/diameter_service.erl
+++ b/lib/diameter/src/base/diameter_service.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2014. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2015. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -765,8 +765,9 @@ reason(failure) ->
start(Ref, {T, Opts}, S)
when T == connect;
T == listen ->
+ N = proplists:get_value(pool_size, Opts, 1),
try
- {ok, start(Ref, type(T), Opts, S)}
+ {ok, start(Ref, type(T), Opts, N, S)}
catch
?FAILURE(Reason) ->
{error, Reason}
@@ -784,11 +785,16 @@ type(connect = T) -> T.
%% start/4
-start(Ref, Type, Opts, #state{watchdogT = WatchdogT,
- peerT = PeerT,
- options = SvcOpts,
- service_name = SvcName,
- service = Svc0})
+start(Ref, Type, Opts, State) ->
+ start(Ref, Type, Opts, 1, State).
+
+%% start/5
+
+start(Ref, Type, Opts, N, #state{watchdogT = WatchdogT,
+ peerT = PeerT,
+ options = SvcOpts,
+ service_name = SvcName,
+ service = Svc0})
when Type == connect;
Type == accept ->
#diameter_service{applications = Apps}
@@ -796,14 +802,19 @@ start(Ref, Type, Opts, #state{watchdogT = WatchdogT,
= merge_service(Opts, Svc0),
{_,_} = Mask = proplists:get_value(sequence, SvcOpts),
RecvData = diameter_traffic:make_recvdata([SvcName, PeerT, Apps, Mask]),
- Pid = s(Type, Ref, {{spawn_opts([Opts, SvcOpts]), RecvData},
- Opts,
- SvcOpts,
- Svc}),
- insert(WatchdogT, #watchdog{pid = Pid,
- type = Type,
- ref = Ref,
- options = Opts}),
+ T = {{spawn_opts([Opts, SvcOpts]), RecvData}, Opts, SvcOpts, Svc},
+ Rec = #watchdog{type = Type,
+ ref = Ref,
+ options = Opts},
+ diameter_lib:fold_n(fun(_,A) ->
+ [wd(Type, Ref, T, WatchdogT, Rec) | A]
+ end,
+ [],
+ N).
+
+wd(Type, Ref, T, WatchdogT, Rec) ->
+ Pid = wd(Type, Ref, T),
+ insert(WatchdogT, Rec#watchdog{pid = Pid}),
Pid.
%% Note that the service record passed into the watchdog is the merged
@@ -816,7 +827,7 @@ spawn_opts(Optss) ->
T /= link,
T /= monitor].
-s(Type, Ref, T) ->
+wd(Type, Ref, T) ->
{_MRef, Pid} = diameter_watchdog:start({Type, Ref}, T),
Pid.
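As an aside, diameter_lib:fold_n/3 used in start/5 above is an internal
helper not shown in this diff; a minimal sketch of its presumed
behaviour (folding the fun over the integers N..1, so that start/5
returns a list of N watchdog pids) is:

    fold_n(F, Acc, N)
      when is_integer(N), 0 < N ->
        fold_n(F, F(N, Acc), N - 1);
    fold_n(_, Acc, _) ->
        Acc.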
@@ -1718,31 +1729,43 @@ info_transport(S) ->
[],
PeerD).
-%% Only a config entry for a listening transport: use it.
-transport([[{type, listen}, _] = L]) ->
- L ++ [{accept, []}];
-
-%% Only one config or peer entry for a connecting transport: use it.
-transport([[{type, connect} | _] = L]) ->
- L;
+%% Single config entry. Distinguish between pool_size config or not on
+%% a connecting transport for backwards compatibility: with the option
+%% the form is similar to the listening case, with connections grouped
+%% in a pool tuple (for lack of a better name), without as before.
+transport([[{type, Type}, {options, Opts}] = L])
+ when Type == listen;
+ Type == connect ->
+ L ++ [{K, []} || [{_,K}] <- [keys(Type, Opts)]];
%% Peer entries: discard config. Note that the peer entries have
%% length at least 3.
transport([[_,_] | L]) ->
transport(L);
-%% Possibly many peer entries for a listening transport. Note that all
-%% have the same options by construction, which is not terribly space
-%% efficient.
-transport([[{type, accept}, {options, Opts} | _] | _] = Ls) ->
- [{type, listen},
+%% Multiple transports. Note that all have the same options by
+%% construction, which is not terribly space efficient.
+transport([[{type, Type}, {options, Opts} | _] | _] = Ls) ->
+ transport(keys(Type, Opts), Ls).
+
+%% Group transports in an accept or pool tuple ...
+transport([{Type, Key}], [[{type, _}, {options, Opts} | _] | _] = Ls) ->
+ [{type, Type},
{options, Opts},
- {accept, [lists:nthtail(2,L) || L <- Ls]}].
+ {Key, [tl(tl(L)) || L <- Ls]}];
+
+%% ... or not: there can only be one.
+transport([], [L]) ->
+ L.
+
+keys(connect = T, Opts) ->
+ [{T, pool} || lists:keymember(pool_size, 1, Opts)];
+keys(_, _) ->
+ [{listen, accept}].
peer_dict(#state{watchdogT = WatchdogT, peerT = PeerT}, Dict0) ->
try ets:tab2list(WatchdogT) of
- L ->
- lists:foldl(fun(T,A) -> peer_acc(PeerT, A, T) end, Dict0, L)
+ L -> lists:foldl(fun(T,A) -> peer_acc(PeerT, A, T) end, Dict0, L)
catch
error: badarg -> Dict0 %% service has gone down
end.
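For context on the backwards compatibility note in transport/1 above, a
sketch of the two forms a connecting transport can take in
diameter:service_info(_, transport) (entry contents abbreviated and
illustrative, not taken from this commit):

    %% Without pool_size configured: a single flat entry, as before.
    [{type, connect}, {options, Opts} | Rest]

    %% With pool_size configured: peer entries grouped in a pool
    %% tuple, mirroring the accept tuple of the listening case.
    [{type, connect}, {options, Opts}, {pool, [Rest1, Rest2 | More]}]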