From 86400f7630699395cbe6f78864de4534c913be4c Mon Sep 17 00:00:00 2001 From: Anders Svensson Date: Sat, 23 Feb 2013 20:40:00 +0100 Subject: Distribution fixes This is the functionality that allows transports to be shared between identically-named services on different nodes, which has been neither documented nor tested (until now). --- lib/diameter/src/base/diameter.erl | 4 +++- lib/diameter/src/base/diameter_config.erl | 2 +- lib/diameter/src/base/diameter_service.erl | 14 ++++++++------ lib/diameter/src/base/diameter_traffic.erl | 16 +++++++++------- 4 files changed, 21 insertions(+), 15 deletions(-) diff --git a/lib/diameter/src/base/diameter.erl b/lib/diameter/src/base/diameter.erl index c67fba5f89..1a96cecc83 100644 --- a/lib/diameter/src/base/diameter.erl +++ b/lib/diameter/src/base/diameter.erl @@ -298,7 +298,9 @@ call(SvcName, App, Message) -> :: capability() | {application, [application_opt()]} | {restrict_connections, restriction()} - | {sequence, sequence() | evaluable()}. + | {sequence, sequence() | evaluable()} + | {share_peers, boolean()} + | {use_shared_peers, boolean()}. -type application_opt() :: {alias, app_alias()} diff --git a/lib/diameter/src/base/diameter_config.erl b/lib/diameter/src/base/diameter_config.erl index 9f73815756..a638849b08 100644 --- a/lib/diameter/src/base/diameter_config.erl +++ b/lib/diameter/src/base/diameter_config.erl @@ -588,7 +588,7 @@ opt(K, false = B) B; opt(K, true = B) - when K == share_peer; + when K == share_peers; K == use_shared_peers -> B; diff --git a/lib/diameter/src/base/diameter_service.erl b/lib/diameter/src/base/diameter_service.erl index f1342df16c..edebd2fc77 100644 --- a/lib/diameter/src/base/diameter_service.erl +++ b/lib/diameter/src/base/diameter_service.erl @@ -1233,12 +1233,12 @@ report_status(Status, peer = TPid, type = Type, options = Opts}, - #peer{apps = [_|_] = As, + #peer{apps = [_|_] = Apps, caps = Caps}, #state{service_name = SvcName} = S, Extra) -> - share_peer(Status, Caps, As, TPid, S), + share_peer(Status, Caps, Apps, TPid, S), Info = [Status, Ref, {TPid, Caps}, {type(Type), Opts} | Extra], send_event(SvcName, list_to_tuple(Info)). @@ -1255,9 +1255,9 @@ send_event(#diameter_event{service = SvcName} = E) -> %% # share_peer/5 %% --------------------------------------------------------------------------- -share_peer(up, Caps, Aliases, TPid, #state{options = [_, {_, true} | _], +share_peer(up, Caps, Apps, TPid, #state{options = [_, {_, true} | _], service_name = Svc}) -> - diameter_peer:notify(Svc, {peer, TPid, Aliases, Caps}); + diameter_peer:notify(Svc, {peer, TPid, [A || {_,A} <- Apps], Caps}); share_peer(_, _, _, _, _) -> ok. @@ -1285,8 +1285,10 @@ remote_peer_up(Pid, Aliases, Caps, #state{options = [_, _, {_, true} | _], shared_peers = PDict}) -> #diameter_service{applications = Apps} = Svc, Key = #diameter_app.alias, - As = lists:filter(fun(A) -> lists:keymember(A, Key, Apps) end, Aliases), - rpu(Pid, Caps, PDict, As); + rpu(Pid, Caps, PDict, lists:filter(fun(A) -> + lists:keymember(A, Key, Apps) + end, + Aliases)); remote_peer_up(_, _, _, #state{options = [_, _, {_, false} | _]}) -> ok. diff --git a/lib/diameter/src/base/diameter_traffic.erl b/lib/diameter/src/base/diameter_traffic.erl index f527f7c754..25b902e3f2 100644 --- a/lib/diameter/src/base/diameter_traffic.erl +++ b/lib/diameter/src/base/diameter_traffic.erl @@ -1479,12 +1479,14 @@ send({TPid, Pkt, #request{handler = Pid} = Req, SvcName, Timeout, TRef}) -> Req#request{handler = self()}, SvcName, Timeout), - Pid ! 
reref(receive T -> T end, Ref, TRef). - -reref({T, Ref, R}, Ref, TRef) -> - {T, TRef, R}; -reref(T, _, _) -> - T. + receive + {answer, _, _, _, _} = A -> + Pid ! A; + {failover = T, Ref} -> + Pid ! {T, TRef}; + T -> + exit({timeout, Ref, TPid} = T) + end. %% send/2 @@ -1559,7 +1561,7 @@ resend_request(Pkt0, store_request(TPid, Bin, Req, Timeout) -> Seqs = diameter_codec:sequence_numbers(Bin), - TRef = erlang:start_timer(Timeout, self(), timeout), + TRef = erlang:start_timer(Timeout, self(), TPid), ets:insert(?REQUEST_TABLE, {Seqs, Req, TRef}), ets:member(?REQUEST_TABLE, TPid) orelse (self() ! {failover, TRef}), %% failover/1 may have missed -- cgit v1.2.3 From bbd5611dab2d7cbd76a5425a604c8452469ac8ae Mon Sep 17 00:00:00 2001 From: Anders Svensson Date: Sat, 23 Feb 2013 23:40:29 +0100 Subject: Document distribution config --- lib/diameter/doc/src/diameter.xml | 55 +++++++++++++++++++++++++++++++++++ lib/diameter/doc/src/diameter_app.xml | 32 ++++++++++++-------- 2 files changed, 75 insertions(+), 12 deletions(-) diff --git a/lib/diameter/doc/src/diameter.xml b/lib/diameter/doc/src/diameter.xml index 379e9f0738..df9e03b2da 100644 --- a/lib/diameter/doc/src/diameter.xml +++ b/lib/diameter/doc/src/diameter.xml @@ -1,5 +1,7 @@ erlang:nodes/0'> erlang:make_ref/0'> 1 bsl (32-N).

Defaults to {0,32}.

+ + +

+Multiple Erlang nodes implementing the same Diameter node should +be configured with different sequence masks to ensure that each node +uses a unique range of End-to-End and Hop-by-Hop identifiers for +outgoing requests.
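For illustration only (an editorial sketch, not part of the patch), two nodes
implementing the same Diameter node might be started with different masks,
SvcOpts standing for whatever other service options they have in common:

  %% Disjoint identifier spaces: the top 2 bits of End-to-End and
  %% Hop-by-Hop identifiers are 00 on node 1 and 01 on node 2.
  start(node1, SvcOpts) ->
      diameter:start_service(my_service, [{sequence, {0, 30}} | SvcOpts]);
  start(node2, SvcOpts) ->
      diameter:start_service(my_service, [{sequence, {1, 30}} | SvcOpts]).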

+
+{share_peers, boolean()}
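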

+Specifies whether or not peer connections on the local Erlang node +are shared with services on visible nodes (as returned by &nodes;). +Peers shared from remote nodes become available in the candidates list +passed as the second argument to &app_pick_peer; callbacks.

+ +

+Defaults to false.

+ + +

+Peers are only shared with other services of the same name. +Since the value of the &application_opt; alias is the handle +for identifying a peer, both local and remote, as a candidate for an +outgoing request, services that share peers should use the same +aliases for identifying their supported applications.
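As an editorial sketch (not part of the patch), two such services might both
configure the application under the same alias; the callback module name here
is a placeholder:

  %% Same alias on every node, so a peer shared from one node can be
  %% selected for requests made on another.
  {application, [{alias, nasreq},
                 {dictionary, diameter_gen_base_rfc6733},
                 {module, my_diameter_cb}]}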

+
+ + +

+Services that share peers can do so in order to distribute the +implementation of a Diameter node across multiple Erlang nodes, in +which case the participating services should typically be configured +with identical &capabilities;.

+
+
+{use_shared_peers, boolean()}

+Specifies whether or not the service makes use of peer connections +shared by identically named services on other Erlang nodes.

+ +

+Defaults to false.

+ + +

+A service that does not use shared peers will always pass the empty +list as the second argument of &app_pick_peer; callbacks.
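For example (an editorial sketch, not part of the patch), a service acting as
one part of a distributed Diameter node might enable both options, with
CommonOpts standing for the remaining, identical, options:

  %% Offer local peer connections to other nodes and use those offered
  %% by them.
  start(CommonOpts) ->
      diameter:start_service(my_service, [{share_peers, true},
                                          {use_shared_peers, true}
                                          | CommonOpts]).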

+
diff --git a/lib/diameter/doc/src/diameter_app.xml b/lib/diameter/doc/src/diameter_app.xml index d0f1b22ebd..d094e1bade 100644 --- a/lib/diameter/doc/src/diameter_app.xml +++ b/lib/diameter/doc/src/diameter_app.xml @@ -196,7 +196,8 @@ process.

-Invoked to signal the availability of a peer connection. +Invoked to signal the availability of a peer connection on the local +Erlang node. In particular, capabilities exchange with the peer has indicated support for the application in question, the RFC 3539 watchdog state machine for the connection has reached state OKAY and Diameter @@ -230,8 +231,8 @@ handled independently of &peer_up; and &peer_down;.

-Invoked to signal that a peer connection is no longer available -following a previous call to &peer_up;. +Invoked to signal that a peer connection on the local Erlang node is +no longer available following a previous call to &peer_up;. In particular, that the RFC 3539 watchdog state machine for the connection has left state OKAY and the peer will no longer be a candidate in &pick_peer; callbacks.

@@ -240,11 +241,11 @@ candidate in &pick_peer; callbacks.

-Mod:pick_peer(Candidates, _Reserved, SvcName, State) +Mod:pick_peer(LocalCandidates, RemoteCandidates, SvcName, State) -> Selection | false Select a target peer for an outgoing request. -Candidates = [&peer;] +LocalCandidates = RemoteCandidates = [&peer;] SvcName = &mod_service_name; State = NewState = &state; Selection = {ok, Peer} | {Peer, NewState} @@ -257,7 +258,7 @@ peer for an outgoing request. The return value indicates the selected peer.

-The candidate list contains only those peers that have advertised +The candidate lists contain only those peers that have advertised support for the Diameter application in question during capabilities exchange, that have not be excluded by a filter option in the call to &mod_call; @@ -266,7 +267,11 @@ The order of the elements is unspecified except that any peers whose Origin-Host and Origin-Realm matches that of the outgoing request (in the sense of a {filter, {all, [host, realm]}} option to &mod_call;) -will be placed at the head of the list.

+will be placed at the head of the list. +LocalCandidates contains peers whose transport process resides +on the local Erlang node while +RemoteCandidates contains peers that have been communicated +from other nodes by services of the same name.

A callback that returns a peer() will be followed by a @@ -285,10 +290,6 @@ an alternate peer will be followed by any additional callbacks since a retransmission to an alternate peer is abandoned if an answer is received from a previously selected peer.

-

-Returning false or {false, NewState} causes {error, -no_connection} to be returned from &mod_call;.

-

The return values false and {false, State} (that is, NewState = State) are equivalent, as are {ok, Peer} and @@ -296,6 +297,13 @@ The return values false and {false, State} (that is,

+RemoteCandidates is the empty list if the service has been +configured with the (default) &mod_service_opt; +{use_shared_peers, false}.
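A minimal sketch of a callback that prefers a local transport and falls back
to a shared one (editorial illustration only, not part of the patch):

  %% Use a peer whose transport runs on this node if there is one,
  %% otherwise a peer shared from another node, otherwise fail.
  pick_peer([Local | _], _Remote, _SvcName, _State) ->
      {ok, Local};
  pick_peer([], [Remote | _], _SvcName, _State) ->
      {ok, Remote};
  pick_peer([], [], _SvcName, _State) ->
      false.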

+ + + +

The return value {Peer, NewState} is only allowed if the Diameter
application in question was configured with the &mod_application_opt;
{call_mutates_state, true}.
Otherwise, the State argument is always the initial value as configured
on the application, not any subsequent value returned by a &peer_up;
or &peer_down; callback.

- +
-- cgit v1.2.3 From 63e21fa7d8aa6761640f9cf357663b03578a5446 Mon Sep 17 00:00:00 2001 From: Anders Svensson Date: Sat, 23 Feb 2013 20:42:01 +0100 Subject: Add distribution suite --- lib/diameter/test/diameter_distribution_SUITE.erl | 353 ++++++++++++++++++++++ lib/diameter/test/diameter_util.erl | 3 + lib/diameter/test/modules.mk | 1 + 3 files changed, 357 insertions(+) create mode 100644 lib/diameter/test/diameter_distribution_SUITE.erl diff --git a/lib/diameter/test/diameter_distribution_SUITE.erl b/lib/diameter/test/diameter_distribution_SUITE.erl new file mode 100644 index 0000000000..264def1e98 --- /dev/null +++ b/lib/diameter/test/diameter_distribution_SUITE.erl @@ -0,0 +1,353 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2013. All Rights Reserved. +%% +%% The contents of this file are subject to the Erlang Public License, +%% Version 1.1, (the "License"); you may not use this file except in +%% compliance with the License. You should have received a copy of the +%% Erlang Public License along with this software. If not, it can be +%% retrieved online at http://www.erlang.org/. +%% +%% Software distributed under the License is distributed on an "AS IS" +%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See +%% the License for the specific language governing rights and limitations +%% under the License. +%% +%% %CopyrightEnd% +%% + +%% +%% Tests of traffic between two Diameter nodes, the client being +%% spread across three Erlang nodes. +%% + +-module(diameter_distribution_SUITE). + +-export([suite/0, + all/0]). + +%% testcases +-export([start/1, + ping/1, + connect/1, + send_local/1, + send_remote/1, + send_timeout/1, + send_failover/1, + stop/1]). + +%% diameter callbacks +-export([peer_up/3, + peer_down/3, + pick_peer/5, + prepare_request/4, + prepare_retransmit/4, + handle_answer/5, + handle_error/5, + handle_request/3]). + +-export([call/1]). + +-include("diameter.hrl"). +-include("diameter_gen_base_rfc6733.hrl"). + +%% =========================================================================== + +-define(util, diameter_util). + +-define(CLIENT, 'CLIENT'). +-define(SERVER, 'SERVER'). +-define(REALM, "erlang.org"). +-define(DICT, diameter_gen_base_rfc6733). +-define(ADDR, {127,0,0,1}). + +%% Config for diameter:start_service/2. +-define(SERVICE(Host), + [{'Origin-Host', Host ++ [$.|?REALM]}, + {'Origin-Realm', ?REALM}, + {'Host-IP-Address', [?ADDR]}, + {'Vendor-Id', 12345}, + {'Product-Name', "OTP/diameter"}, + {'Auth-Application-Id', [?DICT:id()]}, + {'Origin-State-Id', origin()}, + {share_peers, true}, + {use_shared_peers, true}, + {restrict_connections, false}, + {sequence, fun sequence/0}, + {application, [{dictionary, ?DICT}, + {module, ?MODULE}, + {request_errors, callback}, + {answer_errors, callback}]}]). + +-define(SUCCESS, 2001). +-define(BUSY, 3004). +-define(LOGOUT, ?'DIAMETER_BASE_TERMINATION-CAUSE_LOGOUT'). +-define(MOVED, ?'DIAMETER_BASE_TERMINATION-CAUSE_USER_MOVED'). +-define(TIMEOUT, ?'DIAMETER_BASE_TERMINATION-CAUSE_SESSION_TIMEOUT'). + +-define(L, atom_to_list). +-define(A, list_to_atom). + +%% The order here is significant and causes the server to listen +%% before the clients connect. +-define(NODES, [{server, ?SERVER}, + {client0, ?CLIENT}, + {client1, ?CLIENT}, + {client2, ?CLIENT}]). + +%% =========================================================================== + +suite() -> + [{timetrap, {seconds, 60}}]. + +all() -> + [start, + ping, + connect, + send_local, + send_remote, + send_timeout, + send_failover, + stop]. 
+ +%% =========================================================================== +%% start/stop testcases + +%% start/1 +%% +%% Start three slave nodes, one that implement a Diameter server and +%% two that implement a client. + +start(SvcName) + when is_atom(SvcName) -> + ok = diameter:start(), + ok = diameter:start_service(SvcName, ?SERVICE((?L(SvcName)))); + +start(Config) -> + Dir = filename:dirname(code:which(?MODULE)), + Nodes = [{N, Svcname} || {Nodename, Svcname} <- ?NODES, + N <- [slave(Nodename, Dir)]], + [] = [RC || {N,S} <- Nodes, + RC <- [rpc:call(N, ?MODULE, start, [S])], + RC /= ok], + ?util:write_priv(Config, nodes, Nodes). + +slave(Name, Dir) -> + {ok, Node} = ct_slave:start(Name), + ok = rpc:call(Node, + code, + add_pathsa, + [[Dir, filename:join([Dir, "..", "ebin"])]]), + Node. + +sequence() -> + sequence(sname()). + +sequence(server) -> + {0,32}; +sequence(Client) -> + "client" ++ N = ?L(Client), + {list_to_integer(N), 30}. + +origin() -> + origin(sname()). + +origin(server) -> + 99; +origin(Client) -> + "client" ++ N = ?L(Client), + list_to_integer(N). + +%% ping/1 +%% +%% Ensure the client nodes are connected since the sharing of +%% transports is only between connected nodes. + +ping({?SERVER, _Nodes}) -> + ok; + +ping({?CLIENT, Nodes}) -> + {_, []} = {node(), [N || {N,_} <- Nodes, + node() /= N, + pang <- [net_adm:ping(N)]]}, + ok; + +ping(Config) -> + Nodes = ?util:read_priv(Config, nodes), + [] = [RC || {N,S} <- Nodes, + RC <- [rpc:call(N, ?MODULE, ping, [{S, Nodes}])], + RC /= ok]. + +%% connect/1 +%% +%% Establish one connection to the server from each of the client +%% nodes. + +connect({?SERVER, Config}) -> + ?util:write_priv(Config, lref, {node(), ?util:listen(?SERVER, tcp)}), + ok; + +connect({?CLIENT, Config}) -> + ?util:connect(?CLIENT, tcp, ?util:read_priv(Config, lref)), + ok; + +connect(Config) -> + Nodes = ?util:read_priv(Config, nodes), + [] = [RC || {N,S} <- Nodes, + RC <- [rpc:call(N, ?MODULE, connect, [{S,Config}])], + RC /= ok]. + +%% stop/1 +%% +%% Stop the slave nodes. + +stop(Name) + when is_atom(Name) -> + {ok, _Node} = ct_slave:stop(Name), + ok; + +stop(_Config) -> + [] = [RC || {N,_} <- ?NODES, + RC <- [stop(N)], + RC /= ok]. + +%% =========================================================================== +%% traffic testcases + +%% send_local/1 +%% +%% Send a request from the first client node, using a the local +%% transport. + +send_local(Config) -> + #diameter_base_STA{'Result-Code' = ?SUCCESS} + = send(Config, local, str(?LOGOUT)). + +%% send_remote/1 +%% +%% Send a request from the first client node, using a transport on the +%% another node. + +send_remote(Config) -> + #diameter_base_STA{'Result-Code' = ?SUCCESS} + = send(Config, remote, str(?LOGOUT)). + +%% send_timeout/1 +%% +%% Send a request that the server discards. + +send_timeout(Config) -> + {error, timeout} = send(Config, remote, str(?TIMEOUT)). + +%% send_failover/1 +%% +%% Send a request that causes the server to remote transports down. + +send_failover(Config) -> + #'diameter_base_answer-message'{'Result-Code' = ?BUSY} + = send(Config, remote, str(?MOVED)). + +%% =========================================================================== + +str(Cause) -> + #diameter_base_STR{'Destination-Realm' = ?REALM, + 'Auth-Application-Id' = ?DICT:id(), + 'Termination-Cause' = Cause}. + +%% send/2 + +send(Config, Where, Req) -> + [_, {Node, _} | _] = ?util:read_priv(Config, nodes) , + rpc:call(Node, ?MODULE, call, [{Where, Req}]). 
+ +%% call/1 + +call({Where, Req}) -> + diameter:call(?CLIENT, ?DICT, Req, [{extra, [{Where, sname()}]}]). + +%% sname/0 + +sname() -> + ?A(hd(string:tokens(?L(node()), "@"))). + +%% =========================================================================== +%% diameter callbacks + +%% peer_up/3 + +peer_up(_SvcName, _Peer, State) -> + State. + +%% peer_down/3 + +peer_down(_SvcName, _Peer, State) -> + State. + +%% pick_peer/4 + +pick_peer([LP], [_, _], ?CLIENT, _State, {local, client0}) -> + {ok, LP}; + +pick_peer([_], [RP | _], ?CLIENT, _State, {remote, client0}) -> + {ok, RP}; + +pick_peer([LP], [], ?CLIENT, _State, {remote, client0}) -> + {ok, LP}. + +%% prepare_request/4 + +prepare_request(Pkt, ?CLIENT, {_Ref, Caps}, {_, client0}) -> + #diameter_packet{msg = Req} + = Pkt, + #diameter_caps{origin_host = {OH, _}, + origin_realm = {OR, _}} + = Caps, + {send, Req#diameter_base_STR{'Origin-Host' = OH, + 'Origin-Realm' = OR, + 'Session-Id' = diameter:session_id(OH)}}. + +prepare_retransmit(Pkt, ?CLIENT, _, {_, client0}) -> + #diameter_packet{msg = #diameter_base_STR{'Termination-Cause' = ?MOVED}} + = Pkt, %% assert + {send, Pkt}. + +%% handle_answer/5 + +handle_answer(Pkt, _Req, ?CLIENT, _Peer, {_, client0}) -> + #diameter_packet{msg = Rec, errors = []} = Pkt, + Rec. + +%% handle_error/5 + +handle_error(Reason, _Req, ?CLIENT, _Peer, {_, client0}) -> + {error, Reason}. + +%% handle_request/3 + +handle_request(Pkt, ?SERVER, Peer) -> + server = sname(), %% assert + #diameter_packet{msg = Req} + = Pkt, + request(Req, Peer). + +request(#diameter_base_STR{'Termination-Cause' = ?TIMEOUT}, _) -> + discard; + +request(#diameter_base_STR{'Termination-Cause' = ?MOVED}, Peer) -> + {TPid, #diameter_caps{origin_state_id = {_, [N]}}} = Peer, + if N == 0 -> %% sent from the originating node ... + {protocol_error, ?BUSY}; + true -> %% ... or through a remote node: force failover + exit(TPid, kill), + discard + end; + +request(#diameter_base_STR{'Session-Id' = SId}, {_, Caps}) -> + #diameter_caps{origin_host = {OH, _}, + origin_realm = {OR, _}} + = Caps, + {reply, #diameter_base_STA{'Result-Code' = ?SUCCESS, + 'Session-Id' = SId, + 'Origin-Host' = OH, + 'Origin-Realm' = OR}}. diff --git a/lib/diameter/test/diameter_util.erl b/lib/diameter/test/diameter_util.erl index 5af4ad9ba5..a9872f32e1 100644 --- a/lib/diameter/test/diameter_util.erl +++ b/lib/diameter/test/diameter_util.erl @@ -258,6 +258,9 @@ path(Config, Name) -> lport(M, Ref) -> lport(M, Ref, 1). +lport(M, {Node, Ref}, Tries) -> + rpc:call(Node, ?MODULE, lport, [M, Ref, Tries]); + lport(M, Ref, Tries) -> lp(tmod(M), Ref, Tries). diff --git a/lib/diameter/test/modules.mk b/lib/diameter/test/modules.mk index c4a713fb10..beff588a02 100644 --- a/lib/diameter/test/modules.mk +++ b/lib/diameter/test/modules.mk @@ -31,6 +31,7 @@ MODULES = \ diameter_codec_test \ diameter_compiler_SUITE \ diameter_dict_SUITE \ + diameter_distribution_SUITE \ diameter_dpr_SUITE \ diameter_event_SUITE \ diameter_failover_SUITE \ -- cgit v1.2.3 From d70a02e7415caccd13fad8dda417d0d18a112a83 Mon Sep 17 00:00:00 2001 From: Anders Svensson Date: Sun, 24 Feb 2013 17:39:46 +0100 Subject: More flexible distribution config Allow both share_peers and use_shared_peers to be a list of nodes, or a function that returns a list of nodes. 
--- lib/diameter/doc/src/diameter.xml | 75 +++++++++---- lib/diameter/doc/src/diameter_app.xml | 8 +- lib/diameter/src/base/diameter.erl | 10 +- lib/diameter/src/base/diameter_config.erl | 27 +++-- lib/diameter/src/base/diameter_peer.erl | 8 +- lib/diameter/src/base/diameter_service.erl | 88 +++++++++------ lib/diameter/test/diameter_distribution_SUITE.erl | 126 ++++++++++++---------- 7 files changed, 212 insertions(+), 130 deletions(-) diff --git a/lib/diameter/doc/src/diameter.xml b/lib/diameter/doc/src/diameter.xml index df9e03b2da..8ad4af85a3 100644 --- a/lib/diameter/doc/src/diameter.xml +++ b/lib/diameter/doc/src/diameter.xml @@ -43,7 +43,7 @@ under the License. -%VSN% + diameter.xml @@ -774,8 +774,8 @@ Application-Id AVP's in particular.

| evaluable()}

-Specifies the degree to which multiple transport connections to the -same peer are accepted by the service.

+Specifies the degree to which the service allows multiple transport +connections to the same peer.

If type [node()] then a connection is rejected if another already @@ -831,40 +831,60 @@ outgoing requests.

-{share_peers, boolean()} +{share_peers, boolean() | [node()] | evaluable()}

-Specifies whether or not peer connections on the local Erlang node -are shared with services on visible nodes (as returned by &nodes;). -Peers shared from remote nodes become available in the candidates list -passed as the second argument to &app_pick_peer; callbacks.

+Specifies nodes to which peer connections established on the local +Erlang node are communicated. +Shared peers become available in the remote candidates list passed to +&app_pick_peer; callbacks on remote nodes whose services are +configured to use them: see use_shared_peers below.

-Defaults to false.

+If false then peers are not shared. +If [node()] then peers are shared with the specified list of +nodes. +If evaluable() then peers are shared with the nodes returned +by the specified function, evaluated whenever a peer connection +becomes available or a remote service requests information about local +connections. +The value true is equivalent to fun &nodes;. +The value node() in a node list is ignored, so a collection of +services can all be configured to share with the same list of +nodes.

-

-Peers are only shared with other services of the same name. -Since the value of the &application_opt; alias is the handle -for identifying a peer, both local and remote, as a candidate for an -outgoing request, services that share peers should use the same -aliases for identifying their supported applications.

-
+Defaults to false.

-Services that share peers can do so in order to distribute the -implementation of a Diameter node across multiple Erlang nodes, in -which case the participating services should typically be configured -with identical &capabilities;.

+Peers are only shared with services of the same name for the purpose +of sending outgoing requests. +Since the value of the &application_opt; alias, passed to +&call;, is the handle for identifying a peer as a suitable +candidate, services that share peers must use the same aliases to +identify their supported applications. +They should typically also configure identical &capabilities;, since +by sharing peer connections they are distributing the implementation +of a single Diameter node across multiple Erlang nodes.

-{use_shared_peers, boolean()} +{use_shared_peers, boolean() | [node()] | evaluable()}

-Specifies whether or not the service makes use of peer connections -shared by identically named services on other Erlang nodes.

+Specifies nodes from which communicated peers are made available in +the remote candidates list of &app_pick_peer; callbacks.

+ +

+If false then remote peers are not used. +If [node()] then only peers from the specified list of nodes +are used. +If evaluable() then only peers returned by the specified +function are used, evaluated whenever a remote service communicates +information about an available peer connection. +The value true is equivalent to fun &nodes;. +The value node() in a node list is ignored.

Defaults to false.

@@ -874,6 +894,15 @@ Defaults to false.

A service that does not use shared peers will always pass the empty list as the second argument of &app_pick_peer; callbacks.

+ + +

+Sending a request over a peer connection on a remote node is less +efficient than sending it over a local connection. +It may be preferable to make use of the &service_opt; +restrict_connections and maintain a dedicated connection on +each node from which requests are sent.

+
diff --git a/lib/diameter/doc/src/diameter_app.xml b/lib/diameter/doc/src/diameter_app.xml index d094e1bade..d4fb792787 100644 --- a/lib/diameter/doc/src/diameter_app.xml +++ b/lib/diameter/doc/src/diameter_app.xml @@ -37,7 +37,7 @@ under the License. -%REV% + diameter_app.xml @@ -297,9 +297,9 @@ The return values false and {false, State} (that is,

-RemoteCandidates is the empty list if the service has been -configured with the (default) &mod_service_opt; -{use_shared_peers, false}.

+The &mod_service_opt; use_shared_peers determines whether or +not a service uses peers shared from other nodes. +If not then RemoteCandidates is the empty list.

diff --git a/lib/diameter/src/base/diameter.erl b/lib/diameter/src/base/diameter.erl index 1a96cecc83..189f2b01b9 100644 --- a/lib/diameter/src/base/diameter.erl +++ b/lib/diameter/src/base/diameter.erl @@ -45,6 +45,7 @@ -export_type([evaluable/0, restriction/0, + remotes/0, sequence/0, app_alias/0, service_name/0, @@ -292,6 +293,11 @@ call(SvcName, App, Message) -> | [node()] | evaluable(). +-type remotes() + :: boolean() + | [node()] + | evaluable(). + %% Options passed to start_service/2 -type service_opt() @@ -299,8 +305,8 @@ call(SvcName, App, Message) -> | {application, [application_opt()]} | {restrict_connections, restriction()} | {sequence, sequence() | evaluable()} - | {share_peers, boolean()} - | {use_shared_peers, boolean()}. + | {share_peers, remotes()} + | {use_shared_peers, remotes()}. -type application_opt() :: {alias, app_alias()} diff --git a/lib/diameter/src/base/diameter_config.erl b/lib/diameter/src/base/diameter_config.erl index a638849b08..3a2e0d2140 100644 --- a/lib/diameter/src/base/diameter_config.erl +++ b/lib/diameter/src/base/diameter_config.erl @@ -573,7 +573,6 @@ make_config(SvcName, Opts) -> {false, monitor}, {?NOMASK, sequence}, {nodes, restrict_connections}]), - %% share_peers and use_shared_peers are currently undocumented. #service{name = SvcName, rec = #diameter_service{applications = Apps, @@ -592,19 +591,27 @@ opt(K, true = B) K == use_shared_peers -> B; -opt(monitor, P) - when is_pid(P) -> - P; - opt(restrict_connections, T) when T == node; - T == nodes; - T == []; - is_atom(hd(T)) -> + T == nodes -> + T; + +opt(K, T) + when (K == share_peers + orelse K == use_shared_peers + orelse K == restrict_connections), ([] == T + orelse is_atom(hd(T))) -> T; -opt(restrict_connections = K, F) -> - try diameter_lib:eval(F) of %% no guarantee that it won't fail later +opt(monitor, P) + when is_pid(P) -> + P; + +opt(K, F) + when K == restrict_connections; + K == share_peers; + K == use_shared_peers -> + try diameter_lib:eval(F) of %% but no guarantee that it won't fail later Nodes when is_list(Nodes) -> F; V -> diff --git a/lib/diameter/src/base/diameter_peer.erl b/lib/diameter/src/base/diameter_peer.erl index 130bedda84..dfc76eb76e 100644 --- a/lib/diameter/src/base/diameter_peer.erl +++ b/lib/diameter/src/base/diameter_peer.erl @@ -31,7 +31,7 @@ send/2, close/1, abort/1, - notify/2]). + notify/3]). %% Server start. -export([start_link/0]). @@ -63,11 +63,11 @@ -define(DEFAULT_TTMO, infinity). %%% --------------------------------------------------------------------------- -%%% # notify/2 +%%% # notify/3 %%% --------------------------------------------------------------------------- -notify(SvcName, T) -> - rpc:abcast(nodes(), ?SERVER, {notify, SvcName, T}). +notify(Nodes, SvcName, T) -> + rpc:abcast(Nodes, ?SERVER, {notify, SvcName, T}). %%% --------------------------------------------------------------------------- %%% # start/1 diff --git a/lib/diameter/src/base/diameter_service.erl b/lib/diameter/src/base/diameter_service.erl index edebd2fc77..255a3a44fd 100644 --- a/lib/diameter/src/base/diameter_service.erl +++ b/lib/diameter/src/base/diameter_service.erl @@ -125,9 +125,9 @@ monitor = false :: false | pid(), %% process to die with options :: [{sequence, diameter:sequence()} %% sequence mask - | {restrict_connections, diameter:restriction()} - | {share_peers, boolean()} %% broadcast peers to remote nodes? - | {use_shared_peers, boolean()}]}).%% use broadcasted peers? 
+ | {share_peers, diameter:remotes()} %% broadcast to + | {use_shared_peers, diameter:remotes()} %% use from + | {restrict_connections, diameter:restriction()}]}). %% shared_peers reflects the peers broadcast from remote nodes. %% Record representing an RFC 3539 watchdog process implemented by @@ -681,11 +681,9 @@ mref(false = No) -> mref(P) -> erlang:monitor(process, P). -init_shared(#state{options = [_, _, {_, true} | _], +init_shared(#state{options = [_, _, {_,T} | _], service_name = Svc}) -> - diameter_peer:notify(Svc, {service, self()}); -init_shared(#state{options = [_, _, {_, false} | _]}) -> - ok. + notify(T, Svc, {service, self()}). init_mod(#diameter_app{alias = Alias, init_state = S}) -> @@ -698,6 +696,37 @@ get_value(Key, Vs) -> {_, V} = lists:keyfind(Key, 1, Vs), V. +notify(Share, SvcName, T) -> + Nodes = remotes(Share), + [] /= Nodes andalso diameter_peer:notify(Nodes, SvcName, T). +%% Test for the empty list for upgrade reasons: there's no +%% diameter_peer:notify/3 in old code so no call means no load order +%% requirement. + +remotes(false) -> + []; + +remotes(true) -> + nodes(); + +remotes(Nodes) + when is_atom(hd(Nodes)); + Nodes == [] -> + Nodes; + +remotes(F) -> + try diameter_lib:eval(F) of + L when is_list(L) -> + L; + T -> + diameter_lib:error_report({invalid_return, T}, F), + [] + catch + E:R -> + diameter_lib:error_report({failure, {E, R, ?STACK}}, F), + [] + end. + %% --------------------------------------------------------------------------- %% # start/3 %% --------------------------------------------------------------------------- @@ -1255,9 +1284,9 @@ send_event(#diameter_event{service = SvcName} = E) -> %% # share_peer/5 %% --------------------------------------------------------------------------- -share_peer(up, Caps, Apps, TPid, #state{options = [_, {_, true} | _], - service_name = Svc}) -> - diameter_peer:notify(Svc, {peer, TPid, [A || {_,A} <- Apps], Caps}); +share_peer(up, Caps, Apps, TPid, #state{options = [_, {_,T} | _], + service_name = Svc}) -> + notify(T, Svc, {peer, TPid, [A || {_,A} <- Apps], Caps}); share_peer(_, _, _, _, _) -> ok. @@ -1266,36 +1295,34 @@ share_peer(_, _, _, _, _) -> %% # share_peers/2 %% --------------------------------------------------------------------------- -share_peers(Pid, #state{options = [_, {_, true} | _], - local_peers = PDict}) -> - ?Dict:fold(fun(A,Ps,ok) -> sp(Pid, A, Ps), ok end, ok, PDict); - -share_peers(_, _) -> - ok. +share_peers(Pid, #state{options = [_, {_,T} | _], local_peers = PDict}) -> + is_remote(Pid, T) + andalso ?Dict:fold(fun(A,Ps,ok) -> sp(Pid, A, Ps), ok end, ok, PDict). sp(Pid, Alias, Peers) -> lists:foreach(fun({P,C}) -> Pid ! {peer, P, [Alias], C} end, Peers). +is_remote(Pid, T) -> + Node = node(Pid), + Node /= node() andalso lists:member(Node, remotes(T)). + %% --------------------------------------------------------------------------- %% # remote_peer_up/4 %% --------------------------------------------------------------------------- -remote_peer_up(Pid, Aliases, Caps, #state{options = [_, _, {_, true} | _], - service = Svc, - shared_peers = PDict}) -> +remote_peer_up(Pid, Aliases, Caps, #state{options = [_, _, {_,T} | _]} = S) -> + is_remote(Pid, T) + andalso rpu(Pid, Aliases, Caps, S). 
+ +rpu(Pid, Aliases, Caps, #state{service = Svc, shared_peers = PDict}) -> #diameter_service{applications = Apps} = Svc, Key = #diameter_app.alias, - rpu(Pid, Caps, PDict, lists:filter(fun(A) -> - lists:keymember(A, Key, Apps) - end, - Aliases)); - -remote_peer_up(_, _, _, #state{options = [_, _, {_, false} | _]}) -> - ok. + F = fun(A) -> lists:keymember(A, Key, Apps) end, + rpu(Pid, lists:filter(F, Aliases), Caps, PDict); -rpu(_, _, PDict, []) -> - PDict; -rpu(Pid, Caps, PDict, Aliases) -> +rpu(_, [] = No, _, _) -> + No; +rpu(Pid, Aliases, Caps, PDict) -> erlang:monitor(process, Pid), T = {Pid, Caps}, lists:foreach(fun(A) -> ?Dict:append(A, T, PDict) end, Aliases). @@ -1304,8 +1331,7 @@ rpu(Pid, Caps, PDict, Aliases) -> %% # remote_peer_down/2 %% --------------------------------------------------------------------------- -remote_peer_down(Pid, #state{options = [_, _, {_, true} | _], - shared_peers = PDict}) -> +remote_peer_down(Pid, #state{shared_peers = PDict}) -> lists:foreach(fun(A) -> rpd(Pid, A, PDict) end, ?Dict:fetch_keys(PDict)). rpd(Pid, Alias, PDict) -> diff --git a/lib/diameter/test/diameter_distribution_SUITE.erl b/lib/diameter/test/diameter_distribution_SUITE.erl index 264def1e98..08b0870730 100644 --- a/lib/diameter/test/diameter_distribution_SUITE.erl +++ b/lib/diameter/test/diameter_distribution_SUITE.erl @@ -28,8 +28,9 @@ all/0]). %% testcases --export([start/1, +-export([enslave/1, ping/1, + start/1, connect/1, send_local/1, send_remote/1, @@ -71,8 +72,8 @@ {'Product-Name', "OTP/diameter"}, {'Auth-Application-Id', [?DICT:id()]}, {'Origin-State-Id', origin()}, - {share_peers, true}, - {use_shared_peers, true}, + {share_peers, peers()}, + {use_shared_peers, peers()}, {restrict_connections, false}, {sequence, fun sequence/0}, {application, [{dictionary, ?DICT}, @@ -102,8 +103,9 @@ suite() -> [{timetrap, {seconds, 60}}]. all() -> - [start, + [enslave, ping, + start, connect, send_local, send_remote, @@ -114,10 +116,49 @@ all() -> %% =========================================================================== %% start/stop testcases +%% enslave/1 +%% +%% Start four slave nodes, one to implement a Diameter server, +%% two three to implement a client. + +enslave(Config) -> + Here = filename:dirname(code:which(?MODULE)), + Ebin = filename:join([Here, "..", "ebin"]), + Dirs = [Here, Ebin], + Nodes = [{N,S} || {M,S} <- ?NODES, N <- [slave(M, Dirs)]], + ?util:write_priv(Config, nodes, [{N,S} || {{N,ok},S} <- Nodes]), + [] = [{T,S} || {{_,E} = T, S} <- Nodes, E /= ok]. + +slave(Name, Dirs) -> + add_pathsa(Dirs, ct_slave:start(Name)). + +add_pathsa(Dirs, {ok, Node}) -> + {Node, rpc:call(Node, code, add_pathsa, [Dirs])}; +add_pathsa(_, No) -> + {No, error}. + +%% ping/1 +%% +%% Ensure the client nodes are connected since the sharing of +%% transports is only between connected nodes. + +ping({?SERVER, _Nodes}) -> + []; + +ping({?CLIENT, Nodes}) -> + [N || {N,_} <- Nodes, + node() /= N, + pang <- [net_adm:ping(N)]]; + +ping(Config) -> + Nodes = ?util:read_priv(Config, nodes), + [] = [{N,RC} || {N,S} <- Nodes, + RC <- [rpc:call(N, ?MODULE, ping, [{S, Nodes}])], + RC /= []]. + %% start/1 %% -%% Start three slave nodes, one that implement a Diameter server and -%% two that implement a client. +%% Start diameter services. 
start(SvcName) when is_atom(SvcName) -> @@ -125,21 +166,10 @@ start(SvcName) ok = diameter:start_service(SvcName, ?SERVICE((?L(SvcName)))); start(Config) -> - Dir = filename:dirname(code:which(?MODULE)), - Nodes = [{N, Svcname} || {Nodename, Svcname} <- ?NODES, - N <- [slave(Nodename, Dir)]], - [] = [RC || {N,S} <- Nodes, - RC <- [rpc:call(N, ?MODULE, start, [S])], - RC /= ok], - ?util:write_priv(Config, nodes, Nodes). - -slave(Name, Dir) -> - {ok, Node} = ct_slave:start(Name), - ok = rpc:call(Node, - code, - add_pathsa, - [[Dir, filename:join([Dir, "..", "ebin"])]]), - Node. + Nodes = ?util:read_priv(Config, nodes), + [] = [{N,RC} || {N,S} <- Nodes, + RC <- [rpc:call(N, ?MODULE, start, [S])], + RC /= ok]. sequence() -> sequence(sname()). @@ -159,25 +189,13 @@ origin(Client) -> "client" ++ N = ?L(Client), list_to_integer(N). -%% ping/1 -%% -%% Ensure the client nodes are connected since the sharing of -%% transports is only between connected nodes. +peers() -> + peers(sname()). -ping({?SERVER, _Nodes}) -> - ok; - -ping({?CLIENT, Nodes}) -> - {_, []} = {node(), [N || {N,_} <- Nodes, - node() /= N, - pang <- [net_adm:ping(N)]]}, - ok; - -ping(Config) -> - Nodes = ?util:read_priv(Config, nodes), - [] = [RC || {N,S} <- Nodes, - RC <- [rpc:call(N, ?MODULE, ping, [{S, Nodes}])], - RC /= ok]. +peers(server) -> true; +peers(client0) -> [node() | nodes()]; +peers(client1) -> fun erlang:nodes/0; +peers(client2) -> nodes(). %% connect/1 %% @@ -194,23 +212,17 @@ connect({?CLIENT, Config}) -> connect(Config) -> Nodes = ?util:read_priv(Config, nodes), - [] = [RC || {N,S} <- Nodes, - RC <- [rpc:call(N, ?MODULE, connect, [{S,Config}])], - RC /= ok]. + [] = [{N,RC} || {N,S} <- Nodes, + RC <- [rpc:call(N, ?MODULE, connect, [{S,Config}])], + RC /= ok]. %% stop/1 %% %% Stop the slave nodes. -stop(Name) - when is_atom(Name) -> - {ok, _Node} = ct_slave:stop(Name), - ok; - stop(_Config) -> - [] = [RC || {N,_} <- ?NODES, - RC <- [stop(N)], - RC /= ok]. + [] = [{N,E} || {N,_} <- ?NODES, + {error, _, _} = E <- [ct_slave:stop(N)]]. %% =========================================================================== %% traffic testcases @@ -336,12 +348,7 @@ request(#diameter_base_STR{'Termination-Cause' = ?TIMEOUT}, _) -> request(#diameter_base_STR{'Termination-Cause' = ?MOVED}, Peer) -> {TPid, #diameter_caps{origin_state_id = {_, [N]}}} = Peer, - if N == 0 -> %% sent from the originating node ... - {protocol_error, ?BUSY}; - true -> %% ... or through a remote node: force failover - exit(TPid, kill), - discard - end; + fail(N, TPid); request(#diameter_base_STR{'Session-Id' = SId}, {_, Caps}) -> #diameter_caps{origin_host = {OH, _}, @@ -351,3 +358,10 @@ request(#diameter_base_STR{'Session-Id' = SId}, {_, Caps}) -> 'Session-Id' = SId, 'Origin-Host' = OH, 'Origin-Realm' = OR}}. + +fail(0, _) -> %% sent from the originating node ... + {protocol_error, ?BUSY}; + +fail(_, TPid) -> %% ... or through a remote node: force failover + exit(TPid, kill), + discard. 
-- cgit v1.2.3 From ecebd37eb5e874e307812373c072f685455a2eee Mon Sep 17 00:00:00 2001 From: Anders Svensson Date: Fri, 8 Mar 2013 18:30:15 +0100 Subject: Specify timeouts to ct_slave:start/2 --- lib/diameter/test/diameter_distribution_SUITE.erl | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/lib/diameter/test/diameter_distribution_SUITE.erl b/lib/diameter/test/diameter_distribution_SUITE.erl index 08b0870730..01d3507b27 100644 --- a/lib/diameter/test/diameter_distribution_SUITE.erl +++ b/lib/diameter/test/diameter_distribution_SUITE.erl @@ -97,6 +97,11 @@ {client1, ?CLIENT}, {client2, ?CLIENT}]). +%% Options to ct_slave:start/2. +-define(TIMEOUTS, [{T, 15000} || T <- [boot_timeout, + init_timeout, + start_timeout]]). + %% =========================================================================== suite() -> @@ -130,7 +135,7 @@ enslave(Config) -> [] = [{T,S} || {{_,E} = T, S} <- Nodes, E /= ok]. slave(Name, Dirs) -> - add_pathsa(Dirs, ct_slave:start(Name)). + add_pathsa(Dirs, ct_slave:start(Name, ?TIMEOUTS)). add_pathsa(Dirs, {ok, Node}) -> {Node, rpc:call(Node, code, add_pathsa, [Dirs])}; -- cgit v1.2.3