Diffstat (limited to 'lib/inets/examples/httpd_load_test')
-rw-r--r--  lib/inets/examples/httpd_load_test/Makefile                      123
-rw-r--r--  lib/inets/examples/httpd_load_test/hdlt.config.skel               20
-rw-r--r--  lib/inets/examples/httpd_load_test/hdlt.erl                       74
-rw-r--r--  lib/inets/examples/httpd_load_test/hdlt.sh.skel                   44
-rw-r--r--  lib/inets/examples/httpd_load_test/hdlt_client.erl               370
-rw-r--r--  lib/inets/examples/httpd_load_test/hdlt_ctrl.erl                1530
-rw-r--r--  lib/inets/examples/httpd_load_test/hdlt_logger.erl               138
-rw-r--r--  lib/inets/examples/httpd_load_test/hdlt_logger.hrl                33
-rw-r--r--  lib/inets/examples/httpd_load_test/hdlt_random_html.erl           59
-rw-r--r--  lib/inets/examples/httpd_load_test/hdlt_server.erl               163
-rw-r--r--  lib/inets/examples/httpd_load_test/hdlt_slave.erl                291
l---------  lib/inets/examples/httpd_load_test/hdlt_ssl_client_cert.pem        1
l---------  lib/inets/examples/httpd_load_test/hdlt_ssl_server_cert.pem        1
-rw-r--r--  lib/inets/examples/httpd_load_test/modules.mk                      44
14 files changed, 2891 insertions, 0 deletions
diff --git a/lib/inets/examples/httpd_load_test/Makefile b/lib/inets/examples/httpd_load_test/Makefile
new file mode 100644
index 0000000000..1cc61ad8ae
--- /dev/null
+++ b/lib/inets/examples/httpd_load_test/Makefile
@@ -0,0 +1,123 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2010. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+
+include $(ERL_TOP)/make/target.mk
+
+EBIN = .
+
+include $(ERL_TOP)/make/$(TARGET)/otp.mk
+
+# ----------------------------------------------------
+# Application version
+# ----------------------------------------------------
+include ../../vsn.mk
+
+VSN=$(INETS_VSN)
+
+
+# ----------------------------------------------------
+# Release directory specification
+# ----------------------------------------------------
+RELSYSDIR = $(RELEASE_PATH)/lib/inets-$(VSN)
+EXAMPLE_RELSYSDIR = $(RELSYSDIR)/examples
+HDLT_RELSYSDIR = $(EXAMPLE_RELSYSDIR)/httpd_load_test
+
+
+# ----------------------------------------------------
+# Target Specs
+# ----------------------------------------------------
+
+include modules.mk
+
+ERL_FILES = $(MODULES:%=%.erl)
+
+SOURCE = $(ERL_FILES) $(INTERNAL_HRL_FILES)
+
+TARGET_FILES = \
+ $(ERL_FILES:%.erl=$(EBIN)/%.$(EMULATOR))
+
+ifeq ($(TYPE),debug)
+ERL_COMPILE_FLAGS += -Ddebug -W
+endif
+
+
+# ----------------------------------------------------
+# FLAGS
+# ----------------------------------------------------
+
+include ../../src/inets_app/inets.mk
+
+ERL_COMPILE_FLAGS += \
+ $(INETS_FLAGS) \
+ $(INETS_ERL_COMPILE_FLAGS) \
+ -I../../include
+
+
+# ----------------------------------------------------
+# Special Build Targets
+# ----------------------------------------------------
+
+
+# ----------------------------------------------------
+# Targets
+# ----------------------------------------------------
+debug:
+ @${MAKE} TYPE=debug opt
+
+opt: $(TARGET_FILES)
+
+clean:
+ rm -f $(TARGET_FILES)
+ rm -f errs core *~
+
+docs:
+
+
+# ----------------------------------------------------
+# Release Target
+# ----------------------------------------------------
+include $(ERL_TOP)/make/otp_release_targets.mk
+
+
+release_spec: opt
+ $(INSTALL_DIR) $(EXAMPLE_RELSYSDIR)
+ $(INSTALL_DIR) $(HDLT_RELSYSDIR)
+ $(INSTALL_DATA) $(SCRIPT_SKELETONS) $(HDLT_RELSYSDIR)
+ $(INSTALL_DATA) $(CONF_SKELETONS) $(HDLT_RELSYSDIR)
+ $(INSTALL_DATA) $(CERT_FILES) $(HDLT_RELSYSDIR)
+ $(INSTALL_DATA) $(TARGET_FILES) $(HDLT_RELSYSDIR)
+ $(INSTALL_DATA) $(ERL_FILES) $(HDLT_RELSYSDIR)
+
+
+release_docs_spec:
+
+
+# ----------------------------------------------------
+# Include dependencies
+# ----------------------------------------------------
+
+hdlt.$(EMULATOR): hdlt.erl
+
+hdlt_client.$(EMULATOR): hdlt_client.erl
+
+hdlt_ctrl.$(EMULATOR): hdlt_ctrl.erl
+
+hdlt_logger.$(EMULATOR): hdlt_logger.erl
+
+hdlt_random_html.$(EMULATOR): hdlt_random_html.erl
+
+hdlt_server.$(EMULATOR): hdlt_server.erl
+
+hdlt_slave.$(EMULATOR): hdlt_slave.erl
+
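The Makefile above assumes an OTP source-tree build (ERL_TOP, otp.mk and the release targets). If the example is copied out of the source tree, the modules can also be compiled in place with erlc, for instance:

    cd lib/inets/examples/httpd_load_test
    erlc -W *.erl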
diff --git a/lib/inets/examples/httpd_load_test/hdlt.config.skel b/lib/inets/examples/httpd_load_test/hdlt.config.skel
new file mode 100644
index 0000000000..640867ebac
--- /dev/null
+++ b/lib/inets/examples/httpd_load_test/hdlt.config.skel
@@ -0,0 +1,20 @@
+%% Debug: silence | info | log | debug
+{debug, [{ctrl, info}, {proxy, silence}, {slave, silence}, {client, silence}]}.
+{server, {"/usr/local/bin", "fooserver"}}.
+%% {port, 8888}. % integer() > 0
+{server_dir, "/tmp/hdlt"}. % Absolute path
+{work_dir, "/tmp/hdlt"}. % Absolute path
+{clients,
+ [
+ {"/opt/local/bin", "foo"},
+ {"/usr/local/bin", "bar"}
+ ]
+}.
+%% {send_rate, 80}. % Max number of outstanding requests, integer() > 0
+%% {test_time, 120}. % Number of seconds,
+%% {max_nof_schedulers, 8}. % integer() >= 0
+%% {work_simulator, 10000}. % integer() > 0
+%% {data_size, {100, 500, 2}}. % {integer() > 0, integer() > 0, integer() > 0}
+%% {socket_type, ip_comm}. % ip_comm | ssl | essl | ossl
+%% {server_cert_file, "hdlt_ssl_server_cert.pem"}.
+%% {client_cert_file, "hdlt_ssl_client_cert.pem"}.
\ No newline at end of file
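For reference, a filled-in hdlt.config could look as follows; the host names and erl paths are only placeholders, and of the options only server and clients are mandatory (the rest fall back to the defaults printed by hdlt:help()):

    {debug, [{ctrl, info}, {proxy, silence}, {slave, silence}, {client, silence}]}.
    {server, {"/usr/local/bin", "serverhost"}}.
    {port, 8888}.
    {server_dir, "/tmp/hdlt"}.
    {work_dir, "/tmp/hdlt"}.
    {clients, [{"/usr/local/bin", "clienthost1"},
               {"/usr/local/bin", "clienthost2"}]}.
    {send_rate, 80}.
    {test_time, 120}.
    {max_nof_schedulers, 8}.
    {socket_type, ip_comm}.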
diff --git a/lib/inets/examples/httpd_load_test/hdlt.erl b/lib/inets/examples/httpd_load_test/hdlt.erl
new file mode 100644
index 0000000000..18d8c34ccf
--- /dev/null
+++ b/lib/inets/examples/httpd_load_test/hdlt.erl
@@ -0,0 +1,74 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%
+%%----------------------------------------------------------------------
+%% Purpose: Main API module for the httpd load test utility
+%%----------------------------------------------------------------------
+
+-module(hdlt).
+
+
+%%-----------------------------------------------------------------
+%% Public interface
+%%-----------------------------------------------------------------
+
+-export([start/0, start/1, stop/0, help/0]).
+
+
+%%-----------------------------------------------------------------
+%% Start the HDLT utility
+%%-----------------------------------------------------------------
+
+start() ->
+ ConfigFile = "hdlt.config",
+ case file:consult(ConfigFile) of
+ {ok, Config} when is_list(Config) ->
+ start(Config);
+ Error ->
+ Error
+ end.
+
+start(Config) ->
+ Flag = process_flag(trap_exit, true),
+ Result =
+ case hdlt_ctrl:start(Config) of
+ {ok, Pid} ->
+ receive
+ {'EXIT', Pid, normal} ->
+ ok;
+ {'EXIT', Pid, Reason} ->
+ io:format("HDLT failed: "
+ "~n ~p"
+ "~n", [Reason]),
+ {error, Reason}
+ end;
+ Error ->
+ Error
+ end,
+ process_flag(trap_exit, Flag),
+ Result.
+
+
+
+stop() ->
+ hdlt_ctrl:stop().
+
+
+help() ->
+ hdlt_ctrl:help().
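The whole test is thus driven from one call on the controller node. A minimal interactive session, assuming the compiled hdlt modules are in the code path and a hdlt.config file exists in the current working directory, could look like:

    1> hdlt:help().    %% print the available options and their defaults
    2> hdlt:start().   %% read ./hdlt.config with file:consult/1 and run the test

start/0 (and start/1, which takes the configuration as a term list directly) returns when the controller process exits.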
diff --git a/lib/inets/examples/httpd_load_test/hdlt.sh.skel b/lib/inets/examples/httpd_load_test/hdlt.sh.skel
new file mode 100644
index 0000000000..a250bad9c5
--- /dev/null
+++ b/lib/inets/examples/httpd_load_test/hdlt.sh.skel
@@ -0,0 +1,44 @@
+#!/bin/sh
+
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2010. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+
+# Skeleton for a script intended to run the hdlt(N)
+# performance test.
+#
+# Depending on the configuration, this test can be used for either
+# SMP (number of schedulers) or SocketType performance measurements.
+#
+
+ERL_HOME=<path to otp top dir>
+INETS_HOME=$ERL_HOME/lib/erlang/lib/<inets dir>
+HDLT_HOME=$INETS_HOME/examples/httpd_load_test
+PATH=$ERL_HOME/bin:$PATH
+
+HDLT="-s hdlt start"
+STOP="-s init stop"
+
+ERL="erl \
+ -noshell \
+ -pa $HDLT_HOME \
+ $HDLT \
+ $STOP"
+
+echo $ERL
+$ERL | tee hdlt.log
+
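Filled in, the skeleton could look like the following; the OTP installation root and the inets version directory are hypothetical and depend on the local installation:

    ERL_HOME=/usr/local/otp
    INETS_HOME=$ERL_HOME/lib/erlang/lib/inets-5.3
    HDLT_HOME=$INETS_HOME/examples/httpd_load_test
    PATH=$ERL_HOME/bin:$PATH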
diff --git a/lib/inets/examples/httpd_load_test/hdlt_client.erl b/lib/inets/examples/httpd_load_test/hdlt_client.erl
new file mode 100644
index 0000000000..d65ac5a885
--- /dev/null
+++ b/lib/inets/examples/httpd_load_test/hdlt_client.erl
@@ -0,0 +1,370 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%
+%%----------------------------------------------------------------------
+%% Purpose: The HDLT client module.
+%% This is the traffic generator
+%%----------------------------------------------------------------------
+
+-module(hdlt_client).
+
+-export([
+ start/1,
+ stop/0,
+ start_inets/0,
+ start_service/1,
+ release/0,
+ node_info/0
+ ]).
+
+-export([
+ proxy/1
+ ]).
+
+-include("hdlt_logger.hrl").
+
+-define(CTRL, hdlt_ctrl).
+-define(PROXY, hdlt_proxy).
+
+-record(state,
+ {
+ mode = initial,
+ send_rate,
+ time,
+ stop_time,
+ url,
+ nof_reqs = 0,
+ nof_reps = 0,
+ last_req,
+ sizes,
+ socket_type,
+ cert_file
+ }).
+
+
+
+start(Debug) ->
+ proc_lib:start_link(?MODULE, proxy, [Debug]).
+
+stop() ->
+ (catch erlang:send(?PROXY, stop)),
+ ok.
+
+start_inets() ->
+ ?PROXY ! start_inets.
+
+start_service(Args) ->
+ ?PROXY ! {start_client, Args, self()},
+ receive
+ client_started ->
+ %% ?LOG("client service started"),
+ ok
+ end.
+
+release() ->
+ ?PROXY ! release.
+
+node_info() ->
+ ?PROXY ! {node_info, self()},
+ receive
+ {node_info, NodeInfo} ->
+ NodeInfo
+ end.
+
+
+%% ---------------------------------------------------------------------
+%%
+%% The proxy process
+%%
+
+proxy(Debug) ->
+ process_flag(trap_exit, true),
+ erlang:register(?PROXY, self()),
+ SName = lists:flatten(
+ io_lib:format("HDLT PROXY[~p,~p]", [self(), node()])),
+ ?SET_NAME(SName),
+ ?SET_LEVEL(Debug),
+ ?LOG("starting", []),
+ Ref = await_for_controller(10),
+ CtrlNode = node(Ref),
+ erlang:monitor_node(CtrlNode, true),
+ proc_lib:init_ack({ok, self()}),
+ ?DEBUG("started", []),
+ proxy_loop(Ref, CtrlNode, undefined).
+
+await_for_controller(N) when N > 0 ->
+ case global:whereis_name(hdlt_ctrl) of
+ Pid when is_pid(Pid) ->
+ erlang:monitor(process, Pid);
+ _ ->
+ timer:sleep(1000),
+ await_for_controller(N-1)
+ end;
+await_for_controller(_) ->
+ proc_lib:init_ack({error, controller_not_found, nodes()}),
+ timer:sleep(500),
+ init:stop().
+
+
+proxy_loop(Ref, CtrlNode, Client) ->
+ ?DEBUG("await command", []),
+ receive
+ stop ->
+ ?LOG("stop", []),
+ timer:sleep(1000),
+ halt();
+
+ start_inets ->
+ ?LOG("start the inets service framework", []),
+ %% inets:enable_trace(max, "/tmp/inets-httpc-trace.log", all),
+ case (catch inets:start()) of
+ ok ->
+ ?LOG("framework started", []),
+ proxy_loop(Ref, CtrlNode, Client);
+ Error ->
+ ?LOG("failed starting inets service framework: "
+ "~n Error: ~p", [Error]),
+ timer:sleep(1000),
+ halt()
+ end;
+
+ {start_client, Args, From} ->
+ ?LOG("start client with"
+ "~n Args: ~p", [Args]),
+ Client2 = spawn_link(fun() -> client(Args) end),
+ From ! client_started,
+ proxy_loop(Ref, CtrlNode, Client2);
+
+ release ->
+ ?LOG("release", []),
+ Client ! go,
+ proxy_loop(Ref, CtrlNode, Client);
+
+ {node_info, Pid} ->
+	    ?LOG("received request for node info", []),
+ NodeInfo = get_node_info(),
+ Pid ! {node_info, NodeInfo},
+ proxy_loop(Ref, CtrlNode, Client);
+
+ {'EXIT', Client, normal} ->
+ ?LOG("received normal exit message from client (~p)",
+ [Client]),
+ exit(normal);
+
+ {'EXIT', Client, Reason} ->
+ ?INFO("received exit message from client (~p)"
+ "~n Reason: ~p", [Client, Reason]),
+ %% Unexpected client termination, inform the controller and die
+ global:send(hdlt_ctrl, {client_exit, Client, node(), Reason}),
+ exit({client_exit, Reason});
+
+ {nodedown, CtrlNode} ->
+ ?LOG("received nodedown for controller node - terminate", []),
+ halt();
+
+ {'DOWN', Ref, process, _, _} ->
+ ?INFO("received DOWN message for controller - terminate", []),
+	    %% The controller has terminated, don't care why, time to die
+ halt()
+
+ end.
+
+
+
+%% ---------------------------------------------------------------------
+%%
+%% The client process
+%%
+
+client([SocketType, CertFile, URLBase, Sizes, Time, SendRate, Debug]) ->
+ SName = lists:flatten(
+ io_lib:format("HDLT CLIENT[~p,~p]", [self(), node()])),
+ ?SET_NAME(SName),
+ ?SET_LEVEL(Debug),
+ ?LOG("starting with"
+ "~n SocketType: ~p"
+ "~n Time: ~p"
+ "~n SendRate: ~p", [SocketType, Time, SendRate]),
+ httpc:set_options([{max_pipeline_length, 0}]),
+ if
+ (SocketType =:= ssl) orelse
+ (SocketType =:= ossl) orelse
+ (SocketType =:= essl) ->
+ %% Ensure crypto and ssl started:
+ crypto:start(),
+ ssl:start();
+ true ->
+ ok
+ end,
+ State = #state{mode = idle,
+ url = URLBase,
+ time = Time,
+ send_rate = SendRate,
+ sizes = Sizes,
+ socket_type = SocketType,
+ cert_file = CertFile},
+ ?DEBUG("started", []),
+ client_loop(State).
+
+%% The point is to first start all client nodes and then this
+%% process. Then, when they are all started, the go-ahead message
+%% ('go') is sent to let them loose at the same time.
+client_loop(#state{mode = idle,
+ time = Time,
+ send_rate = SendRate} = State) ->
+ ?DEBUG("[idle] awaiting the go command", []),
+ receive
+ go ->
+ ?LOG("[idle] received go", []),
+ erlang:send_after(Time, self(), stop),
+ NewState = send_requests(State, SendRate),
+ client_loop(NewState#state{mode = generating,
+ nof_reqs = SendRate})
+ end;
+
+%% In this mode the client is generating traffic.
+%% It will continue to do so until the stop message
+%% is received.
+client_loop(#state{mode = generating} = State) ->
+ receive
+ stop ->
+ ?LOG("[generating] received stop", []),
+ StopTime = timestamp(),
+ req_reply(State),
+ client_loop(State#state{mode = stopping, stop_time = StopTime});
+
+ {http, {_, {{_, 200, _}, _, _}}} ->
+ %% ?DEBUG("[generating] received reply - send another request", []),
+ NewState = send_requests(State, 1),
+ client_loop(NewState#state{nof_reps = NewState#state.nof_reps + 1,
+ nof_reqs = NewState#state.nof_reqs + 1});
+
+ {http, {ReqId, {error, Reason}}} ->
+ ?INFO("[generating] request ~p failed: "
+ "~n Reason: ~p"
+ "~n NofReqs: ~p"
+ "~n NofReps: ~p",
+ [ReqId, Reason, State#state.nof_reqs, State#state.nof_reps]),
+ exit({Reason, generating, State#state.nof_reqs, State#state.nof_reps});
+
+ Else ->
+ ?LOG("[generating] received unexpected message: "
+ "~n~p", [Else]),
+ unexpected_data(Else),
+ client_loop(State)
+ end;
+
+%% The client no longer issues any new requests; instead it
+%% waits for the replies to all outstanding requests to
+%% arrive.
+client_loop(#state{mode = stopping,
+ time = Time,
+ last_req = LastReqId} = State) ->
+ receive
+ {http, {LastReqId, {{_, 200, _}, _, _}}} ->
+ ?DEBUG("[stopping] received reply for last request (~p)", [LastReqId]),
+ time_to_complete(State),
+ ok;
+
+ {http, {ReqId, {{_, 200, _}, _, _}}} ->
+ ?DEBUG("[stopping] received reply ~p", [ReqId]),
+ client_loop(State);
+
+ {http, {ReqId, {error, Reason}}} ->
+ ?INFO("[stopping] request ~p failed: "
+ "~n Reason: ~p"
+ "~n NofReqs: ~p"
+ "~n NofReps: ~p",
+ [ReqId, Reason, State#state.nof_reqs, State#state.nof_reps]),
+ exit({Reason, stopping, State#state.nof_reqs, State#state.nof_reps});
+
+ Else ->
+ ?LOG("[stopping] received unexpected message: "
+ "~n~p", [Else]),
+ unexpected_data(Else),
+ client_loop(State)
+
+ after Time ->
+ ?INFO("timeout when"
+ "~n Number of requests: ~p"
+ "~n Number of replies: ~p",
+ [State#state.nof_reqs, State#state.nof_reps]),
+ exit({timeout, State#state.nof_reqs, State#state.nof_reps})
+ end.
+
+req_reply(#state{nof_reqs = NofReqs, nof_reps = NofReps}) ->
+ load_data({req_reply, node(), NofReqs, NofReps}).
+
+time_to_complete(#state{stop_time = StopTime}) ->
+ StoppedTime = os:timestamp(),
+ load_data({time_to_complete, node(), StopTime, StoppedTime}).
+
+load_data(Data) ->
+ global:send(?CTRL, {load_data, Data}).
+
+unexpected_data(Else) ->
+ global:send(?CTRL, {unexpected_data, Else}).
+
+
+send_requests(#state{sizes = Sizes} = State, N) ->
+ send_requests(State, N, Sizes).
+
+send_requests(State, 0, Sizes) ->
+ State#state{sizes = Sizes};
+send_requests(#state{socket_type = SocketType,
+ cert_file = CertFile} = State, N, [Sz | Sizes]) ->
+ URL = lists:flatten(io_lib:format("~s~w", [State#state.url, Sz])),
+ Method = get,
+ Request = {URL, []},
+ HTTPOptions =
+ case SocketType of
+ ip_comm ->
+ [];
+ _ ->
+ SslOpts = [{verify, 0},
+ {certfile, CertFile},
+ {keyfile, CertFile}],
+ case SocketType of
+ ssl ->
+ [{ssl, SslOpts}];
+ ossl ->
+ [{ssl, {ossl, SslOpts}}];
+ essl ->
+ [{ssl, {essl, SslOpts}}]
+ end
+ end,
+ Options = [{sync, false}],
+ {ok, Ref} = httpc:request(Method, Request, HTTPOptions, Options),
+ send_requests(State#state{last_req = Ref}, N-1, lists:append(Sizes, [Sz])).
+
+
+timestamp() ->
+ os:timestamp().
+
+
+get_node_info() ->
+ [{cpu_topology, erlang:system_info(cpu_topology)},
+ {heap_type, erlang:system_info(heap_type)},
+ {nof_schedulers, erlang:system_info(schedulers)},
+ {otp_release, erlang:system_info(otp_release)},
+ {version, erlang:system_info(version)},
+ {system_version, erlang:system_info(system_version)},
+ {system_architecture, erlang:system_info(system_architecture)}].
+
+
diff --git a/lib/inets/examples/httpd_load_test/hdlt_ctrl.erl b/lib/inets/examples/httpd_load_test/hdlt_ctrl.erl
new file mode 100644
index 0000000000..950d2632f7
--- /dev/null
+++ b/lib/inets/examples/httpd_load_test/hdlt_ctrl.erl
@@ -0,0 +1,1530 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%
+%%----------------------------------------------------------------------
+%% Purpose: The httpd load test (hdlt) controller/collector module,
+%% This module contains all the code of the httpd load test
+%% controller/collector. It sets up the test, starts all
+%% server and client nodes and applications and finally
+%% collects test data.
+%%----------------------------------------------------------------------
+
+-module(hdlt_ctrl).
+
+-export([start/1, stop/0, help/0]).
+
+-export([init/1, proxy/7]).
+
+-include_lib("kernel/include/file.hrl").
+-include("hdlt_logger.hrl").
+
+-define(DEFAULT_SENDRATE, 89).
+-define(DEFAULT_TEST_TIME, 120). % 2 minutes
+-define(DEFAULT_PORT, 8889).
+-define(TIMEOUT, 60000).
+-define(DEFAULT_MAX_NOF_SCHEDULERS, 8).
+-define(DEFAULT_SERVER_DIR, "/tmp/hdlt").
+-define(DEFAULT_WORK_DIR, "/tmp/hdlt").
+-define(SSH_PORT, 22).
+-define(DEFAULT_SOCKET_TYPE, ip_comm).
+-define(DEFAULT_SERVER_CERT, "hdlt_ssl_server_cert.pem").
+-define(DEFAULT_CLIENT_CERT, "hdlt_ssl_client_cert.pem").
+-define(SSH_CONNECT_TIMEOUT, 5000).
+-define(NODE_START_TIMEOUT, 5000).
+-define(LOCAL_PROXY_START_TIMEOUT, ?NODE_START_TIMEOUT * 4).
+-define(DEFAULT_DEBUGS,
+ [{ctrl, info}, {slave, silence}, {proxy, silence}, {client, silence}]).
+-define(DEFAULT_WORK_SIM, 10000).
+-define(DEFAULT_DATA_SIZE_START, 500).
+-define(DEFAULT_DATA_SIZE_END, 1500).
+-define(DEFAULT_DATA_SIZE_INCR, 1).
+-define(DEFAULT_DATA_SIZE, {?DEFAULT_DATA_SIZE_START,
+ ?DEFAULT_DATA_SIZE_END,
+ ?DEFAULT_DATA_SIZE_INCR}).
+
+
+%% hdlt = httpd load test
+
+-define(COLLECTOR, hdlt_ctrl).
+-define(RESULTS_TAB, hdlt_results).
+
+-define(CLIENT_MOD, hdlt_client).
+-define(CLIENT_NODE_NAME, ?CLIENT_MOD).
+
+-define(SERVER_MOD, hdlt_server).
+-define(SERVER_NODE_NAME, ?SERVER_MOD).
+
+-define(LOGGER, hdlt_logger).
+
+
+-record(state,
+ {
+ url,
+ test_time,
+ send_rate,
+ http_server,
+ http_port,
+ results = ?RESULTS_TAB,
+ nodes,
+ server_root,
+ doc_root,
+ server_dir,
+ work_dir,
+ server_conn,
+ client_conns = [],
+ client_mod = ?CLIENT_MOD,
+ clients,
+ nof_schedulers = 0,
+ max_nof_schedulers,
+ socket_type,
+ server_cert_file,
+ client_cert_file,
+ debugs,
+ client_sz_from,
+ client_sz_to,
+ client_sz_incr
+ }
+ ).
+
+-record(proxy,
+ {
+ mode,
+ mod,
+ connection,
+ channel,
+ host,
+ cmd,
+ node_name,
+ node,
+ ref,
+ erl_path,
+ paths,
+ args
+ }).
+
+-record(connection,
+ {
+ proxy,
+ node,
+ node_name,
+ host
+ }).
+
+
+-record(client, {host, path, version}).
+-record(server, {host, path, version}).
+
+
+start(Config) when is_list(Config) ->
+ proc_lib:start_link(?MODULE, init, [Config]).
+
+stop() ->
+ global:send(?COLLECTOR, stop).
+
+init(Config) ->
+ %% io:format("Config: ~n~p~n", [Config]),
+ case (catch do_init(Config)) of
+ {ok, State} ->
+ proc_lib:init_ack({ok, self()}),
+ loop(State);
+ {error, _Reason} = Error ->
+ proc_lib:init_ack(Error),
+ ok;
+ {'EXIT', Reason} ->
+ proc_lib:init_ack({error, Reason}),
+ ok
+ end.
+
+do_init(Config) ->
+ %% Do not trap exit, but register ourself
+ global:register_name(?COLLECTOR, self()),
+
+ State = #state{},
+ ets:new(State#state.results, [bag, named_table]),
+
+ hdlt_logger:start(),
+ global:sync(),
+
+ %% Maybe enable debug
+ Debugs = get_debugs(Config),
+ ?SET_NAME("HDLT CTRL"),
+ set_debug_level(Debugs),
+
+ ?DEBUG("network info: "
+ "~n Global names: ~p"
+ "~n Nodes: ~p", [global:registered_names(), nodes()]),
+
+ %% Read config
+ ?LOG("read config", []),
+ SendRate = get_send_rate(Config),
+ Clients = get_clients(Config),
+ TestTime = get_test_time(Config),
+ Server = get_server(Config),
+ Port = get_port(Config),
+ ServerDir = get_server_dir(Config),
+ WorkingDir = get_work_dir(Config),
+ MaxNofSchedulers = get_max_nof_schedulers(Config),
+ SocketType = get_socket_type(Config),
+ ServerCertFile = get_server_cert_file(Config),
+ ClientCertFile = get_client_cert_file(Config),
+ WorkSim = get_work_sim(Config),
+ {From, To, Incr} = get_data_size(Config),
+
+ URL = url(Server, Port, SocketType, WorkSim),
+ ServerRoot = filename:join(ServerDir, "server_root"),
+ DocRoot = ServerRoot, %% Not really used in this test
+
+ ?DEBUG("randomize setup", []),
+ randomized_sizes_init(),
+
+ %% Start used applications
+ ?DEBUG("ensure crypto started", []),
+ crypto:start(),
+ ?DEBUG("ensure ssh started", []),
+ ssh:start(),
+
+ State2 = State#state{server_root = ServerRoot,
+ doc_root = DocRoot,
+ server_dir = ServerDir,
+ work_dir = WorkingDir,
+ max_nof_schedulers = MaxNofSchedulers,
+ socket_type = SocketType,
+ server_cert_file = ServerCertFile,
+ client_cert_file = ClientCertFile,
+ http_server = Server,
+ http_port = Port,
+ url = URL,
+ test_time = TestTime,
+ send_rate = SendRate,
+ clients = Clients,
+ debugs = Debugs,
+ client_sz_from = From,
+ client_sz_to = To,
+ client_sz_incr = Incr},
+
+ ?LOG("prepare server host", []),
+ prepare_server_host(State2),
+
+ ?LOG("prepare client hosts", []),
+ State3 = prepare_client_hosts(State2),
+
+ ?LOG("basic init done", []),
+ {ok, State3}.
+
+
+loop(#state{nof_schedulers = N, max_nof_schedulers = M} = State) when N > M ->
+
+ ?INFO("Starting to analyse data", []),
+
+ AnalysedTab = analyse_data(State),
+
+ Files = save_results_to_file(AnalysedTab, State),
+ io:format("~n******************************************************"
+ "~n~nResult(s) saved to: ~n~p~n", [Files]),
+ clean_up(State);
+
+loop(#state{url = URL,
+ test_time = TestTime,
+ send_rate = SendRate,
+ nof_schedulers = NofSchedulers} = State) ->
+
+ {StartH, StartM, StartS} = erlang:time(),
+
+ ?INFO("Performing test with ~p smp-scheduler(s): ~n"
+ " It will take a minimum of: ~p seconds. ~n"
+ " Start time: ~.2.0w:~.2.0w:~.2.0w",
+ [NofSchedulers, round(TestTime/1000), StartH, StartM, StartS]),
+
+ %% Start the server node
+ %% (The local proxy, the node, the remote proxy, and the inets framework)
+ State1 = start_server_node(State),
+ ?DEBUG("nodes after server start: ~p", [nodes() -- [node()]]),
+
+ %% Start the client node(s)
+ %% (The local proxy, the node, the remote proxy, and the inets framework)
+ ?LOG("start client node(s)", []),
+ State2 = start_client_nodes(State1),
+ ?DEBUG("nodes after client(s) start: ~p", [nodes() -- [node()]]),
+
+ ?LOG("start server", []),
+ start_server(State2),
+
+ ?LOG("start clients", []),
+ start_clients(State2, URL, TestTime, SendRate),
+
+ ?LOG("release clients", []),
+ release_clients(State2),
+
+ ?LOG("collect data", []),
+ collect_data(State2),
+
+ ?LOG("stop all nodes", []),
+ State3 = stop_nodes(State2),
+
+ ?INFO("Test with ~p smp-scheduler(s) complete"
+ "~n~n"
+ "****************************************************************"
+ "~n",
+ [NofSchedulers]),
+ loop(State3#state{nof_schedulers = NofSchedulers + 1}).
+
+
+prepare_server_host(#state{server_root = ServerRoot,
+ http_server = #server{host = Host},
+ socket_type = SocketType,
+ server_cert_file = CertFile}) ->
+ ?INFO("prepare server host ~s", [Host]),
+ Opts = [{user_interaction, false},
+ {silently_accept_hosts, true},
+ {timeout, 2*?SSH_CONNECT_TIMEOUT},
+ {connect_timeout, ?SSH_CONNECT_TIMEOUT}],
+ case ssh_sftp:start_channel(Host, Opts) of
+ {ok, Sftp, ConnectionRef} ->
+	    ?DEBUG("sftp connection established - now transfer server content",
+ []),
+ create_server_content(Sftp, ServerRoot, SocketType, CertFile),
+	    ?DEBUG("server content transferred - now close ssh connection ",
+ []),
+ ssh:close(ConnectionRef),
+ ?DEBUG("server preparation complete ", []),
+ ok;
+ Error ->
+ ?INFO("FAILED creating sftp channel to server host ~s: "
+ "~n ~p", [Host, Error]),
+ exit({failed_establishing_sftp_connection, Error})
+ end.
+
+create_server_content(Sftp, ServerRoot, SocketType, CertFile) ->
+ %% Create server root
+ ?DEBUG("ensure existence of ~p", [ServerRoot]),
+ ensure_remote_dir_exist(Sftp, ServerRoot),
+
+ %% Create the server ebin dir (for the starter module)
+ EBIN = filename:join(ServerRoot, "ebin"),
+ ?DEBUG("make ebin dir: ~p", [EBIN]),
+ maybe_create_remote_dir(Sftp, EBIN),
+
+    %% Create the server log dir
+ LOG = filename:join(ServerRoot, "log"),
+ ?DEBUG("make log dir: ~p", [LOG]),
+ maybe_create_remote_dir(Sftp, LOG),
+
+ LocalServerMod = local_server_module(),
+ ?DEBUG("copy server stub/proxy module ~s", [LocalServerMod]),
+ RemoteServerMod = remote_server_module(EBIN),
+ {ok, ServerModBin} = file:read_file(LocalServerMod),
+ ok = ssh_sftp:write_file(Sftp, RemoteServerMod, ServerModBin),
+
+ LocalSlaveMod = local_slave_module(),
+ ?DEBUG("copy slave module ~s", [LocalSlaveMod]),
+ RemoteSlaveMod = remote_slave_module(EBIN),
+ {ok, SlaveModBin} = file:read_file(LocalSlaveMod),
+ ok = ssh_sftp:write_file(Sftp, RemoteSlaveMod, SlaveModBin),
+
+ LocalLoggerMod = local_logger_module(),
+ ?DEBUG("copy logger module ~s", [LocalLoggerMod]),
+ RemoteLoggerMod = remote_logger_module(EBIN),
+ {ok, LoggerModBin} = file:read_file(LocalLoggerMod),
+ ok = ssh_sftp:write_file(Sftp, RemoteLoggerMod, LoggerModBin),
+
+    %% Create the inets server cgi-bin (data) dir
+ CGI = filename:join(ServerRoot, "cgi-bin"),
+ ?DEBUG("make cgi dir: ~p", [CGI]),
+ maybe_create_remote_dir(Sftp, CGI),
+
+ LocalRandomMod = local_random_html_module(),
+ ?DEBUG("copy random-html module ~s", [LocalRandomMod]),
+ RemoteRandomMod = remote_random_html_module(EBIN),
+ {ok, RandomModBin} = file:read_file(LocalRandomMod),
+ ok = ssh_sftp:write_file(Sftp, RemoteRandomMod, RandomModBin),
+
+ case SocketType of
+ ip_comm ->
+ ok;
+ _ ->
+ SSLDir = filename:join(ServerRoot, "ssl"),
+	    ?DEBUG("make ssl dir: ~p", [SSLDir]),
+ maybe_create_remote_dir(Sftp, SSLDir),
+ ?DEBUG("copy ssl cert file ~s", [CertFile]),
+ {ok, CertBin} = file:read_file(CertFile),
+ RemoteCertFile = filename:join(SSLDir,
+ filename:basename(CertFile)),
+ ok = ssh_sftp:write_file(Sftp, RemoteCertFile, CertBin),
+ ok
+ end,
+
+ ?DEBUG("done", []),
+ ok.
+
+remote_server_module(Path) ->
+ Mod = server_module(),
+ filename:join(Path, Mod).
+
+local_server_module() ->
+ Mod = server_module(),
+ case code:where_is_file(Mod) of
+ Path when is_list(Path) ->
+ Path;
+ _ ->
+ exit({server_module_not_found, Mod})
+ end.
+
+server_module() ->
+ module(?SERVER_MOD).
+
+
+prepare_client_hosts(#state{work_dir = WorkDir,
+ clients = Clients,
+ socket_type = SocketType,
+ client_cert_file = CertFile} = State) ->
+ Clients2 =
+ prepare_client_hosts(WorkDir, SocketType, CertFile, Clients, []),
+ State#state{clients = Clients2}.
+
+prepare_client_hosts(_WorkDir, _SocketType, _CertFile, [], Acc) ->
+ lists:reverse(Acc);
+prepare_client_hosts(WorkDir, SocketType, CertFile, [Client|Clients], Acc) ->
+ case prepare_client_host(WorkDir, SocketType, CertFile, Client) of
+ ok ->
+ prepare_client_hosts(WorkDir, SocketType, CertFile, Clients,
+ [Client|Acc]);
+ _ ->
+ prepare_client_hosts(WorkDir, SocketType, CertFile, Clients, Acc)
+ end.
+
+prepare_client_host(WorkDir, SocketType, CertFile, #client{host = Host}) ->
+ ?INFO("prepare client host ~s", [Host]),
+ Opts = [{user_interaction, false},
+ {silently_accept_hosts, true},
+ {timeout, 2*?SSH_CONNECT_TIMEOUT},
+ {connect_timeout, ?SSH_CONNECT_TIMEOUT}],
+ case ssh_sftp:start_channel(Host, Opts) of
+ {ok, Sftp, ConnectionRef} ->
+	    ?DEBUG("sftp connection established - now transfer client content",
+ []),
+ create_client_content(Sftp, WorkDir, SocketType, CertFile),
+	    ?DEBUG("client content transferred - now close ssh connection ", []),
+ ssh:close(ConnectionRef),
+ ?DEBUG("client preparation complete ", []),
+ ok;
+ Error ->
+ ?INFO("FAILED creating sftp channel to client host ~s: skipping"
+ "~n ~p", [Host, Error]),
+ Error
+ end.
+
+create_client_content(Sftp, WorkDir, SocketType, CertFile) ->
+ %% Create work dir
+ ?DEBUG("ensure existence of ~p", [WorkDir]),
+ ensure_remote_dir_exist(Sftp, WorkDir),
+
+ %% Create the client ebin dir
+ EBIN = filename:join(WorkDir, "ebin"),
+ RemoteClientMod = remote_client_module(EBIN),
+ ?DEBUG("make ebin dir: ~p", [EBIN]),
+ maybe_create_remote_dir(Sftp, EBIN),
+
+ LocalClientMod = local_client_module(),
+ ?DEBUG("copy client stub/proxy module ~s", [LocalClientMod]),
+ {ok, ClientModBin} = file:read_file(LocalClientMod),
+ ok = ssh_sftp:write_file(Sftp, RemoteClientMod, ClientModBin),
+
+ LocalSlaveMod = local_slave_module(),
+ ?DEBUG("copy slave module ~s", [LocalSlaveMod]),
+ RemoteSlaveMod = remote_slave_module(EBIN),
+ {ok, SlaveModBin} = file:read_file(LocalSlaveMod),
+ ok = ssh_sftp:write_file(Sftp, RemoteSlaveMod, SlaveModBin),
+
+ LocalLoggerMod = local_logger_module(),
+ ?DEBUG("copy logger module ~s", [LocalLoggerMod]),
+ RemoteLoggerMod = remote_logger_module(EBIN),
+ {ok, LoggerModBin} = file:read_file(LocalLoggerMod),
+ ok = ssh_sftp:write_file(Sftp, RemoteLoggerMod, LoggerModBin),
+
+ case SocketType of
+ ip_comm ->
+ ok;
+ _ ->
+ %% We should really store the remote path somewhere as
+ %% we use it when starting the client service...
+ SSLDir = filename:join(WorkDir, "ssl"),
+ ?DEBUG("make ssl dir: ~p", [SSLDir]),
+ maybe_create_remote_dir(Sftp, SSLDir),
+ ?DEBUG("copy ssl cert file ~s", [CertFile]),
+ {ok, CertBin} = file:read_file(CertFile),
+ RemoteCertFile = filename:join(SSLDir,
+ filename:basename(CertFile)),
+ ok = ssh_sftp:write_file(Sftp, RemoteCertFile, CertBin),
+ ok
+ end,
+
+ ?DEBUG("done", []),
+ ok.
+
+remote_client_module(Path) ->
+ Mod = client_module(),
+ filename:join(Path, Mod).
+
+local_client_module() ->
+ Mod = client_module(),
+ case code:where_is_file(Mod) of
+ Path when is_list(Path) ->
+ Path;
+ _ ->
+ exit({client_module_not_found, Mod})
+ end.
+
+client_module() ->
+ module(?CLIENT_MOD).
+
+
+remote_slave_module(Path) ->
+ Mod = slave_module(),
+ filename:join(Path, Mod).
+
+local_slave_module() ->
+ Mod = slave_module(),
+ case code:where_is_file(Mod) of
+ Path when is_list(Path) ->
+ Path;
+ _ ->
+ exit({slave_module_not_found, Mod})
+ end.
+
+slave_module() ->
+ module(hdlt_slave).
+
+
+remote_logger_module(Path) ->
+ Mod = logger_module(),
+ filename:join(Path, Mod).
+
+local_logger_module() ->
+ Mod = logger_module(),
+ case code:where_is_file(Mod) of
+ Path when is_list(Path) ->
+ Path;
+ _ ->
+ exit({logger_module_not_found, Mod})
+ end.
+
+logger_module() ->
+ module(hdlt_logger).
+
+
+remote_random_html_module(Path) ->
+ Mod = random_html_module(),
+ filename:join(Path, Mod).
+
+local_random_html_module() ->
+ Mod = random_html_module(),
+ case code:where_is_file(Mod) of
+ Path when is_list(Path) ->
+ Path;
+ _ ->
+ exit({random_module_not_found, Mod})
+ end.
+
+random_html_module() ->
+ module(hdlt_random_html).
+
+
+module(Mod) ->
+ Ext = string:to_lower(erlang:system_info(machine)),
+ lists:flatten(io_lib:format("~w.~s", [Mod, Ext])).
+
+
+%% -----------------------------------------------------------------------
+%% - For every node created (server and client both) there is both
+%% a local and remote proxy.
+%% - The local proxy is running on the local (controller/collector) node.
+%% - The remote proxy is running on the client or server node(s).
+%% - The local (ctrl) proxy monitors the remote (server/client) proxy.
+%% - The remote (server/client) proxy monitors the local (ctrl) proxy.
+%%
+
+start_client_nodes(#state{clients = Clients,
+ work_dir = WorkDir,
+ debugs = Debugs} = State) ->
+ Connections =
+ [start_client_node(Client, WorkDir, Debugs) || Client <- Clients],
+ State#state{client_conns = Connections}.
+
+start_client_node(#client{path = ErlPath, host = Host}, WorkDir, Debugs) ->
+ ?INFO("start client on host ~p", [Host]),
+ EbinDir = filename:join(WorkDir, "ebin"),
+ start_client_node(Host, ErlPath, [EbinDir], Debugs).
+
+start_client_node(Host, ErlPath, Paths, Debugs) ->
+ start_node(Host, ?CLIENT_NODE_NAME,
+ ErlPath, Paths, [], ?CLIENT_MOD, Debugs).
+
+
+start_server_node(#state{http_server = #server{path = ErlPath, host = Host},
+ server_root = ServerRoot,
+ nof_schedulers = NofScheds,
+ debugs = Debugs} = State) ->
+ ?INFO("start server on host ~p", [Host]),
+ CgiBinDir = filename:join(ServerRoot, "cgi-bin"),
+ EbinDir = filename:join(ServerRoot, "ebin"),
+ Connection =
+ start_server_node(Host, ErlPath, [CgiBinDir, EbinDir],
+ Debugs, NofScheds),
+ State#state{server_conn = Connection}.
+
+start_server_node(Host, ErlPath, Paths, Debugs, NofScheds) ->
+ Args =
+ if
+ NofScheds =:= 0 ->
+ "-smp disable";
+ true ->
+ lists:flatten(io_lib:format("-smp +S ~w", [NofScheds]))
+ end,
+ start_node(Host, ?SERVER_NODE_NAME,
+ ErlPath, Paths, Args, ?SERVER_MOD, Debugs).
+
+
+%% -----------------------------------------------------------------------
+%% - For every node created (server and client both) there is both
+%% a local and remote proxy.
+%% - The local proxy is running on the local (controller/collector) node.
+%% - The remote proxy is running on the client or server node(s).
+%% - The local (ctrl) proxy monitors the remote (server/client) proxy.
+%% - The remote (server/client) proxy monitors the local (ctrl) proxy.
+%%
+
+start_node(Host, NodeName, ErlPath, Paths, Args, Module, Debugs) ->
+ %% Start the (local) proxy
+ ?DEBUG("start_node -> start local proxy and remote node", []),
+ ProxyDebug = proplists:get_value(proxy, Debugs, silence),
+ Proxy = proxy_start(Host, NodeName, ErlPath, Paths, Args, Module,
+ ProxyDebug),
+
+ ?DEBUG("start_node -> local proxy started - now start node", []),
+ SlaveDebug = proplists:get_value(slave, Debugs, silence),
+ Node = proxy_start_node(Proxy, SlaveDebug),
+
+ ?DEBUG("start_node -> sync global", []),
+ global:sync(),
+
+ ?DEBUG("start_node -> start remote proxy", []),
+ proxy_start_remote(Proxy),
+
+ ?DEBUG("start_node -> start (remote) inets framework", []),
+ proxy_start_inets(Proxy),
+
+ ?DEBUG("start_node -> done", []),
+ #connection{proxy = Proxy, node = Node, node_name = NodeName, host = Host}.
+
+
+proxy_start(Host, NodeName, ErlPath, Paths, Args, Module, Debug) ->
+ ?LOG("try starting local proxy for ~p@~s", [NodeName, Host]),
+ ProxyArgs = [Host, NodeName, ErlPath, Paths, Args, Module, Debug],
+ case proc_lib:start_link(?MODULE, proxy,
+ ProxyArgs, ?LOCAL_PROXY_START_TIMEOUT) of
+ {ok, Proxy} ->
+ Proxy;
+ Error ->
+ exit({failed_starting_proxy, Error})
+ end.
+
+proxy_start_node(Proxy, Debug) ->
+ {ok, Node} = proxy_request(Proxy, {start_node, Debug}),
+ Node.
+
+proxy_start_remote(Proxy) ->
+ proxy_request(Proxy, start_remote_proxy).
+
+proxy_start_inets(Proxy) ->
+ proxy_request(Proxy, start_inets).
+
+proxy_start_service(Proxy, Args) ->
+ proxy_request(Proxy, {start_service, Args}).
+
+proxy_release(Proxy) ->
+ proxy_request(Proxy, release).
+
+proxy_stop(Proxy) ->
+ StopResult = proxy_request(Proxy, stop),
+ ?DEBUG("proxy stop result: ~p", [StopResult]),
+ StopResult.
+
+proxy_request(Proxy, Req) ->
+ Ref = make_ref(),
+ Proxy ! {proxy_request, Ref, self(), Req},
+ receive
+ {proxy_reply, Ref, Proxy, Rep} ->
+ Rep
+ end.
+
+proxy_reply(From, Ref, Rep) ->
+ From ! {proxy_reply, Ref, self(), Rep}.
+
+proxy(Host, NodeName, ErlPath, Paths, Args, Module, Debug) ->
+ process_flag(trap_exit, true),
+ SName = lists:flatten(
+ io_lib:format("HDLT CTRL PROXY[~p,~s,~w]",
+ [self(), Host, NodeName])),
+ ?SET_NAME(SName),
+ ?SET_LEVEL(Debug),
+ ?LOG("starting with"
+ "~n Host: ~p"
+ "~n NodeName: ~p"
+ "~n ErlPath: ~p"
+ "~n Paths: ~p"
+ "~n Args: ~p"
+ "~n Module: ~p", [Host, NodeName, ErlPath, Paths, Args, Module]),
+ State = #proxy{mode = started,
+ mod = Module,
+ host = Host,
+ node_name = NodeName,
+ erl_path = ErlPath,
+ paths = Paths,
+ args = Args},
+ proc_lib:init_ack({ok, self()}),
+ ?DEBUG("started", []),
+ proxy_loop(State).
+
+
+proxy_loop(#proxy{mode = stopping}) ->
+ receive
+ {proxy_request, Ref, From, stop} ->
+ ?LOG("[stopping] received stop order", []),
+ proxy_reply(From, Ref, ok),
+ exit(normal);
+
+ {'EXIT', Pid, Reason} ->
+ ?INFO("[stopping] received exit message from ~p: "
+ "~n Reason: ~p", [Pid, Reason]),
+ exit(Reason)
+
+ end;
+
+proxy_loop(#proxy{mode = started,
+ host = Host,
+ node_name = NodeName,
+ erl_path = ErlPath,
+ paths = Paths,
+ args = Args} = State) ->
+ receive
+ {proxy_request, Ref, From, {start_node, Debug}} ->
+ ?LOG("[starting] received start_node order", []),
+ case hdlt_slave:start_link(Host, NodeName,
+ ErlPath, Paths, Args,
+ Debug) of
+ {ok, Node} ->
+ ?DEBUG("[starting] node ~p started - now monitor", [Node]),
+ erlang:monitor_node(Node, true),
+ State2 = State#proxy{mode = operational,
+ node = Node},
+ proxy_reply(From, Ref, {ok, Node}),
+ proxy_loop(State2);
+ {error, Reason} ->
+ ?INFO("[starting] failed starting node: "
+ "~n Reason: ~p", [Reason]),
+ exit({failed_starting_node, {Host, NodeName, Reason}})
+ end;
+
+ {'EXIT', Pid, Reason} ->
+ ?INFO("[stopping] received exit message from ~p: "
+ "~n Reason: ~p", [Pid, Reason]),
+ exit(Reason)
+
+ end;
+
+proxy_loop(#proxy{mode = operational,
+ mod = Mod,
+ node = Node} = State) ->
+ ?DEBUG("[operational] await command", []),
+ receive
+ {proxy_request, Ref, From, start_remote_proxy} ->
+ ?LOG("[operational] start remote proxy", []),
+ case rpc:call(Node, Mod, start, [?GET_LEVEL()]) of
+ {ok, Pid} ->
+ ?DEBUG("[operational] remote proxy started (~p) - "
+ "create monitor", [Pid]),
+ ProxyRef = erlang:monitor(process, Pid),
+		    ?DEBUG("[operational] monitor: ~p", [ProxyRef]),
+ proxy_reply(From, Ref, ok),
+ proxy_loop(State#proxy{ref = ProxyRef});
+ Error ->
+ ?INFO("[operational] failed starting remote proxy"
+ "~n Error: ~p", [Error]),
+ ReplyReason = {failed_starting_remote_proxy,
+ {Node, Error}},
+ Reply = {error, ReplyReason},
+ proxy_reply(From, Ref, Reply),
+ exit({failed_starting_remote_proxy, {Node, Error}})
+ end;
+
+ {proxy_request, Ref, From, start_inets} ->
+ ?INFO("[operational] start inets framework", []),
+ rpc:cast(Node, Mod, start_inets, []),
+ proxy_reply(From, Ref, ok),
+ proxy_loop(State);
+
+ {proxy_request, Ref, From, {start_service, Args}} ->
+ ?INFO("[operational] start service with"
+ "~n ~p", [Args]),
+ case rpc:call(Node, Mod, start_service, Args) of
+ ok ->
+ ?DEBUG("[operational] service started", []),
+ proxy_reply(From, Ref, ok),
+ proxy_loop(State);
+ Error ->
+ ?INFO("[operational] failed starting service: "
+			  "~n   Args:  ~p"
+ "~n Error: ~p", [Args, Error]),
+ erlang:demonitor(State#proxy.ref, [flush]),
+ Reply = {error, {failed_starting_service, Node, Error}},
+ proxy_reply(From, Ref, Reply),
+ exit({failed_starting_service, Node, Error})
+ end;
+
+ {proxy_request, Ref, From, release} ->
+ ?INFO("[operational] release", []),
+ rpc:call(Node, Mod, release, []),
+ proxy_reply(From, Ref, ok),
+ proxy_loop(State);
+
+ {proxy_request, Ref, From, stop} ->
+ ?INFO("[operational] received stop order", []),
+ erlang:demonitor(State#proxy.ref, [flush]),
+ ?DEBUG("[operational] rpc cast stop order", []),
+ rpc:cast(Node, Mod, stop, []),
+ %% And wait for the node death to be reported
+ Reason =
+ receive
+ {nodedown, Node} when State#proxy.node =:= Node ->
+ ok
+ after 10000 ->
+ ?INFO("Node did not die within expected time frame",
+ []),
+ {node_death_timeout, Node}
+ end,
+ ?DEBUG("[operational] ack stop", []),
+ proxy_reply(From, Ref, Reason),
+ exit(normal);
+
+ {nodedown, Node} when State#proxy.node =:= Node ->
+	    ?INFO("[operational] received unexpected nodedown message", []),
+ exit({node_died, Node});
+
+ {'DOWN', Ref, process, _, normal} when State#proxy.ref =:= Ref ->
+ ?INFO("[operational] remote proxy terminated normally", []),
+ proxy_loop(State#proxy{ref = undefined,
+ connection = undefined,
+ mode = stopping});
+
+ {'DOWN', Ref, process, _, noconnection} when State#proxy.ref =:= Ref ->
+ ?INFO("[operational] remote proxy terminated - no node", []),
+ proxy_loop(State#proxy{ref = undefined,
+ connection = undefined,
+ mode = stopping});
+
+ {'DOWN', Ref, process, _, Reason} when State#proxy.ref =:= Ref ->
+ ?INFO("[operational] remote proxy terminated: "
+ "~n Reason: ~p", [Reason]),
+ exit({remote_proxy_crash, Reason});
+
+ {'EXIT', Pid, Reason} ->
+ ?INFO("[operational] received unexpected exit message from ~p: "
+ "~n Reason: ~p", [Pid, Reason]),
+ proxy_loop(State)
+
+ end.
+
+
+stop_nodes(#state{server_conn = ServerConn,
+ client_conns = ClientConns} = State) ->
+ lists:foreach(
+ fun(#connection{proxy = Proxy, node_name = NodeName, host = Host}) ->
+ ?DEBUG("stop_erlang_nodes -> send stop order to local proxy ~p"
+ "~n for node ~p on ~s", [Proxy, NodeName, Host]),
+ proxy_stop(Proxy)
+ end,
+ ClientConns ++ [ServerConn]),
+ ?DEBUG("stop_erlang_nodes -> sleep some to give the nodes time to die",
+ []),
+ timer:sleep(1000),
+ ?DEBUG("stop_erlang_nodes -> and a final cleanup round", []),
+ lists:foreach(fun(Node) ->
+ ?INFO("try brutal stop node ~p", [Node]),
+ rpc:cast(Node, erlang, halt, [])
+ end,
+ nodes() -- [node()]),
+ ?DEBUG("stop_erlang_nodes -> done", []),
+ State#state{server_conn = undefined, client_conns = []}.
+
+
+%% The nodes on which the HDLT clients run have been started previously
+start_clients(#state{client_conns = Connections,
+ debugs = Debugs,
+ work_dir = WorkDir,
+ socket_type = SocketType,
+ client_cert_file = CertFile,
+ client_sz_from = From,
+ client_sz_to = To,
+ client_sz_incr = Incr},
+ URL, TestTime, SendRate) ->
+ Debug = proplists:get_value(client, Debugs, silence),
+ StartClient =
+ fun(#connection{host = Host} = Connection) ->
+ ?DEBUG("start client on ~p", [Host]),
+ start_client(Connection,
+ WorkDir, SocketType, CertFile,
+ URL, From, To, Incr,
+ TestTime, SendRate, Debug);
+ (_) ->
+ ok
+ end,
+ lists:foreach(StartClient, Connections).
+
+start_client(#connection{proxy = Proxy},
+ WorkDir, SocketType, LocalCertFile,
+ URL, From, To, Incr,
+ TestTime, SendRate, Debug) ->
+ SSLDir = filename:join(WorkDir, "ssl"),
+ CertFile = filename:join(SSLDir, filename:basename(LocalCertFile)),
+ Sizes = randomized_sizes(From, To, Incr),
+ Args = [SocketType, CertFile, URL, Sizes, TestTime, SendRate, Debug],
+ proxy_start_service(Proxy, [Args]).
+
+release_clients(#state{client_conns = Connections}) ->
+ ReleaseClient =
+ fun(#connection{proxy = Proxy,
+ host = Host}) ->
+ ?DEBUG("release client on ~p", [Host]),
+ proxy_release(Proxy);
+ (_) ->
+ ok
+ end,
+ lists:foreach(ReleaseClient, Connections).
+
+
+start_server(#state{server_conn = #connection{proxy = Proxy},
+ http_port = Port,
+ server_root = ServerRoot,
+ doc_root = DocRoot,
+ socket_type = SocketType,
+ server_cert_file = CertFile}) ->
+
+ HttpdConfig =
+ httpd_config(Port, "hdlt", ServerRoot, DocRoot, SocketType, CertFile),
+ ?LOG("start the httpd inets service with config: "
+ "~n ~p", [HttpdConfig]),
+ proxy_start_service(Proxy, [HttpdConfig]),
+ ?DEBUG("start_server -> done", []),
+ ok.
+
+
+httpd_config(Port, ServerName, ServerRoot, DocRoot,
+ SocketType, LocalCertFile) ->
+ LogDir = filename:join(ServerRoot, "log"),
+ ErrorLog = filename:join(LogDir, "error_log"),
+ TransferLog = filename:join(LogDir, "access_log"),
+
+ SSL =
+ case SocketType of
+ ip_comm ->
+ [];
+ _ -> % ssl
+ SSLDir = filename:join(ServerRoot, "ssl"),
+ CertFile =
+ filename:join(SSLDir, filename:basename(LocalCertFile)),
+ [
+ {ssl_certificate_file, CertFile},
+ {ssl_certificate_key_file, CertFile},
+ {ssl_verify_client, 0}
+ ]
+ end,
+ [{port, Port},
+ {server_name, ServerName},
+ {server_root, ServerRoot},
+ {document_root, DocRoot},
+ {error_log, ErrorLog},
+ {error_log_format, pretty},
+ {transfer_log, TransferLog},
+ {socket_type, SocketType},
+ {max_clients, 10000},
+ {modules, [mod_alias, mod_auth, mod_esi, mod_actions, mod_cgi,
+ mod_dir, mod_get, mod_head, mod_log, mod_disk_log]},
+ {script_alias, {"/cgi-bin", filename:join(ServerRoot, "cgi-bin")}},
+ {erl_script_alias, {"/cgi-bin", [hdlt_random_html]}},
+ {erl_script_timeout, 120000} | SSL].
+
+
+clean_up(#state{server_root = ServerRoot,
+ work_dir = WorkDir,
+ http_server = #server{host = Host},
+ clients = Clients}) ->
+ ?DEBUG("begin server cleanup", []),
+ server_clean_up(ServerRoot, WorkDir, Host),
+    ?DEBUG("begin client cleanup", []),
+ clients_clean_up(WorkDir, Clients),
+ ?DEBUG("cleanup done", []),
+ ok.
+
+server_clean_up(ServerRoot, WorkDir, Host) ->
+ ?DEBUG("server cleanup - create sftp channel", []),
+ {ok, Sftp, ConnectionRef} =
+ ssh_sftp:start_channel(Host, [{user_interaction, false},
+ {silently_accept_hosts, true}]),
+ ?DEBUG("server cleanup - delete ~p dirs", [ServerRoot]),
+ del_dirs(Sftp, ServerRoot),
+ ?DEBUG("server cleanup - delete ~p dirs", [WorkDir]),
+ del_dirs(Sftp, WorkDir),
+ ?DEBUG("server cleanup - close sftp channel", []),
+ ssh:close(ConnectionRef).
+
+clients_clean_up(_WorkDir, []) ->
+ ok;
+clients_clean_up(WorkDir, [Client|Clients]) ->
+ client_clean_up(WorkDir, Client),
+ clients_clean_up(WorkDir, Clients).
+
+client_clean_up(WorkDir, #client{host = Host}) ->
+ ?DEBUG("client cleanup - create sftp channel to ~p", [Host]),
+ {ok, Sftp, ConnectionRef} =
+ ssh_sftp:start_channel(Host, [{user_interaction, false},
+ {silently_accept_hosts, true}]),
+ ?DEBUG("client cleanup - delete ~p dirs", [WorkDir]),
+ del_dirs(Sftp, WorkDir),
+ ?DEBUG("client cleanup - close sftp channel", []),
+ ssh:close(ConnectionRef).
+
+
+del_dirs(Sftp, Dir) ->
+ case ssh_sftp:list_dir(Sftp, Dir) of
+ {ok, []} ->
+ ssh_sftp:del_dir(Sftp, Dir);
+ {ok, Files} ->
+ Files2 = [F || F <- Files, (F =/= "..") andalso (F =/= ".")],
+ lists:foreach(fun(File) when ((File =/= "..") andalso
+ (File =/= ".")) ->
+ FullPath = filename:join(Dir, File),
+ case ssh_sftp:read_file_info(Sftp,
+ FullPath) of
+ {ok, #file_info{type = directory}} ->
+ del_dirs(Sftp, FullPath),
+ ssh_sftp:del_dir(Sftp, FullPath);
+ {ok, _} ->
+ ssh_sftp:delete(Sftp, FullPath)
+ end
+ end, Files2);
+ _ ->
+ ok
+ end.
+
+collect_data(#state{clients = Clients} = State) ->
+ N = length(Clients),
+ collect_req_reply(N, State),
+ collect_time(N, State).
+
+collect_req_reply(0, _State) ->
+ ?DEBUG("all reply data collected", []),
+ ok;
+collect_req_reply(N, #state{nof_schedulers = NofScheduler,
+ results = Db,
+ client_conns = Conns} = State) ->
+ ?DEBUG("await reply data from ~p client(s)", [N]),
+ receive
+ {load_data,
+ {req_reply, Client, NoRequests, NoReplys}} ->
+ ?DEBUG("received req_reply load-data from client ~p: "
+ "~n Number of requests: ~p"
+ "~n Number of replies: ~p",
+ [Client, NoRequests, NoReplys]),
+ ets:insert(Db, {{NofScheduler, Client},
+ {req_reply, NoRequests, NoReplys}});
+ stop ->
+ ?INFO("received stop", []),
+ exit(self(), stop);
+
+ {client_exit, Client, Node, Reason} ->
+ ?INFO("Received unexpected client exit from ~p on node ~p "
+ "while collecting replies: "
+ "~n ~p", [Client, Node, Reason]),
+ case lists:keysearch(Node, #connection.node, Conns) of
+ {value, Conn} ->
+ ?LOG("Found problem connection: "
+ "~n ~p", [Conn]),
+ exit({unexpected_client_exit, Reason});
+ false ->
+ collect_req_reply(N, State)
+ end
+ end,
+ collect_req_reply(N-1, State).
+
+collect_time(0, _State) ->
+ ?DEBUG("all time data collected", []),
+ ok;
+collect_time(N, #state{nof_schedulers = NofScheduler,
+ results = Db,
+ client_conns = Conns} = State) ->
+ ?DEBUG("await time data from ~p clients", [N]),
+ receive
+ {load_data,
+ {time_to_complete, Client, StopTime, LastResponseTime}} ->
+ ?LOG("received time load-data from client ~p: "
+ "~n Time of stop: ~p"
+ "~n Time of last response: ~p",
+ [Client, StopTime, LastResponseTime]),
+ ets:insert(Db, {{NofScheduler, Client},
+ {time, StopTime, LastResponseTime}});
+ stop ->
+ ?INFO("received stop while collecting data, when N = ~p", [N]),
+ exit(self(), stop);
+
+ {client_exit, Client, Node, Reason} ->
+ ?INFO("Received unexpected exit from client ~p on node ~p "
+ "while collecting time data: "
+ "~n ~p", [Client, Node, Reason]),
+ case lists:keysearch(Node, #connection.node, Conns) of
+ {value, Conn} ->
+ ?LOG("Found problem connection: "
+ "~n ~p", [Conn]),
+ exit({unexpected_client_exit, Reason});
+ false ->
+		    collect_time(N, State)
+ end;
+
+ Else -> %%% Something is wrong!
+ ?INFO("RECEIVED UNEXPECTED MESSAGE WHILE COLLECTING TIME DATA: "
+ "~n ~p", [Else]),
+ collect_time(N, State)
+ end,
+ collect_time(N-1, State).
+
+analyse_data(#state{results = Db,
+ max_nof_schedulers = MaxNofSchedulers,
+	     test_time = MilliSec}) ->
+ Tab = ets:new(analysed_results, [set]),
+ lists:foreach(fun(NofSchedulers) ->
+			  Result = analyse(NofSchedulers, Db, MilliSec),
+ ets:insert(Tab, Result)
+ end, [N || N <- lists:seq(0, MaxNofSchedulers)]),
+ Tab.
+
+
+no_requests_replys(NoSchedulers, Tab) ->
+ NoRequests =
+ ets:select(Tab, [{{{NoSchedulers,'_'},{req_reply, '$1', '_'}},
+ [],['$$']}]),
+ NoReplys =
+ ets:select(Tab, [{{{NoSchedulers, '_'}, {req_reply, '_', '$1'}},
+ [], ['$$']}]),
+
+ {lists:sum(lists:append(NoRequests)),
+ lists:sum(lists:append(NoReplys))}.
+
+max_time_to_final_response(NofSchedulers, Tab) ->
+ Candidates =
+ ets:select(Tab, [{{{NofSchedulers, '_'}, {time, '$1', '$2'}},
+ [], ['$$']}]),
+
+ NewCandidates = lists:map(
+ fun([StopTime, LastTime]) ->
+ round(
+ timer:now_diff(LastTime, StopTime) / 100000)/10
+ end, Candidates),
+
+ lists:max(NewCandidates).
+
+
+analyse(NofSchedulers, Db, TestTime) ->
+ Sec = TestTime / 1000,
+ {NoRequests, NoReplys} = no_requests_replys(NofSchedulers, Db),
+ {NofSchedulers, round(NoReplys / Sec), NoRequests,
+ max_time_to_final_response(NofSchedulers, Db)}.
+
+
+save_results_to_file(AnalysedTab,
+ #state{socket_type = SocketType,
+ http_server = #server{host = Server},
+ max_nof_schedulers = MaxNofSchedulers}) ->
+ FileName = fun(Post) ->
+ File =
+ lists:flatten(
+ io_lib:format("~s_~w_~s",
+ [Server, SocketType, Post])),
+ filename:join("./", File)
+ end,
+ Reps = FileName("replys_per_sec.txt"),
+ Reqs = FileName("total_requests.txt"),
+ Decay = FileName("decay_time.txt"),
+
+ [FdReps, FdReqs, FdDecay] =
+ lists:map(fun(File) ->
+ {ok, Fd} = file:open(File, [write]),
+ Fd
+ end, [Reps, Reqs, Decay]),
+ lists:foreach(fun(NofSchedulers) ->
+ save_result_to_file(NofSchedulers,
+ FdReps, FdReqs,
+ FdDecay, AnalysedTab)
+ end, [N || N <- lists:seq(0, MaxNofSchedulers)]),
+ [Reps, Reqs, Decay].
+
+save_result_to_file(NofSchedulers,
+ FdReps, FdReqs, FdDecay, AnalysedTab) ->
+
+ [{NofSchedulers, NofRepsPerSec, NofReqs, MaxFinalResponseTime}] =
+ ets:lookup(AnalysedTab, NofSchedulers),
+
+ file:write(FdReps, io_lib:format("~p,~p~n",
+ [NofRepsPerSec, NofSchedulers])),
+ file:write(FdReqs, io_lib:format("~p,~p~n",
+ [NofReqs, NofSchedulers])),
+ file:write(FdDecay, io_lib:format("~p,~p~n", [MaxFinalResponseTime,
+ NofSchedulers])).
+
+
+help() ->
+ io:format("hdlt:start(Options). Where options:~n "
+ " ~n~p~n~n hdlt:start([]). -> hdlt:start(~p)~n~n",
+ [[{send_rate, "integer()",
+		"Number of outstanding requests that a client "
+ "should have during the test to create a load situation."},
+	       {clients, "[{path(), host()}]", "Paths to erl and names of hosts to run the clients on."},
+ {test_time, "{hours(), mins(), sec()}",
+ "How long the test should be run."},
+ {server, "{path(), host()}", "Path to erl and name of host to run the HTTP-server on."},
+ {port, "port()", "The port that the HTTP-server should use."},
+ {server_dir, "dir()", "The directory where the HTTP server "
+ " stores its contents and configuration."},
+	      {work_dir, "dir()", "Path, on the computer where the test "
+	       "is run, to a directory where the results can be saved."},
+	      {max_nof_schedulers, "integer()",
+ "Max number of schedulers to run."},
+ {socket_type, "Httpd configuration option socket_type"}],
+ defaults()]).
+
+
+defaults() ->
+ [{send_rate, ?DEFAULT_SENDRATE},
+ %% {clients, []},
+ {test_time, ?DEFAULT_TEST_TIME},
+ %% {server, ?DEFAULT_SERVER},
+ {port, ?DEFAULT_PORT},
+ {server_dir, ?DEFAULT_SERVER_DIR},
+ {work_dir, ?DEFAULT_WORK_DIR},
+ {max_nof_schedulers, ?DEFAULT_MAX_NOF_SCHEDULERS},
+ {socket_type, ?DEFAULT_SOCKET_TYPE}].
+
+
+get_debugs(Config) ->
+ ?DEBUG("get debugs", []),
+ Debugs = proplists:get_value(debug, Config, ?DEFAULT_DEBUGS),
+ verify_debugs(Debugs),
+ Debugs.
+
+verify_debugs([]) ->
+ ok;
+verify_debugs([{Tag, Debug}|Debugs]) ->
+ verify_debug(Tag, Debug),
+ verify_debugs(Debugs).
+
+verify_debug(Tag, Debug) ->
+ case lists:member(Tag, [ctrl, proxy, slave, client]) of
+ true ->
+ ok;
+ false ->
+ exit({bad_debug_tag, Tag})
+ end,
+ case lists:member(Debug, [silence, info, log, debug]) of
+ true ->
+ ok;
+ false ->
+ exit({bad_debug_level, Debug})
+ end.
+
+get_send_rate(Config) ->
+ ?DEBUG("get send_rate", []),
+ case proplists:get_value(send_rate, Config, ?DEFAULT_SENDRATE) of
+ SendRate when is_integer(SendRate) andalso (SendRate > 0) ->
+ SendRate;
+ BadSendRate ->
+ exit({bad_sendrate, BadSendRate})
+ end.
+
+
+get_clients(Config) ->
+ ?DEBUG("get clients", []),
+ case proplists:get_value(clients, Config, undefined) of
+ undefined ->
+ missing_mandatory_config(clients);
+ Clients when is_list(Clients) andalso (length(Clients) > 0) ->
+ case [#client{path = Path, host = Host} ||
+ {Path, Host} <- Clients] of
+ Clients2 when (length(Clients2) > 0) ->
+ Clients2;
+ _ ->
+ exit({bad_clients, Clients})
+ end;
+
+ BadClients ->
+ exit({bad_clients, BadClients})
+
+ end.
+
+get_server(Config) ->
+ ?DEBUG("get server", []),
+ case proplists:get_value(server, Config) of
+ {Path, Host} when is_list(Path) andalso is_list(Host) ->
+ #server{path = Path, host = Host};
+ undefined ->
+ missing_mandatory_config(server)
+ end.
+
+get_server_dir(Config) ->
+ ?DEBUG("get server_dir", []),
+ get_dir(server_dir, Config, ?DEFAULT_SERVER_DIR).
+
+get_work_dir(Config) ->
+ ?DEBUG("get work_dir", []),
+ get_dir(work_dir, Config, ?DEFAULT_WORK_DIR).
+
+get_dir(Key, Config, Default) ->
+ Dir = proplists:get_value(Key, Config, Default),
+ ensure_absolute(Dir),
+ Dir.
+
+ensure_absolute(Path) ->
+ case filename:pathtype(Path) of
+ absolute ->
+ ok;
+ PathType ->
+ exit({bad_pathtype, Path, PathType})
+ end.
+
+get_port(Config) ->
+ ?DEBUG("get port", []),
+ case proplists:get_value(port, Config, ?DEFAULT_PORT) of
+ Port when is_integer(Port) andalso (Port > 0) ->
+ Port;
+ BadPort ->
+ exit({bad_port, BadPort})
+ end.
+
+get_socket_type(Config) ->
+ ?DEBUG("get socket_type", []),
+ case proplists:get_value(socket_type, Config, ?DEFAULT_SOCKET_TYPE) of
+ SocketType when ((SocketType =:= ip_comm) orelse
+ (SocketType =:= ssl) orelse
+ (SocketType =:= essl) orelse
+ (SocketType =:= ossl)) ->
+ SocketType;
+ BadSocketType ->
+ exit({bad_socket_type, BadSocketType})
+ end.
+
+get_test_time(Config) ->
+ ?DEBUG("get test_time", []),
+ case proplists:get_value(test_time, Config, ?DEFAULT_TEST_TIME) of
+ Seconds when is_integer(Seconds) andalso (Seconds > 0) ->
+ timer:seconds(Seconds);
+ BadTestTime ->
+ exit({bad_test_time, BadTestTime})
+ end.
+
+get_max_nof_schedulers(Config) ->
+ ?DEBUG("get max_nof_schedulers", []),
+ case proplists:get_value(max_nof_schedulers,
+ Config,
+ ?DEFAULT_MAX_NOF_SCHEDULERS) of
+ MaxNofScheds when (is_integer(MaxNofScheds) andalso
+ (MaxNofScheds >= 0)) ->
+ MaxNofScheds;
+ BadMaxNofScheds ->
+ exit({bad_max_nof_schedulers, BadMaxNofScheds})
+ end.
+
+
+get_server_cert_file(Config) ->
+ ?DEBUG("get server cert file", []),
+ get_cert_file(server_cert_file, ?DEFAULT_SERVER_CERT, Config).
+
+get_client_cert_file(Config) ->
+ ?DEBUG("get client cert file", []),
+ get_cert_file(client_cert_file, ?DEFAULT_CLIENT_CERT, Config).
+
+get_cert_file(Tag, DefaultCertFileName, Config) ->
+ LibDir = code:lib_dir(inets),
+ HdltDir = filename:join(LibDir, "examples/httpd_load_test"),
+ DefaultCertFile = filename:join(HdltDir, DefaultCertFileName),
+ case proplists:get_value(Tag, Config, DefaultCertFile) of
+ F when is_list(F) ->
+ case file:read_file_info(F) of
+ {ok, #file_info{type = regular}} ->
+ F;
+ {ok, #file_info{type = Type}} ->
+ exit({wrong_file_type, Tag, F, Type});
+ {error, Reason} ->
+                    exit({failed_reading_file_info, Tag, F, Reason})
+ end;
+ BadFile ->
+ exit({bad_cert_file, Tag, BadFile})
+ end.
+
+
+get_work_sim(Config) ->
+ ?DEBUG("get work_sim", []),
+ case proplists:get_value(work_simulator, Config, ?DEFAULT_WORK_SIM) of
+ WS when is_integer(WS) andalso (WS > 0) ->
+ WS;
+ BadWS ->
+ exit({bad_work_simulator, BadWS})
+ end.
+
+
+get_data_size(Config) ->
+ ?DEBUG("get data_size", []),
+ case proplists:get_value(data_size, Config, ?DEFAULT_DATA_SIZE) of
+ {From, To, Incr} = DS when (is_integer(From) andalso
+ is_integer(To) andalso
+ is_integer(Incr) andalso
+ (To > From) andalso
+ (From > 0) andalso
+ (Incr > 0)) ->
+ DS;
+ {From, To} when (is_integer(From) andalso
+ is_integer(To) andalso
+ (To > From) andalso
+ (From > 0)) ->
+ {From, To, ?DEFAULT_DATA_SIZE_INCR};
+ BadDS ->
+ exit({bad_data_size, BadDS})
+ end.
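+
+%% For example (a sketch): {data_size, {64, 1024, 64}} means sizes from 64
+%% up to 1024 in steps of 64, and {data_size, {64, 1024}} uses the default
+%% increment. The sizes are assumed to be the SzSim values that the clients
+%% append to the request URL (see url/4 below and hdlt_random_html:page/3).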
+
+
+url(#server{host = Host}, Port, SocketType, WorkSim) ->
+ Scheme =
+ case SocketType of
+ ip_comm ->
+ "http";
+ _ -> %% SSL
+ "https"
+ end,
+ lists:flatten(
+ io_lib:format("~s://~s:~w/cgi-bin/hdlt_random_html:page?~w:",
+ [Scheme, Host, Port, WorkSim])).
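+
+%% For example (illustrative values):
+%%   url(#server{host = "server1"}, 8888, ip_comm, 10000)
+%% yields
+%%   "http://server1:8888/cgi-bin/hdlt_random_html:page?10000:"
+%% to which the client is expected to append the simulated data size (SzSim).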
+
+
+missing_mandatory_config(Missing) ->
+ exit({missing_mandatory_config, Missing}).
+
+
+ensure_remote_dir_exist(Sftp, Path0) ->
+ case filename:split(Path0) of
+ [Root, Dir | Rest] ->
+            %% We never create the root directory or the next
+            %% level; these *must* already exist:
+ Path = filename:join(Root, Dir),
+ case ssh_sftp:read_file_info(Sftp, Path) of
+ {ok, #file_info{type = directory}} ->
+ ensure_remote_dir_exist(Sftp, Path, Rest);
+ {ok, #file_info{type = Type}} ->
+ ?INFO("Not a dir: ~p (~p)", [Path, Type]),
+ exit({not_a_dir, Path, Type});
+ {error, Reason} ->
+ ?INFO("Failed reading file info for ~p: ~p",
+ [Path, Reason]),
+ exit({failed_reading_file_info, Path, Reason})
+ end;
+ BadSplit ->
+ ?INFO("Bad remote dir path: ~p -> ~p", [Path0, BadSplit]),
+ exit({bad_dir, Path0})
+ end.
+
+ensure_remote_dir_exist(_Sftp, _Dir, []) ->
+ ok;
+ensure_remote_dir_exist(Sftp, Path, [Dir|Rest]) ->
+ NewPath = filename:join(Path, Dir),
+ case ssh_sftp:read_file_info(Sftp, NewPath) of
+ {ok, #file_info{type = directory}} ->
+ ensure_remote_dir_exist(Sftp, NewPath, Rest);
+ {ok, #file_info{type = Type}} ->
+ %% Exist, but is not a dir
+ ?INFO("Not a dir: ~p (~p)", [NewPath, Type]),
+ exit({not_a_dir, NewPath, Type});
+ {error, Reason} ->
+            %% This *could* be because the dir does not exist,
+            %% but it could also be some other error.
+            %% The sftp error reason is not specific enough to tell
+            %% these cases apart, so we do not rely on it.
+            %% The simplest way to find out is to simply try to create
+            %% the directory, since we need to ensure its existence
+            %% anyway.
+ case ssh_sftp:make_dir(Sftp, NewPath) of
+ ok ->
+ ensure_remote_dir_exist(Sftp, NewPath, Rest);
+ _ ->
+ ?INFO("Failed reading file info for ~p: ~p",
+ [Dir, Reason]),
+ exit({failed_reading_file_info, NewPath, Reason})
+ end
+ end.
+
+maybe_create_remote_dir(Sftp, Dir) ->
+ case ssh_sftp:read_file_info(Sftp, Dir) of
+ {ok, #file_info{type = directory}} ->
+ ok;
+ {ok, #file_info{type = Type}} ->
+ %% Exist, but is not a dir
+ ?INFO("Not a dir: ~p (~p)", [Dir, Type]),
+ exit({not_a_dir, Dir, Type});
+ {error, Reason} ->
+            %% Assume the dir does not exist...
+ case ssh_sftp:make_dir(Sftp, Dir) of
+ ok ->
+ ok;
+ _ ->
+ ?INFO("Failed reading file info for ~p: ~p",
+ [Dir, Reason]),
+ exit({failed_reading_file_info, Dir, Reason})
+ end
+ end.
+
+
+set_debug_level(Debugs) ->
+ Debug = proplists:get_value(ctrl, Debugs, silence),
+ ?SET_LEVEL(Debug).
+
+
+%% Generates the list of sizes from From to To (in steps of Incr),
+%% and then randomizes (shuffles) that list.
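+%%
+%% For example (a sketch): randomized_sizes(500, 1500, 500) first builds
+%% [500, 1000, 1500] and then returns some permutation of it, e.g.
+%% [1000, 1500, 500].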
+
+randomized_sizes_init() ->
+ {A, B, C} = os:timestamp(),
+ random:seed(A, B, C).
+
+randomized_sizes(From, To, Incr) ->
+ L = lists:seq(From, To, Incr),
+ Len = length(L),
+ randomized_sizes2(L, 0, Len-1).
+
+randomized_sizes2(L, N, Len) when N >= Len ->
+ L;
+randomized_sizes2(L, N, Len) ->
+ SplitWhere = random:uniform(Len),
+ {A, B} = lists:split(SplitWhere, L),
+ randomized_sizes2(B ++ A, N+1, Len).
diff --git a/lib/inets/examples/httpd_load_test/hdlt_logger.erl b/lib/inets/examples/httpd_load_test/hdlt_logger.erl
new file mode 100644
index 0000000000..b0c7eab2d1
--- /dev/null
+++ b/lib/inets/examples/httpd_load_test/hdlt_logger.erl
@@ -0,0 +1,138 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%----------------------------------------------------------------------
+%% Purpose: This is a simple logger utility for the HDLT toolkit.
+%%          It assumes that the debug level and the "name" of the
+%%          logging entity have been put in the process dictionary
+%%          (using the set_level and set_name functions respectively).
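+%%
+%%          Typical use in a logging process (a sketch; the name and
+%%          arguments are illustrative only):
+%%
+%%             hdlt_logger:set_name("HDLT EXAMPLE"),
+%%             hdlt_logger:set_level(debug),
+%%             hdlt_logger:info("connected to ~p", [Host]).
+%%
+%%          The controller is expected to run hdlt_logger:start/0, so that
+%%          a globally registered logger process exists to receive the
+%%          log messages.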
+%%----------------------------------------------------------------------
+
+%%
+
+-module(hdlt_logger).
+
+-export([
+ start/0,
+ set_level/1, get_level/0, set_name/1,
+ info/2, log/2, debug/2
+ ]).
+
+-export([logger/1]).
+
+-define(LOGGER, ?MODULE).
+-define(MSG, hdlt_logger_msg).
+-define(LEVEL, hdlt_logger_level).
+-define(NAME, hdlt_logger_name).
+-define(INFO_STR, "INFO").
+-define(LOG_STR, "LOG ").
+-define(DEBUG_STR, "DBG ").
+
+
+start() ->
+ Self = self(),
+ proc_lib:start(?MODULE, logger, [Self]).
+
+set_name(Name) when is_list(Name) ->
+ put(?NAME, Name),
+ ok.
+
+get_level() ->
+ get(?LEVEL).
+
+set_level(Level) ->
+ case lists:member(Level, [silence, info, log, debug]) of
+ true ->
+ put(?LEVEL, Level),
+ ok;
+ false ->
+ erlang:error({bad_debug_level, Level})
+ end.
+
+
+info(F, A) ->
+%% io:format("info -> " ++ F ++ "~n", A),
+ do_log(info, get(?LEVEL), F, A).
+
+log(F, A) ->
+%% io:format("log -> " ++ F ++ "~n", A),
+ do_log(log, get(?LEVEL), F, A).
+
+debug(F, A) ->
+%% io:format("debug -> " ++ F ++ "~n", A),
+ do_log(debug, get(?LEVEL), F, A).
+
+
+logger(Parent) ->
+ global:register_name(?LOGGER, self()),
+ Ref = erlang:monitor(process, Parent),
+ proc_lib:init_ack(self()),
+ logger_loop(Ref).
+
+logger_loop(Ref) ->
+ receive
+ {?MSG, F, A} ->
+ io:format(F, A),
+ logger_loop(Ref);
+ {'DOWN', Ref, process, _Object, _Info} ->
+ %% start the stop timer
+ erlang:send_after(timer:seconds(5), self(), stop),
+ logger_loop(undefined);
+ stop ->
+ global:unregister_name(?LOGGER),
+ ok
+ end.
+
+
+formated_timestamp() ->
+ {Date, Time} = erlang:localtime(),
+ {YYYY,MM,DD} = Date,
+ {Hour,Min,Sec} = Time,
+ FormatDate =
+ io_lib:format("~.4w-~.2.0w-~.2.0w ~.2.0w:~.2.0w:~.2.0w",
+ [YYYY,MM,DD,Hour,Min,Sec]),
+ lists:flatten(FormatDate).
+
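+%% A message is printed when its class is within the configured level:
+%% level 'info' passes only info messages, 'log' passes info and log,
+%% 'debug' passes everything and 'silence' discards all messages.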
+do_log(_, silence, _, _) ->
+ ok;
+do_log(info, info, F, A) ->
+ do_log(?INFO_STR, F, A);
+do_log(info, log, F, A) ->
+ do_log(?INFO_STR, F, A);
+do_log(log, log, F, A) ->
+ do_log(?LOG_STR, F, A);
+do_log(info, debug, F, A) ->
+ do_log(?INFO_STR, F, A);
+do_log(log, debug, F, A) ->
+ do_log(?LOG_STR, F, A);
+do_log(debug, debug, F, A) ->
+ do_log(?DEBUG_STR, F, A);
+do_log(_, _, _F, _A) ->
+ ok.
+
+do_log(SEV, F, A) ->
+ Name =
+ case get(?NAME) of
+ L when is_list(L) ->
+ L;
+ _ ->
+ "UNDEFINED"
+ end,
+ Msg = {?MSG, "~s ~s [~s] " ++ F ++ "~n",
+ [SEV, Name, formated_timestamp() | A]},
+ (catch global:send(?LOGGER, Msg)).
diff --git a/lib/inets/examples/httpd_load_test/hdlt_logger.hrl b/lib/inets/examples/httpd_load_test/hdlt_logger.hrl
new file mode 100644
index 0000000000..aa94babc48
--- /dev/null
+++ b/lib/inets/examples/httpd_load_test/hdlt_logger.hrl
@@ -0,0 +1,33 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%
+
+-ifndef(hdlt_logger_hrl).
+-define(hdlt_logger_hrl, true).
+
+%% Various log macros
+-define(SET_LEVEL(N), hdlt_logger:set_level(N)).
+-define(GET_LEVEL(), hdlt_logger:get_level()).
+-define(SET_NAME(N), hdlt_logger:set_name(N)).
+
+-define(INFO(F, A), hdlt_logger:info(F, A)).
+-define(LOG(F, A), hdlt_logger:log(F, A)).
+-define(DEBUG(F, A), hdlt_logger:debug(F, A)).
+
+-endif. % -ifdef(hdlt_logger_hrl).
diff --git a/lib/inets/examples/httpd_load_test/hdlt_random_html.erl b/lib/inets/examples/httpd_load_test/hdlt_random_html.erl
new file mode 100644
index 0000000000..e3a572c61f
--- /dev/null
+++ b/lib/inets/examples/httpd_load_test/hdlt_random_html.erl
@@ -0,0 +1,59 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%
+
+-module(hdlt_random_html).
+-export([page/3]).
+
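+%% The Input string is expected to be on the form "WorkSim:SzSim",
+%% e.g. "10000:500" (the prefix, including WorkSim and the colon, is
+%% produced by hdlt_ctrl:url/4): WorkSim controls the amount of simulated
+%% work and SzSim the (approximate) size of the generated reply.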
+page(SessionID, _Env, Input) ->
+%% log("page(~p) -> deliver content-type when"
+%% "~n SessionID: ~p"
+%% "~n Env: ~p"
+%% "~n Input: ~p", [self(), SessionID, Env, Input]),
+ [WorkSimStr, SzSimStr] = string:tokens(Input, [$:]),
+ WorkSim = list_to_integer(WorkSimStr),
+ SzSim = list_to_integer(SzSimStr),
+ mod_esi:deliver(SessionID, "Content-Type:text/html\r\n\r\n"),
+ mod_esi:deliver(SessionID, start("Random test page")),
+ mod_esi:deliver(SessionID, content(WorkSim, SzSim)),
+ mod_esi:deliver(SessionID, stop()),
+ ok.
+
+start(Title) ->
+ "<HTML>
+<HEAD>
+<TITLE>" ++ Title ++ "</TITLE>
+ </HEAD>
+<BODY>\n".
+
+stop() ->
+ "</BODY>
+</HTML>
+".
+
+content(WorkSim, SzSim) ->
+ {A, B, C} = now(),
+ random:seed(A, B, C),
+ lists:sort([random:uniform(X) || X <- lists:seq(1, WorkSim)]),
+ lists:flatten(lists:duplicate(SzSim, "Dummy data ")).
+
+%% log(F, A) ->
+%% hdlt_logger:set_name("HDLT RANDOM-HTML"),
+%% hdlt_logger:set_level(debug),
+%% hdlt_logger:log(F, A).
diff --git a/lib/inets/examples/httpd_load_test/hdlt_server.erl b/lib/inets/examples/httpd_load_test/hdlt_server.erl
new file mode 100644
index 0000000000..3e5a849d5b
--- /dev/null
+++ b/lib/inets/examples/httpd_load_test/hdlt_server.erl
@@ -0,0 +1,163 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%
+%%----------------------------------------------------------------------
+%% Purpose: The HDLT server module.
+%% This is just a stub, making future expansion easy.
+%% All code in this module is executed in the local node!
+%%----------------------------------------------------------------------
+
+-module(hdlt_server).
+
+-export([start/1, stop/0, start_inets/0, start_service/1]).
+
+-export([proxy/1]).
+
+-include_lib("kernel/include/file.hrl").
+-include("hdlt_logger.hrl").
+
+
+-define(PROXY, hdlt_proxy).
+
+
+%% This function is used to start the proxy process.
+%% It is called *after* the node has been "connected"
+%% to the controller/collector node.
+
+start(Debug) ->
+ proc_lib:start(?MODULE, proxy, [Debug]).
+
+stop() ->
+ ?PROXY ! stop.
+
+start_inets() ->
+ ?PROXY ! start_inets.
+
+start_service(Config) ->
+ ?PROXY ! {server_start, Config, self()},
+ receive
+ {server_start_result, Result} ->
+ Result
+ after 15000 ->
+ {error, timeout}
+ end.
+
+
+proxy(Debug) ->
+ process_flag(trap_exit, true),
+ erlang:register(?PROXY, self()),
+ ?SET_NAME("HDLT PROXY"),
+ ?SET_LEVEL(Debug),
+ ?LOG("starting", []),
+ Ref = await_for_controller(10),
+ CtrlNode = node(Ref),
+ erlang:monitor_node(CtrlNode, true),
+ proc_lib:init_ack({ok, self()}),
+ ?DEBUG("started", []),
+ proxy_loop(Ref, CtrlNode).
+
+await_for_controller(N) when N > 0 ->
+ case global:whereis_name(hdlt_ctrl) of
+ Pid when is_pid(Pid) ->
+ erlang:monitor(process, Pid);
+ _ ->
+ timer:sleep(1000),
+ await_for_controller(N-1)
+ end;
+await_for_controller(_) ->
+ proc_lib:init_ack({error, controller_not_found, nodes()}),
+ timer:sleep(500),
+ halt().
+
+
+proxy_loop(Ref, CtrlNode) ->
+ ?DEBUG("await command", []),
+ receive
+ stop ->
+ ?LOG("received stop", []),
+ halt();
+
+ start_inets ->
+ ?LOG("start the inets service framework", []),
+ case (catch inets:start()) of
+ ok ->
+ ?LOG("framework started", []),
+ proxy_loop(Ref, CtrlNode);
+ Error ->
+ ?LOG("failed starting inets service framework: "
+ "~n Error: ~p", [Error]),
+ halt()
+ end;
+
+ {server_start, Config, From} ->
+ ?LOG("start-server", []),
+ maybe_start_crypto_and_ssl(Config),
+ %% inets:enable_trace(max, "/tmp/inets-httpd-trace.log", httpd),
+ %% inets:enable_trace(max, "/tmp/inets-httpd-trace.log", all),
+ case (catch inets:start(httpd, Config)) of
+ {ok, _} ->
+ ?LOG("server started when"
+ "~n which(inets): ~p"
+ "~n RootDir: ~p"
+ "~n System info: ~p", [code:which(inets),
+ code:root_dir(),
+ get_node_info()]),
+ From ! {server_start_result, ok},
+ proxy_loop(Ref, CtrlNode);
+ Error ->
+ ?INFO("server start failed"
+ "~n Error: ~p", [Error]),
+ From ! {server_start_result, Error},
+ halt()
+ end;
+
+ {nodedown, CtrlNode} ->
+ ?LOG("received nodedown for controller node - terminate", []),
+ halt();
+
+ {'DOWN', Ref, process, _, _} ->
+ ?LOG("received DOWN message for controller - terminate", []),
+ %% The controller has terminated, time to die
+ halt()
+
+ end.
+
+
+maybe_start_crypto_and_ssl(Config) ->
+ case lists:keysearch(socket_type, 1, Config) of
+ {value, {socket_type, SocketType}} when ((SocketType =:= ssl) orelse
+ (SocketType =:= ossl) orelse
+ (SocketType =:= essl)) ->
+ ?LOG("maybe start crypto and ssl", []),
+ (catch crypto:start()),
+ ssl:start();
+ _ ->
+ ok
+ end.
+
+
+get_node_info() ->
+ [{cpu_topology, erlang:system_info(cpu_topology)},
+ {heap_type, erlang:system_info(heap_type)},
+ {nof_schedulers, erlang:system_info(schedulers)},
+ {otp_release, erlang:system_info(otp_release)},
+ {version, erlang:system_info(version)},
+ {system_version, erlang:system_info(system_version)},
+ {system_architecture, erlang:system_info(system_architecture)}].
+
diff --git a/lib/inets/examples/httpd_load_test/hdlt_slave.erl b/lib/inets/examples/httpd_load_test/hdlt_slave.erl
new file mode 100644
index 0000000000..52af9b5b90
--- /dev/null
+++ b/lib/inets/examples/httpd_load_test/hdlt_slave.erl
@@ -0,0 +1,291 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2010. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(hdlt_slave).
+
+
+-export([start_link/4, start_link/5, start_link/6, stop/1]).
+
+%% Internal exports
+-export([wait_for_slave/9, slave_start/1, wait_for_master_to_die/3]).
+
+-include("hdlt_logger.hrl").
+
+-define(SSH_PORT, 22).
+-define(TIMEOUT, 60000).
+-define(LOGGER, hdlt_logger).
+
+
+%% ***********************************************************************
+%% start_link/4,5,6 --
+%%
+%% The start_link/N functions are used to start a slave Erlang node.
+%% The node on which the start_link/N functions are used is called the
+%% master in the description below.
+%%
+%% The slave node is started on the given Host via ssh, by executing
+%% the 'erl' program found in the given ErlPath on that host.
+%%
+%% For this to work, the following conditions must be fulfilled:
+%%
+%% 1. The host must run an ssh daemon, and it must be possible to
+%%    connect to it without being prompted for a password
+%%    (e.g. by using public key authentication).
+%%
+%% 2. The 'erl' program must exist in ErlPath on that host.
+%%
+%% When the master node dies, the slave node will terminate.
+%% The slave node will also terminate if the process which called
+%% start_link terminates.
+%%
+%% Returns: {ok, Name@Host} |
+%%          {error, timeout} |
+%%          {error, {already_running, Name@Host}} |
+%%          {error, Reason}
+
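+%% Illustrative call (a sketch; host, node name and paths are hypothetical):
+%%
+%%   {ok, Node} =
+%%       hdlt_slave:start_link("client1", hdlt_client, "/usr/local/bin",
+%%                             ["/home/tester/hdlt"], debug),
+%%   ...
+%%   hdlt_slave:stop(Node).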
+start_link(Host, Name, ErlPath, Paths) ->
+ start_link(Host, Name, ErlPath, Paths, [], silence).
+
+start_link(Host, Name, ErlPath, Paths, DebugLevel) when is_atom(DebugLevel) ->
+ start_link(Host, Name, ErlPath, Paths, [], DebugLevel);
+start_link(Host, Name, ErlPath, Paths, Args) when is_list(Args) ->
+ start_link(Host, Name, ErlPath, Paths, Args, silence).
+
+start_link(Host, Name, ErlPath, Paths, Args, DebugLevel) ->
+ Node = list_to_atom(lists:concat([Name, "@", Host])),
+ case net_adm:ping(Node) of
+ pang ->
+ start_it(Host, Name, Node, ErlPath, Paths, Args, DebugLevel);
+ pong ->
+ {error, {already_running, Node}}
+ end.
+
+%% Stops a running node.
+
+stop(Node) ->
+ rpc:call(Node, erlang, halt, []),
+ ok.
+
+
+%% Starts a new slave node.
+
+start_it(Host, Name, Node, ErlPath, Paths, Args, DebugLevel) ->
+ Prog = filename:join([ErlPath, "erl"]),
+ spawn(?MODULE, wait_for_slave, [self(), Host, Name, Node, Paths, Args, self(), Prog, DebugLevel]),
+ receive
+ {result, Result} -> Result
+ end.
+
+%% Waits for the slave to start.
+
+wait_for_slave(Parent, Host, Name, Node, Paths, Args,
+ LinkTo, Prog, DebugLevel) ->
+ ?SET_NAME("HDLT SLAVE STARTER"),
+ ?SET_LEVEL(DebugLevel),
+ ?DEBUG("begin", []),
+ Waiter = register_unique_name(0),
+ case mk_cmd(Host, Name, Paths, Args, Waiter, Prog) of
+ {ok, Cmd} ->
+ ?DEBUG("command generated: ~n~s", [Cmd]),
+ case (catch ssh_slave_start(Host, Cmd)) of
+ {ok, Conn, _Chan} ->
+ ?DEBUG("ssh channel created", []),
+ receive
+ {SlavePid, slave_started} ->
+ ?DEBUG("slave started: ~p", [SlavePid]),
+ unregister(Waiter),
+ slave_started(Parent, LinkTo, SlavePid, Conn,
+ DebugLevel)
+ after 32000 ->
+ ?INFO("slave node failed to report in on time",
+ []),
+ %% If it seems that the node was partially started,
+ %% try to kill it.
+ case net_adm:ping(Node) of
+ pong ->
+ spawn(Node, erlang, halt, []),
+ ok;
+ _ ->
+ ok
+ end,
+ Parent ! {result, {error, timeout}}
+ end;
+ {error, Reason} = Error ->
+ ?INFO("FAILED starting node: "
+ "~n ~p"
+ "~n ~p", [Reason, Cmd]),
+ Parent ! {result, Error}
+ end;
+ Other ->
+ ?INFO("FAILED creating node command string: "
+ "~n ~p", [Other]),
+ Parent ! {result, Other}
+ end.
+
+
+ssh_slave_start(Host, ErlCmd) ->
+ ?DEBUG("ssh_slave_start -> try connect to ~p", [Host]),
+ Connection =
+ case (catch ssh:connect(Host, ?SSH_PORT,
+ [{silently_accept_hosts, true}])) of
+ {ok, Conn} ->
+		?DEBUG("ssh_slave_start -> connected: ~p", [Conn]),
+ Conn;
+ Error1 ->
+ ?LOG("failed connecting to ~p: ~p", [Host, Error1]),
+ throw({error, {ssh_connect_failed, Error1}})
+ end,
+
+    ?DEBUG("ssh_slave_start -> connected - now create channel", []),
+    Channel =
+	case (catch ssh_connection:session_channel(Connection, ?TIMEOUT)) of
+	    {ok, Chan} ->
+		?DEBUG("ssh_slave_start -> channel ~p created", [Chan]),
+ Chan;
+ Error2 ->
+ ?LOG("failed creating channel: ~p", [Error2]),
+ throw({error, {ssh_channel_create_failed, Error2}})
+ end,
+
+    ?DEBUG("ssh_slave_start -> channel created - now exec command: "
+	   "~n   ~p", [ErlCmd]),
+    case (catch ssh_connection:exec(Connection, Channel, ErlCmd, infinity)) of
+	success ->
+	    ?DEBUG("ssh_slave_start -> command exec'ed - clean ssh msg", []),
+	    clean_ssh_msg(),
+	    ?DEBUG("ssh_slave_start -> done", []),
+	    {ok, Connection, Channel};
+	Error3 ->
+	    ?LOG("failed exec command: ~p", [Error3]),
+ throw({error, {ssh_exec_failed, Error3}})
+ end.
+
+clean_ssh_msg() ->
+ receive
+ {ssh_cm, _X, _Y} ->
+ clean_ssh_msg()
+ after 1000 ->
+ ok
+ end.
+
+
+slave_started(ReplyTo, Master, Slave, Conn, Level)
+ when is_pid(Master) andalso is_pid(Slave) ->
+ process_flag(trap_exit, true),
+ SName = lists:flatten(
+ io_lib:format("HDLT SLAVE CTRL[~p,~p]",
+ [self(), node(Slave)])),
+ ?SET_NAME(SName),
+ ?SET_LEVEL(Level),
+ ?LOG("initiating", []),
+ MasterRef = erlang:monitor(process, Master),
+ SlaveRef = erlang:monitor(process, Slave),
+ ReplyTo ! {result, {ok, node(Slave)}},
+ slave_running(Master, MasterRef, Slave, SlaveRef, Conn).
+
+
+%% The slave node will be killed if the master process terminates.
+%% The master process will not be killed if the slave node terminates.
+
+slave_running(Master, MasterRef, Slave, SlaveRef, Conn) ->
+ ?DEBUG("await message", []),
+ receive
+ {'DOWN', MasterRef, process, _Object, _Info} ->
+ ?LOG("received DOWN from master", []),
+ erlang:demonitor(SlaveRef, [flush]),
+ Slave ! {nodedown, node()},
+ ssh:close(Conn);
+
+ {'DOWN', SlaveRef, process, Object, _Info} ->
+ ?LOG("received DOWN from slave (~p)", [Object]),
+ erlang:demonitor(MasterRef, [flush]),
+ ssh:close(Conn);
+
+ Other ->
+ ?DEBUG("received unknown: ~n~p", [Other]),
+ slave_running(Master, MasterRef, Slave, SlaveRef, Conn)
+
+ end.
+
+register_unique_name(Number) ->
+ Name = list_to_atom(lists:concat([?MODULE, "_waiter_", Number])),
+ case catch register(Name, self()) of
+ true ->
+ Name;
+ {'EXIT', {badarg, _}} ->
+ register_unique_name(Number+1)
+ end.
+
+
+%% Builds the command line used to start the slave node on the
+%% remote host (the command is executed there via ssh).
+
+mk_cmd(Host, Name, Paths, Args, Waiter, Prog) ->
+ PaPaths = [[" -pa ", Path] || Path <- Paths],
+ {ok, lists:flatten(
+ lists:concat([Prog,
+		  " -detached -noinput ",
+ Args, " ",
+ " -sname ", Name, "@", Host,
+ " -s ", ?MODULE, " slave_start ", node(),
+ " ", Waiter,
+ " ", PaPaths]))}.
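+
+%% For example (a sketch, assuming the master node is ctrl@master):
+%%
+%%   mk_cmd("client1", hdlt_client, ["/home/tester/hdlt"], "",
+%%          hdlt_slave_waiter_0, "/usr/local/bin/erl")
+%%
+%% yields roughly:
+%%
+%%   /usr/local/bin/erl -detached -noinput  -sname hdlt_client@client1 \
+%%       -s hdlt_slave slave_start ctrl@master hdlt_slave_waiter_0  -pa /home/tester/hdlt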
+
+
+%% This function will be invoked on the slave, using the -s option of erl.
+%% It will wait for the master node to terminate.
+
+slave_start([Master, Waiter]) ->
+ spawn(?MODULE, wait_for_master_to_die, [Master, Waiter, silence]);
+slave_start([Master, Waiter, Level]) ->
+ spawn(?MODULE, wait_for_master_to_die, [Master, Waiter, Level]).
+
+
+wait_for_master_to_die(Master, Waiter, Level) ->
+ process_flag(trap_exit, true),
+ SName = lists:flatten(
+ io_lib:format("HDLT-SLAVE MASTER MONITOR[~p,~p,~p]",
+ [self(), node(), Master])),
+ ?SET_NAME(SName),
+ ?SET_LEVEL(Level),
+ erlang:monitor_node(Master, true),
+ {Waiter, Master} ! {self(), slave_started},
+ wloop(Master).
+
+wloop(Master) ->
+ ?DEBUG("await message", []),
+ receive
+ {nodedown, Master} ->
+ ?INFO("received master nodedown", []),
+ halt();
+ _Other ->
+ wloop(Master)
+ end.
+
+
+
diff --git a/lib/inets/examples/httpd_load_test/hdlt_ssl_client_cert.pem b/lib/inets/examples/httpd_load_test/hdlt_ssl_client_cert.pem
new file mode 120000
index 0000000000..41644a1098
--- /dev/null
+++ b/lib/inets/examples/httpd_load_test/hdlt_ssl_client_cert.pem
@@ -0,0 +1 @@
+../../test/httpc_SUITE_data/ssl_client_cert.pem \ No newline at end of file
diff --git a/lib/inets/examples/httpd_load_test/hdlt_ssl_server_cert.pem b/lib/inets/examples/httpd_load_test/hdlt_ssl_server_cert.pem
new file mode 120000
index 0000000000..41644a1098
--- /dev/null
+++ b/lib/inets/examples/httpd_load_test/hdlt_ssl_server_cert.pem
@@ -0,0 +1 @@
+../../test/httpc_SUITE_data/ssl_client_cert.pem \ No newline at end of file
diff --git a/lib/inets/examples/httpd_load_test/modules.mk b/lib/inets/examples/httpd_load_test/modules.mk
new file mode 100644
index 0000000000..9d0d7103d5
--- /dev/null
+++ b/lib/inets/examples/httpd_load_test/modules.mk
@@ -0,0 +1,44 @@
+#-*-makefile-*- ; force emacs to enter makefile-mode
+
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2010. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+
+SCRIPT_SKELETONS = \
+ hdlt.sh.skel
+
+CONF_SKELETONS = \
+ hdlt.config.skel
+
+CERT_FILES = \
+ hdlt_ssl_client_cert.pem \
+ hdlt_ssl_server_cert.pem
+
+README = HDLT_README
+
+MODULES = \
+ hdlt \
+ hdlt_ctrl \
+ hdlt_client \
+ hdlt_logger \
+ hdlt_random_html \
+ hdlt_server \
+ hdlt_slave
+
+INTERNAL_HRL_FILES = \
+ hdlt_logger.hrl
+
+