From 671b2bd68fad4f793e485051329438a9e084db0a Mon Sep 17 00:00:00 2001
From: Lukas Larsson
Date: Tue, 8 May 2012 17:28:46 +0200
Subject: Add framework to ts to run benchmarks

---
 HOWTO/BENCHMARKS.md                  | 73 +++++++++++++++++++++++++++++
 lib/test_server/src/Makefile         |  3 +-
 lib/test_server/src/ts.erl           | 44 +++++++++++++----
 lib/test_server/src/ts_benchmark.erl | 91 ++++++++++++++++++++++++++++++++++++
 lib/test_server/src/ts_lib.erl       | 12 ++++-
 5 files changed, 212 insertions(+), 11 deletions(-)
 create mode 100644 HOWTO/BENCHMARKS.md
 create mode 100644 lib/test_server/src/ts_benchmark.erl

diff --git a/HOWTO/BENCHMARKS.md b/HOWTO/BENCHMARKS.md
new file mode 100644
index 0000000000..361d99256d
--- /dev/null
+++ b/HOWTO/BENCHMARKS.md
@@ -0,0 +1,73 @@
+Benchmarking Erlang/OTP
+=======================
+
+The Erlang/OTP source tree contains a number of benchmarks. The same framework
+is used to run these benchmarks as is used to run tests. Therefore, in order to
+run benchmarks, you have to [release the tests][] just as you normally would.
+
+Note that many of these benchmarks were developed to test a specific feature
+under a specific setting. We strive to keep the benchmarks up-to-date, but alas
+time is not an endless resource, so some benchmarks will be outdated and
+irrelevant.
+
+Running the benchmarks
+----------------------
+
+As with testing, `ts` is used to run the benchmarks. Before running any
+benchmarks you have to [install the tests][]. To get a listing of all
+available benchmarks, call `ts:benchmarks()`.
+
+To run all benchmarks, call `ts:bench()`. This will run all benchmarks using
+the emulator which is in your `$PATH` (note that this does not have to be the
+same emulator that the benchmarks were built with). All the results of the
+benchmarks are put in a folder in `$TESTROOT/test_server/` called
+`YYYY_MO_DDTHH_MI_SS`.
+
+Each benchmark is run multiple times and the data for all runs is collected in
+the files within the benchmark folder. All benchmarks are written so that
+higher values are better.
+
+Writing benchmarks
+------------------
+
+Benchmarks are just normal testcases in Common Test suites. They are marked as
+benchmarks by being included in the `AppName_bench.spec` which is located in
+`lib/AppName/test/` for the applications which have benchmarks. Note that you
+might want to add a skip clause to `AppName.spec` for the benchmarks if you do
+not want them to be run in the nightly tests.
+
+Results of benchmarks are sent using the ct_event mechanism and are
+automatically collected and formatted by ts.
+
+    ct_event:notify(
+       #event{name = benchmark_data,
+              data = [{value,TPS}]}).
+
+The application, suite and testcase associated with the value are automatically
+detected. If you want to supply your own, you can include `suite` and/or `name`
+with the data, e.g.
+
+    ct_event:notify(
+       #event{name = benchmark_data,
+              data = [{suite,"erts_bench"},
+                      {name,"ets_transactions_per_sec"},
+                      {value,TPS}]}).
+
+The reason for using the internal ct_event and not ct is that the benchmark
+code has to be backwards compatible with at least R14.
+
+The value which is reported should be as raw as possible, i.e. you should not
+do any averaging of the value before reporting it. The tools we use to collect
+the benchmark data over time will do averages, means, stddev and more with the
+data, so the more data which is sent using `ct_event`, the better.
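+
+For illustration, a benchmark suite could be pointed out by a spec file entry
+such as the following (the directory and suite names here are made-up
+examples):
+
+    {suites, "../erts_bench_test", [ets_bench_SUITE]}.
+
+A minimal suite reporting one value might then look like this sketch:
+
+    -module(ets_bench_SUITE).
+    -include_lib("common_test/include/ct_event.hrl").
+
+    -export([all/0, insert_speed/1, insert/2]).
+
+    all() -> [insert_speed].
+
+    insert_speed(_Config) ->
+        T = ets:new(bench_tab, []),
+        N = 100000,
+        {Micros, ok} = timer:tc(?MODULE, insert, [T, N]),
+        %% Report inserts per second, i.e. a value where higher is better
+        ct_event:notify(#event{name = benchmark_data,
+                               data = [{value, N / (Micros / 1000000)}]}),
+        ok.
+
+    insert(_T, 0) -> ok;
+    insert(T, N) ->
+        ets:insert(T, {N, N}),
+        insert(T, N - 1).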
+
+Viewing benchmarks
+------------------
+
+At the time of writing this HOWTO, the tool for viewing benchmark results is
+not available as open source. This will hopefully change in the near future.
+
+
+  [release the tests]: README.testing.md#releasing-tests
+  [install the tests]: README.testing.md#configuring-the-test-environment
+
diff --git a/lib/test_server/src/Makefile b/lib/test_server/src/Makefile
index a75855eaab..b47cf9ff64 100644
--- a/lib/test_server/src/Makefile
+++ b/lib/test_server/src/Makefile
@@ -57,7 +57,8 @@ TS_MODULES= \
 	ts_autoconf_win32 \
 	ts_autoconf_vxworks \
 	ts_install \
-	ts_install_cth
+	ts_install_cth \
+	ts_benchmark
 
 TARGET_MODULES= $(MODULES:%=$(EBIN)/%)
 TS_TARGET_MODULES= $(TS_MODULES:%=$(EBIN)/%)
diff --git a/lib/test_server/src/ts.erl b/lib/test_server/src/ts.erl
index 4899f38d2b..75631216b0 100644
--- a/lib/test_server/src/ts.erl
+++ b/lib/test_server/src/ts.erl
@@ -28,6 +28,7 @@
 	 clean/0, clean/1,
 	 tests/0, tests/1,
 	 install/0, install/1, index/0,
+	 bench/0, bench/1, bench/2, benchmarks/0,
 	 estone/0, estone/1,
 	 cross_cover_analyse/1,
 	 compile_testcases/0, compile_testcases/1,
@@ -48,12 +49,12 @@
 %%% |                +------  ts_make
 %%% |                |
 %%% +-- ts_run  -----+
-%%%                  |  ts_filelib
-%%%                  +------  ts_make_erl
-%%%                  |
-%%%                  +------  ts_reports (indirectly)
-%%%
-%%%
+%%% |                |  ts_filelib
+%%% |                +------  ts_make_erl
+%%% |                |
+%%% |                +------  ts_reports (indirectly)
+%%% |
+%%% +-- ts_benchmark
 %%%
 %%% The modules ts_lib and ts_filelib contains utilities used by
 %%% the other modules.
@@ -81,6 +82,7 @@
 %%%              of the tests run.
 %%% ts_lib       Miscellanous utility functions, each used by several
 %%%              other modules.
+%%% ts_benchmark Supervises OTP benchmarks and collects results.
 %%%----------------------------------------------------------------------
 
 -include_lib("kernel/include/file.hrl").
@@ -128,7 +130,7 @@ help(installed) ->
      " ts:run(Spec, Mod) - Run a single test suite.\n",
      " ts:run(Spec, Mod, Case)\n",
      "                   - Run a single test case.\n",
-     "   All above run functions can have the additional Options argument\n",
+     "   All above run functions can have an additional Options argument\n",
      "   which is a list of options.\n",
      "\n",
      "Run options supported:\n",
@@ -158,7 +160,7 @@ help(installed) ->
      " {ctp | ctpl, Mod, Func}\n",
      " {ctp | ctpl, Mod, Func, Arity}\n",
      "\n",
-     "Support functions\n",
+     "Support functions:\n",
      " ts:tests()      - Shows all available families of tests.\n",
      " ts:tests(Spec)  - Shows all available test modules in Spec,\n",
      "                   i.e. ../Spec_test/*_SUITE.erl\n",
@@ -179,6 +181,13 @@ help(installed) ->
      "                 - Compile all testcases for usage in a cross ~n"
      "                   compile environment."
      " \n"
+     "Benchmark functions:\n"
+     " ts:benchmarks() - Get all available families of benchmarks\n"
+     " ts:bench()      - Runs all benchmarks\n"
+     " ts:bench(Spec)  - Runs all benchmarks in the given spec file.\n"
+     "                   The spec file is actually ../*_test/Spec_bench.spec\n\n"
+     " ts:bench can take the same Options argument as ts:run.\n"
+     "\n"
      "Installation (already done):\n"
     ],
    show_help([H,?install_help]).
@@ -491,6 +500,25 @@ tests(Spec) ->
     {ok, Cwd} = file:get_cwd(),
     ts_lib:suites(Cwd, atom_to_list(Spec)).
 
+%% Benchmark related functions
+
+bench() ->
+    bench([]).
+
+bench(Opts) when is_list(Opts) ->
+    bench(benchmarks(),Opts);
+bench(Spec) ->
+    bench([Spec],[]).
+
+bench(Spec, Opts) when is_atom(Spec) ->
+    bench([Spec],Opts);
+bench(Specs, Opts) ->
+    check_and_run(fun(Vars) -> ts_benchmark:run(Specs, Opts, Vars) end).
+
+benchmarks() ->
+    ts_benchmark:benchmarks().
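+
+%% Example usage from the Erlang shell (the spec name below is only an
+%% illustration; use one of the names returned by ts:benchmarks/0):
+%%
+%%   ts:benchmarks().                 % list available benchmark specs
+%%   ts:bench().                      % run every released benchmark
+%%   ts:bench(stdlib_bench, [batch]). % run one spec with ordinary ts run options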
+
+
 
 %%
 %% estone/0, estone/1
diff --git a/lib/test_server/src/ts_benchmark.erl b/lib/test_server/src/ts_benchmark.erl
new file mode 100644
index 0000000000..516d22fd2d
--- /dev/null
+++ b/lib/test_server/src/ts_benchmark.erl
@@ -0,0 +1,91 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012-2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(ts_benchmark).
+
+-include_lib("common_test/include/ct_event.hrl").
+-include_lib("kernel/include/file.hrl").
+-include("ts.hrl").
+
+-export([benchmarks/0,
+         run/3]).
+
+%% gen_event callbacks
+-export([init/1, handle_event/2]).
+
+benchmarks() ->
+    {ok, Cwd} = file:get_cwd(),
+    Benches = filelib:wildcard(
+                filename:join([Cwd,"..","*_test","*_bench.spec"])),
+    [begin
+         Base = filename:basename(N),
+         list_to_atom(string:substr(Base,1,string:rstr(Base,"_")-1))
+     end || N <- Benches].
+
+run(Specs, Opts, Vars) ->
+    {ok, Cwd} = file:get_cwd(),
+    {{YY,MM,DD},{HH,Mi,SS}} = calendar:local_time(),
+    BName = lists:concat([YY,"_",MM,"_",DD,"T",HH,"_",Mi,"_",SS]),
+    BDir = filename:join([Cwd,BName]),
+    file:make_dir(BDir),
+    [ts_run:run(atom_to_list(Spec),
+                [{spec, [atom_to_list(Spec)++"_bench.spec"]}],
+                [{event_handler, {ts_benchmark, [Spec,BDir]}}|Opts],Vars)
+     || Spec <- Specs],
+    file:delete(filename:join(Cwd,"latest_benchmark")),
+    {ok,D} = file:open(filename:join(Cwd,"latest_benchmark"),[write]),
+    io:format(D,BDir,[]),
+    file:close(D).
+
+
+%%%===================================================================
+%%% gen_event callbacks
+%%%===================================================================
+
+-record(state, { spec, suite, tc, stats_dir}).
+
+init([Spec,Dir]) ->
+    {ok, #state{ spec = Spec, stats_dir = Dir }}.
+
+handle_event(#event{name = tc_start, data = {Suite,Tc}}, State) ->
+    {ok,State#state{ suite = Suite, tc = Tc}};
+handle_event(#event{name = benchmark_data, data = Data}, State) ->
+    Spec = proplists:get_value(application, Data, State#state.spec),
+    Suite = proplists:get_value(suite, Data, State#state.suite),
+    Tc = proplists:get_value(name, Data, State#state.tc),
+    Value = proplists:get_value(value, Data),
+    {ok, D} = file:open(filename:join(
+                          [State#state.stats_dir,
+                           lists:concat([e(Spec),"-",e(Suite),"-",
+                                         e(Tc),".ebench"])]),
+                        [append]),
+    io:format(D, "~p~n",[Value]),
+    file:close(D),
+    {ok, State};
+handle_event(_Event, State) ->
+    {ok, State}.
+
+
+e(Atom) when is_atom(Atom) ->
+    Atom;
+e(Str) when is_list(Str) ->
+    lists:map(fun($/) ->
+                      $\\;
+                 (C) ->
+                      C
+              end,Str).
diff --git a/lib/test_server/src/ts_lib.erl b/lib/test_server/src/ts_lib.erl
index d521d2beda..93d5d78289 100644
--- a/lib/test_server/src/ts_lib.erl
+++ b/lib/test_server/src/ts_lib.erl
@@ -108,8 +108,16 @@ interesting_logs(Dir) ->
 
 specs(Dir) ->
     Specs = filelib:wildcard(filename:join([filename:dirname(Dir),
-                                            "*_test", "*.{dyn,}spec"])),
-    sort_tests([filename_to_atom(Name) || Name <- Specs]).
+                                            "*_test", "*.{dyn,}spec"])),
+    % Filter away all specs which end with _bench.spec (suffix matched reversed)
+    NoBench = fun(SpecName) ->
+                      case lists:reverse(SpecName) of
+                          "ceps.hcneb_"++_ -> false;
+                          _ -> true
+                      end
+              end,
+
+    sort_tests([filename_to_atom(Name) || Name <- Specs, NoBench(Name)]).
 
 suites(Dir, Spec) ->
     Glob=filename:join([filename:dirname(Dir), Spec++"_test",
-- 
cgit v1.2.3
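
Each run directory created by `ts_benchmark:run/3` contains one
`Spec-Suite-Testcase.ebench` file per benchmark testcase, holding one reported
value per line (written with `io:format("~p~n", ...)`, i.e. without a trailing
dot), and the `latest_benchmark` file next to the run directories holds the
path of the most recent run. A minimal sketch for reading such results back,
assuming that layout (the module and function names are made up for this
illustration):

    -module(bench_results).
    -export([latest_dir/0, values/1, average/1]).

    %% Path of the most recent benchmark run, as written by ts_benchmark:run/3.
    latest_dir() ->
        {ok, Bin} = file:read_file("latest_benchmark"),
        binary_to_list(Bin).

    %% All values in one .ebench file: one Erlang term per line, no trailing dot.
    values(File) ->
        {ok, Bin} = file:read_file(File),
        [begin
             {ok, Tokens, _} = erl_scan:string(Line ++ "."),
             {ok, Term} = erl_parse:parse_term(Tokens),
             Term
         end || Line <- string:tokens(binary_to_list(Bin), "\n")].

    %% Simple mean of the collected values.
    average(Values) ->
        lists:sum(Values) / length(Values).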