Diffstat (limited to 'lib/common_test')
-rw-r--r--  lib/common_test/test/ct_repeat_testrun_SUITE.erl | 60
1 file changed, 41 insertions(+), 19 deletions(-)
diff --git a/lib/common_test/test/ct_repeat_testrun_SUITE.erl b/lib/common_test/test/ct_repeat_testrun_SUITE.erl
index 632597c214..f8b6a379f6 100644
--- a/lib/common_test/test/ct_repeat_testrun_SUITE.erl
+++ b/lib/common_test/test/ct_repeat_testrun_SUITE.erl
@@ -66,25 +66,47 @@
%% there will be clashes with logging processes etc).
%%--------------------------------------------------------------------
init_per_suite(Config0) ->
- Config = ct_test_support:init_per_suite(Config0),
- DataDir = ?config(data_dir, Config),
- Suite1 = filename:join([DataDir,"a_test","r1_SUITE"]),
- Suite2 = filename:join([DataDir,"b_test","r2_SUITE"]),
- Opts0 = ct_test_support:get_opts(Config),
- Opts1 = Opts0 ++ [{suite,Suite1},{testcase,tc2},{label,timing1}],
- Opts2 = Opts0 ++ [{suite,Suite2},{testcase,tc2},{label,timing2}],
-
- %% Make sure both suites are compiled
- {1,0,{0,0}} = ct_test_support:run(ct,run_test,[Opts1],Config),
- {1,0,{0,0}} = ct_test_support:run(ct,run_test,[Opts2],Config),
-
- %% Time the shortest testcase to use for offset
- {_T0,{1,0,{0,0}}} = timer:tc(ct_test_support,run,[ct,run_test,[Opts1],Config]),
-
- %% -2 is to ensure we hit inside the target test case and not after
-% T = round(T0/1000000)-2,
- T=0,
- [{offset,T}|Config].
+ TTInfo = {_T,{_Scaled,ScaleVal}} = ct:get_timetrap_info(),
+ ct:pal("Timetrap info = ~w", [TTInfo]),
+ if ScaleVal > 1 ->
+ {skip,"Skip on systems running e.g. cover or debug!"};
+ ScaleVal =< 1 ->
+ Config = ct_test_support:init_per_suite(Config0),
+ DataDir = ?config(data_dir, Config),
+ Suite1 = filename:join([DataDir,"a_test","r1_SUITE"]),
+ Suite2 = filename:join([DataDir,"b_test","r2_SUITE"]),
+ Opts0 = ct_test_support:get_opts(Config),
+ Opts1 = Opts0 ++ [{suite,Suite1},{testcase,tc2},{label,timing1}],
+ Opts2 = Opts0 ++ [{suite,Suite2},{testcase,tc2},{label,timing2}],
+
+ %% Make sure both suites are compiled
+ {1,0,{0,0}} = ct_test_support:run(ct,run_test,[Opts1],Config),
+ {1,0,{0,0}} = ct_test_support:run(ct,run_test,[Opts2],Config),
+
+            %% Check if file I/O is too slow for reliable measurements
+ Opts3 = Opts0 ++ [{suite,Suite1},{testcase,tc1},{label,timing3}],
+ {T,_} =
+ timer:tc(
+ fun() ->
+ {1,0,{0,0}} = ct_test_support:run(ct,run_test,
+ [Opts3],Config),
+ {1,0,{0,0}} = ct_test_support:run(ct,run_test,
+ [Opts3],Config)
+ end),
+ %% The time to compare with here must match the timeout value
+ %% in the test suite. Accept 30% logging overhead (26 sec total).
+ if T > 26000000 ->
+                    ct:pal("Timing test took ~w sec (max 26 sec expected). "
+ "Skipping the suite!",
+ [trunc(T/1000000)]),
+ ct_test_support:end_per_suite(Config),
+ {skip,"File I/O too slow for this suite"};
+ true ->
+ ct:pal("Timing test took ~w sec. Proceeding...",
+ [trunc(T/1000000)]),
+ [{offset,0}|Config]
+ end
+    end.

end_per_suite(Config) ->
ct_test_support:end_per_suite(Config).
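
The guard at the top of the new init_per_suite/1 keys off Common Test's timetrap scaling: ct:get_timetrap_info/0 reports a scale value greater than 1 when the emulator runs under cover, debug, or similar instrumentation, in which case any timing measurement would be unreliable. A minimal sketch of that skip pattern, assuming it runs inside a Common Test suite (the skip message here is illustrative):

    %% Skip a timing-sensitive suite whenever timetraps are scaled,
    %% e.g. under cover or a debug-compiled emulator.
    init_per_suite(Config) ->
        {_Time,{_Scaled,ScaleVal}} = ct:get_timetrap_info(),
        if ScaleVal > 1 ->
                {skip,"Timetraps are scaled; timing would be unreliable"};
           true ->
                Config
        end.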
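The file-I/O probe is built on timer:tc/1, which runs a fun and returns the elapsed time in microseconds together with the fun's result, which is why the 26-second budget appears as 26000000 in the comparison. A sketch of the same measure-then-skip idea under that assumption; check_io_speed/2 is a hypothetical helper, not part of the suite:

    %% Run Fun under timer:tc/1 and skip when it exceeds a budget
    %% given in seconds. Hypothetical helper, for illustration only.
    check_io_speed(Fun, MaxSeconds) ->
        {T,_Result} = timer:tc(Fun),            % T is in microseconds
        if T > MaxSeconds * 1000000 ->
                {skip,
                 lists:flatten(
                   io_lib:format("took ~w sec, limit ~w sec",
                                 [T div 1000000, MaxSeconds]))};
           true ->
                ok
        end.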