author     Björn Gustavsson <[email protected]>  2015-03-19 10:12:07 +0100
committer  Björn Gustavsson <[email protected]>  2015-04-10 12:33:24 +0200
commit     cb80bb4b49816699178bcbb5986c75515330a0a9 (patch)
tree       54949a0acc9d5c831121a7eecc037b3882625076
parent     74c7b439cf9f376eb1c772ee1d8bc9370f05c7e4 (diff)
Speed up timer_SUITE
The single test case in timer_SUITE is annoyingly slow. On average, its running time is about 4 minutes (estimated by a back-of-the-envelope calculation). Scale down the delay times in the main loop and the length of the timers started. The average running time should now be around 80 seconds.
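
A rough sketch of the back-of-the-envelope estimate behind these numbers (not part of the commit; the module name avg_delay_estimate and its function are made up for illustration): random:uniform(N) averages about (N + 1) / 2, so shrinking the multipliers used in the suite shrinks the average sleep and timer lengths proportionally.

%% Hypothetical helper, not from the suite: estimates the average
%% delays produced by the random factors before and after the change.
-module(avg_delay_estimate).
-export([summary/0]).

summary() ->
    Avg = fun(N) -> (N + 1) / 2 end,   % mean of random:uniform(N)
    OldSleep = Avg(200) * 10,          % main-loop sleep, old: ~1005 ms
    NewSleep = Avg(200) * 3,           % main-loop sleep, new: ~301.5 ms
    OldTimer = Avg(100) * 100,         % timer length, old: ~5050 ms
    NewTimer = Avg(100) * 47,          % timer length, new: ~2373.5 ms
    #{sleep => {OldSleep, NewSleep}, timer => {OldTimer, NewTimer}}.

With both the per-iteration sleep and the timer lengths cut to roughly a third to a half of their old averages, a run that previously took about four minutes can plausibly be expected to finish in well under two minutes, which is consistent with the 80-90 second figures quoted here and in the updated module comment.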
-rw-r--r--  lib/stdlib/test/timer_SUITE.erl | 34
1 file changed, 12 insertions(+), 22 deletions(-)
diff --git a/lib/stdlib/test/timer_SUITE.erl b/lib/stdlib/test/timer_SUITE.erl
index bea2b3fb2a..c4dfeff8e5 100644
--- a/lib/stdlib/test/timer_SUITE.erl
+++ b/lib/stdlib/test/timer_SUITE.erl
@@ -25,14 +25,11 @@
-include_lib("test_server/include/test_server.hrl").
-%% Test suite for timer module. This is a really nasty test it runs a
-%% lot of timeouts and then checks in the end if any of them was
-%% trigggered too early or if any late timeouts was much too
-%% late. What should be added is more testing of the interface
-%% functions I guess. But I don't have time for that now.
+%% Random test of the timer module. This is a really nasty test, as it
+%% runs a lot of timeouts and then checks in the end if any of them
+%% was triggered too early or if any late timeouts were much too late.
%%
-%% Expect it to run for at least 5-10 minutes!
-
+%% Running time on average is about 90 seconds.
%% The main test case in this module is "do_big_test", which
%% orders a large number of timeouts and measures how
@@ -40,15 +37,8 @@
%% also a number of other concurrent processes running "nrev" at the same
%% time. The result is analyzed afterwards by trying to check if the
%% measured values are reasonable. It is hard to determine what is
-%% reasonable on different machines therefore the test can sometimes
-%% fail, even though the timer module is ok. I have checked against
-%% previous versions of the timer module (which contained bugs) and it
-%% seems it fails every time when running the buggy timer modules.
-%%
-%% The solution is to rewrite the test suite. Possible strategies for a
-%% rewrite: smarter math on the measuring data, test cases with varying
-%% amount of load. The test suite should also include tests that test the
-%% interface of the timer module.
+%% reasonable on different machines; therefore the test can sometimes
+%% fail, even though the timer module is ok.
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -146,7 +136,7 @@ big_loop(C, N, Pids) ->
%%Pids2=Pids1,
%% wait a little while
- timer:sleep(random:uniform(200)*10),
+ timer:sleep(random:uniform(200)*3),
%% spawn zero, one or two nrev to get some load ;-/
Pids3 = start_nrev(Pids2, random:uniform(100)),
@@ -166,14 +156,14 @@ start_nrev(Pids, _N) ->
start_after_test(Pids, C, 1) ->
- TO1 = random:uniform(100)*100,
+ TO1 = random:uniform(100)*47,
[s_a_t(C, TO1)|Pids];
start_after_test(Pids, C, 2) ->
- TO1 = random:uniform(100)*100,
- TO2 = TO1 div random:uniform(3) + 200,
+ TO1 = random:uniform(100)*47,
+ TO2 = TO1 div random:uniform(3) + 101,
[s_a_t(C, TO1),s_a_t(C, TO2)|Pids];
start_after_test(Pids, C, N) ->
- TO1 = random:uniform(100)*100,
+ TO1 = random:uniform(100)*47,
start_after_test([s_a_t(C, TO1)|Pids], C, N-1).
s_a_t(C, TimeOut) ->
@@ -199,7 +189,7 @@ a_t(C, TimeOut) ->
maybe_start_i_test(Pids, C, 1) ->
%% ok do it
- TOI = random:uniform(100)*100,
+ TOI = random:uniform(53)*49,
CountI = random:uniform(10) + 3, % at least 4 times
[spawn_link(timer_SUITE, i_t, [C, TOI, CountI])|Pids];
maybe_start_i_test(Pids, _C, _) ->
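
For readers unfamiliar with the suite, the core measuring idea used by the s_a_t/a_t helpers can be sketched as follows. This is a minimal illustration, not code from timer_SUITE; the module name and the use of erlang:monotonic_time/1 (available from OTP 18) are assumptions made for the sketch.

%% Hypothetical sketch of measuring timer accuracy: request a timeout,
%% note when the message actually arrives, and return the deviation.
-module(timer_accuracy_sketch).
-export([measure/1]).

measure(TimeOut) when is_integer(TimeOut), TimeOut > 0 ->
    Start = erlang:monotonic_time(millisecond),
    {ok, _TRef} = timer:send_after(TimeOut, done),
    receive
        done ->
            Elapsed = erlang:monotonic_time(millisecond) - Start,
            %% A negative delta would mean the timer fired too early;
            %% a large positive delta means it fired much too late.
            {requested, TimeOut, elapsed, Elapsed, delta, Elapsed - TimeOut}
    end.

The suite runs many such measurements concurrently, with nrev processes adding CPU load, and then analyzes the collected deviations at the end of the run.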