author     Henrik Nord <[email protected]>  2014-01-21 15:12:30 +0100
committer  Henrik Nord <[email protected]>  2014-01-21 15:12:30 +0100
commit     65459a5c2d809a301b144463768690626a046d47 (patch)
tree       32d94acfdb18750afa8763072256e92ad6d19224 /lib/reltool/test/reltool_test_lib.erl
parent     58bdc6e7b509aec1a42458696fd73e700dc4b7d9 (diff)
parent     d6607e21d574ae49c49dc454f13b274d53af4bf7 (diff)
Merge branch 'hawk/reltool_test_server'
* hawk/reltool_test_server: Adapted reltool test server to common test usage of tc_status
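
In common_test, the Config list passed to a suite's end_per_testcase/2 callback carries a {tc_status, Status} entry describing how the test case ended (ok, {failed, Reason} or {skipped, Reason}); the change below makes reltool's private test driver hand its end_per_testcase callbacks the same entry. A minimal sketch of a callback reading it, with the io:format reporting invented for illustration rather than taken from this commit:

%% Sketch only: an end_per_testcase/2 clause inspecting tc_status the way
%% a common_test suite can; the printed messages are made up here.
end_per_testcase(_TestCase, Config) ->
    case proplists:get_value(tc_status, Config) of
        ok                -> ok;
        {failed, Reason}  -> io:format("test case failed: ~p~n", [Reason]);
        {skipped, Reason} -> io:format("test case skipped: ~p~n", [Reason])
    end,
    ok.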
Diffstat (limited to 'lib/reltool/test/reltool_test_lib.erl')
-rw-r--r--  lib/reltool/test/reltool_test_lib.erl  53
1 file changed, 44 insertions(+), 9 deletions(-)
diff --git a/lib/reltool/test/reltool_test_lib.erl b/lib/reltool/test/reltool_test_lib.erl
index 3485365ed9..530d0a9985 100644
--- a/lib/reltool/test/reltool_test_lib.erl
+++ b/lib/reltool/test/reltool_test_lib.erl
@@ -258,8 +258,8 @@ run_test([{Module, TC} | Rest], Config) ->
                     true ->
                         [do_run_test(Module, TC, NewConfig)]
                 end,
-            Module:end_per_suite(NewConfig),
-            Res ++ run_test(Rest, NewConfig);
+            CommonTestRes = worst_res(Res),
+            Res ++ run_test(Rest, [{tc_status,CommonTestRes}|NewConfig]);
         Error ->
             ?error("Test suite skipped: ~w~n", [Error]),
             [{skipped, Error}]
@@ -267,6 +267,36 @@ run_test([{Module, TC} | Rest], Config) ->
 run_test([], _Config) ->
     [].
 
+worst_res(Res) ->
+    NewRes = [{dummy, {ok,dummy, dummy}} | Res],
+    [{_,WorstRes}|_] = lists:sort(fun compare_res/2, NewRes),
+    common_test_res(WorstRes).
+
+common_test_res(ok) ->
+    ok;
+common_test_res({Res,_,Reason}) ->
+    common_test_res({Res,Reason});
+common_test_res({Res,Reason}) ->
+    case Res of
+        ok -> ok;
+        skip -> {skipped, Reason};
+        skipped -> {skipped, Reason};
+        failed -> {failed, Reason};
+        crash -> {failed, Reason}
+    end.
+
+% crash < failed < skip < ok
+compare_res({_,{ResA,_,_}},{_,{ResB,_,_}}) ->
+    res_to_int(ResA) < res_to_int(ResB).
+
+res_to_int(Res) ->
+    case Res of
+        ok -> 4;
+        skip -> 3;
+        failed -> 2;
+        crash -> 1
+    end.
+
 do_run_test(Module, all, Config) ->
     All = [{Module, Test} || Test <- Module:all()],
     run_test(All, Config);
@@ -290,9 +320,10 @@ eval_test_case(Mod, Fun, Config) ->
 test_case_evaluator(Mod, Fun, [Config]) ->
     NewConfig = Mod:init_per_testcase(Fun, Config),
-    R = apply(Mod, Fun, [NewConfig]),
-    Mod:end_per_testcase(Fun, NewConfig),
-    exit({test_case_ok, R}).
+    Res = apply(Mod, Fun, [NewConfig]),
+    CommonTestRes = common_test_res(Res),
+    Mod:end_per_testcase(Fun, [{tc_status,CommonTestRes}|NewConfig]),
+    exit({test_case_ok, Res}).
 
 wait_for_evaluator(Pid, Mod, Fun, Config) ->
     receive
@@ -307,13 +338,17 @@ wait_for_evaluator(Pid, Mod, Fun, Config) ->
         {'EXIT', Pid, {skipped, Reason}} ->
             log("<WARNING> Test case ~w skipped, because ~p~n",
                 [{Mod, Fun}, Reason]),
-            Mod:end_per_testcase(Fun, Config),
-            {skip, {Mod, Fun}, Reason};
+            Res = {skipped, {Mod, Fun}, Reason},
+            CommonTestRes = common_test_res(Res),
+            Mod:end_per_testcase(Fun, [{tc_status,CommonTestRes}|Config]),
+            Res;
         {'EXIT', Pid, Reason} ->
             log("<ERROR> Eval process ~w exited, because\n\t~p~n",
                 [{Mod, Fun}, Reason]),
-            Mod:end_per_testcase(Fun, Config),
-            {crash, {Mod, Fun}, Reason}
+            Res = {crash, {Mod, Fun}, Reason},
+            CommonTestRes = common_test_res(Res),
+            Mod:end_per_testcase(Fun, [{tc_status,CommonTestRes}|Config]),
+            Res
     end.
 
 flush() ->
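
Taken together, the added helpers rank the collected results by severity (crash < failed < skip < ok, per the comment in the diff), pick the worst one, and translate it into the tuple shape common_test uses for tc_status. A hypothetical evaluation, assuming result tuples in the {_, {Status, TestCase, Info}} shape that compare_res/2 matches on and that worst_res/1 were reachable from a shell:

%% Invented results; only the shape matters. The crash entry ranks worst
%% and worst_res/1 reports it in common_test form as {failed, Reason}.
Res = [{1, {ok,    {some_SUITE, tc_pass},  done}},
       {2, {skip,  {some_SUITE, tc_skip},  "not supported"}},
       {3, {crash, {some_SUITE, tc_crash}, badarg}}],
{failed, badarg} = worst_res(Res).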