From 992ee6241d76b768d6091c27696afe3bc437a40f Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Lo=C3=AFc=20Hoguin?=
Date: Thu, 18 Jan 2024 11:13:51 +0100
Subject: Retry the read_urlencoded_body_too_large if timeout triggers

This is caused by the timeout being 1s after the period. When the
CI environment is overloaded, sometimes the timeout will trigger.
We retry, knowing that the timetrap will catch us if we retry too
much.
---
 test/req_SUITE.erl | 16 ++++++++++++++--
 1 file changed, 14 insertions(+), 2 deletions(-)

(limited to 'test/req_SUITE.erl')

diff --git a/test/req_SUITE.erl b/test/req_SUITE.erl
index 04da8fa..6e111bb 100644
--- a/test/req_SUITE.erl
+++ b/test/req_SUITE.erl
@@ -591,8 +591,20 @@ do_read_urlencoded_body_too_large(Path, Body, Config) ->
 		{<<"content-length">>, integer_to_binary(iolist_size(Body))}
 	]),
 	gun:data(ConnPid, Ref, fin, Body),
-	{response, _, 413, _} = gun:await(ConnPid, Ref, infinity),
-	gun:close(ConnPid).
+	Response = gun:await(ConnPid, Ref, infinity),
+	gun:close(ConnPid),
+	case Response of
+		{response, _, 413, _} ->
+			ok;
+		%% We got the wrong crash, likely because the environment
+		%% was overloaded and the timeout triggered. Try again.
+		{response, _, 408, _} ->
+			do_read_urlencoded_body_too_large(Path, Body, Config);
+		%% Timing issues make it possible for the connection to be
+		%% closed before the data went through. We retry.
+		{error, {stream_error, {closed, {error,closed}}}} ->
+			do_read_urlencoded_body_too_large(Path, Body, Config)
+	end.
 
 read_urlencoded_body_too_long(Config) ->
 	doc("application/x-www-form-urlencoded request body sent too slow. "
-- 
cgit v1.2.3