author    Masatake Daimon <[email protected]>    2013-05-17 13:01:20 +0900
committer Masatake Daimon <[email protected]>    2013-05-20 16:36:51 +0900
commit    789490eca8382edca34bae593e88cdc5fd4fe444 (patch)
tree      5bdffdd123248ed68d02a7bc75a98d8d82235b9f /lib/inets/src/http_client
parent    89939207898fc68a2ee0c080e603aa65fb866b25 (diff)
Fix {stream, {self, once}} in httpc
Due to a bug in httpc_handler, the only difference between {stream, self} and {stream, {self, once}} was previously an extra Pid in the stream_start message: the handler immediately sent all remaining stream messages instead of waiting for httpc:stream_next/1 to be called.
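For context, here is a minimal sketch (not part of the commit) of how a caller is expected to drive {stream, {self, once}}; the module name and URL handling are illustrative, while the option list and message patterns follow the documented httpc API:

-module(stream_once_example).
-export([fetch/1]).

%% Minimal sketch of the {stream, {self, once}} flow this commit fixes.
%% Assumes the inets application is already started.
fetch(Url) ->
    {ok, RequestId} =
        httpc:request(get, {Url, []}, [],
                      [{sync, false}, {stream, {self, once}}]),
    receive
        %% With {self, once} the stream_start message carries the pid
        %% to pass to httpc:stream_next/1.
        {http, {RequestId, stream_start, _Headers, HandlerPid}} ->
            loop(RequestId, HandlerPid)
    end.

loop(RequestId, HandlerPid) ->
    %% Request exactly one more message from the handler; before this
    %% fix the handler streamed everything without waiting for this call.
    ok = httpc:stream_next(HandlerPid),
    receive
        {http, {RequestId, stream, BinBodyPart}} ->
            io:format("got ~p bytes~n", [byte_size(BinBodyPart)]),
            loop(RequestId, HandlerPid);
        {http, {RequestId, stream_end, _Headers}} ->
            ok
    end.

Before the fix, the stream and stream_end messages would arrive without any httpc:stream_next/1 calls, so {self, once} effectively behaved like plain self.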
Diffstat (limited to 'lib/inets/src/http_client')
-rw-r--r--  lib/inets/src/http_client/httpc_handler.erl | 54
1 file changed, 48 insertions(+), 6 deletions(-)
diff --git a/lib/inets/src/http_client/httpc_handler.erl b/lib/inets/src/http_client/httpc_handler.erl
index f6b13c2998..ef16cddc81 100644
--- a/lib/inets/src/http_client/httpc_handler.erl
+++ b/lib/inets/src/http_client/httpc_handler.erl
@@ -65,7 +65,7 @@
options, % #options{}
timers = #timers{}, % #timers{}
profile_name, % atom() - id of httpc_manager process.
- once % send | undefined
+ once = inactive % inactive | once
}).
@@ -231,6 +231,8 @@ init([Parent, Request, Options, ProfileName]) ->
ProxyOptions = handle_proxy_options(Request#request.scheme, Options),
Address = handle_proxy(Request#request.address, ProxyOptions),
{ok, State} =
+ %% #state.once should initially be 'inactive' because we
+ %% activate the socket at first regardless of the state.
case {Address /= Request#request.address, Request#request.scheme} of
{true, https} ->
connect_and_send_upgrade_request(Address, Request,
@@ -425,7 +427,9 @@ handle_cast({cancel, RequestId, From},
handle_cast(stream_next, #state{session = Session} = State) ->
activate_once(Session),
- {noreply, State#state{once = once}}.
+ %% Inactivate the #state.once here because we don't want
+ %% next_body_chunk/1 to activate the socket twice.
+ {noreply, State#state{once = inactive}}.
%%--------------------------------------------------------------------
@@ -478,6 +482,41 @@ handle_info({Proto, _Socket, Data},
NewMFA = {Module, whole_body, [NewBody, NewLength]},
{noreply, NewState#state{mfa = NewMFA,
request = NewRequest}};
+ {Module, decode_size,
+ [TotalChunk, HexList,
+ {MaxBodySize, BodySoFar, AccLength, MaxHeaderSize}]}
+ when BodySoFar =/= <<>> ->
+ ?hcrd("data processed - decode_size", []),
+ %% The response body is chunk-encoded. Steal decoded
+ %% chunks as much as possible to stream.
+ {_, Code, _} = StatusLine,
+ {NewBody, NewRequest} = stream(BodySoFar, Request, Code),
+ NewState = next_body_chunk(State),
+ NewMFA = {Module, decode_size,
+ [TotalChunk, HexList,
+ {MaxBodySize, NewBody, AccLength, MaxHeaderSize}]},
+ {noreply, NewState#state{mfa = NewMFA,
+ request = NewRequest}};
+ {Module, decode_data,
+ [ChunkSize, TotalChunk,
+ {MaxBodySize, BodySoFar, AccLength, MaxHeaderSize, false}]}
+ when TotalChunk =/= <<>> orelse BodySoFar =/= <<>> ->
+ ?hcrd("data processed - decode_data", []),
+ %% The response body is chunk-encoded. Steal decoded
+ %% chunks as much as possible to stream.
+ ChunkSizeToSteal = min(ChunkSize, byte_size(TotalChunk)),
+ <<StolenChunk:ChunkSizeToSteal/binary, NewTotalChunk/binary>> = TotalChunk,
+ StolenBody = <<BodySoFar/binary, StolenChunk/binary>>,
+ NewChunkSize = ChunkSize - ChunkSizeToSteal,
+ {_, Code, _} = StatusLine,
+
+ {NewBody, NewRequest} = stream(StolenBody, Request, Code),
+ NewState = next_body_chunk(State),
+ NewMFA = {Module, decode_data,
+ [NewChunkSize, NewTotalChunk,
+ {MaxBodySize, NewBody, AccLength, MaxHeaderSize, false}]},
+ {noreply, NewState#state{mfa = NewMFA,
+ request = NewRequest}};
NewMFA ->
?hcrd("data processed - new mfa", []),
activate_once(Session),
@@ -1027,11 +1066,15 @@ handle_http_msg({Version, StatusCode, ReasonPharse, Headers, Body},
status_line = StatusLine,
headers = Headers})
end;
-handle_http_msg({ChunkedHeaders, Body}, #state{headers = Headers} = State) ->
+handle_http_msg({ChunkedHeaders, Body},
+ #state{status_line = {_, Code, _}, headers = Headers} = State) ->
?hcrt("handle_http_msg",
[{chunked_headers, ChunkedHeaders}, {headers, Headers}]),
NewHeaders = http_chunk:handle_headers(Headers, ChunkedHeaders),
- handle_response(State#state{headers = NewHeaders, body = Body});
+ {NewBody, NewRequest} = stream(Body, State#state.request, Code),
+ handle_response(State#state{headers = NewHeaders,
+ body = NewBody,
+ request = NewRequest});
handle_http_msg(Body, #state{status_line = {_,Code, _}} = State) ->
?hcrt("handle_http_msg", [{code, Code}]),
{NewBody, NewRequest} = stream(Body, State#state.request, Code),
@@ -1070,8 +1113,7 @@ handle_http_body(Body, #state{headers = Headers,
"chunked" ->
?hcrt("handle_http_body - chunked", []),
case http_chunk:decode(Body, State#state.max_body_size,
- State#state.max_header_size,
- {Code, Request}) of
+ State#state.max_header_size) of
{Module, Function, Args} ->
?hcrt("handle_http_body - new mfa",
[{module, Module},