path: root/lib/dialyzer/src/dialyzer_worker.erl
author    Hans Bolinder <[email protected]>    2017-01-24 08:46:45 +0100
committer Hans Bolinder <[email protected]>    2017-02-03 08:58:01 +0100
commit    c2a3a1cdf56009630532805f9bed830e2041427d (patch)
tree      c18c36f22cd225f48e5dbae1d1399048e70e58ad    /lib/dialyzer/src/dialyzer_worker.erl
parent    416711a87dcc937d78990ada5da0b8df98a8e1ee (diff)
dialyzer: Do not spawn all workers at once
Spawning all worker processes at once has the potential to increase peak memory consumption. The implementation is therefore slightly modified: at most `20 * dialyzer_utils:parallelism()' processes are running in parallel. A factor of 20 is quite large, but it seems necessary to keep all schedulers busy, according to the Observer application's Load Charts with a 100 ms update interval.
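The throttling itself lives on the coordinator side and is not visible in this file's diff. As a rough, hypothetical sketch of the idea (the names throttled_spawn/2, split_jobs/2 and the {job_done, Pid} message are illustrative assumptions, not the actual dialyzer_coordinator API), a coordinator could start at most `20 * dialyzer_utils:parallelism()' workers up front and spawn the rest one by one as running workers finish:

    %% Illustrative sketch only: bound the number of concurrently
    %% running workers instead of spawning them all at once.
    throttled_spawn(Jobs, SpawnFun) ->
        MaxJobs = 20 * dialyzer_utils:parallelism(),
        {Now, Later} = split_jobs(Jobs, MaxJobs),
        lists:foreach(SpawnFun, Now),          %% initial batch
        throttle_loop(Later, SpawnFun).

    split_jobs(Jobs, Max) when length(Jobs) =< Max ->
        {Jobs, []};
    split_jobs(Jobs, Max) ->
        lists:split(Max, Jobs).

    throttle_loop([], _SpawnFun) ->
        ok;
    throttle_loop([Job | Rest], SpawnFun) ->
        receive
            {job_done, _Pid} ->                %% assumed completion signal
                SpawnFun(Job),                 %% refill the pool by one
                throttle_loop(Rest, SpawnFun)
        end.

Under such a scheme, a worker reporting back to the coordinator (here the assumed {job_done, Pid} message) is what releases the next spawn, keeping the number of live worker processes, and hence peak memory, bounded.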
Diffstat (limited to 'lib/dialyzer/src/dialyzer_worker.erl')
-rw-r--r--    lib/dialyzer/src/dialyzer_worker.erl    42
1 file changed, 17 insertions(+), 25 deletions(-)
diff --git a/lib/dialyzer/src/dialyzer_worker.erl b/lib/dialyzer/src/dialyzer_worker.erl
index 1a251b5307..af0f2e9e08 100644
--- a/lib/dialyzer/src/dialyzer_worker.erl
+++ b/lib/dialyzer/src/dialyzer_worker.erl
@@ -56,10 +56,14 @@ launch(Mode, Job, InitData, Coordinator) ->
%%--------------------------------------------------------------------
-init(#state{job = SCC, mode = Mode, init_data = InitData} = State) when
+init(#state{job = SCC, mode = Mode, init_data = InitData,
+ coordinator = Coordinator} = State) when
Mode =:= 'typesig'; Mode =:= 'dataflow' ->
- DependsOn = dialyzer_succ_typings:find_depends_on(SCC, InitData),
- ?debug("Deps ~p: ~p\n",[SCC, DependsOn]),
+ DependsOnSCCs = dialyzer_succ_typings:find_depends_on(SCC, InitData),
+ ?debug("~w: Deps ~p: ~p\n", [self(), SCC, DependsOnSCCs]),
+ Pids = dialyzer_coordinator:sccs_to_pids(DependsOnSCCs, Coordinator),
+ ?debug("~w: PidsDeps ~p\n", [self(), Pids]),
+ DependsOn = [{Pid, erlang:monitor(process, Pid)} || Pid <- Pids],
loop(updating, State#state{depends_on = DependsOn});
init(#state{mode = Mode} = State) when
Mode =:= 'compile'; Mode =:= 'warnings' ->
@@ -67,7 +71,7 @@ init(#state{mode = Mode} = State) when
loop(updating, #state{mode = Mode} = State) when
Mode =:= 'typesig'; Mode =:= 'dataflow' ->
- ?debug("Update: ~p\n",[State#state.job]),
+ ?debug("~w: Update: ~p\n", [self(), State#state.job]),
NextStatus =
case waits_more_success_typings(State) of
true -> waiting;
@@ -76,11 +80,11 @@ loop(updating, #state{mode = Mode} = State) when
loop(NextStatus, State);
loop(waiting, #state{mode = Mode} = State) when
Mode =:= 'typesig'; Mode =:= 'dataflow' ->
- ?debug("Wait: ~p\n",[State#state.job]),
+ ?debug("~w: Wait: ~p\n", [self(), State#state.job]),
NewState = wait_for_success_typings(State),
loop(updating, NewState);
loop(running, #state{mode = 'compile'} = State) ->
- dialyzer_coordinator:request_activation(State#state.coordinator),
+ request_activation(State),
?debug("Compile: ~s\n",[State#state.job]),
Result =
case start_compilation(State) of
@@ -92,40 +96,28 @@ loop(running, #state{mode = 'compile'} = State) ->
end,
report_to_coordinator(Result, State);
loop(running, #state{mode = 'warnings'} = State) ->
- dialyzer_coordinator:request_activation(State#state.coordinator),
+ request_activation(State),
?debug("Warning: ~s\n",[State#state.job]),
Result = collect_warnings(State),
report_to_coordinator(Result, State);
loop(running, #state{mode = Mode} = State) when
Mode =:= 'typesig'; Mode =:= 'dataflow' ->
request_activation(State),
- ?debug("Run: ~p\n",[State#state.job]),
+ ?debug("~w: Run: ~p\n", [self(), State#state.job]),
NotFixpoint = do_work(State),
- ok = broadcast_done(State),
report_to_coordinator(NotFixpoint, State).
waits_more_success_typings(#state{depends_on = Depends}) ->
Depends =/= [].
-broadcast_done(#state{job = SCC, init_data = InitData,
- coordinator = Coordinator}) ->
- RequiredBy = dialyzer_succ_typings:find_required_by(SCC, InitData),
- Callers = dialyzer_coordinator:sccs_to_pids(RequiredBy, Coordinator),
- send_done(Callers, SCC).
-
-send_done(Callers, SCC) ->
- ?debug("Sending ~p: ~p\n",[SCC, Callers]),
- SendSTFun = fun(PID) -> PID ! {done, SCC} end,
- lists:foreach(SendSTFun, Callers).
-
wait_for_success_typings(#state{depends_on = DependsOn} = State) ->
receive
- {done, SCC} ->
- ?debug("GOT ~p: ~p\n",[State#state.job, SCC]),
- State#state{depends_on = DependsOn -- [SCC]}
+ {'DOWN', Ref, process, Pid, _Info} ->
+ ?debug("~w: ~p got DOWN: ~p\n", [self(), State#state.job, Pid]),
+ State#state{depends_on = DependsOn -- [{Pid, Ref}]}
after
5000 ->
- ?debug("Still Waiting ~p: ~p\n",[State#state.job, DependsOn]),
+ ?debug("~w: Still Waiting ~p:\n ~p\n", [self(), State#state.job, DependsOn]),
State
end.
@@ -139,7 +131,7 @@ do_work(#state{mode = Mode, job = Job, init_data = InitData}) ->
end.
report_to_coordinator(Result, #state{job = Job, coordinator = Coordinator}) ->
- ?debug("Done: ~p\n",[Job]),
+ ?debug("~w: Done: ~p\n",[self(), Job]),
dialyzer_coordinator:job_done(Job, Result, Coordinator).
start_compilation(#state{job = Job, init_data = InitData}) ->