Diffstat (limited to 'lib/compiler/src')
-rw-r--r--  lib/compiler/src/beam_block.erl      108
-rw-r--r--  lib/compiler/src/beam_bsm.erl         31
-rw-r--r--  lib/compiler/src/beam_dead.erl         7
-rw-r--r--  lib/compiler/src/beam_flatten.erl      1
-rw-r--r--  lib/compiler/src/beam_jump.erl        22
-rw-r--r--  lib/compiler/src/beam_split.erl        3
-rw-r--r--  lib/compiler/src/beam_utils.erl       12
-rw-r--r--  lib/compiler/src/beam_validator.erl    4
-rw-r--r--  lib/compiler/src/compile.erl          65
-rw-r--r--  lib/compiler/src/sys_core_fold.erl    13
10 files changed, 164 insertions, 102 deletions
diff --git a/lib/compiler/src/beam_block.erl b/lib/compiler/src/beam_block.erl
index 85d332c56e..6a35191f6e 100644
--- a/lib/compiler/src/beam_block.erl
+++ b/lib/compiler/src/beam_block.erl
@@ -58,13 +58,6 @@ blockify(Is) ->
blockify([{loop_rec,{f,Fail},{x,0}},{loop_rec_end,_Lbl},{label,Fail}|Is], Acc) ->
%% Useless instruction sequence.
blockify(Is, Acc);
-blockify([{get_map_elements,F,S,{list,Gets}}|Is0], Acc) ->
- %% A get_map_elements instruction is only safe at the beginning of
- %% a block because of the failure label.
- {Ss,Ds} = beam_utils:split_even(Gets),
- I = {set,Ds,[S|Ss],{get_map_elements,F}},
- {Block,Is} = collect_block(Is0, [I]),
- blockify(Is, [{block,Block}|Acc]);
blockify([I|Is0]=IsAll, Acc) ->
case collect(I) of
error -> blockify(Is0, [I|Acc]);
@@ -159,14 +152,43 @@ find_fixpoint(OptFun, Is0) ->
end.
%% move_allocates(Is0) -> Is
-%% Move allocate instructions upwards in the instruction stream, in the
-%% hope of getting more possibilities for optimizing away moves later.
+%% Move allocate instructions upwards in the instruction stream
+%% (within the same block), in the hope of getting more possibilities
+%% for optimizing away moves later.
%%
-%% NOTE: Moving allocation instructions is only safe because it is done
-%% immediately after code generation so that we KNOW that if {x,X} is
-%% initialized, all x registers with lower numbers are also initialized.
-%% That assumption may not be true after other optimizations, such as
-%% the beam_utils:live_opt/1 optimization.
+%% For example, we can transform the following instructions:
+%%
+%% get_tuple_element x(1) Element => x(2)
+%% allocate_zero StackSize 3 %% x(0), x(1), x(2) are live
+%%
+%% to the following instructions:
+%%
+%% allocate_zero StackSize 2 %% x(0) and x(1) are live
+%% get_tuple_element x(1) Element => x(2)
+%%
+%% NOTE: Since the beam_reorder pass has been run, it is no longer
+%% safe to assume that if x(N) is initialized, then all lower-numbered
+%% x registers are also initialized.
+%%
+%% For example, in general it is not safe to transform the following
+%% instructions:
+%%
+%% get_tuple_element x(0) Element => x(1)
+%% allocate_zero StackSize 3 %% x(0), x(1), x(2) are live
+%%
+%% to the following instructions:
+%%
+%% allocate_zero StackSize 3
+%% get_tuple_element x(0) Element => x(1)
+%%
+%% The transformation is safe if and only if x(1) has been
+%% initialized previously. Unfortunately, beam_reorder may have moved
+%% a get_tuple_element instruction so that x(1) is not always
+%% initialized when this code is reached. To find whether or not x(1)
+%% is initialized, we would need to analyze all code preceding these
+%% two instructions (across branches). Since we currently don't have
+%% any practical mechanism for doing that, we will have to
+%% conservatively assume that the transformation is unsafe.
move_allocates([{block,Bl0}|Is]) ->
Bl = move_allocates_1(reverse(Bl0), []),
@@ -175,38 +197,26 @@ move_allocates([I|Is]) ->
[I|move_allocates(Is)];
move_allocates([]) -> [].
-move_allocates_1([{set,[],[],{alloc,_,_}=Alloc}|Is0], Acc0) ->
- {Is,Acc} = move_allocates_2(Alloc, Is0, Acc0),
- move_allocates_1(Is, Acc);
+move_allocates_1([I|Is], [{set,[],[],{alloc,Live0,Info}}|Acc]=Acc0) ->
+ case {alloc_may_pass(I),alloc_live_regs(I, Live0)} of
+ {false,_} ->
+ move_allocates_1(Is, [I|Acc0]);
+ {true,not_possible} ->
+ move_allocates_1(Is, [I|Acc0]);
+ {true,Live} when is_integer(Live) ->
+ A = {set,[],[],{alloc,Live,Info}},
+ move_allocates_1(Is, [A,I|Acc])
+ end;
move_allocates_1([I|Is], Acc) ->
move_allocates_1(Is, [I|Acc]);
-move_allocates_1([], Is) -> Is.
-
-move_allocates_2({alloc,Live,Info}, [{set,[],[],{alloc,Live0,Info0}}|Is], Acc) ->
- Live = Live0, % Assertion.
- Alloc = {alloc,Live,combine_alloc(Info0, Info)},
- move_allocates_2(Alloc, Is, Acc);
-move_allocates_2({alloc,Live,Info}=Alloc0, [I|Is]=Is0, Acc) ->
- case alloc_may_pass(I) of
- false ->
- {Is0,[{set,[],[],Alloc0}|Acc]};
- true ->
- Alloc = {alloc,alloc_live_regs(I, Live),Info},
- move_allocates_2(Alloc, Is, [I|Acc])
- end;
-move_allocates_2(Alloc, [], Acc) ->
- {[],[{set,[],[],Alloc}|Acc]}.
+move_allocates_1([], Acc) -> Acc.
alloc_may_pass({set,_,_,{alloc,_,_}}) -> false;
alloc_may_pass({set,_,_,{set_tuple_element,_}}) -> false;
-alloc_may_pass({set,_,_,{get_map_elements,_}}) -> false;
alloc_may_pass({set,_,_,put_list}) -> false;
alloc_may_pass({set,_,_,put}) -> false;
alloc_may_pass({set,_,_,_}) -> true.
-combine_alloc({_,Ns,Nh1,Init}, {_,nostack,Nh2,[]}) ->
- {zero,Ns,beam_utils:combine_heap_needs(Nh1, Nh2),Init}.
-
%% opt([Instruction]) -> [Instruction]
%% Optimize the instruction stream inside a basic block.
@@ -217,8 +227,6 @@ opt([{set,_,_,{line,_}}=Line1,
{set,[D2],[{integer,Idx2},Reg],{bif,element,{f,0}}}=I2|Is])
when Idx1 < Idx2, D1 =/= D2, D1 =/= Reg, D2 =/= Reg ->
opt([Line2,I2,Line1,I1|Is]);
-opt([{set,[_|_],_Ss,{get_map_elements,_F}}=I|Is]) ->
- [I|opt(Is)];
opt([{set,Ds0,Ss,Op}|Is0]) ->
{Ds,Is} = opt_moves(Ds0, Is0),
[{set,Ds,Ss,Op}|opt(Is)];
@@ -393,10 +401,19 @@ eliminate_use_of_from_reg([I]=Is, From, _To, Acc) ->
%% opt_alloc(Instructions) -> Instructions'
%% Optimises all allocate instructions.
+opt_alloc([{set,[],[],{alloc,Live0,Info0}},
+ {set,[],[],{alloc,Live,Info}}|Is]) ->
+ Live = Live0, %Assertion.
+ Alloc = combine_alloc(Info0, Info),
+ I = {set,[],[],{alloc,Live,Alloc}},
+ opt_alloc([I|Is]);
opt_alloc([{set,[],[],{alloc,R,{_,Ns,Nh,[]}}}|Is]) ->
[{set,[],[],opt_alloc(Is, Ns, Nh, R)}|Is];
opt_alloc([I|Is]) -> [I|opt_alloc(Is)];
opt_alloc([]) -> [].
+
+combine_alloc({_,Ns,Nh1,Init}, {_,nostack,Nh2,[]}) ->
+ {zero,Ns,beam_utils:combine_heap_needs(Nh1, Nh2),Init}.
%% opt_alloc(Instructions, FrameSize, HeapNeed, LivingRegs) -> [Instr]
%% Generates the optimal sequence of instructions for
@@ -445,13 +462,14 @@ count_ones(Bits, Acc) ->
alloc_live_regs({set,Ds,Ss,_}, Regs0) ->
Rset = x_live(Ss, x_dead(Ds, (1 bsl Regs0)-1)),
- live_regs(Rset).
+ live_regs(0, Rset).
-live_regs(Regs) ->
- live_regs_1(0, Regs).
-
-live_regs_1(N, 0) -> N;
-live_regs_1(N, Regs) -> live_regs_1(N+1, Regs bsr 1).
+live_regs(N, 0) ->
+ N;
+live_regs(N, Regs) when Regs band 1 =:= 1 ->
+ live_regs(N+1, Regs bsr 1);
+live_regs(_, _) ->
+ not_possible.
x_dead([{x,N}|Rs], Regs) -> x_dead(Rs, Regs band (bnot (1 bsl N)));
x_dead([_|Rs], Regs) -> x_dead(Rs, Regs);
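
As an aside (not part of the patch itself): the reworked move_allocates_1/2 only lets an allocation pass an instruction when alloc_live_regs/2 can prove that the live x registers form an unbroken run x(0)..x(N-1); any gap now yields not_possible instead of an unsafe guess. A minimal standalone sketch of that bitmask walk, assuming the same bit-per-register encoding as above (the module and function names are made up for illustration):

    -module(live_regs_sketch).
    -export([count/1]).

    %% Bit N set in Regs means x(N) is live. The count is only defined
    %% when the live registers are exactly x(0)..x(N-1) with no holes.
    count(Regs) -> count(0, Regs).

    count(N, 0) ->
        N;                                %% no more live registers
    count(N, Regs) when Regs band 1 =:= 1 ->
        count(N + 1, Regs bsr 1);         %% x(N) is live, keep walking
    count(_, _) ->
        not_possible.                     %% a dead register below a live one

    %% count(2#011) =:= 2             (x(0), x(1) live; allocate may move up)
    %% count(2#101) =:= not_possible  (x(1) dead below a live x(2))
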
diff --git a/lib/compiler/src/beam_bsm.erl b/lib/compiler/src/beam_bsm.erl
index 286307a4be..ae1b34ba49 100644
--- a/lib/compiler/src/beam_bsm.erl
+++ b/lib/compiler/src/beam_bsm.erl
@@ -205,8 +205,15 @@ btb_reaches_match_1(Is, Regs, D) ->
btb_reaches_match_2([{block,Bl}|Is], Regs0, D) ->
Regs = btb_reaches_match_block(Bl, Regs0),
btb_reaches_match_1(Is, Regs, D);
-btb_reaches_match_2([{call,Arity,{f,Lbl}}|Is], Regs, D) ->
- btb_call(Arity, Lbl, Regs, Is, D);
+btb_reaches_match_2([{call,Arity,{f,Lbl}}|Is], Regs0, D) ->
+ case is_tail_call(Is) of
+ true ->
+ Regs1 = btb_kill_not_live(Arity, Regs0),
+ Regs = btb_kill_yregs(Regs1),
+ btb_tail_call(Lbl, Regs, D);
+ false ->
+ btb_call(Arity, Lbl, Regs0, Is, D)
+ end;
btb_reaches_match_2([{apply,Arity}|Is], Regs, D) ->
btb_call(Arity+2, apply, Regs, Is, D);
btb_reaches_match_2([{call_fun,Live}=I|Is], Regs, D) ->
@@ -360,6 +367,10 @@ btb_reaches_match_2([{line,_}|Is], Regs, D) ->
btb_reaches_match_2([I|_], Regs, _) ->
btb_error({btb_context_regs(Regs),I,not_handled}).
+is_tail_call([{deallocate,_}|_]) -> true;
+is_tail_call([return|_]) -> true;
+is_tail_call(_) -> false.
+
btb_call(Arity, Lbl, Regs0, Is, D0) ->
Regs = btb_kill_not_live(Arity, Regs0),
case btb_are_x_registers_empty(Regs) of
@@ -369,15 +380,15 @@ btb_call(Arity, Lbl, Regs0, Is, D0) ->
D = btb_tail_call(Lbl, Regs, D0),
%% No problem so far (the called function can handle a
- %% match context). Now we must make sure that the rest
- %% of this function following the call does not attempt
- %% to use the match context in case there is a copy
- %% tucked away in a y register.
+ %% match context). Now we must make sure that we don't
+ %% have any copies of the match context tucked away in a
+ %% y register.
RegList = btb_context_regs(Regs),
- YRegs = [R || {y,_}=R <- RegList],
- case btb_are_all_unused(YRegs, Is, D) of
- true -> D;
- false -> btb_error({multiple_uses,RegList})
+ case [R || {y,_}=R <- RegList] of
+ [] ->
+ D;
+ [_|_] ->
+ btb_error({multiple_uses,RegList})
end;
true ->
%% No match context in any x register. It could have been
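
As an aside (not part of the patch itself): the new is_tail_call/1 in beam_bsm only peeks at the instructions that follow the call, so a frame teardown or an immediate return marks the call as a tail call. A small runnable sketch with made-up instruction lists (the module name is illustrative):

    -module(bsm_tail_call_sketch).
    -export([demo/0]).

    %% Same three clauses as the is_tail_call/1 added above.
    is_tail_call([{deallocate,_}|_]) -> true;
    is_tail_call([return|_]) -> true;
    is_tail_call(_) -> false.

    demo() ->
        true  = is_tail_call([{deallocate,2},return]),    %% tail call with a frame
        true  = is_tail_call([return]),                   %% tail call, no frame
        false = is_tail_call([{block,[]},{jump,{f,7}}]),  %% ordinary body call
        ok.
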
diff --git a/lib/compiler/src/beam_dead.erl b/lib/compiler/src/beam_dead.erl
index b01f58f683..6f6d742293 100644
--- a/lib/compiler/src/beam_dead.erl
+++ b/lib/compiler/src/beam_dead.erl
@@ -272,17 +272,14 @@ backward([{jump,{f,To}}=J|[{bif,Op,_,Ops,Reg}|Is]=Is0], D, Acc) ->
catch
throw:not_possible -> backward(Is0, D, [J|Acc])
end;
-backward([{test,bs_start_match2,F,Live,[R,_]=Args,Ctxt}|Is], D,
+backward([{test,bs_start_match2,F,_,[R,_],Ctxt}=I|Is], D,
[{test,bs_match_string,F,[Ctxt,Bs]},
{test,bs_test_tail2,F,[Ctxt,0]}|Acc0]=Acc) ->
- {f,To0} = F,
- To = shortcut_bs_start_match(To0, R, D),
case beam_utils:is_killed(Ctxt, Acc0, D) of
true ->
- Eq = {test,is_eq_exact,{f,To},[R,{literal,Bs}]},
+ Eq = {test,is_eq_exact,F,[R,{literal,Bs}]},
backward(Is, D, [Eq|Acc0]);
false ->
- I = {test,bs_start_match2,{f,To},Live,Args,Ctxt},
backward(Is, D, [I|Acc])
end;
backward([{test,bs_start_match2,{f,To0},Live,[Src|_]=Info,Dst}|Is], D, Acc) ->
diff --git a/lib/compiler/src/beam_flatten.erl b/lib/compiler/src/beam_flatten.erl
index 36369bd0b4..c9ff07b496 100644
--- a/lib/compiler/src/beam_flatten.erl
+++ b/lib/compiler/src/beam_flatten.erl
@@ -64,7 +64,6 @@ norm({set,[],[S,D],{set_tuple_element,I}}) -> {set_tuple_element,S,D,I};
norm({set,[D1,D2],[S],get_list}) -> {get_list,S,D1,D2};
norm({set,[D],[S|Puts],{alloc,R,{put_map,Op,F}}}) ->
{put_map,F,Op,S,D,R,{list,Puts}};
-%% get_map_elements is always handled in beam_split (moved out of block)
norm({set,[],[],remove_message}) -> remove_message;
norm({set,[],[],fclearerror}) -> fclearerror;
norm({set,[],[],fcheckerror}) -> {fcheckerror,{f,0}}.
diff --git a/lib/compiler/src/beam_jump.erl b/lib/compiler/src/beam_jump.erl
index 09cd3aa2d4..48b5a32814 100644
--- a/lib/compiler/src/beam_jump.erl
+++ b/lib/compiler/src/beam_jump.erl
@@ -167,12 +167,18 @@ share_1([{label,L}=Lbl|Is], Dict0, Seq, Acc) ->
end;
share_1([{func_info,_,_,_}=I|Is], _, [], Acc) ->
reverse(Is, [I|Acc]);
+share_1([{'catch',_,_}=I|Is], Dict0, Seq, Acc) ->
+ Dict = clean_non_sharable(Dict0),
+ share_1(Is, Dict, [I|Seq], Acc);
share_1([{'try',_,_}=I|Is], Dict0, Seq, Acc) ->
Dict = clean_non_sharable(Dict0),
share_1(Is, Dict, [I|Seq], Acc);
share_1([{try_case,_}=I|Is], Dict0, Seq, Acc) ->
Dict = clean_non_sharable(Dict0),
share_1(Is, Dict, [I|Seq], Acc);
+share_1([{catch_end,_}=I|Is], Dict0, Seq, Acc) ->
+ Dict = clean_non_sharable(Dict0),
+ share_1(Is, Dict, [I|Seq], Acc);
share_1([I|Is], Dict, Seq, Acc) ->
case is_unreachable_after(I) of
false ->
@@ -182,18 +188,18 @@ share_1([I|Is], Dict, Seq, Acc) ->
end.
clean_non_sharable(Dict) ->
- %% We are passing in or out of a 'try' block. Remove
- %% sequences that should not shared over the boundaries
- %% of a 'try' block. Since the end of the sequence must match,
- %% the only possible match between a sequence outside and
- %% a sequence inside the 'try' block is a sequence that ends
- %% with an instruction that causes an exception. Any sequence
- %% that causes an exception must contain a line/1 instruction.
+ %% We are passing in or out of a 'catch' or 'try' block. Remove
+ %% sequences that should not be shared over the boundaries of the
+ %% block. Since the end of the sequence must match, the only
+ %% possible match between a sequence outside and a sequence inside
+ %% the 'catch'/'try' block is a sequence that ends with an
+ %% instruction that causes an exception. Any sequence that causes
+ %% an exception must contain a line/1 instruction.
maps:filter(fun(K, _V) -> sharable_with_try(K) end, Dict).
sharable_with_try([{line,_}|_]) ->
%% This sequence may cause an exception and may potentially
- %% match a sequence on the other side of the 'try' block
+ %% match a sequence on the other side of the 'catch'/'try' block
%% boundary.
false;
sharable_with_try([_|Is]) ->
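
As an aside (not part of the patch itself): the rewritten comment above says that the only sequences that can match across a 'catch'/'try' boundary are those that raise an exception, and any such sequence contains a line/1 instruction. A self-contained sketch of that filter over a toy dictionary of instruction sequences (module name and sample sequences are made up):

    -module(share_sketch).
    -export([demo/0]).

    %% A sequence that can raise an exception must contain a line/1
    %% instruction, so any sequence containing one is dropped when code
    %% sharing crosses a 'catch'/'try' boundary.
    sharable_with_try([{line,_}|_]) -> false;
    sharable_with_try([_|Is]) -> sharable_with_try(Is);
    sharable_with_try([]) -> true.

    clean_non_sharable(Dict) ->
        maps:filter(fun(K, _V) -> sharable_with_try(K) end, Dict).

    demo() ->
        Raises = [{line,1},{call_ext,1,{extfunc,erlang,error,1}}],
        Plain  = [{move,{x,0},{y,0}},return],
        D = clean_non_sharable(#{Raises => {f,1}, Plain => {f,2}}),
        [Plain] = maps:keys(D),           %% the raising sequence was removed
        ok.
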
diff --git a/lib/compiler/src/beam_split.erl b/lib/compiler/src/beam_split.erl
index c83c686953..feeab0af50 100644
--- a/lib/compiler/src/beam_split.erl
+++ b/lib/compiler/src/beam_split.erl
@@ -56,9 +56,6 @@ split_block([{set,[D],[S|Puts],{alloc,R,{put_map,Op,{f,Lbl}=Fail}}}|Is],
Bl, Acc) when Lbl =/= 0 ->
split_block(Is, [], [{put_map,Fail,Op,S,D,R,{list,Puts}}|
make_block(Bl, Acc)]);
-split_block([{set,Ds,[S|Ss],{get_map_elements,Fail}}|Is], Bl, Acc) ->
- Gets = beam_utils:join_even(Ss,Ds),
- split_block(Is, [], [{get_map_elements,Fail,S,{list,Gets}}|make_block(Bl, Acc)]);
split_block([{set,[R],[],{try_catch,Op,L}}|Is], Bl, Acc) ->
split_block(Is, [], [{Op,R,L}|make_block(Bl, Acc)]);
split_block([{set,[],[],{line,_}=Line}|Is], Bl, Acc) ->
diff --git a/lib/compiler/src/beam_utils.erl b/lib/compiler/src/beam_utils.erl
index a15ecf633e..249d9395ca 100644
--- a/lib/compiler/src/beam_utils.erl
+++ b/lib/compiler/src/beam_utils.erl
@@ -26,7 +26,7 @@
empty_label_index/0,index_label/3,index_labels/1,
code_at/2,bif_to_test/3,is_pure_test/1,
live_opt/1,delete_live_annos/1,combine_heap_needs/2,
- join_even/2,split_even/1]).
+ split_even/1]).
-import(lists, [member/2,sort/1,reverse/1,splitwith/2]).
@@ -233,11 +233,6 @@ combine_heap_needs(H1, H2) when is_integer(H1), is_integer(H2) ->
split_even(Rs) -> split_even(Rs, [], []).
-%% join_even/1
-%% {[1,3,5],[2,4,6]} -> [1,2,3,4,5,6]
-
-join_even([], []) -> [];
-join_even([S|Ss], [D|Ds]) -> [S,D|join_even(Ss, Ds)].
%%%
%%% Local functions.
@@ -753,6 +748,11 @@ live_opt([timeout=I|Is], _, D, Acc) ->
live_opt(Is, 0, D, [I|Acc]);
live_opt([{wait,_}=I|Is], _, D, Acc) ->
live_opt(Is, 0, D, [I|Acc]);
+live_opt([{get_map_elements,Fail,Src,{list,List}}=I|Is], Regs0, D, Acc) ->
+ {Ss,Ds} = split_even(List),
+ Regs1 = x_live([Src|Ss], x_dead(Ds, Regs0)),
+ Regs = live_join_label(Fail, D, Regs1),
+ live_opt(Is, Regs, D, [I|Acc]);
%% Transparent instructions - they neither use nor modify x registers.
live_opt([{deallocate,_}=I|Is], Regs, D, Acc) ->
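
As an aside (not part of the patch itself): the new live_opt clause above uses split_even/1 to pull the sources and destinations of a get_map_elements {list,...} apart before computing liveness. A hedged sketch of that helper, assuming the shape documented for the removed join_even/2 (split_even([1,2,3,4,5,6]) giving {[1,3,5],[2,4,6]}); the module name is made up:

    -module(split_even_sketch).
    -export([demo/0]).

    %% [S1,D1,S2,D2,...] -> {[S1,S2,...],[D1,D2,...]}
    split_even(Rs) -> split_even(Rs, [], []).

    split_even([], Ss, Ds) ->
        {lists:reverse(Ss),lists:reverse(Ds)};
    split_even([S,D|Rs], Ss, Ds) ->
        split_even(Rs, [S|Ss], [D|Ds]).

    demo() ->
        %% For get_map_elements, the odd elements are the keys looked up
        %% (sources) and the even elements are the destination registers.
        {[{atom,a},{atom,b}],[{x,1},{x,2}]} =
            split_even([{atom,a},{x,1},{atom,b},{x,2}]),
        ok.
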
diff --git a/lib/compiler/src/beam_validator.erl b/lib/compiler/src/beam_validator.erl
index 4c0cb6780a..16dba35adc 100644
--- a/lib/compiler/src/beam_validator.erl
+++ b/lib/compiler/src/beam_validator.erl
@@ -808,9 +808,11 @@ validate_bs_skip_utf(Fail, Ctx, Live, Vst0) ->
%% A possibility for garbage collection must not occur between setelement/3 and
%% set_tuple_element/3.
%%
+%% Note that #vst.current will be 'none' if the instruction is unreachable.
+%%
val_dsetel({move,_,_}, Vst) ->
Vst;
-val_dsetel({call_ext,3,{extfunc,erlang,setelement,3}}, #vst{current=St}=Vst) ->
+val_dsetel({call_ext,3,{extfunc,erlang,setelement,3}}, #vst{current=#st{}=St}=Vst) ->
Vst#vst{current=St#st{setelem=true}};
val_dsetel({set_tuple_element,_,_,_}, #vst{current=#st{setelem=false}}) ->
error(illegal_context_for_set_tuple_element);
diff --git a/lib/compiler/src/compile.erl b/lib/compiler/src/compile.erl
index 82ff8a95f3..e37ca31704 100644
--- a/lib/compiler/src/compile.erl
+++ b/lib/compiler/src/compile.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2015. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -43,6 +43,10 @@
-type abstract_code() :: [erl_parse:abstract_form()].
+%% Internal representations used for 'from_asm' and 'from_beam' compilation can
+%% also be valid, but have no relevant types defined.
+-type forms() :: abstract_code() | cerl:c_module().
+
-type option() :: atom() | {atom(), term()} | {'d', atom(), term()}.
-type err_info() :: {erl_anno:line() | 'none',
@@ -88,7 +92,7 @@ file(File, Opt) ->
forms(Forms) -> forms(Forms, ?DEFAULT_OPTIONS).
--spec forms(abstract_code(), [option()] | option()) -> comp_ret().
+-spec forms(forms(), [option()] | option()) -> comp_ret().
forms(Forms, Opts) when is_list(Opts) ->
do_compile({forms,Forms}, [binary|Opts++env_default_opts()]);
@@ -116,7 +120,7 @@ noenv_file(File, Opts) when is_list(Opts) ->
noenv_file(File, Opt) ->
noenv_file(File, [Opt|?DEFAULT_OPTIONS]).
--spec noenv_forms(abstract_code(), [option()] | option()) -> comp_ret().
+-spec noenv_forms(forms(), [option()] | option()) -> comp_ret().
noenv_forms(Forms, Opts) when is_list(Opts) ->
do_compile({forms,Forms}, [binary|Opts]);
@@ -169,17 +173,25 @@ env_default_opts() ->
do_compile(Input, Opts0) ->
Opts = expand_opts(Opts0),
- {Pid,Ref} =
- spawn_monitor(fun() ->
- exit(try
- internal(Input, Opts)
- catch
- error:Reason ->
- {error,Reason}
- end)
- end),
- receive
- {'DOWN',Ref,process,Pid,Rep} -> Rep
+ IntFun = fun() -> try
+ internal(Input, Opts)
+ catch
+ error:Reason ->
+ {error,Reason}
+ end
+ end,
+ %% Dialyzer has already spawned workers.
+ case lists:member(dialyzer, Opts) of
+ true ->
+ IntFun();
+ false ->
+ {Pid,Ref} =
+ spawn_monitor(fun() ->
+ exit(IntFun())
+ end),
+ receive
+ {'DOWN',Ref,process,Pid,Rep} -> Rep
+ end
end.
expand_opts(Opts0) ->
@@ -236,6 +248,8 @@ format_error({epp,E}) ->
epp:format_error(E);
format_error(write_error) ->
"error writing file";
+format_error({write_error, Error}) ->
+ io_lib:format("error writing file: ~ts", [file:format_error(Error)]);
format_error({rename,From,To,Error}) ->
io_lib:format("failed to rename ~ts to ~ts: ~ts",
[From,To,file:format_error(Error)]);
@@ -678,7 +692,7 @@ kernel_passes() ->
{iff,core,?pass(save_core_code)},
%% Kernel Erlang and code generation.
- {pass,v3_kernel},
+ ?pass(v3_kernel),
{iff,dkern,{listing,"kernel"}},
{iff,'to_kernel',{done,"kernel"}},
{pass,v3_life},
@@ -1206,7 +1220,7 @@ makedep_output(#compile{code=Code,options=Opts,ofile=Ofile}=St) ->
end,
{ok,St}
catch
- exit:_ ->
+ error:_ ->
%% Couldn't write to output Makefile.
Err = {St#compile.ifile,[{none,?MODULE,write_error}]},
{error,St#compile{errors=St#compile.errors++[Err]}}
@@ -1235,6 +1249,17 @@ core_fold_module_after_inlining(#compile{code=Code0,options=Opts}=St) ->
{ok,Code,_Ws} = sys_core_fold:module(Code0, Opts),
{ok,St#compile{code=Code}}.
+v3_kernel(#compile{code=Code0,options=Opts,warnings=Ws0}=St) ->
+ {ok,Code,Ws} = v3_kernel:module(Code0, Opts),
+ case Ws =:= [] orelse test_core_inliner(St) of
+ false ->
+ {ok,St#compile{code=Code,warnings=Ws0++Ws}};
+ true ->
+ %% cerl_inline may produce code that generates spurious
+ %% warnings. Ignore any such warnings.
+ {ok,St#compile{code=Code}}
+ end.
+
test_old_inliner(#compile{options=Opts}) ->
%% The point of this test is to avoid loading the old inliner
%% if we know that it will not be used.
@@ -1479,8 +1504,8 @@ save_binary_1(St) ->
end,
{error,St#compile{errors=St#compile.errors ++ Es}}
end;
- {error,_Error} ->
- Es = [{Tfile,[{none,compile,write_error}]}],
+ {error,Error} ->
+ Es = [{Tfile,[{none,compile,{write_error,Error}}]}],
{error,St#compile{errors=St#compile.errors ++ Es}}
end.
@@ -1628,8 +1653,8 @@ listing(LFun, Ext, St) ->
LFun(Lf, Code),
ok = file:close(Lf),
{ok,St};
- {error,_Error} ->
- Es = [{Lfile,[{none,compile,write_error}]}],
+ {error,Error} ->
+ Es = [{Lfile,[{none,compile,{write_error,Error}}]}],
{error,St#compile{errors=St#compile.errors ++ Es}}
end.
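
As an aside (not part of the patch itself): the non-dialyzer branch of do_compile/2 keeps the usual spawn_monitor/'DOWN' round trip for isolating the compilation in a throwaway process. A minimal sketch of that pattern with made-up names:

    -module(isolate_sketch).
    -export([run/1]).

    %% Run Fun in a fresh process; its return value (or, if it crashes,
    %% the exit reason) comes back in the 'DOWN' message for the monitor.
    run(Fun) when is_function(Fun, 0) ->
        {Pid,Ref} = spawn_monitor(fun() -> exit(Fun()) end),
        receive
            {'DOWN',Ref,process,Pid,Result} -> Result
        end.

    %% isolate_sketch:run(fun() -> 1 + 1 end) evaluates to 2; a crash in
    %% Fun shows up as the exit reason instead of taking the caller down.
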
diff --git a/lib/compiler/src/sys_core_fold.erl b/lib/compiler/src/sys_core_fold.erl
index e0de50f3ae..4922953407 100644
--- a/lib/compiler/src/sys_core_fold.erl
+++ b/lib/compiler/src/sys_core_fold.erl
@@ -468,7 +468,8 @@ bitstr(#c_bitstr{val=Val,size=Size}=BinSeg, Sub) ->
%% Currently, we don't attempt to check binaries because they
%% are difficult to check.
-is_safe_simple(#c_var{}, _) -> true;
+is_safe_simple(#c_var{}=Var, _) ->
+ not cerl:is_c_fname(Var);
is_safe_simple(#c_cons{hd=H,tl=T}, Sub) ->
is_safe_simple(H, Sub) andalso is_safe_simple(T, Sub);
is_safe_simple(#c_tuple{es=Es}, Sub) -> is_safe_simple_list(Es, Sub);
@@ -733,7 +734,7 @@ call(#c_call{args=As}=Call, #c_literal{val=M}=M0, #c_literal{val=N}=N0, Sub) ->
false ->
case sys_core_fold_lists:call(Call, M, N, As) of
none ->
- call_1(Call, M, N, As, Sub);
+ call_1(Call, M0, N0, As, Sub);
Core ->
expr(Core, Sub)
end
@@ -1129,7 +1130,13 @@ clause_1(#c_clause{guard=G0,body=B0}=Cl, Ps1, Cexpr, Ctxt, Sub1) ->
%%
%% case A of NewVar when true -> ...
%%
- sub_set_var(Var, Cexpr, Sub2);
+ case cerl:is_c_fname(Cexpr) of
+ false ->
+ sub_set_var(Var, Cexpr, Sub2);
+ true ->
+ %% We must not copy funs, and especially not into guards.
+ Sub2
+ end;
_ ->
Sub2
end,
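
As an aside (not part of the patch itself): both sys_core_fold fixes hinge on telling function-name variables apart from ordinary Core Erlang variables, which is what cerl:is_c_fname/1 does; a fun name is represented as a variable, so the old #c_var{} clause matched it too. A small shell illustration of the predicates involved (the surrounding substitution machinery is elided):

    1> cerl:is_c_var(cerl:c_fname(foo, 1)).
    true
    2> cerl:is_c_fname(cerl:c_fname(foo, 1)).
    true
    3> cerl:is_c_fname(cerl:c_var('X')).
    false
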