Diffstat (limited to 'lib/compiler')
 lib/compiler/src/beam_block.erl        | 17
 lib/compiler/src/beam_validator.erl    |  4
 lib/compiler/src/sys_core_fold.erl     | 14
 lib/compiler/src/v3_codegen.erl        | 60
 lib/compiler/src/v3_core.erl           | 11
 lib/compiler/test/beam_block_SUITE.erl | 21
 lib/compiler/test/core_fold_SUITE.erl  | 49
 lib/compiler/test/map_SUITE.erl        | 24
 lib/compiler/test/misc_SUITE.erl       |  7
 9 files changed, 163 insertions(+), 44 deletions(-)
diff --git a/lib/compiler/src/beam_block.erl b/lib/compiler/src/beam_block.erl
index a8cfdffdf3..85d332c56e 100644
--- a/lib/compiler/src/beam_block.erl
+++ b/lib/compiler/src/beam_block.erl
@@ -262,12 +262,17 @@ opt_move_1(R, [{set,[D],[R],move}|Is0], Acc) ->
{yes,Is} -> opt_move_rev(D, Acc, Is);
no -> not_possible
end;
-opt_move_1({x,_}, [{set,_,_,{alloc,_,_}}|_], _) ->
- %% The optimization is not possible. If the X register is not
- %% killed by allocation, the optimization would not be safe.
- %% If the X register is killed, it means that there cannot
- %% follow a 'move' instruction with this X register as the
- %% source.
+opt_move_1(_R, [{set,_,_,{alloc,_,_}}|_], _) ->
+ %% The optimization is either not possible or not safe.
+ %%
+ %% If R is an X register that is not killed by the allocation, the
+ %% optimization is not safe. On the other hand, if the X register is
+ %% killed by the allocation, no 'move' instruction with this X
+ %% register as the source can follow, so there is nothing to optimize.
+ %%
+ %% If R is a Y register, the optimization is still not safe, because
+ %% the new target register would be an X register that cannot safely
+ %% be live across the alloc instruction.
not_possible;
opt_move_1(R, [{set,_,_,_}=I|Is], Acc) ->
%% If the source register is either killed or used by this
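
For orientation, a minimal Erlang sketch of the kind of code that exposed the unsafe move across a GC BIF; it mirrors the move_opt_across_gc_bif regression test added to beam_block_SUITE.erl further down in this diff, and only the module name is made up:

    -module(move_across_gc_sketch).   %% hypothetical name, for illustration only
    -export([positive/1]).

    %% Evaluating this body involves allocating instructions (GC BIFs and
    %% test_heap for the list). Before this fix, the move optimization
    %% could be applied across such an allocating instruction when the
    %% source was a Y register, which the new clause above now rejects.
    positive(speaking) ->
        try
            Positive = 0,
            [+Positive, case Positive of _ -> true end, paris([], Positive)]
        after
            mailing
        end.

    paris([], P) -> P + 1.
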
diff --git a/lib/compiler/src/beam_validator.erl b/lib/compiler/src/beam_validator.erl
index 13aa31b7c9..4c0cb6780a 100644
--- a/lib/compiler/src/beam_validator.erl
+++ b/lib/compiler/src/beam_validator.erl
@@ -658,8 +658,10 @@ valfun_4({test,is_map,{f,Lbl},[Src]}, Vst0) ->
case Src of
{Tag,_} when Tag =:= x; Tag =:= y ->
set_type_reg(map, Src, Vst);
+ {literal,Map} when is_map(Map) ->
+ Vst;
_ ->
- Vst
+ kill_state(Vst)
end;
valfun_4({test,_Op,{f,Lbl},Src}, Vst) ->
validate_src(Src, Vst),
diff --git a/lib/compiler/src/sys_core_fold.erl b/lib/compiler/src/sys_core_fold.erl
index dbc27db377..e0de50f3ae 100644
--- a/lib/compiler/src/sys_core_fold.erl
+++ b/lib/compiler/src/sys_core_fold.erl
@@ -786,7 +786,7 @@ fold_lit_args(Call, Module, Name, Args0) ->
Val ->
case cerl:is_literal_term(Val) of
true ->
- cerl:abstract(Val);
+ cerl:ann_abstract(cerl:get_ann(Call), Val);
false ->
%% Successful evaluation, but it was not possible
%% to express the computed value as a literal.
@@ -2176,24 +2176,22 @@ opt_not_in_let_1(V, Call, Body) ->
#c_call{module=#c_literal{val=erlang},
name=#c_literal{val='not'},
args=[#c_var{name=V}]} ->
- opt_not_in_let_2(Body);
+ opt_not_in_let_2(Body, Call);
_ ->
no
end.
-opt_not_in_let_2(#c_case{clauses=Cs0}=Case) ->
+opt_not_in_let_2(#c_case{clauses=Cs0}=Case, NotCall) ->
Vars = make_vars([], 1),
- Body = #c_call{module=#c_literal{val=erlang},
- name=#c_literal{val='not'},
- args=Vars},
+ Body = NotCall#c_call{args=Vars},
Cs = [begin
Let = #c_let{vars=Vars,arg=B,body=Body},
C#c_clause{body=opt_not_in_let(Let)}
end || #c_clause{body=B}=C <- Cs0],
{yes,Case#c_case{clauses=Cs}};
-opt_not_in_let_2(#c_call{}=Call0) ->
+opt_not_in_let_2(#c_call{}=Call0, _NotCall) ->
invert_call(Call0);
-opt_not_in_let_2(_) -> no.
+opt_not_in_let_2(_, _) -> no.
invert_call(#c_call{module=#c_literal{val=erlang},
name=#c_literal{val=Name0},
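
To make the NotCall change concrete, here is a hedged source-level sketch of the rewrite that opt_not_in_let_2 performs; the module is hypothetical and the exact Core Erlang depends on the surrounding passes. The point of passing NotCall along is that the pushed-down negation reuses the original 'not' call and therefore keeps its annotations, in particular the line number, instead of being rebuilt without any:

    -module(not_in_let_sketch).   %% hypothetical, for illustration only
    -export([f/1]).

    %% Conceptually, a negated case expression
    %%     not case E of P1 -> B1; P2 -> B2 end
    %% is rewritten so that the negation is pushed into the clause bodies:
    %%     case E of P1 -> not B1; P2 -> not B2 end
    %% while a negated comparison such as not (A =:= B) is inverted to
    %% A =/= B by invert_call. Compiling a function like this one
    %% exercises the rewrite.
    f(X) ->
        not case X of
                [] -> X =:= [];
                _ -> false
            end.
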
diff --git a/lib/compiler/src/v3_codegen.erl b/lib/compiler/src/v3_codegen.erl
index f5f3c73793..4df1aadd0a 100644
--- a/lib/compiler/src/v3_codegen.erl
+++ b/lib/compiler/src/v3_codegen.erl
@@ -1551,12 +1551,10 @@ set_cg([{var,R}], {binary,Segs}, Le, Vdb, Bef, #cg{bfail=Bfail}=St) ->
%% Now generate the complete code for constructing the binary.
Code = cg_binary(PutCode, Target, Temp, Fail, MaxRegs, Le#l.a),
{Sis++Code,Aft,St};
-% Map single variable key
-set_cg([{var,R}], {map,Op,Map,[{map_pair,{var,_}=K,V}]}, Le, Vdb, Bef,
- #cg{bfail=Bfail}=St) ->
- Fail = {f,Bfail},
- {Sis,Int0} = maybe_adjust_stack(Bef, Le#l.i, Le#l.i+1, Vdb, St),
+%% Map: single variable key.
+set_cg([{var,R}], {map,Op,Map,[{map_pair,{var,_}=K,V}]}, Le, Vdb, Bef, St0) ->
+ {Sis,Int0} = maybe_adjust_stack(Bef, Le#l.i, Le#l.i+1, Vdb, St0),
SrcReg = cg_reg_arg_prefer_y(Map, Int0),
Line = line(Le#l.a),
@@ -1570,21 +1568,16 @@ set_cg([{var,R}], {map,Op,Map,[{map_pair,{var,_}=K,V}]}, Le, Vdb, Bef,
Aft = Aft0#sr{reg=put_reg(R, Aft0#sr.reg)},
Target = fetch_reg(R, Aft#sr.reg),
- I = case Op of
- assoc -> put_map_assoc;
- exact -> put_map_exact
- end,
- {Sis++[Line]++[{I,Fail,SrcReg,Target,Live,{list,List}}],Aft,St};
+ {Is,St1} = set_cg_map(Line, Op, SrcReg, Target, Live, List, St0),
+ {Sis++Is,Aft,St1};
-% Map (possibly) multiple literal keys
-set_cg([{var,R}], {map,Op,Map,Es}, Le, Vdb, Bef,
- #cg{bfail=Bfail}=St) ->
+%% Map: (possibly) multiple literal keys.
+set_cg([{var,R}], {map,Op,Map,Es}, Le, Vdb, Bef, St0) ->
%% assert key literals
[] = [Var||{map_pair,{var,_}=Var,_} <- Es],
- Fail = {f,Bfail},
- {Sis,Int0} = maybe_adjust_stack(Bef, Le#l.i, Le#l.i+1, Vdb, St),
+ {Sis,Int0} = maybe_adjust_stack(Bef, Le#l.i, Le#l.i+1, Vdb, St0),
SrcReg = cg_reg_arg_prefer_y(Map, Int0),
Line = line(Le#l.a),
@@ -1599,11 +1592,10 @@ set_cg([{var,R}], {map,Op,Map,Es}, Le, Vdb, Bef,
Aft = Aft0#sr{reg=put_reg(R, Aft0#sr.reg)},
Target = fetch_reg(R, Aft#sr.reg),
- I = case Op of
- assoc -> put_map_assoc;
- exact -> put_map_exact
- end,
- {Sis++[Line]++[{I,Fail,SrcReg,Target,Live,{list,List}}],Aft,St};
+ {Is,St1} = set_cg_map(Line, Op, SrcReg, Target, Live, List, St0),
+ {Sis++Is,Aft,St1};
+
+%% Everything else.
set_cg([{var,R}], Con, Le, Vdb, Bef, St) ->
%% Find a place for the return register first.
Int = Bef#sr{reg=put_reg(R, Bef#sr.reg)},
@@ -1616,6 +1608,34 @@ set_cg([{var,R}], Con, Le, Vdb, Bef, St) ->
end,
{Ais,clear_dead(Int, Le#l.i, Vdb),St}.
+
+set_cg_map(Line, Op0, SrcReg, Target, Live, List, St0) ->
+ Bfail = St0#cg.bfail,
+ Fail = {f,St0#cg.bfail},
+ Op = case Op0 of
+ assoc -> put_map_assoc;
+ exact -> put_map_exact
+ end,
+ {OkLbl,St1} = new_label(St0),
+ {BadLbl,St2} = new_label(St1),
+ Is = if
+ Bfail =:= 0 orelse Op =:= put_map_assoc ->
+ [Line,{Op,{f,0},SrcReg,Target,Live,{list,List}}];
+ true ->
+ %% Ensure that Target is always set, even if
+ %% the map update operation fails. That is necessary
+ %% because Target may be included in a test_heap
+ %% instruction.
+ [Line,
+ {Op,{f,BadLbl},SrcReg,Target,Live,{list,List}},
+ {jump,{f,OkLbl}},
+ {label,BadLbl},
+ {move,{atom,ok},Target},
+ {jump,Fail},
+ {label,OkLbl}]
+ end,
+ {Is,St2}.
+
%%%
%%% Code generation for constructing binaries.
%%%
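
The failure path that set_cg_map now handles corresponds to source such as the following, taken from the bad_map_guard_update tests added to map_SUITE.erl below (only the wrapping module is hypothetical). When the argument is not a map, or lacks the key, put_map_exact fails; the extra 'move {atom,ok}' ensures that the target register holds a term before the jump to the failure label, since that register may be included in a later test_heap instruction:

    -module(map_guard_sketch).   %% hypothetical wrapper module
    -export([burns/1]).

    %% From the bad_map_guard_update tests in this diff: the exact map
    %% update in the guard fails for a non-map argument (or for a map
    %% without the key 'true'), so the call ends in function_clause,
    %% but the register reserved for the updated map must still be
    %% initialized.
    burns(Richmond) when not (Richmond#{true := 0}); [Richmond] ->
        specification.
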
diff --git a/lib/compiler/src/v3_core.erl b/lib/compiler/src/v3_core.erl
index a3b0236134..d71411de80 100644
--- a/lib/compiler/src/v3_core.erl
+++ b/lib/compiler/src/v3_core.erl
@@ -868,12 +868,16 @@ try_exception(Ecs0, St0) ->
{Evs,St1} = new_vars(3, St0), % Tag, Value, Info
{Ecs1,Ceps,St2} = clauses(Ecs0, St1),
[_,Value,Info] = Evs,
- Ec = #iclause{anno=#a{anno=[compiler_generated]},
+ LA = case Ecs1 of
+ [] -> [];
+ [C|_] -> get_lineno_anno(C)
+ end,
+ Ec = #iclause{anno=#a{anno=[compiler_generated|LA]},
pats=[c_tuple(Evs)],guard=[#c_literal{val=true}],
body=[#iprimop{anno=#a{}, %Must have an #a{}
name=#c_literal{val=raise},
args=[Info,Value]}]},
- Hs = [#icase{anno=#a{},args=[c_tuple(Evs)],clauses=Ecs1,fc=Ec}],
+ Hs = [#icase{anno=#a{anno=LA},args=[c_tuple(Evs)],clauses=Ecs1,fc=Ec}],
{Evs,Ceps++Hs,St2}.
try_after(As, St0) ->
@@ -2098,7 +2102,8 @@ upattern(#c_var{name=V}=Var, Ks, St0) ->
true ->
{N,St1} = new_var_name(St0),
New = #c_var{name=N},
- Test = #icall{anno=#a{us=add_element(N, [V])},
+ LA = get_lineno_anno(Var),
+ Test = #icall{anno=#a{anno=LA,us=add_element(N, [V])},
module=#c_literal{val=erlang},
name=#c_literal{val='=:='},
args=[New,Var]},
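
The effect of these annotations shows up in compiler diagnostics. As a hedged illustration (hypothetical module name; the function body is taken from the no_no_file test added to core_fold_SUITE.erl below), compiling code like this makes sys_core_fold emit warnings, and with the annotations carried through as above those warnings report the real file name and line number rather than no location:

    -module(no_file_sketch).   %% hypothetical, mirrors no_no_file below
    -export([source/2]).

    %% The guard [] can never succeed, so sys_core_fold warns here; the
    %% warning should point at this file and line, not at "no_file".
    source(true, Activities) ->
        case 0 of
            Activities when [] ->
                Activities
        end.
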
diff --git a/lib/compiler/test/beam_block_SUITE.erl b/lib/compiler/test/beam_block_SUITE.erl
index d343e26737..4bcb252833 100644
--- a/lib/compiler/test/beam_block_SUITE.erl
+++ b/lib/compiler/test/beam_block_SUITE.erl
@@ -21,7 +21,7 @@
-export([all/0,suite/0,groups/0,init_per_suite/1,end_per_suite/1,
init_per_group/2,end_per_group/2,
- get_map_elements/1,otp_7345/1]).
+ get_map_elements/1,otp_7345/1,move_opt_across_gc_bif/1]).
%% The only test for the following functions is that
%% the code compiles and is accepted by beam_validator.
@@ -36,7 +36,8 @@ all() ->
groups() ->
[{p,[parallel],
[get_map_elements,
- otp_7345
+ otp_7345,
+ move_opt_across_gc_bif
]}].
init_per_suite(Config) ->
@@ -118,6 +119,22 @@ otp_7345(ObjRef, _RdEnv, Args) ->
10},
id(LlUnitdataReq).
+
+%% Doing move optimizations across GC BIFs is in general not safe.
+move_opt_across_gc_bif(_Config) ->
+ [0,true,1] = positive(speaking),
+ ok.
+
+positive(speaking) ->
+ try
+ Positive = 0,
+ [+Positive, case Positive of _ -> true end, paris([], Positive)]
+ after
+ mailing
+ end.
+
+paris([], P) -> P + 1.
+
%%%
%%% The only test of the following code is that it compiles.
%%%
diff --git a/lib/compiler/test/core_fold_SUITE.erl b/lib/compiler/test/core_fold_SUITE.erl
index 442b2d424c..376d2c8e9a 100644
--- a/lib/compiler/test/core_fold_SUITE.erl
+++ b/lib/compiler/test/core_fold_SUITE.erl
@@ -25,7 +25,8 @@
eq/1,nested_call_in_case/1,guard_try_catch/1,coverage/1,
unused_multiple_values_error/1,unused_multiple_values/1,
multiple_aliases/1,redundant_boolean_clauses/1,
- mixed_matching_clauses/1,unnecessary_building/1]).
+ mixed_matching_clauses/1,unnecessary_building/1,
+ no_no_file/1]).
-export([foo/0,foo/1,foo/2,foo/3]).
@@ -43,7 +44,8 @@ groups() ->
eq,nested_call_in_case,guard_try_catch,coverage,
unused_multiple_values_error,unused_multiple_values,
multiple_aliases,redundant_boolean_clauses,
- mixed_matching_clauses,unnecessary_building]}].
+ mixed_matching_clauses,unnecessary_building,
+ no_no_file]}].
init_per_suite(Config) ->
@@ -454,4 +456,47 @@ do_unnecessary_building_2({a,_,_}=T) ->
[_,_] = [T,none],
x}.
+%% This test verifies that v3_core provides annotations and that
+%% sys_core_fold retains them, so that warnings produced by
+%% sys_core_fold have proper filenames and line numbers; that is,
+%% there are no "no_file" warnings.
+no_no_file(_Config) ->
+ {'EXIT',{{case_clause,0},_}} = (catch source(true, any)),
+ surgery = (tim(#{reduction => any}))(),
+
+ false = soul(#{[] => true}),
+ {'EXIT',{{case_clause,true},_}} = (catch soul(#{[] => false})),
+
+ ok = experiment(),
+ ok.
+
+source(true, Activities) ->
+ case 0 of
+ Activities when [] ->
+ Activities
+ end.
+
+tim(#{reduction := Emergency}) ->
+ try
+ fun() -> surgery end
+ catch
+ _ when [] ->
+ planet
+ end.
+
+soul(#{[] := Properly}) ->
+ not case true of
+ Properly -> true;
+ Properly -> 0
+ end.
+
+experiment() ->
+ case kingdom of
+ _ ->
+ +case "map" of
+ _ -> 0.0
+ end
+ end,
+ ok.
+
id(I) -> I.
diff --git a/lib/compiler/test/map_SUITE.erl b/lib/compiler/test/map_SUITE.erl
index c3c4862794..36e82c1459 100644
--- a/lib/compiler/test/map_SUITE.erl
+++ b/lib/compiler/test/map_SUITE.erl
@@ -1287,6 +1287,7 @@ t_guard_update(Config) when is_list(Config) ->
first = map_guard_update(#{}, #{x=>first}),
second = map_guard_update(#{y=>old}, #{x=>second,y=>old}),
third = map_guard_update(#{x=>old,y=>old}, #{x=>third,y=>old}),
+ bad_map_guard_update(),
ok.
t_guard_update_large(Config) when is_list(Config) ->
@@ -1353,6 +1354,29 @@ map_guard_update(M1, M2) when M1#{x=>second} =:= M2 -> second;
map_guard_update(M1, M2) when M1#{x:=third} =:= M2 -> third;
map_guard_update(_, _) -> error.
+bad_map_guard_update() ->
+ do_bad_map_guard_update(fun burns/1),
+ do_bad_map_guard_update(fun turns/1),
+ ok.
+
+do_bad_map_guard_update(Fun) ->
+ do_bad_map_guard_update_1(Fun, #{}),
+ do_bad_map_guard_update_1(Fun, #{true=>1}),
+ ok.
+
+do_bad_map_guard_update_1(Fun, Value) ->
+ %% Note: Calling burns/1 and turns/1 through the seemingly
+ %% redundant fun disables inlining, which would otherwise change
+ %% the EXIT reason.
+ {'EXIT',{function_clause,_}} = (catch Fun(Value)),
+ ok.
+
+burns(Richmond) when not (Richmond#{true := 0}); [Richmond] ->
+ specification.
+
+turns(Richmond) when not (Richmond#{true => 0}); [Richmond] ->
+ specification.
+
t_guard_receive(Config) when is_list(Config) ->
M0 = #{ id => 0 },
Pid = spawn_link(fun() -> guard_receive_loop() end),
diff --git a/lib/compiler/test/misc_SUITE.erl b/lib/compiler/test/misc_SUITE.erl
index f05fe6c943..f543f0d4de 100644
--- a/lib/compiler/test/misc_SUITE.erl
+++ b/lib/compiler/test/misc_SUITE.erl
@@ -256,12 +256,15 @@ silly_coverage(Config) when is_list(Config) ->
{jump,{f,42}}]}],99},
expect_error(fun() -> beam_clean:module(CleanInput, []) end),
- %% beam_peep
+ %% beam_peep. This is tricky. Use a select instruction with
+ %% an odd number of elements in the list to crash
+ %% prune_redundant_values/2 but not beam_clean:clean_labels/1.
PeepInput = {?MODULE,[{foo,0}],[],
[{function,foo,0,2,
[{label,1},
{func_info,{atom,?MODULE},{atom,foo},0},
- {label,2}|non_proper_list]}],99},
+ {label,2},{select,op,r,{f,2},[{f,2}]}]}],
+ 2},
expect_error(fun() -> beam_peep:module(PeepInput, []) end),
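
For reference, a hedged sketch of why the odd-length list does the trick; the detail about beam_peep's internals is inferred only from the comment above, and the well-formed shape shown here is illustrative:

    -module(select_shape_sketch).   %% hypothetical, for illustration only
    -export([well_formed/0, crafted/0]).

    %% A well-formed select instruction pairs every value with a fail
    %% label, so its list always has an even length. The crafted
    %% instruction in PeepInput carries a single-element list, which
    %% trips the pairwise handling in prune_redundant_values/2 while
    %% still being accepted by beam_clean:clean_labels/1.
    well_formed() ->
        {select,select_val,{x,0},{f,2},[{atom,a},{f,3},{atom,b},{f,4}]}.

    crafted() ->
        {select,op,r,{f,2},[{f,2}]}.
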
%% beam_bsm. This is tricky. Our function must be sane enough to not crash