Diffstat (limited to 'lib/compiler')
-rw-r--r--  lib/compiler/doc/src/notes.xml          17
-rw-r--r--  lib/compiler/src/beam_block.erl         98
-rw-r--r--  lib/compiler/src/beam_dead.erl           7
-rw-r--r--  lib/compiler/src/beam_jump.erl          22
-rw-r--r--  lib/compiler/test/beam_block_SUITE.erl  27
-rw-r--r--  lib/compiler/test/beam_jump_SUITE.erl   19
-rw-r--r--  lib/compiler/test/match_SUITE.erl       13
-rw-r--r--  lib/compiler/vsn.mk                      2
8 files changed, 150 insertions, 55 deletions
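
The release note in the notes.xml hunk below refers to OTP-13738, a literal binary matching regression introduced in 19.0. As a rough sketch of the failing shape -- adapted from the literal_binary/1 test case added to match_SUITE.erl in this commit, with a made-up module name used only for illustration -- the call below must resolve to the third clause rather than a wrong one:

%% Hypothetical standalone module; only literal_binary_match/2 is taken
%% from the test case added in this commit.
-module(otp13738_repro).
-export([check/0]).

check() ->
    %% Neither <<"x">> clause matches <<"y">>, so clause 3 must be selected.
    3 = literal_binary_match(bar, <<"y">>),
    ok.

literal_binary_match(bar, <<"x">>) -> 1;
literal_binary_match(_, <<"x">>) -> 2;
literal_binary_match(_, <<"y">>) -> 3;
literal_binary_match(_, _) -> fail.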
diff --git a/lib/compiler/doc/src/notes.xml b/lib/compiler/doc/src/notes.xml
index e25cdc580c..92b3d809fc 100644
--- a/lib/compiler/doc/src/notes.xml
+++ b/lib/compiler/doc/src/notes.xml
@@ -32,6 +32,23 @@
   <p>This document describes the changes made to the Compiler
   application.</p>
 
+<section><title>Compiler 7.0.1</title>
+
+  <section><title>Fixed Bugs and Malfunctions</title>
+    <list>
+      <item>
+        <p>
+          A literal binary matching regression was introduced in
+          19.0 where a match could fail to resolve to the right
+          clause. This has now been fixed.</p>
+        <p>
+          Own Id: OTP-13738</p>
+      </item>
+    </list>
+  </section>
+
+</section>
+
 <section><title>Compiler 7.0</title>
 
   <section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/compiler/src/beam_block.erl b/lib/compiler/src/beam_block.erl
index 85d332c56e..ec41925beb 100644
--- a/lib/compiler/src/beam_block.erl
+++ b/lib/compiler/src/beam_block.erl
@@ -159,14 +159,43 @@ find_fixpoint(OptFun, Is0) ->
     end.
 
 %% move_allocates(Is0) -> Is
-%% Move allocate instructions upwards in the instruction stream, in the
-%% hope of getting more possibilities for optimizing away moves later.
+%% Move allocate instructions upwards in the instruction stream
+%% (within the same block), in the hope of getting more possibilities
+%% for optimizing away moves later.
 %%
-%% NOTE: Moving allocation instructions is only safe because it is done
-%% immediately after code generation so that we KNOW that if {x,X} is
-%% initialized, all x registers with lower numbers are also initialized.
-%% That assumption may not be true after other optimizations, such as
-%% the beam_utils:live_opt/1 optimization.
+%% For example, we can transform the following instructions:
+%%
+%%    get_tuple_element x(1) Element => x(2)
+%%    allocate_zero StackSize 3    %% x(0), x(1), x(2) are live
+%%
+%% to the following instructions:
+%%
+%%    allocate_zero StackSize 2    %% x(0) and x(1) are live
+%%    get_tuple_element x(1) Element => x(2)
+%%
+%% NOTE: Since the beam_reorder pass has been run, it is no longer
+%% safe to assume that if x(N) is initialized, then all lower-numbered
+%% x registers are also initialized.
+%%
+%% For example, in general it is not safe to transform the following
+%% instructions:
+%%
+%%    get_tuple_element x(0) Element => x(1)
+%%    allocate_zero StackSize 3    %x(0), x(1), x(2) are live
+%%
+%% to the following instructions:
+%%
+%%    allocate_zero StackSize 3
+%%    get_tuple_element x(0) Element => x(1)
+%%
+%% The transformation is safe if and only if x(1) has been
+%% initialized previously. Unfortunately, beam_reorder may have moved
+%% a get_tuple_element instruction so that x(1) is not always
+%% initialized when this code is reached. To find whether or not x(1)
+%% is initialized, we would need to analyze all code preceding these
+%% two instructions (across branches). Since we currently don't have
+%% any practical mechanism for doing that, we will have to
+%% conservatively assume that the transformation is unsafe.
 
 move_allocates([{block,Bl0}|Is]) ->
     Bl = move_allocates_1(reverse(Bl0), []),
@@ -175,27 +204,19 @@ move_allocates([I|Is]) ->
     [I|move_allocates(Is)];
 move_allocates([]) -> [].
 
-move_allocates_1([{set,[],[],{alloc,_,_}=Alloc}|Is0], Acc0) ->
-    {Is,Acc} = move_allocates_2(Alloc, Is0, Acc0),
-    move_allocates_1(Is, Acc);
+move_allocates_1([I|Is], [{set,[],[],{alloc,Live0,Info}}|Acc]=Acc0) ->
+    case {alloc_may_pass(I),alloc_live_regs(I, Live0)} of
+        {false,_} ->
+            move_allocates_1(Is, [I|Acc0]);
+        {true,not_possible} ->
+            move_allocates_1(Is, [I|Acc0]);
+        {true,Live} when is_integer(Live) ->
+            A = {set,[],[],{alloc,Live,Info}},
+            move_allocates_1(Is, [A,I|Acc])
+    end;
 move_allocates_1([I|Is], Acc) ->
     move_allocates_1(Is, [I|Acc]);
-move_allocates_1([], Is) -> Is.
-
-move_allocates_2({alloc,Live,Info}, [{set,[],[],{alloc,Live0,Info0}}|Is], Acc) ->
-    Live = Live0,    % Assertion.
-    Alloc = {alloc,Live,combine_alloc(Info0, Info)},
-    move_allocates_2(Alloc, Is, Acc);
-move_allocates_2({alloc,Live,Info}=Alloc0, [I|Is]=Is0, Acc) ->
-    case alloc_may_pass(I) of
-        false ->
-            {Is0,[{set,[],[],Alloc0}|Acc]};
-        true ->
-            Alloc = {alloc,alloc_live_regs(I, Live),Info},
-            move_allocates_2(Alloc, Is, [I|Acc])
-    end;
-move_allocates_2(Alloc, [], Acc) ->
-    {[],[{set,[],[],Alloc}|Acc]}.
+move_allocates_1([], Acc) -> Acc.
 
 alloc_may_pass({set,_,_,{alloc,_,_}}) -> false;
 alloc_may_pass({set,_,_,{set_tuple_element,_}}) -> false;
@@ -204,9 +225,6 @@ alloc_may_pass({set,_,_,put_list}) -> false;
 alloc_may_pass({set,_,_,put}) -> false;
 alloc_may_pass({set,_,_,_}) -> true.
 
-combine_alloc({_,Ns,Nh1,Init}, {_,nostack,Nh2,[]}) ->
-    {zero,Ns,beam_utils:combine_heap_needs(Nh1, Nh2),Init}.
-
 %% opt([Instruction]) -> [Instruction]
 %%  Optimize the instruction stream inside a basic block.
 
@@ -393,10 +411,19 @@ eliminate_use_of_from_reg([I]=Is, From, _To, Acc) ->
 %% opt_alloc(Instructions) -> Instructions'
 %%  Optimises all allocate instructions.
 
+opt_alloc([{set,[],[],{alloc,Live0,Info0}},
+           {set,[],[],{alloc,Live,Info}}|Is]) ->
+    Live = Live0,    %Assertion.
+    Alloc = combine_alloc(Info0, Info),
+    I = {set,[],[],{alloc,Live,Alloc}},
+    opt_alloc([I|Is]);
 opt_alloc([{set,[],[],{alloc,R,{_,Ns,Nh,[]}}}|Is]) ->
     [{set,[],[],opt_alloc(Is, Ns, Nh, R)}|Is];
 opt_alloc([I|Is]) -> [I|opt_alloc(Is)];
 opt_alloc([]) -> [].
+
+combine_alloc({_,Ns,Nh1,Init}, {_,nostack,Nh2,[]}) ->
+    {zero,Ns,beam_utils:combine_heap_needs(Nh1, Nh2),Init}.
 
 %% opt_alloc(Instructions, FrameSize, HeapNeed, LivingRegs) -> [Instr]
 %%  Generates the optimal sequence of instructions for
@@ -445,13 +472,14 @@ count_ones(Bits, Acc) ->
 
 alloc_live_regs({set,Ds,Ss,_}, Regs0) ->
     Rset = x_live(Ss, x_dead(Ds, (1 bsl Regs0)-1)),
-    live_regs(Rset).
+    live_regs(0, Rset).
 
-live_regs(Regs) ->
-    live_regs_1(0, Regs).
-
-live_regs_1(N, 0) -> N;
-live_regs_1(N, Regs) -> live_regs_1(N+1, Regs bsr 1).
+live_regs(N, 0) ->
+    N;
+live_regs(N, Regs) when Regs band 1 =:= 1 ->
+    live_regs(N+1, Regs bsr 1);
+live_regs(_, _) ->
+    not_possible.
 
 x_dead([{x,N}|Rs], Regs) -> x_dead(Rs, Regs band (bnot (1 bsl N)));
 x_dead([_|Rs], Regs) -> x_dead(Rs, Regs);
diff --git a/lib/compiler/src/beam_dead.erl b/lib/compiler/src/beam_dead.erl
index b01f58f683..6f6d742293 100644
--- a/lib/compiler/src/beam_dead.erl
+++ b/lib/compiler/src/beam_dead.erl
@@ -272,17 +272,14 @@ backward([{jump,{f,To}}=J|[{bif,Op,_,Ops,Reg}|Is]=Is0], D, Acc) ->
     catch
         throw:not_possible -> backward(Is0, D, [J|Acc])
     end;
-backward([{test,bs_start_match2,F,Live,[R,_]=Args,Ctxt}|Is], D,
+backward([{test,bs_start_match2,F,_,[R,_],Ctxt}=I|Is], D,
          [{test,bs_match_string,F,[Ctxt,Bs]},
           {test,bs_test_tail2,F,[Ctxt,0]}|Acc0]=Acc) ->
-    {f,To0} = F,
-    To = shortcut_bs_start_match(To0, R, D),
     case beam_utils:is_killed(Ctxt, Acc0, D) of
         true ->
-            Eq = {test,is_eq_exact,{f,To},[R,{literal,Bs}]},
+            Eq = {test,is_eq_exact,F,[R,{literal,Bs}]},
             backward(Is, D, [Eq|Acc0]);
         false ->
-            I = {test,bs_start_match2,{f,To},Live,Args,Ctxt},
             backward(Is, D, [I|Acc])
     end;
 backward([{test,bs_start_match2,{f,To0},Live,[Src|_]=Info,Dst}|Is], D, Acc) ->
diff --git a/lib/compiler/src/beam_jump.erl b/lib/compiler/src/beam_jump.erl
index 09cd3aa2d4..48b5a32814 100644
--- a/lib/compiler/src/beam_jump.erl
+++ b/lib/compiler/src/beam_jump.erl
@@ -167,12 +167,18 @@ share_1([{label,L}=Lbl|Is], Dict0, Seq, Acc) ->
     end;
 share_1([{func_info,_,_,_}=I|Is], _, [], Acc) ->
     reverse(Is, [I|Acc]);
+share_1([{'catch',_,_}=I|Is], Dict0, Seq, Acc) ->
+    Dict = clean_non_sharable(Dict0),
+    share_1(Is, Dict, [I|Seq], Acc);
 share_1([{'try',_,_}=I|Is], Dict0, Seq, Acc) ->
     Dict = clean_non_sharable(Dict0),
     share_1(Is, Dict, [I|Seq], Acc);
 share_1([{try_case,_}=I|Is], Dict0, Seq, Acc) ->
     Dict = clean_non_sharable(Dict0),
     share_1(Is, Dict, [I|Seq], Acc);
+share_1([{catch_end,_}=I|Is], Dict0, Seq, Acc) ->
+    Dict = clean_non_sharable(Dict0),
+    share_1(Is, Dict, [I|Seq], Acc);
 share_1([I|Is], Dict, Seq, Acc) ->
     case is_unreachable_after(I) of
         false ->
@@ -182,18 +188,18 @@ share_1([I|Is], Dict, Seq, Acc) ->
     end.
 
 clean_non_sharable(Dict) ->
-    %% We are passing in or out of a 'try' block. Remove
-    %% sequences that should not shared over the boundaries
-    %% of a 'try' block. Since the end of the sequence must match,
-    %% the only possible match between a sequence outside and
-    %% a sequence inside the 'try' block is a sequence that ends
-    %% with an instruction that causes an exception. Any sequence
-    %% that causes an exception must contain a line/1 instruction.
+    %% We are passing in or out of a 'catch' or 'try' block. Remove
+    %% sequences that should not be shared over the boundaries of the
+    %% block. Since the end of the sequence must match, the only
+    %% possible match between a sequence outside and a sequence inside
+    %% the 'catch'/'try' block is a sequence that ends with an
+    %% instruction that causes an exception. Any sequence that causes
+    %% an exception must contain a line/1 instruction.
     maps:filter(fun(K, _V) -> sharable_with_try(K) end, Dict).
 
 sharable_with_try([{line,_}|_]) ->
     %% This sequence may cause an exception and may potentially
-    %% match a sequence on the other side of the 'try' block
+    %% match a sequence on the other side of the 'catch'/'try' block
     %% boundary.
     false;
 sharable_with_try([_|Is]) ->
diff --git a/lib/compiler/test/beam_block_SUITE.erl b/lib/compiler/test/beam_block_SUITE.erl
index 4bcb252833..9fcb6e497d 100644
--- a/lib/compiler/test/beam_block_SUITE.erl
+++ b/lib/compiler/test/beam_block_SUITE.erl
@@ -21,7 +21,8 @@
 
 -export([all/0,suite/0,groups/0,init_per_suite/1,end_per_suite/1,
          init_per_group/2,end_per_group/2,
-         get_map_elements/1,otp_7345/1,move_opt_across_gc_bif/1]).
+         get_map_elements/1,otp_7345/1,move_opt_across_gc_bif/1,
+         erl_202/1]).
 
 %% The only test for the following functions is that
 %% the code compiles and is accepted by beam_validator.
@@ -37,7 +38,8 @@ groups() ->
     [{p,[parallel],
       [get_map_elements,
        otp_7345,
-       move_opt_across_gc_bif
+       move_opt_across_gc_bif,
+       erl_202
       ]}].
 
 init_per_suite(Config) ->
@@ -135,6 +137,27 @@ positive(speaking) ->
 
 paris([], P) ->
     P + 1.
+
+%% See https://bugs.erlang.org/browse/ERL-202.
+%% Test that move_allocates/1 in beam_block doesn't move allocate
+%% when it would not be safe.
+
+-record(erl_202_r1, {y}).
+-record(erl_202_r2, {x}).
+
+erl_202(_Config) ->
+    Ref = make_ref(),
+    Ref = erl_202({{1,2},Ref}, 42),
+
+    {Ref} = erl_202({7,8}, #erl_202_r1{y=#erl_202_r2{x=Ref}}),
+
+    ok.
+
+erl_202({{_, _},X}, _) ->
+    X;
+erl_202({_, _}, #erl_202_r1{y=R2}) ->
+    {R2#erl_202_r2.x}.
+
 %%%
 %%% The only test of the following code is that it compiles.
 %%%
diff --git a/lib/compiler/test/beam_jump_SUITE.erl b/lib/compiler/test/beam_jump_SUITE.erl
index 0b13adaff2..088f63606c 100644
--- a/lib/compiler/test/beam_jump_SUITE.erl
+++ b/lib/compiler/test/beam_jump_SUITE.erl
@@ -21,7 +21,7 @@
 
 -export([all/0,suite/0,groups/0,init_per_suite/1,end_per_suite/1,
          init_per_group/2,end_per_group/2,
-         undefined_label/1]).
+         undefined_label/1,ambiguous_catch_try_state/1]).
 
 suite() ->
     [{ct_hooks,[ts_install_cth]}].
@@ -32,7 +32,8 @@ all() ->
 
 groups() ->
     [{p,[parallel],
-      [undefined_label
+      [undefined_label,
+       ambiguous_catch_try_state
       ]}].
 
 init_per_suite(Config) ->
@@ -57,3 +58,17 @@ flights(0, [], []) when [], 0; 0.0, [], false ->
     clark;
 flights(_, Reproduction, introduction) when false, Reproduction ->
     responsible.
+
+%% [ERL-209] beam_jump would share 'catch' blocks, causing an
+%% ambiguous_catch_try_state error in beam_validator.
+
+ambiguous_catch_try_state(_Config) ->
+    {{'EXIT',{{case_clause,song},_}},{'EXIT',{{case_clause,song},_}}} =
+        checks(42),
+    ok.
+
+river() -> song.
+
+checks(Wanted) ->
+    %% Must be one line to cause the unsafe optimization.
+    {catch case river() of sheet -> begin +Wanted, if "da" -> Wanted end end end, catch case river() of sheet -> begin +Wanted, if "da" -> Wanted end end end}.
diff --git a/lib/compiler/test/match_SUITE.erl b/lib/compiler/test/match_SUITE.erl
index 31402ac717..127679ba69 100644
--- a/lib/compiler/test/match_SUITE.erl
+++ b/lib/compiler/test/match_SUITE.erl
@@ -24,7 +24,7 @@
          pmatch/1,mixed/1,aliases/1,non_matching_aliases/1,
          match_in_call/1,untuplify/1,shortcut_boolean/1,letify_guard/1,
          selectify/1,underscore/1,match_map/1,map_vars_used/1,
-         coverage/1,grab_bag/1]).
+         coverage/1,grab_bag/1,literal_binary/1]).
 
 -include_lib("common_test/include/ct.hrl").
 
@@ -40,7 +40,7 @@ groups() ->
       match_in_call,untuplify,
       shortcut_boolean,letify_guard,selectify,
       underscore,match_map,map_vars_used,coverage,
-      grab_bag]}].
+      grab_bag,literal_binary]}].
 
 
 init_per_suite(Config) ->
@@ -574,6 +574,15 @@ grab_bag_remove_failure([{stretch,_,Mi}=Stretch | Specs], Unit, _MaxFailure) ->
             ok
     end.
 
+%% Regression in 19.0, reported by Alexei Sholik
+literal_binary(_Config) ->
+    3 = literal_binary_match(bar,<<"y">>),
+    ok.
+
+literal_binary_match(bar, <<"x">>) -> 1;
+literal_binary_match(_, <<"x">>) -> 2;
+literal_binary_match(_, <<"y">>) -> 3;
+literal_binary_match(_, _) -> fail.
 
 id(I) -> I.
 
diff --git a/lib/compiler/vsn.mk b/lib/compiler/vsn.mk
index 23dd4bd4b1..334784657e 100644
--- a/lib/compiler/vsn.mk
+++ b/lib/compiler/vsn.mk
@@ -1 +1 @@
-COMPILER_VSN = 7.0
+COMPILER_VSN = 7.0.1
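
For context, the beam_block.erl change above addresses ERL-202: after the beam_reorder pass, move_allocates/1 could hoist an allocate instruction past a get_tuple_element and count an x register as live even though it was not guaranteed to be initialized. A minimal sketch of Erlang source that exercises that pattern follows; it is adapted from the erl_202/1 test case added to beam_block_SUITE.erl in this commit, and the module, record, and function names here are made up for illustration.

-module(erl202_repro).
-export([run/0]).

-record(r1, {y}).
-record(r2, {x}).

run() ->
    Ref = make_ref(),
    %% First clause: the inner tuple element is returned directly.
    Ref = pick({{1,2},Ref}, 42),
    %% Second clause: {7,8} does not match {{_,_},X}, so the record
    %% clause is taken; with the bug, the relocated allocate could
    %% count an uninitialized x register as live (see the NOTE added
    %% to beam_block.erl above).
    {Ref} = pick({7,8}, #r1{y=#r2{x=Ref}}),
    ok.

pick({{_,_},X}, _) ->
    X;
pick({_,_}, #r1{y=R2}) ->
    {R2#r2.x}.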