From 921cf991e09698d63d0f4f9a20bf2cfc23b2e896 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn=20Gustavsson?=
Date: Mon, 1 Aug 2016 17:06:40 +0200
Subject: beam_block: Fix potentially unsafe optimization in move_allocates/1

beam_block has an optimization that is only safe when it is applied
immediately after code generation. That is pointed out in a comment:

  NOTE: Moving allocation instructions is only safe because it is done
  immediately after code generation so that we KNOW that if {x,X} is
  initialized, all x registers with lower numbers are also initialized.
  That assumption may not be true after other optimizations, such as
  the beam_utils:live_opt/1 optimization.

The new beam_reorder pass added in OTP 19 runs before beam_block.
Therefore, the optimization is potentially unsafe. The optimization is
also unsafe if compilation is started from assembly code in a .S file.

Rewrite the optimization to make it safe. See the newly added comment
for details.

ERL-202
---
 lib/compiler/src/beam_block.erl        | 98 ++++++++++++++++++++++------------
 lib/compiler/test/beam_block_SUITE.erl | 27 +++++++++-
 2 files changed, 88 insertions(+), 37 deletions(-)

diff --git a/lib/compiler/src/beam_block.erl b/lib/compiler/src/beam_block.erl
index 85d332c56e..ec41925beb 100644
--- a/lib/compiler/src/beam_block.erl
+++ b/lib/compiler/src/beam_block.erl
@@ -159,14 +159,43 @@ find_fixpoint(OptFun, Is0) ->
     end.
 
 %% move_allocates(Is0) -> Is
-%%  Move allocate instructions upwards in the instruction stream, in the
-%%  hope of getting more possibilities for optimizing away moves later.
+%%  Move allocate instructions upwards in the instruction stream
+%%  (within the same block), in the hope of getting more possibilities
+%%  for optimizing away moves later.
 %%
-%%  NOTE: Moving allocation instructions is only safe because it is done
-%%  immediately after code generation so that we KNOW that if {x,X} is
-%%  initialized, all x registers with lower numbers are also initialized.
-%%  That assumption may not be true after other optimizations, such as
-%%  the beam_utils:live_opt/1 optimization.
+%%  For example, we can transform the following instructions:
+%%
+%%    get_tuple_element x(1) Element => x(2)
+%%    allocate_zero StackSize 3    %% x(0), x(1), x(2) are live
+%%
+%%  to the following instructions:
+%%
+%%    allocate_zero StackSize 2    %% x(0) and x(1) are live
+%%    get_tuple_element x(1) Element => x(2)
+%%
+%%  NOTE: Since the beam_reorder pass has been run, it is no longer
+%%  safe to assume that if x(N) is initialized, then all lower-numbered
+%%  x registers are also initialized.
+%%
+%%  For example, in general it is not safe to transform the following
+%%  instructions:
+%%
+%%    get_tuple_element x(0) Element => x(1)
+%%    allocate_zero StackSize 3    %x(0), x(1), x(2) are live
+%%
+%%  to the following instructions:
+%%
+%%    allocate_zero StackSize 3
+%%    get_tuple_element x(0) Element => x(1)
+%%
+%%  The transformation is safe if and only if x(1) has been
+%%  initialized previously. Unfortunately, beam_reorder may have moved
+%%  a get_tuple_element instruction so that x(1) is not always
+%%  initialized when this code is reached. To find whether or not x(1)
+%%  is initialized, we would need to analyze all code preceding these
+%%  two instructions (across branches). Since we currently don't have
+%%  any practical mechanism for doing that, we will have to
+%%  conservatively assume that the transformation is unsafe.
 
 move_allocates([{block,Bl0}|Is]) ->
     Bl = move_allocates_1(reverse(Bl0), []),
@@ -175,27 +204,19 @@ move_allocates([I|Is]) ->
     [I|move_allocates(Is)];
 move_allocates([]) -> [].
 
-move_allocates_1([{set,[],[],{alloc,_,_}=Alloc}|Is0], Acc0) ->
-    {Is,Acc} = move_allocates_2(Alloc, Is0, Acc0),
-    move_allocates_1(Is, Acc);
+move_allocates_1([I|Is], [{set,[],[],{alloc,Live0,Info}}|Acc]=Acc0) ->
+    case {alloc_may_pass(I),alloc_live_regs(I, Live0)} of
+        {false,_} ->
+            move_allocates_1(Is, [I|Acc0]);
+        {true,not_possible} ->
+            move_allocates_1(Is, [I|Acc0]);
+        {true,Live} when is_integer(Live) ->
+            A = {set,[],[],{alloc,Live,Info}},
+            move_allocates_1(Is, [A,I|Acc])
+    end;
 move_allocates_1([I|Is], Acc) ->
     move_allocates_1(Is, [I|Acc]);
-move_allocates_1([], Is) -> Is.
-
-move_allocates_2({alloc,Live,Info}, [{set,[],[],{alloc,Live0,Info0}}|Is], Acc) ->
-    Live = Live0,                               % Assertion.
-    Alloc = {alloc,Live,combine_alloc(Info0, Info)},
-    move_allocates_2(Alloc, Is, Acc);
-move_allocates_2({alloc,Live,Info}=Alloc0, [I|Is]=Is0, Acc) ->
-    case alloc_may_pass(I) of
-        false ->
-            {Is0,[{set,[],[],Alloc0}|Acc]};
-        true ->
-            Alloc = {alloc,alloc_live_regs(I, Live),Info},
-            move_allocates_2(Alloc, Is, [I|Acc])
-    end;
-move_allocates_2(Alloc, [], Acc) ->
-    {[],[{set,[],[],Alloc}|Acc]}.
+move_allocates_1([], Acc) -> Acc.
 
 alloc_may_pass({set,_,_,{alloc,_,_}}) -> false;
 alloc_may_pass({set,_,_,{set_tuple_element,_}}) -> false;
@@ -204,9 +225,6 @@ alloc_may_pass({set,_,_,put_list}) -> false;
 alloc_may_pass({set,_,_,put}) -> false;
 alloc_may_pass({set,_,_,_}) -> true.
 
-combine_alloc({_,Ns,Nh1,Init}, {_,nostack,Nh2,[]}) ->
-    {zero,Ns,beam_utils:combine_heap_needs(Nh1, Nh2),Init}.
-
 %% opt([Instruction]) -> [Instruction]
 %%  Optimize the instruction stream inside a basic block.
 
@@ -393,10 +411,19 @@ eliminate_use_of_from_reg([I]=Is, From, _To, Acc) ->
 %% opt_alloc(Instructions) -> Instructions'
 %%  Optimises all allocate instructions.
 
+opt_alloc([{set,[],[],{alloc,Live0,Info0}},
+           {set,[],[],{alloc,Live,Info}}|Is]) ->
+    Live = Live0,                               %Assertion.
+    Alloc = combine_alloc(Info0, Info),
+    I = {set,[],[],{alloc,Live,Alloc}},
+    opt_alloc([I|Is]);
 opt_alloc([{set,[],[],{alloc,R,{_,Ns,Nh,[]}}}|Is]) ->
     [{set,[],[],opt_alloc(Is, Ns, Nh, R)}|Is];
 opt_alloc([I|Is]) -> [I|opt_alloc(Is)];
 opt_alloc([]) -> [].
+
+combine_alloc({_,Ns,Nh1,Init}, {_,nostack,Nh2,[]}) ->
+    {zero,Ns,beam_utils:combine_heap_needs(Nh1, Nh2),Init}.
 
 %% opt_alloc(Instructions, FrameSize, HeapNeed, LivingRegs) -> [Instr]
 %%  Generates the optimal sequence of instructions for
@@ -445,13 +472,14 @@ count_ones(Bits, Acc) ->
 
 alloc_live_regs({set,Ds,Ss,_}, Regs0) ->
     Rset = x_live(Ss, x_dead(Ds, (1 bsl Regs0)-1)),
-    live_regs(Rset).
+    live_regs(0, Rset).
 
-live_regs(Regs) ->
-    live_regs_1(0, Regs).
-
-live_regs_1(N, 0) -> N;
-live_regs_1(N, Regs) -> live_regs_1(N+1, Regs bsr 1).
+live_regs(N, 0) ->
+    N;
+live_regs(N, Regs) when Regs band 1 =:= 1 ->
+    live_regs(N+1, Regs bsr 1);
+live_regs(_, _) ->
+    not_possible.
 
 x_dead([{x,N}|Rs], Regs) -> x_dead(Rs, Regs band (bnot (1 bsl N)));
 x_dead([_|Rs], Regs) -> x_dead(Rs, Regs);
diff --git a/lib/compiler/test/beam_block_SUITE.erl b/lib/compiler/test/beam_block_SUITE.erl
index 4bcb252833..9fcb6e497d 100644
--- a/lib/compiler/test/beam_block_SUITE.erl
+++ b/lib/compiler/test/beam_block_SUITE.erl
@@ -21,7 +21,8 @@
 
 -export([all/0,suite/0,groups/0,init_per_suite/1,end_per_suite/1,
          init_per_group/2,end_per_group/2,
-         get_map_elements/1,otp_7345/1,move_opt_across_gc_bif/1]).
+         get_map_elements/1,otp_7345/1,move_opt_across_gc_bif/1,
+         erl_202/1]).
 
 %% The only test for the following functions is that
 %% the code compiles and is accepted by beam_validator.
@@ -37,7 +38,8 @@ groups() ->
     [{p,[parallel],
       [get_map_elements,
        otp_7345,
-       move_opt_across_gc_bif
+       move_opt_across_gc_bif,
+       erl_202
       ]}].
 
 init_per_suite(Config) ->
@@ -135,6 +137,27 @@ positive(speaking) ->
 
 paris([], P) -> P + 1.
 
+
+%% See https://bugs.erlang.org/browse/ERL-202.
+%% Test that move_allocates/1 in beam_block doesn't move allocate
+%% when it would not be safe.
+
+-record(erl_202_r1, {y}).
+-record(erl_202_r2, {x}).
+
+erl_202(_Config) ->
+    Ref = make_ref(),
+    Ref = erl_202({{1,2},Ref}, 42),
+
+    {Ref} = erl_202({7,8}, #erl_202_r1{y=#erl_202_r2{x=Ref}}),
+
+    ok.
+
+erl_202({{_, _},X}, _) ->
+    X;
+erl_202({_, _}, #erl_202_r1{y=R2}) ->
+    {R2#erl_202_r2.x}.
+
 %%%
 %%% The only test of the following code is that it compiles.
 %%%
-- 
cgit v1.2.3
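The heart of the rewritten optimization is the new live_regs/2 in the patch
above: it treats the set of initialized x registers as a bitmask (bit N set
means x(N) is initialized) and returns a live-register count only when the
mask is a contiguous run of ones starting at x(0). Any hole yields
not_possible, and move_allocates_1/2 then leaves the allocate instruction
where it is instead of moving it. Below is a minimal standalone sketch of
that check; the module name and the example values are made up for
illustration and are not part of the patch.

-module(erl_202_sketch).
-export([live_regs/2]).

%% Bit N set in Regs means that x(N) is initialized.
live_regs(N, 0) ->
    %% Only zero bits remain: x(0)..x(N-1) form a contiguous live run,
    %% so N can be used as the live count of a moved allocation.
    N;
live_regs(N, Regs) when Regs band 1 =:= 1 ->
    %% x(N) is initialized; keep scanning upwards.
    live_regs(N+1, Regs bsr 1);
live_regs(_, _) ->
    %% A lower-numbered x register is uninitialized although a
    %% higher-numbered one is live; moving the allocation is unsafe.
    not_possible.

%% In the Erlang shell (hypothetical register masks):
%%   erl_202_sketch:live_regs(0, 2#0111).   %=> 3 (x0..x2 live; safe to move)
%%   erl_202_sketch:live_regs(0, 2#0101).   %=> not_possible (x1 is a hole)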