Diffstat (limited to 'lib/compiler/src/beam_block.erl')
-rw-r--r--  lib/compiler/src/beam_block.erl  599
1 file changed, 216 insertions(+), 383 deletions(-)
diff --git a/lib/compiler/src/beam_block.erl b/lib/compiler/src/beam_block.erl
index 0321b1c07b..6a35191f6e 100644
--- a/lib/compiler/src/beam_block.erl
+++ b/lib/compiler/src/beam_block.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2013. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2016. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -23,14 +23,13 @@
-module(beam_block).
-export([module/2]).
--import(lists, [mapfoldl/3,reverse/1,reverse/2,foldl/3,member/2]).
--define(MAXREG, 1024).
+-import(lists, [reverse/1,reverse/2,foldl/3,member/2]).
-module({Mod,Exp,Attr,Fs0,Lc0}, _Opt) ->
- {Fs,Lc} = mapfoldl(fun function/2, Lc0, Fs0),
+module({Mod,Exp,Attr,Fs0,Lc}, _Opt) ->
+ Fs = [function(F) || F <- Fs0],
{ok,{Mod,Exp,Attr,Fs,Lc}}.
-function({function,Name,Arity,CLabel,Is0}, Lc0) ->
+function({function,Name,Arity,CLabel,Is0}) ->
try
%% Collect basic blocks and optimize them.
Is1 = blockify(Is0),
@@ -40,11 +39,8 @@ function({function,Name,Arity,CLabel,Is0}, Lc0) ->
Is5 = opt_blocks(Is4),
Is6 = beam_utils:delete_live_annos(Is5),
- %% Optimize bit syntax.
- {Is,Lc} = bsm_opt(Is6, Lc0),
-
%% Done.
- {{function,Name,Arity,CLabel,Is},Lc}
+ {function,Name,Arity,CLabel,Is6}
catch
Class:Error ->
Stack = erlang:get_stacktrace(),
@@ -62,56 +58,15 @@ blockify(Is) ->
blockify([{loop_rec,{f,Fail},{x,0}},{loop_rec_end,_Lbl},{label,Fail}|Is], Acc) ->
%% Useless instruction sequence.
blockify(Is, Acc);
-blockify([{test,is_atom,{f,Fail},[Reg]}=I|
- [{select,select_val,Reg,{f,Fail},
- [{atom,false},{f,_}=BrFalse,
- {atom,true}=AtomTrue,{f,_}=BrTrue]}|Is]=Is0],
- [{block,Bl}|_]=Acc) ->
- case is_last_bool(Bl, Reg) of
- false ->
- blockify(Is0, [I|Acc]);
- true ->
- %% The last instruction is a boolean operator/guard BIF that can't fail.
- %% We can convert the three-way branch to a two-way branch (eliminating
- %% the reference to the failure label).
- blockify(Is, [{jump,BrTrue},
- {test,is_eq_exact,BrFalse,[Reg,AtomTrue]}|Acc])
- end;
-blockify([{test,is_atom,{f,Fail},[Reg]}=I|
- [{select,select_val,Reg,{f,Fail},
- [{atom,true}=AtomTrue,{f,_}=BrTrue,
- {atom,false},{f,_}=BrFalse]}|Is]=Is0],
- [{block,Bl}|_]=Acc) ->
- case is_last_bool(Bl, Reg) of
- false ->
- blockify(Is0, [I|Acc]);
- true ->
- blockify(Is, [{jump,BrTrue},
- {test,is_eq_exact,BrFalse,[Reg,AtomTrue]}|Acc])
- end;
blockify([I|Is0]=IsAll, Acc) ->
- case is_bs_put(I) of
- true ->
- {BsPuts0,Is} = collect_bs_puts(IsAll),
- BsPuts = opt_bs_puts(BsPuts0),
- blockify(Is, reverse(BsPuts, Acc));
- false ->
- case collect(I) of
- error -> blockify(Is0, [I|Acc]);
- Instr when is_tuple(Instr) ->
- {Block,Is} = collect_block(IsAll),
- blockify(Is, [{block,Block}|Acc])
- end
+ case collect(I) of
+ error -> blockify(Is0, [I|Acc]);
+ Instr when is_tuple(Instr) ->
+ {Block,Is} = collect_block(IsAll),
+ blockify(Is, [{block,Block}|Acc])
end;
blockify([], Acc) -> reverse(Acc).
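%% A small illustration of what blockify/1 produces (hypothetical input;
%% assumes the usual collect/1 clause for move instructions, which lies
%% outside this hunk):
%%
%%   blockify([{label,1},{move,{x,0},{x,1}},{jump,{f,2}}])
%%
%% yields
%%
%%   [{label,1},
%%    {block,[{set,[{x,1}],[{x,0}],move}]},
%%    {jump,{f,2}}]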
-is_last_bool([{set,[Reg],As,{bif,N,_}}], Reg) ->
- Ar = length(As),
- erl_internal:new_type_test(N, Ar) orelse erl_internal:comp_op(N, Ar)
- orelse erl_internal:bool_op(N, Ar);
-is_last_bool([_|Is], Reg) -> is_last_bool(Is, Reg);
-is_last_bool([], _) -> false.
-
collect_block(Is) ->
collect_block(Is, []).
@@ -126,7 +81,9 @@ collect_block([I|Is]=Is0, Acc) ->
case collect(I) of
error -> {reverse(Acc),Is0};
Instr -> collect_block(Is, [Instr|Acc])
- end.
+ end;
+collect_block([], Acc) ->
+ {reverse(Acc),[]}.
collect({allocate,N,R}) -> {set,[],[],{alloc,R,{nozero,N,0,[]}}};
collect({allocate_zero,N,R}) -> {set,[],[],{alloc,R,{zero,N,0,[]}}};
@@ -146,10 +103,10 @@ collect({get_list,S,D1,D2}) -> {set,[D1,D2],[S],get_list};
collect(remove_message) -> {set,[],[],remove_message};
collect({put_map,F,Op,S,D,R,{list,Puts}}) ->
{set,[D],[S|Puts],{alloc,R,{put_map,Op,F}}};
-collect({get_map_elements,F,S,{list,Gets}}) ->
- {Ss,Ds} = beam_utils:split_even(Gets),
- {set,Ds,[S|Ss],{get_map_elements,F}};
-collect({'catch',R,L}) -> {set,[R],[],{'catch',L}};
+collect({'catch'=Op,R,L}) ->
+ {set,[R],[],{try_catch,Op,L}};
+collect({'try'=Op,R,L}) ->
+ {set,[R],[],{try_catch,Op,L}};
collect(fclearerror) -> {set,[],[],fclearerror};
collect({fcheckerror,{f,0}}) -> {set,[],[],fcheckerror};
collect({fmove,S,D}) -> {set,[D],[S],fmove};
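%% For reference, two concrete translations performed by the clauses
%% above (register, label, and size values are illustrative only):
%%
%%   collect({allocate,2,3})       => {set,[],[],{alloc,3,{nozero,2,0,[]}}}
%%   collect({'try',{y,0},{f,42}}) => {set,[{y,0}],[],{try_catch,'try',{f,42}}}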
@@ -183,7 +140,9 @@ opt_blocks([I|Is]) ->
opt_blocks([]) -> [].
opt_block(Is0) ->
- Is = find_fixpoint(fun opt/1, Is0),
+ Is = find_fixpoint(fun(Is) ->
+ opt_tuple_element(opt(Is))
+ end, Is0),
opt_alloc(Is).
find_fixpoint(OptFun, Is0) ->
@@ -193,14 +152,43 @@ find_fixpoint(OptFun, Is0) ->
end.
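%% find_fixpoint/2 reapplies OptFun until the instruction list no longer
%% changes. A sketch of the idea (the full body lies outside this hunk):
%%
%%   find_fixpoint(OptFun, Is0) ->
%%       case OptFun(Is0) of
%%           Is0 -> Is0;                          %no change - done
%%           Is -> find_fixpoint(OptFun, Is)      %changed - try again
%%       end.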
%% move_allocates(Is0) -> Is
-%% Move allocate instructions upwards in the instruction stream, in the
-%% hope of getting more possibilities for optimizing away moves later.
+%% Move allocate instructions upwards in the instruction stream
+%% (within the same block), in the hope of getting more possibilities
+%% for optimizing away moves later.
+%%
+%% For example, we can transform the following instructions:
+%%
+%% get_tuple_element x(1) Element => x(2)
+%% allocate_zero StackSize 3 %% x(0), x(1), x(2) are live
+%%
+%% to the following instructions:
+%%
+%% allocate_zero StackSize 2 %% x(0) and x(1) are live
+%% get_tuple_element x(1) Element => x(2)
+%%
+%% NOTE: Since the beam_reorder pass has been run, it is no longer
+%% safe to assume that if x(N) is initialized, then all lower-numbered
+%% x registers are also initialized.
+%%
+%% For example, in general it is not safe to transform the following
+%% instructions:
+%%
+%% get_tuple_element x(0) Element => x(1)
+%% allocate_zero StackSize 3 %% x(0), x(1), x(2) are live
%%
-%% NOTE: Moving allocation instructions is only safe because it is done
-%% immediately after code generation so that we KNOW that if {x,X} is
-%% initialized, all x registers with lower numbers are also initialized.
-%% That assumption may not be true after other optimizations, such as
-%% the beam_utils:live_opt/1 optimization.
+%% to the following instructions:
+%%
+%% allocate_zero StackSize 3
+%% get_tuple_element x(0) Element => x(1)
+%%
+%% The transformation is safe if and only if x(1) has been
+%% initialized previously. Unfortunately, beam_reorder may have moved
+%% a get_tuple_element instruction so that x(1) is not always
+%% initialized when this code is reached. To find whether or not x(1)
+%% is initialized, we would need to analyze all code preceding these
+%% two instructions (across branches). Since we currently don't have
+%% any practical mechanism for doing that, we will have to
+%% conservatively assume that the transformation is unsafe.
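%% In the internal block format, the safe transformation shown first
%% above corresponds to rewriting (illustrative values):
%%
%%   [{set,[{x,2}],[{x,1}],{get_tuple_element,Element}},
%%    {set,[],[],{alloc,3,{zero,StackSize,0,[]}}}]
%%
%% to:
%%
%%   [{set,[],[],{alloc,2,{zero,StackSize,0,[]}}},
%%    {set,[{x,2}],[{x,1}],{get_tuple_element,Element}}]
%%
%% where the new number of live registers (2) is recomputed by
%% alloc_live_regs/2 further down.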
move_allocates([{block,Bl0}|Is]) ->
Bl = move_allocates_1(reverse(Bl0), []),
@@ -209,38 +197,26 @@ move_allocates([I|Is]) ->
[I|move_allocates(Is)];
move_allocates([]) -> [].
-move_allocates_1([{set,[],[],{alloc,_,_}=Alloc}|Is0], Acc0) ->
- {Is,Acc} = move_allocates_2(Alloc, Is0, Acc0),
- move_allocates_1(Is, Acc);
+move_allocates_1([I|Is], [{set,[],[],{alloc,Live0,Info}}|Acc]=Acc0) ->
+ case {alloc_may_pass(I),alloc_live_regs(I, Live0)} of
+ {false,_} ->
+ move_allocates_1(Is, [I|Acc0]);
+ {true,not_possible} ->
+ move_allocates_1(Is, [I|Acc0]);
+ {true,Live} when is_integer(Live) ->
+ A = {set,[],[],{alloc,Live,Info}},
+ move_allocates_1(Is, [A,I|Acc])
+ end;
move_allocates_1([I|Is], Acc) ->
move_allocates_1(Is, [I|Acc]);
-move_allocates_1([], Is) -> Is.
-
-move_allocates_2({alloc,Live,Info}, [{set,[],[],{alloc,Live0,Info0}}|Is], Acc) ->
- Live = Live0, % Assertion.
- Alloc = {alloc,Live,combine_alloc(Info0, Info)},
- move_allocates_2(Alloc, Is, Acc);
-move_allocates_2({alloc,Live,Info}=Alloc0, [I|Is]=Is0, Acc) ->
- case alloc_may_pass(I) of
- false ->
- {Is0,[{set,[],[],Alloc0}|Acc]};
- true ->
- Alloc = {alloc,alloc_live_regs(I, Live),Info},
- move_allocates_2(Alloc, Is, [I|Acc])
- end;
-move_allocates_2(Alloc, [], Acc) ->
- {[],[{set,[],[],Alloc}|Acc]}.
+move_allocates_1([], Acc) -> Acc.
alloc_may_pass({set,_,_,{alloc,_,_}}) -> false;
alloc_may_pass({set,_,_,{set_tuple_element,_}}) -> false;
-alloc_may_pass({set,_,_,{get_map_elements,_}}) -> false;
alloc_may_pass({set,_,_,put_list}) -> false;
alloc_may_pass({set,_,_,put}) -> false;
alloc_may_pass({set,_,_,_}) -> true.
-combine_alloc({_,Ns,Nh1,Init}, {_,nostack,Nh2,[]}) ->
- {zero,Ns,beam_utils:combine_heap_needs(Nh1, Nh2),Init}.
-
%% opt([Instruction]) -> [Instruction]
%% Optimize the instruction stream inside a basic block.
@@ -251,8 +227,6 @@ opt([{set,_,_,{line,_}}=Line1,
{set,[D2],[{integer,Idx2},Reg],{bif,element,{f,0}}}=I2|Is])
when Idx1 < Idx2, D1 =/= D2, D1 =/= Reg, D2 =/= Reg ->
opt([Line2,I2,Line1,I1|Is]);
-opt([{set,[_|_],_Ss,{get_map_elements,_F}}=I|Is]) ->
- [I|opt(Is)];
opt([{set,Ds0,Ss,Op}|Is0]) ->
{Ds,Is} = opt_moves(Ds0, Is0),
[{set,Ds,Ss,Op}|opt(Is)];
@@ -279,78 +253,167 @@ opt_moves([X0,Y0], Is0) ->
not_possible -> {[X,Y0],Is2};
{X,_} -> {[X,Y0],Is2};
{Y,Is} -> {[X,Y],Is}
- end;
-opt_moves(Ds, Is) ->
- %% multiple destinations -> pass through
- {Ds,Is}.
-
+ end.
%% opt_move(Dest, [Instruction]) -> {UpdatedDest,[Instruction]} | not_possible
%% If there is a {move,Dest,FinalDest} instruction
%% in the instruction stream, remove the move instruction
%% and let FinalDest be the destination.
-%%
-%% For this optimization to be safe, we must be sure that
-%% Dest will not be referenced in any other by other instructions
-%% in the rest of the instruction stream. Not even the indirect
-%% reference by an instruction that may allocate (such as
-%% test_heap/2 or a GC Bif) is allowed.
opt_move(Dest, Is) ->
- opt_move_1(Dest, Is, ?MAXREG, []).
-
-opt_move_1(R, [{set,_,_,{alloc,Live,_}}|_]=Is, SafeRegs, Acc) when Live < SafeRegs ->
- %% Downgrade number of safe regs and rescan the instruction, as it most probably
- %% is a gc_bif instruction.
- opt_move_1(R, Is, Live, Acc);
-opt_move_1(R, [{set,[{x,X}=D],[R],move}|Is], SafeRegs, Acc) ->
- case X < SafeRegs andalso beam_utils:is_killed_block(R, Is) of
- true -> opt_move_2(D, Acc, Is);
- false -> not_possible
+ opt_move_1(Dest, Is, []).
+
+opt_move_1(R, [{set,[D],[R],move}|Is0], Acc) ->
+ %% Provided that the source register is killed by instructions
+ %% that follow, the optimization is safe.
+ case eliminate_use_of_from_reg(Is0, R, D, []) of
+ {yes,Is} -> opt_move_rev(D, Acc, Is);
+ no -> not_possible
end;
-opt_move_1(R, [{set,[D],[R],move}|Is], _SafeRegs, Acc) ->
- case beam_utils:is_killed_block(R, Is) of
- true -> opt_move_2(D, Acc, Is);
- false -> not_possible
+opt_move_1(_R, [{set,_,_,{alloc,_,_}}|_], _) ->
+ %% The optimization is either not possible or not safe.
+ %%
+ %% If R is an X register killed by allocation, the optimization is
+ %% not safe. On the other hand, if the X register is killed, there
+ %% will not follow a 'move' instruction with this X register as
+ %% the source.
+ %%
+ %% If R is a Y register, the optimization is still not safe
+ %% because the new target register is an X register that cannot
+ %% safely pass the alloc instruction.
+ not_possible;
+opt_move_1(R, [{set,_,_,_}=I|Is], Acc) ->
+ %% If the source register is either killed or used by this
+ %% instruction, the optimization is not possible.
+ case is_killed_or_used(R, I) of
+ true -> not_possible;
+ false -> opt_move_1(R, Is, [I|Acc])
end;
-opt_move_1(R, [I|Is], SafeRegs, Acc) ->
- case is_transparent(R, I) of
- false -> not_possible;
- true -> opt_move_1(R, Is, SafeRegs, [I|Acc])
- end.
+opt_move_1(_, _, _) ->
+ not_possible.
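%% A worked example of the rewrite that opt_move/2 enables (illustrative
%% registers; all three instructions are plain moves in one block):
%%
%%   [{set,[{x,1}],[{y,0}],move},     %Dest = {x,1}
%%    {set,[{x,0}],[{x,1}],move},     %the {move,Dest,FinalDest}
%%    {set,[{x,1}],[{x,3}],move}]     %kills {x,1}
%%
%% becomes:
%%
%%   [{set,[{x,0}],[{y,0}],move},
%%    {set,[{x,1}],[{x,3}],move}]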
-%% Reverse the instructions, while checking that there are no instructions that
-%% would interfere with using the new destination register chosen.
+%% opt_tuple_element([Instruction]) -> [Instruction]
+%% If possible, move get_tuple_element instructions forward
+%% in the instruction stream to a move instruction, eliminating
+%% the move instruction. Example:
+%%
+%% get_tuple_element Tuple Pos Dst1
+%% ...
+%% move Dst1 Dst2
+%%
+%% It may be possible to rewrite this code to:
+%%
+%% %% (moved get_tuple_element instruction)
+%% ...
+%% get_tuple_element Tuple Pos Dst2
+%%
-opt_move_2(D, [I|Is], Acc) ->
- case is_transparent(D, I) of
- false -> not_possible;
- true -> opt_move_2(D, Is, [I|Acc])
+opt_tuple_element([{set,[D],[S],{get_tuple_element,_}}=I|Is0]) ->
+ case opt_tuple_element_1(Is0, I, {S,D}, []) of
+ no ->
+ [I|opt_tuple_element(Is0)];
+ {yes,Is} ->
+ opt_tuple_element(Is)
end;
-opt_move_2(D, [], Acc) -> {D,Acc}.
-
-%% is_transparent(Register, Instruction) -> true | false
-%% Returns true if Instruction does not in any way references Register
-%% (even indirectly by an allocation instruction).
-%% Returns false if Instruction does reference Register, or we are
-%% not sure.
-
-is_transparent({x,X}, {set,_,_,{alloc,Live,_}}) when X < Live ->
- false;
-is_transparent(R, {set,Ds,Ss,_Op}) ->
- case member(R, Ds) of
- true -> false;
- false -> not member(R, Ss)
+opt_tuple_element([I|Is]) ->
+ [I|opt_tuple_element(Is)];
+opt_tuple_element([]) -> [].
+
+opt_tuple_element_1([{set,_,_,{alloc,_,_}}|_], _, _, _) ->
+ no;
+opt_tuple_element_1([{set,_,_,{try_catch,_,_}}|_], _, _, _) ->
+ no;
+opt_tuple_element_1([{set,[D],[S],move}|Is0], I0, {_,S}, Acc) ->
+ case eliminate_use_of_from_reg(Is0, S, D, []) of
+ no ->
+ no;
+ {yes,Is} ->
+ {set,[S],Ss,Op} = I0,
+ I = {set,[D],Ss,Op},
+ {yes,reverse(Acc, [I|Is])}
end;
-is_transparent(_, _) -> false.
+opt_tuple_element_1([{set,Ds,Ss,_}=I|Is], MovedI, {S,D}=Regs, Acc) ->
+ case member(S, Ds) orelse member(D, Ss) of
+ true ->
+ no;
+ false ->
+ opt_tuple_element_1(Is, MovedI, Regs, [I|Acc])
+ end;
+opt_tuple_element_1(_, _, _, _) -> no.
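%% A worked example of opt_tuple_element/1 (illustrative registers):
%%
%%   [{set,[{x,1}],[{x,0}],{get_tuple_element,2}},
%%    {set,[{x,4}],[{x,5}],move},     %does not touch {x,0} or {x,1}
%%    {set,[{x,2}],[{x,1}],move},     %the "move Dst1 Dst2"
%%    {set,[{x,1}],[{x,3}],move}]     %kills {x,1}
%%
%% becomes:
%%
%%   [{set,[{x,4}],[{x,5}],move},
%%    {set,[{x,2}],[{x,0}],{get_tuple_element,2}},
%%    {set,[{x,1}],[{x,3}],move}]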
+
+%% Reverse the instructions, while checking that there are no
+%% instructions that would interfere with using the new destination
+%% register (D).
+
+opt_move_rev(D, [I|Is], Acc) ->
+ case is_killed_or_used(D, I) of
+ true -> not_possible;
+ false -> opt_move_rev(D, Is, [I|Acc])
+ end;
+opt_move_rev(D, [], Acc) -> {D,Acc}.
+
+%% is_killed_or_used(Register, {set,_,_,_}) -> bool()
+%% Test whether the register is used as a source or killed as a
+%% destination by the instruction.
+
+is_killed_or_used(R, {set,Ds,Ss,_}) ->
+ member(R, Ds) orelse member(R, Ss).
+
+%% eliminate_use_of_from_reg([Instruction], FromRegister, ToRegister, Acc) ->
+%% {yes,Is} | no
+%% Eliminate any use of FromRegister in the instruction sequence
+%% by replacing uses of FromRegister with ToRegister. If FromRegister
+%% is referenced by an allocation instruction, return 'no' to indicate
+%% that FromRegister is still used and that the optimization is not
+%% possible.
+
+eliminate_use_of_from_reg([{set,_,_,{alloc,Live,_}}|_]=Is0, {x,X}, _, Acc) ->
+ if
+ X < Live ->
+ no;
+ true ->
+ {yes,reverse(Acc, Is0)}
+ end;
+eliminate_use_of_from_reg([{set,Ds,Ss0,Op}=I0|Is], From, To, Acc) ->
+ I = case member(From, Ss0) of
+ true ->
+ Ss = [case S of
+ From -> To;
+ _ -> S
+ end || S <- Ss0],
+ {set,Ds,Ss,Op};
+ false ->
+ I0
+ end,
+ case member(From, Ds) of
+ true ->
+ {yes,reverse(Acc, [I|Is])};
+ false ->
+ eliminate_use_of_from_reg(Is, From, To, [I|Acc])
+ end;
+eliminate_use_of_from_reg([I]=Is, From, _To, Acc) ->
+ case beam_utils:is_killed_block(From, [I]) of
+ true ->
+ {yes,reverse(Acc, Is)};
+ false ->
+ no
+ end.
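%% For example (illustrative registers), with FromRegister {x,1} and
%% ToRegister {x,0}:
%%
%%   eliminate_use_of_from_reg([{set,[{x,2}],[{x,1}],move},
%%                              {set,[{x,1}],[{x,3}],move}],
%%                             {x,1}, {x,0}, [])
%%
%% returns:
%%
%%   {yes,[{set,[{x,2}],[{x,0}],move},
%%         {set,[{x,1}],[{x,3}],move}]}
%%
%% The first use of {x,1} is rewritten to {x,0}; the second instruction
%% kills {x,1}, so the rest of the block is left untouched.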
%% opt_alloc(Instructions) -> Instructions'
%% Optimises all allocate instructions.
+opt_alloc([{set,[],[],{alloc,Live0,Info0}},
+ {set,[],[],{alloc,Live,Info}}|Is]) ->
+ Live = Live0, %Assertion.
+ Alloc = combine_alloc(Info0, Info),
+ I = {set,[],[],{alloc,Live,Alloc}},
+ opt_alloc([I|Is]);
opt_alloc([{set,[],[],{alloc,R,{_,Ns,Nh,[]}}}|Is]) ->
- [{set,[],[],opt_alloc(Is, Ns, Nh, R)}|opt(Is)];
+ [{set,[],[],opt_alloc(Is, Ns, Nh, R)}|Is];
opt_alloc([I|Is]) -> [I|opt_alloc(Is)];
opt_alloc([]) -> [].
+
+combine_alloc({_,Ns,Nh1,Init}, {_,nostack,Nh2,[]}) ->
+ {zero,Ns,beam_utils:combine_heap_needs(Nh1, Nh2),Init}.
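%% For example, assuming beam_utils:combine_heap_needs/2 simply adds
%% two plain integer heap needs:
%%
%%   combine_alloc({zero,1,2,[]}, {nozero,nostack,3,[]})
%%
%% evaluates to {zero,1,5,[]}: the stack size and init list of the
%% first allocation are kept, and the heap needs are combined.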
%% opt_alloc(Instructions, FrameSize, HeapNeed, LivingRegs) -> [Instr]
%% Generates the optimal sequence of instructions for
@@ -399,13 +462,14 @@ count_ones(Bits, Acc) ->
alloc_live_regs({set,Ds,Ss,_}, Regs0) ->
Rset = x_live(Ss, x_dead(Ds, (1 bsl Regs0)-1)),
- live_regs(Rset).
-
-live_regs(Regs) ->
- live_regs_1(0, Regs).
+ live_regs(0, Rset).
-live_regs_1(N, 0) -> N;
-live_regs_1(N, Regs) -> live_regs_1(N+1, Regs bsr 1).
+live_regs(N, 0) ->
+ N;
+live_regs(N, Regs) when Regs band 1 =:= 1 ->
+ live_regs(N+1, Regs bsr 1);
+live_regs(_, _) ->
+ not_possible.
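%% For example, live_regs(0, 2#111) returns 3 (x(0)-x(2) live and
%% contiguous), while live_regs(0, 2#101) returns 'not_possible',
%% because x(1) is dead but x(2) is live, so the live registers do not
%% form an unbroken run starting at x(0).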
x_dead([{x,N}|Rs], Regs) -> x_dead(Rs, Regs band (bnot (1 bsl N)));
x_dead([_|Rs], Regs) -> x_dead(Rs, Regs);
@@ -414,234 +478,3 @@ x_dead([], Regs) -> Regs.
x_live([{x,N}|Rs], Regs) -> x_live(Rs, Regs bor (1 bsl N));
x_live([_|Rs], Regs) -> x_live(Rs, Regs);
x_live([], Regs) -> Regs.
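%% For example, x_live([{x,0},{x,2}], 0) evaluates to 2#101, and
%% x_dead([{x,0}], 2#101) evaluates to 2#100.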
-
-%%%
-%%% Evaluation of constant bit fields.
-%%%
-
-is_bs_put({bs_put,_,{bs_put_integer,_,_},_}) -> true;
-is_bs_put({bs_put,_,{bs_put_float,_,_},_}) -> true;
-is_bs_put(_) -> false.
-
-collect_bs_puts(Is) ->
- collect_bs_puts_1(Is, []).
-
-collect_bs_puts_1([I|Is]=Is0, Acc) ->
- case is_bs_put(I) of
- false -> {reverse(Acc),Is0};
- true -> collect_bs_puts_1(Is, [I|Acc])
- end.
-
-opt_bs_puts(Is) ->
- opt_bs_1(Is, []).
-
-opt_bs_1([{bs_put,Fail,
- {bs_put_float,1,Flags0},[{integer,Sz},Src]}=I0|Is], Acc) ->
- try eval_put_float(Src, Sz, Flags0) of
- <<Int:Sz>> ->
- Flags = force_big(Flags0),
- I = {bs_put,Fail,{bs_put_integer,1,Flags},
- [{integer,Sz},{integer,Int}]},
- opt_bs_1([I|Is], Acc)
- catch
- error:_ ->
- opt_bs_1(Is, [I0|Acc])
- end;
-opt_bs_1([{bs_put,_,{bs_put_integer,1,_},[{integer,8},{integer,_}]}|_]=IsAll,
- Acc0) ->
- {Is,Acc} = bs_collect_string(IsAll, Acc0),
- opt_bs_1(Is, Acc);
-opt_bs_1([{bs_put,Fail,{bs_put_integer,1,F},[{integer,Sz},{integer,N}]}=I|Is0],
- Acc) when Sz > 8 ->
- case field_endian(F) of
- big ->
- %% We can do this optimization for any field size without risk
- %% for code explosion.
- case bs_split_int(N, Sz, Fail, Is0) of
- no_split -> opt_bs_1(Is0, [I|Acc]);
- Is -> opt_bs_1(Is, Acc)
- end;
- little when Sz < 128 ->
- %% We only try to optimize relatively small fields, to avoid
- %% an explosion in code size.
- <<Int:Sz>> = <<N:Sz/little>>,
- Flags = force_big(F),
- Is = [{bs_put,Fail,{bs_put_integer,1,Flags},
- [{integer,Sz},{integer,Int}]}|Is0],
- opt_bs_1(Is, Acc);
- _ -> %native or too wide little field
- opt_bs_1(Is0, [I|Acc])
- end;
-opt_bs_1([{bs_put,Fail,{Op,U,F},[{integer,Sz},Src]}|Is], Acc) when U > 1 ->
- opt_bs_1([{bs_put,Fail,{Op,1,F},[{integer,U*Sz},Src]}|Is], Acc);
-opt_bs_1([I|Is], Acc) ->
- opt_bs_1(Is, [I|Acc]);
-opt_bs_1([], Acc) -> reverse(Acc).
-
-eval_put_float(Src, Sz, Flags) when Sz =< 256 -> %Only evaluate if Sz is reasonable.
- Val = value(Src),
- case field_endian(Flags) of
- little -> <<Val:Sz/little-float-unit:1>>;
- big -> <<Val:Sz/big-float-unit:1>>
- %% native intentionally not handled here - we can't optimize it.
- end.
-
-value({integer,I}) -> I;
-value({float,F}) -> F.
-
-bs_collect_string(Is, [{bs_put,_,{bs_put_string,Len,{string,Str}},[]}|Acc]) ->
- bs_coll_str_1(Is, Len, reverse(Str), Acc);
-bs_collect_string(Is, Acc) ->
- bs_coll_str_1(Is, 0, [], Acc).
-
-bs_coll_str_1([{bs_put,_,{bs_put_integer,U,_},[{integer,Sz},{integer,V}]}|Is],
- Len, StrAcc, IsAcc) when U*Sz =:= 8 ->
- Byte = V band 16#FF,
- bs_coll_str_1(Is, Len+1, [Byte|StrAcc], IsAcc);
-bs_coll_str_1(Is, Len, StrAcc, IsAcc) ->
- {Is,[{bs_put,{f,0},{bs_put_string,Len,{string,reverse(StrAcc)}},[]}|IsAcc]}.
-
-field_endian({field_flags,F}) -> field_endian_1(F).
-
-field_endian_1([big=E|_]) -> E;
-field_endian_1([little=E|_]) -> E;
-field_endian_1([native=E|_]) -> E;
-field_endian_1([_|Fs]) -> field_endian_1(Fs).
-
-force_big({field_flags,F}) ->
- {field_flags,force_big_1(F)}.
-
-force_big_1([big|_]=Fs) -> Fs;
-force_big_1([little|Fs]) -> [big|Fs];
-force_big_1([F|Fs]) -> [F|force_big_1(Fs)].
-
-bs_split_int(0, Sz, _, _) when Sz > 64 ->
- %% We don't want to split in this case because the
- %% string will consist of only zeroes.
- no_split;
-bs_split_int(-1, Sz, _, _) when Sz > 64 ->
- %% We don't want to split in this case because the
- %% string will consist of only 255 bytes.
- no_split;
-bs_split_int(N, Sz, Fail, Acc) ->
- FirstByteSz = case Sz rem 8 of
- 0 -> 8;
- Rem -> Rem
- end,
- bs_split_int_1(N, FirstByteSz, Sz, Fail, Acc).
-
-bs_split_int_1(-1, _, Sz, Fail, Acc) when Sz > 64 ->
- I = {bs_put,Fail,{bs_put_integer,1,{field_flags,[big]}},
- [{integer,Sz},{integer,-1}]},
- [I|Acc];
-bs_split_int_1(0, _, Sz, Fail, Acc) when Sz > 64 ->
- I = {bs_put,Fail,{bs_put_integer,1,{field_flags,[big]}},
- [{integer,Sz},{integer,0}]},
- [I|Acc];
-bs_split_int_1(N, ByteSz, Sz, Fail, Acc) when Sz > 0 ->
- Mask = (1 bsl ByteSz) - 1,
- I = {bs_put,Fail,{bs_put_integer,1,{field_flags,[big]}},
- [{integer,ByteSz},{integer,N band Mask}]},
- bs_split_int_1(N bsr ByteSz, 8, Sz-ByteSz, Fail, [I|Acc]);
-bs_split_int_1(_, _, _, _, Acc) -> Acc.
-
-
-%%%
-%%% Optimization of new bit syntax matching: get rid
-%%% of redundant bs_restore2/2 instructions across select_val
-%%% instructions, as well as a few other simple peep-hole optimizations.
-%%%
-
-bsm_opt(Is0, Lc0) ->
- {Is1,D0,Lc} = bsm_scan(Is0, [], Lc0, []),
- Is2 = case D0 of
- [] ->
- Is1;
- _ ->
- D = gb_trees:from_orddict(orddict:from_list(D0)),
- bsm_reroute(Is1, D, none, [])
- end,
- Is = beam_clean:bs_clean_saves(Is2),
- {bsm_opt_2(Is, []),Lc}.
-
-bsm_scan([{label,L}=Lbl,{bs_restore2,_,Save}=R|Is], D0, Lc, Acc0) ->
- D = [{{L,Save},Lc}|D0],
- Acc = [{label,Lc},R,Lbl|Acc0],
- bsm_scan(Is, D, Lc+1, Acc);
-bsm_scan([I|Is], D, Lc, Acc) ->
- bsm_scan(Is, D, Lc, [I|Acc]);
-bsm_scan([], D, Lc, Acc) ->
- {reverse(Acc),D,Lc}.
-
-bsm_reroute([{bs_save2,Reg,Save}=I|Is], D, _, Acc) ->
- bsm_reroute(Is, D, {Reg,Save}, [I|Acc]);
-bsm_reroute([{bs_restore2,Reg,Save}=I|Is], D, _, Acc) ->
- bsm_reroute(Is, D, {Reg,Save}, [I|Acc]);
-bsm_reroute([{label,_}=I|Is], D, S, Acc) ->
- bsm_reroute(Is, D, S, [I|Acc]);
-bsm_reroute([{select,select_val,Reg,F0,Lbls0}|Is], D, {_,Save}=S, Acc0) ->
- [F|Lbls] = bsm_subst_labels([F0|Lbls0], Save, D),
- Acc = [{select,select_val,Reg,F,Lbls}|Acc0],
- bsm_reroute(Is, D, S, Acc);
-bsm_reroute([{test,TestOp,F0,TestArgs}=I|Is], D, {_,Save}=S, Acc0) ->
- F = bsm_subst_label(F0, Save, D),
- Acc = [{test,TestOp,F,TestArgs}|Acc0],
- case bsm_not_bs_test(I) of
- true ->
- %% The test instruction will not update the bit offset for the
- %% binary being matched. Therefore the save position can be kept.
- bsm_reroute(Is, D, S, Acc);
- false ->
- %% The test instruction might update the bit offset. Kill our
- %% remembered Save position.
- bsm_reroute(Is, D, none, Acc)
- end;
-bsm_reroute([{test,TestOp,F0,Live,TestArgs,Dst}|Is], D, {_,Save}, Acc0) ->
- F = bsm_subst_label(F0, Save, D),
- Acc = [{test,TestOp,F,Live,TestArgs,Dst}|Acc0],
- %% The test instruction will update the bit offset. Kill our
- %% remembered Save position.
- bsm_reroute(Is, D, none, Acc);
-bsm_reroute([{block,[{set,[],[],{alloc,_,_}}]}=Bl,
- {bs_context_to_binary,_}=I|Is], D, S, Acc) ->
- %% To help further bit syntax optimizations.
- bsm_reroute([I,Bl|Is], D, S, Acc);
-bsm_reroute([I|Is], D, _, Acc) ->
- bsm_reroute(Is, D, none, [I|Acc]);
-bsm_reroute([], _, _, Acc) -> reverse(Acc).
-
-bsm_opt_2([{test,bs_test_tail2,F,[Ctx,Bits]}|Is],
- [{test,bs_skip_bits2,F,[Ctx,{integer,I},Unit,_Flags]}|Acc]) ->
- bsm_opt_2(Is, [{test,bs_test_tail2,F,[Ctx,Bits+I*Unit]}|Acc]);
-bsm_opt_2([{test,bs_skip_bits2,F,[Ctx,{integer,I1},Unit1,_]}|Is],
- [{test,bs_skip_bits2,F,[Ctx,{integer,I2},Unit2,Flags]}|Acc]) ->
- bsm_opt_2(Is, [{test,bs_skip_bits2,F,
- [Ctx,{integer,I1*Unit1+I2*Unit2},1,Flags]}|Acc]);
-bsm_opt_2([I|Is], Acc) ->
- bsm_opt_2(Is, [I|Acc]);
-bsm_opt_2([], Acc) -> reverse(Acc).
-
-%% bsm_not_bs_test({test,Name,_,Operands}) -> true|false.
-%% Test whether is the test is a "safe", i.e. does not move the
-%% bit offset for a binary.
-%%
-%% 'true' means that the test is safe, 'false' that we don't know or
-%% that the test moves the offset (e.g. bs_get_integer2).
-
-bsm_not_bs_test({test,bs_test_tail2,_,[_,_]}) -> true;
-bsm_not_bs_test(Test) -> beam_utils:is_pure_test(Test).
-
-bsm_subst_labels(Fs, Save, D) ->
- bsm_subst_labels_1(Fs, Save, D, []).
-
-bsm_subst_labels_1([F|Fs], Save, D, Acc) ->
- bsm_subst_labels_1(Fs, Save, D, [bsm_subst_label(F, Save, D)|Acc]);
-bsm_subst_labels_1([], _, _, Acc) ->
- reverse(Acc).
-
-bsm_subst_label({f,Lbl0}=F, Save, D) ->
- case gb_trees:lookup({Lbl0,Save}, D) of
- {value,Lbl} -> {f,Lbl};
- none -> F
- end;
-bsm_subst_label(Other, _, _) -> Other.