Diffstat (limited to 'lib/compiler/src')
-rw-r--r--  lib/compiler/src/Makefile                  |    3
-rw-r--r--  lib/compiler/src/beam_a.erl                |   12
-rw-r--r--  lib/compiler/src/beam_asm.erl              |    4
-rw-r--r--  lib/compiler/src/beam_bs.erl               |  183
-rw-r--r--  lib/compiler/src/beam_dict.erl             |   15
-rw-r--r--  lib/compiler/src/beam_jump.erl             |   55
-rw-r--r--  lib/compiler/src/beam_peep.erl             |   12
-rw-r--r--  lib/compiler/src/beam_ssa_bsm.erl          |    3
-rw-r--r--  lib/compiler/src/beam_ssa_codegen.erl      |   40
-rw-r--r--  lib/compiler/src/beam_ssa_opt.erl          |  853
-rw-r--r--  lib/compiler/src/beam_ssa_opt.hrl          |   53
-rw-r--r--  lib/compiler/src/beam_ssa_pre_codegen.erl  |  225
-rw-r--r--  lib/compiler/src/beam_ssa_share.erl        |  370
-rw-r--r--  lib/compiler/src/beam_ssa_type.erl         |  525
-rw-r--r--  lib/compiler/src/beam_trim.erl             |    8
-rw-r--r--  lib/compiler/src/beam_validator.erl        |  450
-rw-r--r--  lib/compiler/src/beam_z.erl                |   29
-rw-r--r--  lib/compiler/src/compile.erl               |   11
-rw-r--r--  lib/compiler/src/compiler.app.src          |    2
-rw-r--r--  lib/compiler/src/erl_bifs.erl              |    1
-rw-r--r--  lib/compiler/src/sys_core_fold.erl         |   20
-rw-r--r--  lib/compiler/src/sys_core_inline.erl       |    3
22 files changed, 2221 insertions(+), 656 deletions(-)
diff --git a/lib/compiler/src/Makefile b/lib/compiler/src/Makefile index d475e5a19a..97c73d0e07 100644 --- a/lib/compiler/src/Makefile +++ b/lib/compiler/src/Makefile @@ -49,7 +49,6 @@ MODULES = \ beam_a \ beam_asm \ beam_block \ - beam_bs \ beam_clean \ beam_dict \ beam_disasm \ @@ -69,6 +68,7 @@ MODULES = \ beam_ssa_pp \ beam_ssa_pre_codegen \ beam_ssa_recv \ + beam_ssa_share \ beam_ssa_type \ beam_kernel_to_ssa \ beam_trim \ @@ -103,6 +103,7 @@ BEAM_H = $(wildcard ../priv/beam_h/*.h) HRL_FILES= \ beam_disasm.hrl \ + beam_ssa_opt.hrl \ beam_ssa.hrl \ core_parse.hrl \ v3_kernel.hrl diff --git a/lib/compiler/src/beam_a.erl b/lib/compiler/src/beam_a.erl index dd2537a699..1ac892a8f1 100644 --- a/lib/compiler/src/beam_a.erl +++ b/lib/compiler/src/beam_a.erl @@ -100,8 +100,12 @@ rename_instr({bs_put_utf16=I,F,Fl,Src}) -> {bs_put,F,{I,Fl},[Src]}; rename_instr({bs_put_utf32=I,F,Fl,Src}) -> {bs_put,F,{I,Fl},[Src]}; -rename_instr({bs_put_string,_,_}=I) -> - {bs_put,{f,0},I,[]}; +rename_instr({bs_put_string,_,{string,String}}) -> + %% Only happens when compiling from .S files. In old + %% .S files, String is a list. In .S in OTP 22 and later, + %% String is a binary. + {bs_put,{f,0},{bs_put_binary,8,{field_flags,[unsigned,big]}}, + [{atom,all},{literal,iolist_to_binary([String])}]}; rename_instr({bs_add=I,F,[Src1,Src2,U],Dst}) when is_integer(U) -> {bif,I,F,[Src1,Src2,{integer,U}],Dst}; rename_instr({bs_utf8_size=I,F,Src,Dst}) -> @@ -118,8 +122,8 @@ rename_instr({bs_private_append=I,F,Sz,U,Src,Flags,Dst}) -> {bs_init,F,{I,U,Flags},none,[Sz,Src],Dst}; rename_instr(bs_init_writable=I) -> {bs_init,{f,0},I,1,[{x,0}],{x,0}}; -rename_instr({test,Op,F,[Ctx,Bits,{string,Str}]}) -> - %% When compiling from a .S file. +rename_instr({test,bs_match_string=Op,F,[Ctx,Bits,{string,Str}]}) when is_list(Str) -> + %% When compiling from an old .S file. Starting from OTP 22, Str is a binary. <<Bs:Bits/bits,_/bits>> = list_to_binary(Str), {test,Op,F,[Ctx,Bs]}; rename_instr({put_map_assoc,Fail,S,D,R,L}) -> diff --git a/lib/compiler/src/beam_asm.erl b/lib/compiler/src/beam_asm.erl index df0321e85a..bc1290f6fd 100644 --- a/lib/compiler/src/beam_asm.erl +++ b/lib/compiler/src/beam_asm.erl @@ -424,8 +424,8 @@ encode_arg({f, W}, Dict) -> {encode(?tag_f, W), Dict}; %% encode_arg({'char', C}, Dict) -> %% {encode(?tag_h, C), Dict}; -encode_arg({string, String}, Dict0) -> - {Offset, Dict} = beam_dict:string(String, Dict0), +encode_arg({string, BinString}, Dict0) when is_binary(BinString) -> + {Offset, Dict} = beam_dict:string(BinString, Dict0), {encode(?tag_u, Offset), Dict}; encode_arg({extfunc, M, F, A}, Dict0) -> {Index, Dict} = beam_dict:import(M, F, A, Dict0), diff --git a/lib/compiler/src/beam_bs.erl b/lib/compiler/src/beam_bs.erl deleted file mode 100644 index 15d8d687fc..0000000000 --- a/lib/compiler/src/beam_bs.erl +++ /dev/null @@ -1,183 +0,0 @@ -%% -%% %CopyrightBegin% -%% -%% Copyright Ericsson AB 1999-2018. All Rights Reserved. -%% -%% Licensed under the Apache License, Version 2.0 (the "License"); -%% you may not use this file except in compliance with the License. -%% You may obtain a copy of the License at -%% -%% http://www.apache.org/licenses/LICENSE-2.0 -%% -%% Unless required by applicable law or agreed to in writing, software -%% distributed under the License is distributed on an "AS IS" BASIS, -%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -%% See the License for the specific language governing permissions and -%% limitations under the License. 
-%% -%% %CopyrightEnd% -%% -%% Purpose: Peephole optimization of binary syntax instructions. - --module(beam_bs). - --export([module/2]). --import(lists, [reverse/1]). - --spec module(beam_utils:module_code(), [compile:option()]) -> - {'ok',beam_utils:module_code()}. - -module({Mod,Exp,Attr,Fs0,Lc}, _Opt) -> - Fs = [function(F) || F <- Fs0], - {ok,{Mod,Exp,Attr,Fs,Lc}}. - -function({function,Name,Arity,CLabel,Is0}) -> - try - Is = bs_opt(Is0), - {function,Name,Arity,CLabel,Is} - catch - Class:Error:Stack -> - io:fwrite("Function: ~w/~w\n", [Name,Arity]), - erlang:raise(Class, Error, Stack) - end. - -%%% -%%% Evaluate construction of constant bit fields. -%%% Combine bs_skip_bits2 and bs_test_tail2 instructions. -%%% - -bs_opt([{bs_put,_,_,_}=I|Is0]) -> - {BsPuts0,Is} = collect_bs_puts(Is0, [I]), - BsPuts = opt_bs_puts(BsPuts0), - BsPuts ++ bs_opt(Is); -bs_opt([{test,bs_skip_bits2,F,[Ctx,{integer,I},Unit,_Flags]}, - {test,bs_test_tail2,F,[Ctx,Bits]}|Is]) -> - [{test,bs_test_tail2,F,[Ctx,Bits+I*Unit]}|bs_opt(Is)]; -bs_opt([{test,bs_skip_bits2,F,[Ctx,{integer,I1},Unit1,Flags]}, - {test,bs_skip_bits2,F,[Ctx,{integer,I2},Unit2,_]}|Is]) -> - I = {test,bs_skip_bits2,F, - [Ctx,{integer,I1*Unit1+I2*Unit2},1,Flags]}, - bs_opt([I|Is]); -bs_opt([I|Is]) -> - [I|bs_opt(Is)]; -bs_opt([]) -> []. - -collect_bs_puts([{bs_put,_,_,_}=I|Is], Acc) -> - collect_bs_puts(Is, [I|Acc]); -collect_bs_puts([_|_]=Is, Acc) -> - {reverse(Acc),Is}. - -opt_bs_puts(Is) -> - opt_bs_1(Is, []). - -opt_bs_1([{bs_put,Fail, - {bs_put_float,1,Flags0},[{integer,Sz},Src]}=I0|Is], Acc) -> - try eval_put_float(Src, Sz, Flags0) of - <<Int:Sz>> -> - Flags = force_big(Flags0), - I = {bs_put,Fail,{bs_put_integer,1,Flags}, - [{integer,Sz},{integer,Int}]}, - opt_bs_1([I|Is], Acc) - catch - error:_ -> - opt_bs_1(Is, [I0|Acc]) - end; -opt_bs_1([{bs_put,_,{bs_put_integer,1,_},[{integer,8},{integer,_}]}|_]=IsAll, - Acc0) -> - {Is,Acc} = bs_collect_string(IsAll, Acc0), - opt_bs_1(Is, Acc); -opt_bs_1([{bs_put,Fail,{bs_put_integer,1,F},[{integer,Sz},{integer,N}]}=I|Is0], - Acc) when Sz > 8 -> - case field_endian(F) of - big -> - %% We can do this optimization for any field size without - %% risk for code explosion. - case bs_split_int(N, Sz, Fail, Is0) of - no_split -> opt_bs_1(Is0, [I|Acc]); - Is -> opt_bs_1(Is, Acc) - end; - little when Sz < 128 -> - %% We only try to optimize relatively small fields, to - %% avoid an explosion in code size. - <<Int:Sz>> = <<N:Sz/little>>, - Flags = force_big(F), - Is = [{bs_put,Fail,{bs_put_integer,1,Flags}, - [{integer,Sz},{integer,Int}]}|Is0], - opt_bs_1(Is, Acc); - _ -> %native or too wide little field - opt_bs_1(Is0, [I|Acc]) - end; -opt_bs_1([{bs_put,Fail,{Op,U,F},[{integer,Sz},Src]}|Is], Acc) when U > 1 -> - opt_bs_1([{bs_put,Fail,{Op,1,F},[{integer,U*Sz},Src]}|Is], Acc); -opt_bs_1([I|Is], Acc) -> - opt_bs_1(Is, [I|Acc]); -opt_bs_1([], Acc) -> reverse(Acc). - -eval_put_float(Src, Sz, Flags) when Sz =< 256 -> - %%Only evaluate if Sz is reasonable. - Val = value(Src), - case field_endian(Flags) of - little -> <<Val:Sz/little-float-unit:1>>; - big -> <<Val:Sz/big-float-unit:1>> - %% native intentionally not handled here - we can't optimize - %% it. - end. - -value({integer,I}) -> I; -value({float,F}) -> F. - -bs_collect_string(Is, [{bs_put,_,{bs_put_string,Len,{string,Str}},[]}|Acc]) -> - bs_coll_str_1(Is, Len, reverse(Str), Acc); -bs_collect_string(Is, Acc) -> - bs_coll_str_1(Is, 0, [], Acc). 
- -bs_coll_str_1([{bs_put,_,{bs_put_integer,U,_},[{integer,Sz},{integer,V}]}|Is], - Len, StrAcc, IsAcc) when U*Sz =:= 8 -> - Byte = V band 16#FF, - bs_coll_str_1(Is, Len+1, [Byte|StrAcc], IsAcc); -bs_coll_str_1(Is, Len, StrAcc, IsAcc) -> - {Is,[{bs_put,{f,0},{bs_put_string,Len,{string,reverse(StrAcc)}},[]}|IsAcc]}. - -field_endian({field_flags,F}) -> field_endian_1(F). - -field_endian_1([big=E|_]) -> E; -field_endian_1([little=E|_]) -> E; -field_endian_1([native=E|_]) -> E; -field_endian_1([_|Fs]) -> field_endian_1(Fs). - -force_big({field_flags,F}) -> - {field_flags,force_big_1(F)}. - -force_big_1([big|_]=Fs) -> Fs; -force_big_1([little|Fs]) -> [big|Fs]; -force_big_1([F|Fs]) -> [F|force_big_1(Fs)]. - -bs_split_int(0, Sz, _, _) when Sz > 64 -> - %% We don't want to split in this case because the - %% string will consist of only zeroes. - no_split; -bs_split_int(-1, Sz, _, _) when Sz > 64 -> - %% We don't want to split in this case because the - %% string will consist of only 255 bytes. - no_split; -bs_split_int(N, Sz, Fail, Acc) -> - FirstByteSz = case Sz rem 8 of - 0 -> 8; - Rem -> Rem - end, - bs_split_int_1(N, FirstByteSz, Sz, Fail, Acc). - -bs_split_int_1(-1, _, Sz, Fail, Acc) when Sz > 64 -> - I = {bs_put,Fail,{bs_put_integer,1,{field_flags,[big]}}, - [{integer,Sz},{integer,-1}]}, - [I|Acc]; -bs_split_int_1(0, _, Sz, Fail, Acc) when Sz > 64 -> - I = {bs_put,Fail,{bs_put_integer,1,{field_flags,[big]}}, - [{integer,Sz},{integer,0}]}, - [I|Acc]; -bs_split_int_1(N, ByteSz, Sz, Fail, Acc) when Sz > 0 -> - Mask = (1 bsl ByteSz) - 1, - I = {bs_put,Fail,{bs_put_integer,1,{field_flags,[big]}}, - [{integer,ByteSz},{integer,N band Mask}]}, - bs_split_int_1(N bsr ByteSz, 8, Sz-ByteSz, Fail, [I|Acc]); -bs_split_int_1(_, _, _, _, Acc) -> Acc. diff --git a/lib/compiler/src/beam_dict.erl b/lib/compiler/src/beam_dict.erl index 990e86062a..b2056332e6 100644 --- a/lib/compiler/src/beam_dict.erl +++ b/lib/compiler/src/beam_dict.erl @@ -126,18 +126,17 @@ import(Mod0, Name0, Arity, #asm{imports=Imp0,next_import=NextIndex}=D0) {NextIndex,D2#asm{imports=Imp,next_import=NextIndex+1}} end. -%% Returns the index for a string in the string table (adding the string to the -%% table if necessary). +%% Returns the index for a binary string in the string table (adding +%% the string to the table if necessary). %% string(String, Dict) -> {Offset, Dict'} --spec string(string(), bdict()) -> {non_neg_integer(), bdict()}. +-spec string(binary(), bdict()) -> {non_neg_integer(), bdict()}. -string(Str, Dict) when is_list(Str) -> +string(BinString, Dict) when is_binary(BinString) -> #asm{strings=Strings,string_offset=NextOffset} = Dict, - StrBin = list_to_binary(Str), - case old_string(StrBin, Strings) of + case old_string(BinString, Strings) of none -> - NewDict = Dict#asm{strings = <<Strings/binary,StrBin/binary>>, - string_offset=NextOffset+byte_size(StrBin)}, + NewDict = Dict#asm{strings = <<Strings/binary,BinString/binary>>, + string_offset=NextOffset+byte_size(BinString)}, {NextOffset,NewDict}; Offset when is_integer(Offset) -> {NextOffset-Offset,Dict} diff --git a/lib/compiler/src/beam_jump.erl b/lib/compiler/src/beam_jump.erl index d3a618d211..8b0e3e32f8 100644 --- a/lib/compiler/src/beam_jump.erl +++ b/lib/compiler/src/beam_jump.erl @@ -101,6 +101,10 @@ %%% always keep the label. (beam_clean will remove any unused %%% labels.) %%% +%%% (7) Replace a jump to a return instruction with a return instruction. +%%% Similarly, replace a jump to deallocate + return with those +%%% instructions. 
+%%% %%% Note: This modules depends on (almost) all branches and jumps only %%% going forward, so that we can remove instructions (including definition %%% of labels) after any label that has not been referenced by the code @@ -150,7 +154,8 @@ function({function,Name,Arity,CLabel,Asm0}, Lc0) -> Asm3 = share(Asm2), Asm4 = move(Asm3), Asm5 = opt(Asm4, CLabel), - Asm = remove_unused_labels(Asm5), + Asm6 = unshare(Asm5), + Asm = remove_unused_labels(Asm6), {{function,Name,Arity,CLabel,Asm},Lc} catch Class:Error:Stack -> @@ -393,14 +398,13 @@ extract_seq_1(_, _) -> no. { entry :: beam_asm:label(), %Entry label (must not be moved). replace :: #{beam_asm:label() := beam_asm:label()}, %Labels to replace. - labels :: cerl_sets:set(), %Set of referenced labels. - index :: beam_utils:code_index() | {lazy,[beam_utils:instruction()]} %Index built lazily only if needed + labels :: cerl_sets:set() %Set of referenced labels. }). opt(Is0, CLabel) -> find_fixpoint(fun(Is) -> Lbls = initial_labels(Is), - St = #st{entry=CLabel,replace=#{},labels=Lbls,index={lazy,Is}}, + St = #st{entry=CLabel,replace=#{},labels=Lbls}, opt(Is, [], St) end, Is0). @@ -425,16 +429,6 @@ opt([{test,_,{f,L}=Lbl,_}=I|[{jump,{f,L}}|_]=Is], Acc, St) -> %% as in the jump that follows -- thus it is not needed. opt(Is, Acc, St) end; -opt([{test,_,{f,L}=Lbl,_}=I|[{label,L}|_]=Is], Acc, St) -> - %% Similar to the above, except we have a fall-through rather than jump - %% Test Label Ops - %% label Label - case beam_utils:is_pure_test(I) of - false -> - opt(Is, [I|Acc], label_used(Lbl, St)); - true -> - opt(Is, Acc, St) - end; opt([{test,Test0,{f,L}=Lbl,Ops}=I|[{jump,To}|Is]=Is0], Acc, St) -> case is_label_defined(Is, L) of false -> @@ -625,6 +619,39 @@ drop_upto_label([{label,_}|_]=Is) -> Is; drop_upto_label([_|Is]) -> drop_upto_label(Is); drop_upto_label([]) -> []. +%% unshare([Instruction]) -> [Instruction]. +%% Replace a jump to a return sequence (a `return` instruction +%% optionally preced by a `deallocate` instruction) with the return +%% sequence. This always saves execution time and may also save code +%% space (depending on the architecture). Eliminating `jump` +%% instructions also gives beam_trim more opportunities to trim the +%% stack. + +unshare(Is) -> + Short = unshare_collect_short(Is, #{}), + unshare_short(Is, Short). + +unshare_collect_short([{label,L},return|Is], Map) -> + unshare_collect_short(Is, Map#{L=>[return]}); +unshare_collect_short([{label,L},{deallocate,_}=D,return|Is], Map) -> + %% `deallocate` and `return` are combined into one instruction by + %% the loader. + unshare_collect_short(Is, Map#{L=>[D,return]}); +unshare_collect_short([_|Is], Map) -> + unshare_collect_short(Is, Map); +unshare_collect_short([], Map) -> Map. + +unshare_short([{jump,{f,F}}=I|Is], Map) -> + case Map of + #{F:=Seq} -> + Seq ++ unshare_short(Is, Map); + #{} -> + [I|unshare_short(Is, Map)] + end; +unshare_short([I|Is], Map) -> + [I|unshare_short(Is, Map)]; +unshare_short([], _Map) -> []. + %% ulbl(Instruction, UsedCerlSet) -> UsedCerlSet' %% Update the cerl_set UsedCerlSet with any function-local labels %% (i.e. not with labels in call instructions) referenced by diff --git a/lib/compiler/src/beam_peep.erl b/lib/compiler/src/beam_peep.erl index 2323a439e9..5730e9704e 100644 --- a/lib/compiler/src/beam_peep.erl +++ b/lib/compiler/src/beam_peep.erl @@ -94,30 +94,26 @@ peep([{gc_bif,_,_,_,_,Dst}=I|Is], SeenTests0, Acc) -> peep([{jump,{f,L}},{label,L}=I|Is], _, Acc) -> %% Sometimes beam_jump has missed this optimization. 
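To make the new unshare/1 pass in beam_jump above concrete, here is a hand-written sketch of the rewrite at the BEAM assembly level (the label and frame size are illustrative, not taken from the commit):

    %% Before unshare/1: the return sequence is shared and is
    %% reached through a jump.
        {jump,{f,42}}.
        ...
      {label,42}.
        {deallocate,1}.
        return.

    %% After unshare/1: the jump site gets its own copy of the
    %% sequence, which the loader combines into a single
    %% deallocate_return instruction.
        {deallocate,1}.
        return.
        ...
      {label,42}.
        {deallocate,1}.
        return.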
peep(Is, gb_sets:empty(), [I|Acc]); -peep([{select,Op,R,F,Vls0}|Is], SeenTests0, Acc0) -> +peep([{select,select_val,R,F,Vls0}|Is], SeenTests0, Acc0) -> case prune_redundant_values(Vls0, F) of [] -> %% No values left. Must convert to plain jump. I = {jump,F}, peep([I|Is], gb_sets:empty(), Acc0); - [{atom,_}=Value,Lbl] when Op =:= select_val -> + [{atom,_}=Value,Lbl] -> %% Single value left. Convert to regular test. Is1 = [{test,is_eq_exact,F,[R,Value]},{jump,Lbl}|Is], peep(Is1, SeenTests0, Acc0); - [{integer,_}=Value,Lbl] when Op =:= select_val -> + [{integer,_}=Value,Lbl] -> %% Single value left. Convert to regular test. Is1 = [{test,is_eq_exact,F,[R,Value]},{jump,Lbl}|Is], peep(Is1, SeenTests0, Acc0); - [Arity,Lbl] when Op =:= select_tuple_arity -> - %% Single value left. Convert to regular test - Is1 = [{test,test_arity,F,[R,Arity]},{jump,Lbl}|Is], - peep(Is1, SeenTests0, Acc0); [{atom,B1},Lbl,{atom,B2},Lbl] when B1 =:= not B2 -> %% Replace with is_boolean test. Is1 = [{test,is_boolean,F,[R]},{jump,Lbl}|Is], peep(Is1, SeenTests0, Acc0); [_|_]=Vls -> - I = {select,Op,R,F,Vls}, + I = {select,select_val,R,F,Vls}, peep(Is, gb_sets:empty(), [I|Acc0]) end; peep([{get_map_elements,Fail,Src,List}=I|Is], _SeenTests, Acc0) -> diff --git a/lib/compiler/src/beam_ssa_bsm.erl b/lib/compiler/src/beam_ssa_bsm.erl index 9631bf3334..466337db0e 100644 --- a/lib/compiler/src/beam_ssa_bsm.erl +++ b/lib/compiler/src/beam_ssa_bsm.erl @@ -877,7 +877,8 @@ annotate_context_parameters(F, ModInfo) -> %% Assertion. error(conflicting_parameter_types); (K, suitable_for_reuse, Acc) -> - Acc#{ K => match_context }; + T = beam_validator:type_anno(match_context), + Acc#{ K => T }; (_K, _V, Acc) -> Acc end, TypeAnno0, ParamInfo), diff --git a/lib/compiler/src/beam_ssa_codegen.erl b/lib/compiler/src/beam_ssa_codegen.erl index d3facc5911..c2d5035b19 100644 --- a/lib/compiler/src/beam_ssa_codegen.erl +++ b/lib/compiler/src/beam_ssa_codegen.erl @@ -161,7 +161,7 @@ add_parameter_annos([{label, _}=Entry | Body], Anno) -> (_K, _V, Acc) -> Acc end, [], maps:get(registers, Anno)), - [Entry | Annos] ++ Body. + [Entry | sort(Annos)] ++ Body. cg_fun(Blocks, St0) -> Linear0 = linearize(Blocks), @@ -1071,8 +1071,8 @@ cg_block([#cg_set{op={bif,Name},dst=Dst0,args=Args0}]=Is0, {Dst0,Fail}, St0) -> {z,_} -> %% The result of the BIF call will only be used once. Convert to %% a test instruction. - Test = bif_to_test(Name, Args, ensure_label(Fail, St0)), - {Test,St0}; + {Test,St1} = bif_to_test(Name, Args, ensure_label(Fail, St0), St0), + {Test,St1}; _ -> %% Must explicitly call the BIF since the result will be used %% more than once. @@ -1269,16 +1269,17 @@ cg_copy_1([], _St) -> []. element(1, Val) =:= atom orelse element(1, Val) =:= literal)). +bif_to_test('or', [V1,V2], {f,Lbl}=Fail, St0) when Lbl =/= 0 -> + {SuccLabel,St} = new_label(St0), + {[{test,is_eq_exact,{f,SuccLabel},[V1,{atom,false}]}, + {test,is_eq_exact,Fail,[V2,{atom,true}]}, + {label,SuccLabel}],St}; +bif_to_test(Op, Args, Fail, St) -> + {bif_to_test(Op, Args, Fail),St}. + bif_to_test('and', [V1,V2], Fail) -> [{test,is_eq_exact,Fail,[V1,{atom,true}]}, {test,is_eq_exact,Fail,[V2,{atom,true}]}]; -bif_to_test('or', [V1,V2], {f,Lbl}=Fail) when Lbl =/= 0 -> - %% Labels are spaced 2 apart. We can create a new - %% label by incrementing the Fail label. 
- SuccLabel = Lbl + 1, - [{test,is_eq_exact,{f,SuccLabel},[V1,{atom,false}]}, - {test,is_eq_exact,Fail,[V2,{atom,true}]}, - {label,SuccLabel}]; bif_to_test('not', [Var], Fail) -> [{test,is_eq_exact,Fail,[Var,{atom,false}]}]; bif_to_test(Name, Args, Fail) -> @@ -1448,7 +1449,12 @@ cg_call(#cg_set{anno=Anno,op=call,dst=Dst0,args=[#b_local{}=Func0|Args0]}, Line = call_line(Where, local, Anno), Call = build_call(call, Arity, {f,FuncLbl}, Context, Dst), Is = setup_args(Args, Anno, Context, St) ++ Line ++ Call, - {Is,St}; + case Anno of + #{ result_type := Info } -> + {Is ++ [{'%', {type_info, Dst, Info}}], St}; + #{} -> + {Is, St} + end; cg_call(#cg_set{anno=Anno0,op=call,dst=Dst0,args=[#b_remote{}=Func0|Args0]}, Where, Context, St) -> [Dst|Args] = beam_args([Dst0|Args0], St), @@ -1724,6 +1730,14 @@ copy(Src, Dst) -> [{move,Src,Dst}]. force_reg({literal,_}=Lit, Reg) -> {Reg,[{move,Lit,Reg}]}; +force_reg({integer,_}=Lit, Reg) -> + {Reg,[{move,Lit,Reg}]}; +force_reg({atom,_}=Lit, Reg) -> + {Reg,[{move,Lit,Reg}]}; +force_reg({float,_}=Lit, Reg) -> + {Reg,[{move,Lit,Reg}]}; +force_reg(nil=Lit, Reg) -> + {Reg,[{move,Lit,Reg}]}; force_reg({Kind,_}=R, _) when Kind =:= x; Kind =:= y -> {R,[]}. @@ -2017,9 +2031,7 @@ is_gc_bif(Bif, Args) -> %% new_label(St) -> {L,St}. new_label(#cg{lcount=Next}=St) -> - %% Advance the label counter by 2 to allow us to create - %% a label for 'or' by incrementing an existing label. - {Next,St#cg{lcount=Next+2}}. + {Next,St#cg{lcount=Next+1}}. %% call_line(tail|body, Func, Anno) -> [] | [{line,...}]. %% Produce a line instruction if it will be needed by the diff --git a/lib/compiler/src/beam_ssa_opt.erl b/lib/compiler/src/beam_ssa_opt.erl index ac2d943fef..2c898ba6f8 100644 --- a/lib/compiler/src/beam_ssa_opt.erl +++ b/lib/compiler/src/beam_ssa_opt.erl @@ -18,63 +18,167 @@ %% %CopyrightEnd% %% +%%% +%%% This is a collection of various optimizations that don't need a separate +%%% pass by themselves and/or are mutually beneficial to other passes. +%%% +%%% The optimizations are applied in "phases," each with a list of sub-passes +%%% to run. These sub-passes are applied on all functions in a module before +%%% moving on to the next phase, which lets us gather module-level information +%%% in one phase and then apply it in the next without having to risk working +%%% with incomplete information. +%%% +%%% Each sub-pass operates on a #st{} record and a func_info_db(), where the +%%% former is just a #b_function{} whose blocks can be represented either in +%%% linear or map form, and the latter is a map with information about all +%%% functions in the module (see beam_ssa_opt.hrl for more details). +%%% + -module(beam_ssa_opt). -export([module/2]). --include("beam_ssa.hrl"). --import(lists, [all/2,append/1,foldl/3,keyfind/3,member/2,reverse/1,reverse/2, - splitwith/2,takewhile/2,unzip/1]). +-include("beam_ssa_opt.hrl"). + +-import(lists, [all/2,append/1,duplicate/2,foldl/3,keyfind/3,member/2, + reverse/1,reverse/2, + splitwith/2,sort/1,takewhile/2,unzip/1]). + +-define(DEFAULT_REPETITIONS, 2). -spec module(beam_ssa:b_module(), [compile:option()]) -> {'ok',beam_ssa:b_module()}. -module(#b_module{body=Fs0}=Module, Opts) -> - Ps = passes(Opts), - Fs = functions(Fs0, Ps), - {ok,Module#b_module{body=Fs}}. +-record(st, {ssa :: [{beam_ssa:label(),beam_ssa:b_blk()}] | + beam_ssa:block_map(), + args :: [beam_ssa:b_var()], + cnt :: beam_ssa:label(), + anno :: beam_ssa:anno()}). +-type st_map() :: #{ func_id() => #st{} }. 
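Returning to the bif_to_test/4 change in beam_ssa_codegen above: SuccLabel for 'or' is now allocated with new_label/1 instead of being derived as Fail+1, which is why new_label/1 no longer needs to advance the label counter by 2. A sketch of the sequence emitted for `X or Y` in a guard (registers and label names are illustrative):

    {test,is_eq_exact,{f,SuccLabel},[{x,0},{atom,false}]},
        %% X is not 'false', so the whole 'or' succeeds.
    {test,is_eq_exact,{f,Fail},[{x,1},{atom,true}]},
        %% X is 'false', so Y must be 'true'.
    {label,SuccLabel}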
+ +module(Module, Opts) -> + FuncDb0 = case proplists:get_value(no_module_opt, Opts, false) of + false -> build_func_db(Module); + true -> #{} + end, + + %% Passes that perform module-level optimizations are often aided by + %% optimizing callers before callees and vice versa, so we optimize all + %% functions in call order, flipping it as required. + StMap0 = build_st_map(Module), + Order = get_call_order_po(StMap0, FuncDb0), + + Phases = + [{Order, prologue_passes(Opts)}] ++ + repeat(Opts, repeated_passes(Opts), Order) ++ + [{Order, epilogue_passes(Opts)}], + + {StMap, _FuncDb} = foldl(fun({FuncIds, Ps}, {StMap, FuncDb}) -> + phase(FuncIds, Ps, StMap, FuncDb) + end, {StMap0, FuncDb0}, Phases), + + {ok, finish(Module, StMap)}. + +phase([FuncId | Ids], Ps, StMap, FuncDb0) -> + try + {St, FuncDb} = + compile:run_sub_passes(Ps, {map_get(FuncId, StMap), FuncDb0}), + + phase(Ids, Ps, StMap#{ FuncId => St }, FuncDb) + catch + Class:Error:Stack -> + #b_local{name=Name,arity=Arity} = FuncId, + io:fwrite("Function: ~w/~w\n", [Name,Arity]), + erlang:raise(Class, Error, Stack) + end; +phase([], _Ps, StMap, FuncDb) -> + {StMap, FuncDb}. + +%% Repeats the given passes, alternating the order between runs to make the +%% type pass more efficient. +repeat(Opts, Ps, OrderA) -> + Repeat = proplists:get_value(ssa_opt_repeat, Opts, ?DEFAULT_REPETITIONS), + OrderB = reverse(OrderA), + repeat_1(Repeat, Ps, OrderA, OrderB). -functions([F|Fs], Ps) -> - [function(F, Ps)|functions(Fs, Ps)]; -functions([], _Ps) -> []. +repeat_1(0, _Opts, _OrderA, _OrderB) -> + []; +repeat_1(N, Ps, OrderA, OrderB) when N > 0, N rem 2 =:= 0 -> + [{OrderA, Ps} | repeat_1(N - 1, Ps, OrderA, OrderB)]; +repeat_1(N, Ps, OrderA, OrderB) when N > 0, N rem 2 =:= 1 -> + [{OrderB, Ps} | repeat_1(N - 1, Ps, OrderA, OrderB)]. --type b_blk() :: beam_ssa:b_blk(). --type b_var() :: beam_ssa:b_var(). --type label() :: beam_ssa:label(). +%% + +get_func_id(F) -> + {_Mod, Name, Arity} = beam_ssa:get_anno(func_info, F), + #b_local{name=#b_literal{val=Name}, arity=Arity}. + +-spec build_st_map(#b_module{}) -> st_map(). +build_st_map(#b_module{body=Fs}) -> + build_st_map_1(Fs, #{}). + +build_st_map_1([F | Fs], Map) -> + #b_function{anno=Anno,args=Args,cnt=Counter,bs=Bs} = F, + St = #st{anno=Anno,args=Args,cnt=Counter,ssa=Bs}, + build_st_map_1(Fs, Map#{ get_func_id(F) => St }); +build_st_map_1([], Map) -> + Map. + +-spec finish(#b_module{}, st_map()) -> #b_module{}. +finish(#b_module{body=Fs0}=Module, StMap) -> + Module#b_module{body=finish_1(Fs0, StMap)}. + +finish_1([F0 | Fs], StMap) -> + #st{anno=Anno,cnt=Counter,ssa=Blocks} = map_get(get_func_id(F0), StMap), + F = F0#b_function{anno=Anno,bs=Blocks,cnt=Counter}, + [F | finish_1(Fs, StMap)]; +finish_1([], _StMap) -> + []. + +%% --record(st, {ssa :: beam_ssa:block_map() | [{label(),b_blk()}], - args :: [b_var()], - cnt :: label()}). -define(PASS(N), {N,fun N/1}). -passes(Opts0) -> +prologue_passes(Opts) -> Ps = [?PASS(ssa_opt_split_blocks), ?PASS(ssa_opt_coalesce_phis), + ?PASS(ssa_opt_tail_phis), ?PASS(ssa_opt_element), ?PASS(ssa_opt_linearize), + ?PASS(ssa_opt_tuple_size), ?PASS(ssa_opt_record), - - %% Run ssa_opt_cse twice, because it will help ssa_opt_dead, - %% and ssa_opt_dead will help ssa_opt_cse. Run ssa_opt_live - %% twice, because it will help ssa_opt_dead and ssa_opt_dead - %% will help ssa_opt_live. - ?PASS(ssa_opt_cse), - ?PASS(ssa_opt_type), - ?PASS(ssa_opt_live), + ?PASS(ssa_opt_cse), %Helps the first type pass. + ?PASS(ssa_opt_type_start)], + passes_1(Ps, Opts). 
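As a concrete illustration of module/2 and repeat/3 above: with the default of two repetitions (?DEFAULT_REPETITIONS), the Phases list expands to the following, alternating the call order between runs of the repeated passes:

    [{Order,          prologue_passes(Opts)},
     {Order,          repeated_passes(Opts)},   %% callees before callers
     {reverse(Order), repeated_passes(Opts)},   %% callers before callees
     {Order,          epilogue_passes(Opts)}]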
+ +%% These passes all benefit from each other (in roughly this order), so they +%% are repeated as required. +repeated_passes(Opts) -> + Ps = [?PASS(ssa_opt_live), + ?PASS(ssa_opt_bs_puts), ?PASS(ssa_opt_dead), - ?PASS(ssa_opt_cse), %Second time. - ?PASS(ssa_opt_float), - ?PASS(ssa_opt_live), %Second time. + ?PASS(ssa_opt_cse), + ?PASS(ssa_opt_tail_phis), + ?PASS(ssa_opt_type_continue)], %Must run after ssa_opt_dead to + %clean up phi nodes. + passes_1(Ps, Opts). +epilogue_passes(Opts) -> + Ps = [?PASS(ssa_opt_type_finish), + ?PASS(ssa_opt_float), + ?PASS(ssa_opt_live), %One last time to clean up the + %mess left by the float pass. ?PASS(ssa_opt_bsm), ?PASS(ssa_opt_bsm_units), ?PASS(ssa_opt_bsm_shortcut), - ?PASS(ssa_opt_misc), - ?PASS(ssa_opt_tuple_size), ?PASS(ssa_opt_sw), ?PASS(ssa_opt_blockify), ?PASS(ssa_opt_sink), ?PASS(ssa_opt_merge_blocks), ?PASS(ssa_opt_trim_unreachable)], + passes_1(Ps, Opts). + +passes_1(Ps, Opts0) -> Negations = [{list_to_atom("no_"++atom_to_list(N)),N} || {N,_} <- Ps], Opts = proplists:substitute_negations(Negations, Opts0), @@ -86,36 +190,132 @@ passes(Opts0) -> {NoName,fun(S) -> S end} end || {Name,_}=P <- Ps]. -function(#b_function{anno=Anno,bs=Blocks0,args=Args,cnt=Count0}=F, Ps) -> +%% Builds a function information map with basic information about incoming and +%% outgoing local calls, as well as whether the function is exported. +-spec build_func_db(#b_module{}) -> func_info_db(). +build_func_db(#b_module{body=Fs,exports=Exports}) -> try - St = #st{ssa=Blocks0,args=Args,cnt=Count0}, - #st{ssa=Blocks,cnt=Count} = compile:run_sub_passes(Ps, St), - F#b_function{bs=Blocks,cnt=Count} + fdb_1(Fs, gb_sets:from_list(Exports), #{}) catch - Class:Error:Stack -> - #{func_info:={_,Name,Arity}} = Anno, - io:fwrite("Function: ~w/~w\n", [Name,Arity]), - erlang:raise(Class, Error, Stack) + %% All module-level optimizations are invalid when a NIF can override a + %% function, so we have to bail out. + throw:load_nif -> #{} end. +fdb_1([#b_function{ args=Args,bs=Bs }=F | Fs], Exports, FuncDb0) -> + Id = get_func_id(F), + + #b_local{name=#b_literal{val=Name}, arity=Arity} = Id, + Exported = gb_sets:is_element({Name, Arity}, Exports), + ArgTypes = duplicate(length(Args), #{}), + + FuncDb1 = case FuncDb0 of + %% We may have an entry already if someone's called us. + #{ Id := Info } -> + FuncDb0#{ Id := Info#func_info{ exported=Exported, + arg_types=ArgTypes }}; + #{} -> + FuncDb0#{ Id => #func_info{ exported=Exported, + arg_types=ArgTypes }} + end, + + FuncDb = beam_ssa:fold_rpo(fun(_L, #b_blk{is=Is}, FuncDb) -> + fdb_is(Is, Id, FuncDb) + end, FuncDb1, Bs), + + fdb_1(Fs, Exports, FuncDb); +fdb_1([], _Exports, FuncDb) -> + FuncDb. + +fdb_is([#b_set{op=call, + args=[#b_local{}=Callee | _]} | Is], + Caller, FuncDb) -> + fdb_is(Is, Caller, fdb_update(Caller, Callee, FuncDb)); +fdb_is([#b_set{op=call, + args=[#b_remote{mod=#b_literal{val=erlang}, + name=#b_literal{val=load_nif}}, + _Path, _LoadInfo]} | _Is], _Caller, _FuncDb) -> + throw(load_nif); +fdb_is([_ | Is], Caller, FuncDb) -> + fdb_is(Is, Caller, FuncDb); +fdb_is([], _Caller, FuncDb) -> + FuncDb. + +fdb_update(Caller, Callee, FuncDb) -> + CallerVertex = maps:get(Caller, FuncDb, #func_info{}), + CalleeVertex = maps:get(Callee, FuncDb, #func_info{}), + + Calls = ordsets:add_element(Callee, CallerVertex#func_info.out), + CalledBy = ordsets:add_element(Caller, CalleeVertex#func_info.in), + + FuncDb#{ Caller => CallerVertex#func_info{out=Calls}, + Callee => CalleeVertex#func_info{in=CalledBy} }. 
+ +%% Returns the post-order of all local calls in this module. That is, it starts +%% with the functions that don't call any others and then walks up the call +%% chain. +%% +%% Functions where module-level optimization is disabled are added last in +%% arbitrary order. + +get_call_order_po(StMap, FuncDb) -> + Leaves = maps:fold(fun(Id, #func_info{out=[]}, Acc) -> + [Id | Acc]; + (_, _, Acc) -> + Acc + end, [], FuncDb), + + Order = gco_po_1(sort(Leaves), FuncDb, [], #{}), + + Order ++ maps:fold(fun(K, _V, Acc) -> + case is_map_key(K, FuncDb) of + false -> [K | Acc]; + true -> Acc + end + end, [], StMap). + +gco_po_1([Id | Ids], FuncDb, Children, Seen) when not is_map_key(Id, Seen) -> + [Id | gco_po_1(Ids, FuncDb, [Id | Children], Seen#{ Id => true })]; +gco_po_1([_Id | Ids], FuncDb, Children, Seen) -> + gco_po_1(Ids, FuncDb, Children, Seen); +gco_po_1([], FuncDb, [_|_]=Children, Seen) -> + gco_po_1(gco_po_parents(Children, FuncDb), FuncDb, [], Seen); +gco_po_1([], _FuncDb, [], _Seen) -> + []. + +gco_po_parents([Child | Children], FuncDb) -> + #{ Child := #func_info{in=Parents}} = FuncDb, + Parents ++ gco_po_parents(Children, FuncDb); +gco_po_parents([], _FuncDb) -> + []. + %%% %%% Trivial sub passes. %%% -ssa_opt_dead(#st{ssa=Linear}=St) -> - St#st{ssa=beam_ssa_dead:opt(Linear)}. +ssa_opt_dead({#st{ssa=Linear}=St, FuncDb}) -> + {St#st{ssa=beam_ssa_dead:opt(Linear)}, FuncDb}. + +ssa_opt_linearize({#st{ssa=Blocks}=St, FuncDb}) -> + {St#st{ssa=beam_ssa:linearize(Blocks)}, FuncDb}. + +ssa_opt_type_start({#st{ssa=Linear0,args=Args,anno=Anno}=St0, FuncDb0}) -> + {Linear, FuncDb} = beam_ssa_type:opt_start(Linear0, Args, Anno, FuncDb0), + {St0#st{ssa=Linear}, FuncDb}. -ssa_opt_linearize(#st{ssa=Blocks}=St) -> - St#st{ssa=beam_ssa:linearize(Blocks)}. +ssa_opt_type_continue({#st{ssa=Linear0,args=Args,anno=Anno}=St0, FuncDb0}) -> + {Linear, FuncDb} = beam_ssa_type:opt_continue(Linear0, Args, Anno, FuncDb0), + {St0#st{ssa=Linear}, FuncDb}. -ssa_opt_type(#st{ssa=Linear,args=Args}=St) -> - St#st{ssa=beam_ssa_type:opt(Linear, Args)}. +ssa_opt_type_finish({#st{args=Args,anno=Anno0}=St0, FuncDb0}) -> + {Anno, FuncDb} = beam_ssa_type:opt_finish(Args, Anno0, FuncDb0), + {St0#st{anno=Anno}, FuncDb}. -ssa_opt_blockify(#st{ssa=Linear}=St) -> - St#st{ssa=maps:from_list(Linear)}. +ssa_opt_blockify({#st{ssa=Linear}=St, FuncDb}) -> + {St#st{ssa=maps:from_list(Linear)}, FuncDb}. -ssa_opt_trim_unreachable(#st{ssa=Blocks}=St) -> - St#st{ssa=beam_ssa:trim_unreachable(Blocks)}. +ssa_opt_trim_unreachable({#st{ssa=Blocks}=St, FuncDb}) -> + {St#st{ssa=beam_ssa:trim_unreachable(Blocks)}, FuncDb}. %%% %%% Split blocks before certain instructions to enable more optimizations. @@ -127,14 +327,14 @@ ssa_opt_trim_unreachable(#st{ssa=Blocks}=St) -> %%% for sinking get_tuple_element instructions. %%% -ssa_opt_split_blocks(#st{ssa=Blocks0,cnt=Count0}=St) -> +ssa_opt_split_blocks({#st{ssa=Blocks0,cnt=Count0}=St, FuncDb}) -> P = fun(#b_set{op={bif,element}}) -> true; (#b_set{op=call}) -> true; (#b_set{op=make_fun}) -> true; (_) -> false end, {Blocks,Count} = beam_ssa:split_blocks(P, Blocks0, Count0), - St#st{ssa=Blocks,cnt=Count}. + {St#st{ssa=Blocks,cnt=Count}, FuncDb}. %%% %%% Coalesce phi nodes. @@ -158,10 +358,10 @@ ssa_opt_split_blocks(#st{ssa=Blocks0,cnt=Count0}=St) -> %%% different registers). %%% -ssa_opt_coalesce_phis(#st{ssa=Blocks0}=St) -> +ssa_opt_coalesce_phis({#st{ssa=Blocks0}=St, FuncDb}) -> Ls = beam_ssa:rpo(Blocks0), Blocks = c_phis_1(Ls, Blocks0), - St#st{ssa=Blocks}. + {St#st{ssa=Blocks}, FuncDb}. 
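A small worked example for fdb_update/3 and get_call_order_po/2 above, assuming a hypothetical module in which a/0 calls b/0 and b/0 calls c/0:

    %% fdb_update/3 records the local call edges:
    %%   a/0 : #func_info{out=[b/0]}
    %%   b/0 : #func_info{in=[a/0], out=[c/0]}
    %%   c/0 : #func_info{in=[b/0], out=[]}
    %%
    %% get_call_order_po/2 starts from the leaf c/0 and walks the
    %% 'in' edges upwards, yielding [c/0, b/0, a/0]: callees are
    %% optimized before their callers, so inferred return types can
    %% flow upward through the call chain.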
c_phis_1([L|Ls], Blocks0) -> case maps:get(L, Blocks0) of @@ -233,6 +433,160 @@ c_fix_branches([{_,Pred}|As], L, Blocks0) -> c_fix_branches([], _, Blocks) -> Blocks. %%% +%%% Eliminate phi nodes in the tail of a function. +%%% +%%% Try to eliminate short blocks that starts with a phi node +%%% and end in a return. For example: +%%% +%%% Result = phi { Res1, 4 }, { literal true, 5 } +%%% Ret = put_tuple literal ok, Result +%%% ret Ret +%%% +%%% The code in this block can be inserted at the end blocks 4 and +%%% 5. Thus, the following code can be inserted into block 4: +%%% +%%% Ret:1 = put_tuple literal ok, Res1 +%%% ret Ret:1 +%%% +%%% And the following code into block 5: +%%% +%%% Ret:2 = put_tuple literal ok, literal true +%%% ret Ret:2 +%%% +%%% Which can be further simplified to: +%%% +%%% ret literal {ok, true} +%%% +%%% This transformation may lead to more code improvements: +%%% +%%% - Stack trimming +%%% - Fewer test_heap instructions +%%% - Smaller stack frames +%%% + +ssa_opt_tail_phis({#st{ssa=SSA0,cnt=Count0}=St, FuncDb}) -> + {SSA,Count} = opt_tail_phis(SSA0, Count0), + {St#st{ssa=SSA,cnt=Count}, FuncDb}. + +opt_tail_phis(Blocks, Count) when is_map(Blocks) -> + opt_tail_phis(maps:values(Blocks), Blocks, Count); +opt_tail_phis(Linear0, Count0) when is_list(Linear0) -> + Blocks0 = maps:from_list(Linear0), + {Blocks,Count} = opt_tail_phis(Blocks0, Count0), + {beam_ssa:linearize(Blocks),Count}. + +opt_tail_phis([#b_blk{is=Is0,last=Last}|Bs], Blocks0, Count0) -> + case {Is0,Last} of + {[#b_set{op=phi,args=[_,_|_]}|_],#b_ret{arg=#b_var{}}=Ret} -> + {Phis,Is} = splitwith(fun(#b_set{op=Op}) -> Op =:= phi end, Is0), + case suitable_tail_ops(Is) of + true -> + {Blocks,Count} = opt_tail_phi(Phis, Is, Ret, + Blocks0, Count0), + opt_tail_phis(Bs, Blocks, Count); + false -> + opt_tail_phis(Bs, Blocks0, Count0) + end; + {_,_} -> + opt_tail_phis(Bs, Blocks0, Count0) + end; +opt_tail_phis([], Blocks, Count) -> + {Blocks,Count}. + +opt_tail_phi(Phis0, Is, Ret, Blocks0, Count0) -> + Phis = rel2fam(reduce_phis(Phis0)), + {Blocks,Count,Cost} = + foldl(fun(PhiArg, Acc) -> + opt_tail_phi_arg(PhiArg, Is, Ret, Acc) + end, {Blocks0,Count0,0}, Phis), + MaxCost = length(Phis) * 3 + 2, + if + Cost =< MaxCost -> + %% The transformation would cause at most a slight + %% increase in code size if no more optimizations + %% can be applied. + {Blocks,Count}; + true -> + %% The code size would be increased too much. + {Blocks0,Count0} + end. + +reduce_phis([#b_set{dst=PhiDst,args=PhiArgs}|Is]) -> + [{L,{PhiDst,Val}} || {Val,L} <- PhiArgs] ++ reduce_phis(Is); +reduce_phis([]) -> []. + +opt_tail_phi_arg({PredL,Sub0}, Is0, Ret0, {Blocks0,Count0,Cost0}) -> + Blk0 = map_get(PredL, Blocks0), + #b_blk{is=IsPrefix,last=#b_br{succ=Next,fail=Next}} = Blk0, + case is_exit_bif(IsPrefix) of + false -> + Sub1 = maps:from_list(Sub0), + {Is1,Count,Sub} = new_names(Is0, Sub1, Count0, []), + Is2 = [sub(I, Sub) || I <- Is1], + Cost = build_cost(Is2, Cost0), + Is = IsPrefix ++ Is2, + Ret = sub(Ret0, Sub), + Blk = Blk0#b_blk{is=Is,last=Ret}, + Blocks = Blocks0#{PredL:=Blk}, + {Blocks,Count,Cost}; + true -> + %% The block ends in a call to a function that + %% will cause an exception. + {Blocks0,Count0,Cost0+3} + end. + +is_exit_bif([#b_set{op=call, + args=[#b_remote{mod=#b_literal{val=Mod}, + name=#b_literal{val=Name}}|Args]}]) -> + erl_bifs:is_exit_bif(Mod, Name, length(Args)); +is_exit_bif(_) -> false. 
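A source-level sketch of the tail-phi elimination described above (a hypothetical function, not from the commit):

    %% Before: both case arms meet in a phi node whose only use is
    %% building the returned tuple.
    f(X) ->
        Res = case X of
                  one -> g(X);
                  two -> true
              end,
        {ok,Res}.

    %% Conceptually after ssa_opt_tail_phis: the tuple construction
    %% sinks into each arm, so the second arm returns the literal
    %% {ok,true} directly, enabling the stack trimming and test_heap
    %% savings listed above.
    f(X) ->
        case X of
            one -> {ok,g(X)};
            two -> {ok,true}
        end.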
+ +new_names([#b_set{dst=Dst}=I|Is], Sub0, Count0, Acc) -> + {NewDst,Count} = new_var(Dst, Count0), + Sub = Sub0#{Dst=>NewDst}, + new_names(Is, Sub, Count, [I#b_set{dst=NewDst}|Acc]); +new_names([], Sub, Count, Acc) -> + {reverse(Acc),Count,Sub}. + +suitable_tail_ops(Is) -> + all(fun(#b_set{op=Op}) -> + is_suitable_tail_op(Op) + end, Is). + +is_suitable_tail_op({bif,_}) -> true; +is_suitable_tail_op(put_list) -> true; +is_suitable_tail_op(put_tuple) -> true; +is_suitable_tail_op(_) -> false. + +build_cost([#b_set{op=put_list,args=Args}|Is], Cost) -> + case are_all_literals(Args) of + true -> + build_cost(Is, Cost); + false -> + build_cost(Is, Cost + 1) + end; +build_cost([#b_set{op=put_tuple,args=Args}|Is], Cost) -> + case are_all_literals(Args) of + true -> + build_cost(Is, Cost); + false -> + build_cost(Is, Cost + length(Args) + 1) + end; +build_cost([#b_set{op={bif,_},args=Args}|Is], Cost) -> + case are_all_literals(Args) of + true -> + build_cost(Is, Cost); + false -> + build_cost(Is, Cost + 1) + end; +build_cost([], Cost) -> Cost. + +are_all_literals(Args) -> + all(fun(#b_literal{}) -> true; + (_) -> false + end, Args). + +%%% %%% Order element/2 calls. %%% %%% Order an unbroken chain of element/2 calls for the same tuple @@ -241,7 +595,7 @@ c_fix_branches([], _, Blocks) -> Blocks. %%% be replaced with get_tuple_element/3 instructions. %%% -ssa_opt_element(#st{ssa=Blocks}=St) -> +ssa_opt_element({#st{ssa=Blocks}=St, FuncDb}) -> %% Collect the information about element instructions in this %% function. GetEls = collect_element_calls(beam_ssa:linearize(Blocks)), @@ -253,7 +607,7 @@ ssa_opt_element(#st{ssa=Blocks}=St) -> %% For each chain, swap the first element call with the %% element call with the highest index. - St#st{ssa=swap_element_calls(Chains, Blocks)}. + {St#st{ssa=swap_element_calls(Chains, Blocks)}, FuncDb}. collect_element_calls([{L,#b_blk{is=Is0,last=Last}}|Bs]) -> case {Is0,Last} of @@ -314,9 +668,9 @@ swap_element_calls_1([], _, Blocks) -> %%% when applicable. %%% -ssa_opt_record(#st{ssa=Linear}=St) -> +ssa_opt_record({#st{ssa=Linear}=St, FuncDb}) -> Blocks = maps:from_list(Linear), - St#st{ssa=record_opt(Linear, Blocks)}. + {St#st{ssa=record_opt(Linear, Blocks)}, FuncDb}. record_opt([{L,#b_blk{is=Is0,last=Last}=Blk0}|Bs], Blocks) -> Is = record_opt_is(Is0, Last, Blocks), @@ -400,9 +754,9 @@ is_tagged_tuple_4([], _, _) -> no. %%% subexpressions across instructions that clobber the X registers. %%% -ssa_opt_cse(#st{ssa=Linear}=St) -> +ssa_opt_cse({#st{ssa=Linear}=St, FuncDb}) -> M = #{0=>#{}}, - St#st{ssa=cse(Linear, #{}, M)}. + {St#st{ssa=cse(Linear, #{}, M)}, FuncDb}. cse([{L,#b_blk{is=Is0,last=Last0}=Blk}|Bs], Sub0, M0) -> Es0 = maps:get(L, M0), @@ -543,13 +897,13 @@ cse_suitable(#b_set{}) -> false. bs :: beam_ssa:block_map() }). -ssa_opt_float(#st{ssa=Linear0,cnt=Count0}=St) -> +ssa_opt_float({#st{ssa=Linear0,cnt=Count0}=St, FuncDb}) -> NonGuards0 = float_non_guards(Linear0), NonGuards = gb_sets:from_list(NonGuards0), Blocks = maps:from_list(Linear0), Fs = #fs{non_guards=NonGuards,bs=Blocks}, {Linear,Count} = float_opt(Linear0, Count0, Fs), - St#st{ssa=Linear,cnt=Count}. + {St#st{ssa=Linear,cnt=Count}, FuncDb}. float_non_guards([{L,#b_blk{is=Is}}|Bs]) -> case Is of @@ -787,15 +1141,20 @@ float_flush_regs(#fs{regs=Rs}) -> %%% with a cheaper instructions %%% -ssa_opt_live(#st{ssa=Linear}=St) -> - St#st{ssa=live_opt(reverse(Linear), #{}, [])}. 
+ssa_opt_live({#st{ssa=Linear0}=St, FuncDb}) -> + RevLinear = reverse(Linear0), + Blocks0 = maps:from_list(RevLinear), + Blocks = live_opt(RevLinear, #{}, Blocks0), + Linear = beam_ssa:linearize(Blocks), + {St#st{ssa=Linear}, FuncDb}. -live_opt([{L,Blk0}|Bs], LiveMap0, Acc) -> - Successors = beam_ssa:successors(Blk0), +live_opt([{L,Blk0}|Bs], LiveMap0, Blocks) -> + Blk1 = beam_ssa_share:block(Blk0, Blocks), + Successors = beam_ssa:successors(Blk1), Live0 = live_opt_succ(Successors, L, LiveMap0), - {Blk,Live} = live_opt_blk(Blk0, Live0), + {Blk,Live} = live_opt_blk(Blk1, Live0), LiveMap = live_opt_phis(Blk#b_blk.is, L, Live, LiveMap0), - live_opt(Bs, LiveMap, [{L,Blk}|Acc]); + live_opt(Bs, LiveMap, Blocks#{L:=Blk}); live_opt([], _, Acc) -> Acc. live_opt_succ([S|Ss], L, LiveMap) -> @@ -849,14 +1208,9 @@ live_opt_is([#b_set{op=succeeded,dst=SuccDst=SuccDstVar, #b_set{dst=Dst}=I|Is], Live0, Acc) -> case gb_sets:is_member(Dst, Live0) of true -> - case gb_sets:is_member(SuccDst, Live0) of - true -> - Live1 = gb_sets:add(Dst, Live0), - Live = gb_sets:delete_any(SuccDst, Live1), - live_opt_is([I|Is], Live, [SuccI|Acc]); - false -> - live_opt_is([I|Is], Live0, Acc) - end; + Live1 = gb_sets:add(Dst, Live0), + Live = gb_sets:delete_any(SuccDst, Live1), + live_opt_is([I|Is], Live, [SuccI|Acc]); false -> case live_opt_unused(I) of {replace,NewI0} -> @@ -896,24 +1250,32 @@ live_opt_unused(#b_set{op=get_map_element}=Set) -> live_opt_unused(_) -> keep. %%% -%%% Optimize binary matching instructions. +%%% Optimize binary matching. +%%% +%%% * If the value of segment is never extracted, rewrite +%%% to a bs_skip instruction. +%%% +%%% * Coalesce adjacent bs_skip instructions and skip instructions +%%% with bs_test_tail. %%% -ssa_opt_bsm(#st{ssa=Linear}=St) -> +ssa_opt_bsm({#st{ssa=Linear}=St, FuncDb}) -> Extracted0 = bsm_extracted(Linear), Extracted = cerl_sets:from_list(Extracted0), - St#st{ssa=bsm_skip(Linear, Extracted)}. + {St#st{ssa=bsm_skip(Linear, Extracted)}, FuncDb}. -bsm_skip([{L,#b_blk{is=Is0}=Blk}|Bs], Extracted) -> +bsm_skip([{L,#b_blk{is=Is0}=Blk}|Bs0], Extracted) -> + Bs = bsm_skip(Bs0, Extracted), Is = bsm_skip_is(Is0, Extracted), - [{L,Blk#b_blk{is=Is}}|bsm_skip(Bs, Extracted)]; + coalesce_skips({L,Blk#b_blk{is=Is}}, Bs); bsm_skip([], _) -> []. bsm_skip_is([I0|Is], Extracted) -> case I0 of - #b_set{op=bs_match,args=[#b_literal{val=string}|_]} -> - [I0|bsm_skip_is(Is, Extracted)]; - #b_set{op=bs_match,dst=Ctx,args=[Type,PrevCtx|Args0]} -> + #b_set{op=bs_match, + dst=Ctx, + args=[#b_literal{val=T}=Type,PrevCtx|Args0]} + when T =/= string, T =/= skip -> I = case cerl_sets:is_element(Ctx, Extracted) of true -> I0; @@ -937,18 +1299,75 @@ bsm_extracted([{_,#b_blk{is=Is}}|Bs]) -> end; bsm_extracted([]) -> []. +coalesce_skips({L,#b_blk{is=[#b_set{op=bs_extract}=Extract|Is0], + last=Last0}=Blk0}, Bs0) -> + case coalesce_skips_is(Is0, Last0, Bs0) of + not_possible -> + [{L,Blk0}|Bs0]; + {Is,Last,Bs} -> + Blk = Blk0#b_blk{is=[Extract|Is],last=Last}, + [{L,Blk}|Bs] + end; +coalesce_skips({L,#b_blk{is=Is0,last=Last0}=Blk0}, Bs0) -> + case coalesce_skips_is(Is0, Last0, Bs0) of + not_possible -> + [{L,Blk0}|Bs0]; + {Is,Last,Bs} -> + Blk = Blk0#b_blk{is=Is,last=Last}, + [{L,Blk}|Bs] + end. 
+ +coalesce_skips_is([#b_set{op=bs_match, + args=[#b_literal{val=skip}, + Ctx0,Type,Flags, + #b_literal{val=Size0}, + #b_literal{val=Unit0}]}=Skip0, + #b_set{op=succeeded}], + #b_br{succ=L2,fail=Fail}=Br0, + Bs0) when is_integer(Size0) -> + case Bs0 of + [{L2,#b_blk{is=[#b_set{op=bs_match, + dst=SkipDst, + args=[#b_literal{val=skip},_,_,_, + #b_literal{val=Size1}, + #b_literal{val=Unit1}]}, + #b_set{op=succeeded}=Succeeded], + last=#b_br{fail=Fail}=Br}}|Bs] when is_integer(Size1) -> + SkipBits = Size0 * Unit0 + Size1 * Unit1, + Skip = Skip0#b_set{dst=SkipDst, + args=[#b_literal{val=skip},Ctx0, + Type,Flags, + #b_literal{val=SkipBits}, + #b_literal{val=1}]}, + Is = [Skip,Succeeded], + {Is,Br,Bs}; + [{L2,#b_blk{is=[#b_set{op=bs_test_tail, + args=[_Ctx,#b_literal{val=TailSkip}]}], + last=#b_br{succ=NextSucc,fail=Fail}}}|Bs] -> + SkipBits = Size0 * Unit0, + TestTail = Skip0#b_set{op=bs_test_tail, + args=[Ctx0,#b_literal{val=SkipBits+TailSkip}]}, + Br = Br0#b_br{bool=TestTail#b_set.dst,succ=NextSucc}, + Is = [TestTail], + {Is,Br,Bs}; + _ -> + not_possible + end; +coalesce_skips_is(_, _, _) -> + not_possible. + %%% %%% Short-cutting binary matching instructions. %%% -ssa_opt_bsm_shortcut(#st{ssa=Linear}=St) -> +ssa_opt_bsm_shortcut({#st{ssa=Linear}=St, FuncDb}) -> Positions = bsm_positions(Linear, #{}), case map_size(Positions) of 0 -> %% No binary matching instructions. - St; + {St, FuncDb}; _ -> - St#st{ssa=bsm_shortcut(Linear, Positions)} + {St#st{ssa=bsm_shortcut(Linear, Positions)}, FuncDb} end. bsm_positions([{L,#b_blk{is=Is,last=Last}}|Bs], PosMap0) -> @@ -1010,8 +1429,8 @@ bsm_shortcut([], _PosMap) -> []. %%% Eliminate redundant bs_test_unit2 instructions. %%% -ssa_opt_bsm_units(#st{ssa=Linear}=St) -> - St#st{ssa=bsm_units(Linear, #{})}. +ssa_opt_bsm_units({#st{ssa=Linear}=St, FuncDb}) -> + {St#st{ssa=bsm_units(Linear, #{})}, FuncDb}. bsm_units([{L,#b_blk{last=#b_br{succ=Succ,fail=Fail}}=Block0} | Bs], UnitMaps0) -> UnitsIn = maps:get(L, UnitMaps0, #{}), @@ -1111,91 +1530,172 @@ bsm_units_join_1([], _MapA, Right) -> Right. %%% -%%% Miscellanous optimizations in execution order. +%%% Optimize binary construction. +%%% +%%% If an integer segment or a float segment has a literal size and +%%% a literal value, convert to a binary segment. Coalesce adjacent +%%% literal binary segments. Literal binary segments will be converted +%%% to bs_put_string instructions in later pass. %%% -ssa_opt_misc(#st{ssa=Linear}=St) -> - St#st{ssa=misc_opt(Linear, #{})}. +ssa_opt_bs_puts({#st{ssa=Linear0,cnt=Count0}=St, FuncDb}) -> + {Linear,Count} = opt_bs_puts(Linear0, Count0, []), + {St#st{ssa=Linear,cnt=Count}, FuncDb}. -misc_opt([{L,#b_blk{is=Is0,last=Last0}=Blk0}|Bs], Sub0) -> - {Is,Sub} = misc_opt_is(Is0, Sub0, []), - Last = sub(Last0, Sub), - Blk = Blk0#b_blk{is=Is,last=Last}, - [{L,Blk}|misc_opt(Bs, Sub)]; -misc_opt([], _) -> []. +opt_bs_puts([{L,#b_blk{is=Is}=Blk0}|Bs], Count0, Acc0) -> + case Is of + [#b_set{op=bs_put}=I0] -> + case opt_bs_put(L, I0, Blk0, Count0, Acc0) of + not_possible -> + opt_bs_puts(Bs, Count0, [{L,Blk0}|Acc0]); + {Count,Acc1} -> + Acc = opt_bs_puts_merge(Acc1), + opt_bs_puts(Bs, Count, Acc) + end; + _ -> + opt_bs_puts(Bs, Count0, [{L,Blk0}|Acc0]) + end; +opt_bs_puts([], Count, Acc) -> + {reverse(Acc),Count}. 
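A source-level sketch of the bs_match skip optimizations above (hypothetical clause): neither segment value is extracted, so both matches are rewritten to skips, the two skips are coalesced into a single 32-bit skip, and that skip is finally folded into the bs_test_tail for the empty rest:

    m(<<_:8,_:24>>) -> ok.
    %% After the match context is created, a single bs_test_tail
    %% checks that exactly 32 bits remain.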
-misc_opt_is([#b_set{op=phi}=I0|Is], Sub0, Acc) -> - #b_set{dst=Dst,args=Args} = I = sub(I0, Sub0), - case all_same(Args) of +opt_bs_puts_merge([{L1,#b_blk{is=Is}=Blk0},{L2,#b_blk{is=AccIs}}=BAcc|Acc]) -> + case {AccIs,Is} of + {[#b_set{op=bs_put, + args=[#b_literal{val=binary}, + #b_literal{}, + #b_literal{val=Bin0}, + #b_literal{val=all}, + #b_literal{val=1}]}], + [#b_set{op=bs_put, + args=[#b_literal{val=binary}, + #b_literal{}, + #b_literal{val=Bin1}, + #b_literal{val=all}, + #b_literal{val=1}]}=I0]} -> + %% Coalesce the two segments to one. + Bin = <<Bin0/bitstring,Bin1/bitstring>>, + I = I0#b_set{args=bs_put_args(binary, Bin, all)}, + Blk = Blk0#b_blk{is=[I]}, + [{L2,Blk}|Acc]; + {_,_} -> + [{L1,Blk0},BAcc|Acc] + end. + +opt_bs_put(L, I0, #b_blk{last=Br0}=Blk0, Count0, Acc) -> + case opt_bs_put(I0) of + [Bin] when is_bitstring(Bin) -> + Args = bs_put_args(binary, Bin, all), + I = I0#b_set{args=Args}, + Blk = Blk0#b_blk{is=[I]}, + {Count0,[{L,Blk}|Acc]}; + [{int,Int,Size},Bin] when is_bitstring(Bin) -> + %% Construct a bs_put_integer instruction following + %% by a bs_put_binary instruction. + IntArgs = bs_put_args(integer, Int, Size), + BinArgs = bs_put_args(binary, Bin, all), + {BinL,BinVarNum} = {Count0,Count0+1}, + Count = Count0 + 2, + BinVar = #b_var{name={'@ssa_bool',BinVarNum}}, + BinI = I0#b_set{dst=BinVar,args=BinArgs}, + BinBlk = Blk0#b_blk{is=[BinI],last=Br0#b_br{bool=BinVar}}, + IntI = I0#b_set{args=IntArgs}, + IntBlk = Blk0#b_blk{is=[IntI],last=Br0#b_br{succ=BinL}}, + {Count,[{BinL,BinBlk},{L,IntBlk}|Acc]}; + not_possible -> + not_possible + end. + +opt_bs_put(#b_set{args=[#b_literal{val=binary},_,#b_literal{val=Val}, + #b_literal{val=all},#b_literal{val=Unit}]}) + when is_bitstring(Val) -> + if + bit_size(Val) rem Unit =:= 0 -> + [Val]; true -> - %% Eliminate the phi node if there is just one source - %% value or if the values are identical. - [{Val,_}|_] = Args, - Sub = Sub0#{Dst=>Val}, - misc_opt_is(Is, Sub, Acc); - false -> - misc_opt_is(Is, Sub0, [I|Acc]) - end; -misc_opt_is([#b_set{op={bif,'and'}}=I0], Sub, Acc) -> - #b_set{dst=Dst,args=Args} = I = sub(I0, Sub), - case eval_and(Args) of - error -> - misc_opt_is([], Sub, [I|Acc]); - Val -> - misc_opt_is([], Sub#{Dst=>Val}, Acc) - end; -misc_opt_is([#b_set{op={bif,'or'}}=I0], Sub, Acc) -> - #b_set{dst=Dst,args=Args} = I = sub(I0, Sub), - case eval_or(Args) of - error -> - misc_opt_is([], Sub, [I|Acc]); - Val -> - misc_opt_is([], Sub#{Dst=>Val}, Acc) + not_possible end; -misc_opt_is([#b_set{}=I0|Is], Sub, Acc) -> - #b_set{op=Op,dst=Dst,args=Args} = I = sub(I0, Sub), - case make_literal(Op, Args) of - #b_literal{}=Literal -> - misc_opt_is(Is, Sub#{Dst=>Literal}, Acc); - error -> - misc_opt_is(Is, Sub, [I|Acc]) +opt_bs_put(#b_set{args=[#b_literal{val=Type},#b_literal{val=Flags}, + #b_literal{val=Val},#b_literal{val=Size}, + #b_literal{val=Unit}]}=I0) when is_integer(Size) -> + EffectiveSize = Size * Unit, + if + EffectiveSize > 0 -> + case {Type,opt_bs_put_endian(Flags)} of + {integer,big} when is_integer(Val) -> + if + EffectiveSize < 64 -> + [<<Val:EffectiveSize>>]; + true -> + opt_bs_put_split_int(Val, EffectiveSize) + end; + {integer,little} when is_integer(Val), EffectiveSize < 128 -> + %% To avoid an explosion in code size, we only try + %% to optimize relatively small fields. 
+ <<Int:EffectiveSize>> = <<Val:EffectiveSize/little>>, + Args = bs_put_args(Type, Int, EffectiveSize), + I = I0#b_set{args=Args}, + opt_bs_put(I); + {binary,_} when is_bitstring(Val) -> + <<Bitstring:EffectiveSize/bits,_/bits>> = Val, + [Bitstring]; + {float,Endian} -> + try + [opt_bs_put_float(Val, EffectiveSize, Endian)] + catch error:_ -> + not_possible + end; + {_,_} -> + not_possible + end; + true -> + not_possible end; -misc_opt_is([], Sub, Acc) -> - {reverse(Acc),Sub}. +opt_bs_put(#b_set{}) -> not_possible. -all_same([{H,_}|T]) -> - all(fun({E,_}) -> E =:= H end, T). - -make_literal(put_tuple, Args) -> - case make_literal_list(Args, []) of - error -> - error; - List -> - #b_literal{val=list_to_tuple(List)} - end; -make_literal(put_list, [#b_literal{val=H},#b_literal{val=T}]) -> - #b_literal{val=[H|T]}; -make_literal(_, _) -> error. +opt_bs_put_float(N, Sz, Endian) -> + case Endian of + big -> <<N:Sz/big-float-unit:1>>; + little -> <<N:Sz/little-float-unit:1>> + end. -make_literal_list([#b_literal{val=H}|T], Acc) -> - make_literal_list(T, [H|Acc]); -make_literal_list([_|_], _) -> - error; -make_literal_list([], Acc) -> - reverse(Acc). - -eval_and(Args) -> - case Args of - [_,#b_literal{val=false}=Res] -> Res; - [Res,#b_literal{val=true}] -> Res; - [_,_] -> error +bs_put_args(Type, Val, Size) -> + [#b_literal{val=Type}, + #b_literal{val=[unsigned,big]}, + #b_literal{val=Val}, + #b_literal{val=Size}, + #b_literal{val=1}]. + +opt_bs_put_endian([big=E|_]) -> E; +opt_bs_put_endian([little=E|_]) -> E; +opt_bs_put_endian([native=E|_]) -> E; +opt_bs_put_endian([_|Fs]) -> opt_bs_put_endian(Fs). + +opt_bs_put_split_int(Int, Size) -> + Pos = opt_bs_put_split_int_1(Int, 0, Size - 1), + UpperSize = Size - Pos, + if + Pos =:= 0 -> + %% Value is 0 or -1 -- keep the original instruction. + not_possible; + UpperSize < 64 -> + %% No or few leading zeroes or ones. + [<<Int:Size>>]; + true -> + %% There are 64 or more leading ones or zeroes in + %% the resulting binary. Split into two separate + %% segments to avoid an explosion in code size. + [{int,Int bsr Pos,UpperSize},<<Int:Pos>>] end. -eval_or(Args) -> - case Args of - [Res,#b_literal{val=false}] -> Res; - [_,#b_literal{val=true}=Res] -> Res; - [_,_] -> error +opt_bs_put_split_int_1(_Int, L, R) when L > R -> + 8 * ((L + 7) div 8); +opt_bs_put_split_int_1(Int, L, R) -> + Mid = (L + R) div 2, + case Int bsr Mid of + Upper when Upper =:= 0; Upper =:= -1 -> + opt_bs_put_split_int_1(Int, L, Mid - 1); + _ -> + opt_bs_put_split_int_1(Int, Mid + 1, R) end. %%% @@ -1258,9 +1758,9 @@ eval_or(Args) -> %%% is_tuple_of_arity instruction by the loader. %%% -ssa_opt_tuple_size(#st{ssa=Linear0,cnt=Count0}=St) -> +ssa_opt_tuple_size({#st{ssa=Linear0,cnt=Count0}=St, FuncDb}) -> {Linear,Count} = opt_tup_size(Linear0, Count0, []), - St#st{ssa=Linear,cnt=Count}. + {St#st{ssa=Linear,cnt=Count}, FuncDb}. opt_tup_size([{L,#b_blk{is=Is,last=Last}=Blk}|Bs], Count0, Acc0) -> case {Is,Last} of @@ -1333,9 +1833,9 @@ opt_tup_size_is([], _, _, _Acc) -> none. %%% is 'true' or 'false' can be rewritten to a is_boolean test. %%% -ssa_opt_sw(#st{ssa=Linear0,cnt=Count0}=St) -> +ssa_opt_sw({#st{ssa=Linear0,cnt=Count0}=St, FuncDb}) -> {Linear,Count} = opt_sw(Linear0, #{}, Count0, []), - St#st{ssa=Linear,cnt=Count}. + {St#st{ssa=Linear,cnt=Count}, FuncDb}. opt_sw([{L,#b_blk{is=Is,last=#b_switch{}=Last0}=Blk0}|Bs], Phis0, Count0, Acc) -> Phis = opt_sw_phis(Is, Phis0), @@ -1426,9 +1926,10 @@ opt_sw_literals([], Acc) -> Acc. %%% Merge blocks. 
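A sketch of what opt_bs_put/1 above achieves for a fully literal construction (hypothetical function):

    b() -> <<1:16,2:8>>.
    %% Both integer segments have literal sizes and values, so they
    %% are evaluated and the resulting literal binary segments are
    %% coalesced: the body becomes the single literal <<0,1,2>>,
    %% emitted as one bs_put_string by a later pass.

opt_bs_put_split_int/2 guards the other direction: a wide field such as <<0:4096>> is left unevaluated (the Pos =:= 0 case above), so constant evaluation cannot explode the code size.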
%%% -ssa_opt_merge_blocks(#st{ssa=Blocks}=St) -> +ssa_opt_merge_blocks({#st{ssa=Blocks}=St, FuncDb}) -> Preds = beam_ssa:predecessors(Blocks), - St#st{ssa=merge_blocks_1(beam_ssa:rpo(Blocks), Preds, Blocks)}. + Merged = merge_blocks_1(beam_ssa:rpo(Blocks), Preds, Blocks), + {St#st{ssa=Merged}, FuncDb}. merge_blocks_1([L|Ls], Preds0, Blocks0) -> case Preds0 of @@ -1438,6 +1939,7 @@ merge_blocks_1([L|Ls], Preds0, Blocks0) -> true -> #b_blk{is=Is0} = Blk0, #b_blk{is=Is1} = Blk1, + verify_merge_is(Is1), Is = Is0 ++ Is1, Blk = Blk1#b_blk{is=Is}, Blocks1 = maps:remove(L, Blocks0), @@ -1463,6 +1965,13 @@ merge_update_preds([], _, _, Preds) -> Preds. rename_label(From, From, To) -> To; rename_label(Lbl, _, _) -> Lbl. +verify_merge_is([#b_set{op=Op}|_]) -> + %% The merged block has only one predecessor, so it should not have any phi + %% nodes. + true = Op =/= phi; %Assertion. +verify_merge_is(_) -> + ok. + is_merge_allowed(_, _, #b_blk{is=[#b_set{op=peek_message}|_]}) -> false; is_merge_allowed(L, Blk0, #b_blk{}) -> @@ -1487,7 +1996,7 @@ is_merge_allowed(L, Blk0, #b_blk{}) -> %%% extracted values. %%% -ssa_opt_sink(#st{ssa=Blocks0}=St) -> +ssa_opt_sink({#st{ssa=Blocks0}=St, FuncDb}) -> Linear = beam_ssa:linearize(Blocks0), %% Create a map with all variables that define get_tuple_element @@ -1528,7 +2037,7 @@ ssa_opt_sink(#st{ssa=Blocks0}=St) -> From = maps:get(V, Defs), move_defs(V, From, To, A) end, Blocks0, DefLoc), - St#st{ssa=Blocks}. + {St#st{ssa=Blocks}, FuncDb}. def_blocks([{L,#b_blk{is=Is}}|Bs]) -> def_blocks_is(Is, L, def_blocks(Bs)); @@ -1763,3 +2272,9 @@ sub_arg(Old, Sub) -> #{Old:=New} -> New; #{} -> Old end. + +new_var(#b_var{name={Base,N}}, Count) -> + true = is_integer(N), %Assertion. + {#b_var{name={Base,Count}},Count+1}; +new_var(#b_var{name=Base}, Count) -> + {#b_var{name={Base,Count}},Count+1}. diff --git a/lib/compiler/src/beam_ssa_opt.hrl b/lib/compiler/src/beam_ssa_opt.hrl new file mode 100644 index 0000000000..37711a6f48 --- /dev/null +++ b/lib/compiler/src/beam_ssa_opt.hrl @@ -0,0 +1,53 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2019. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% + +-include("beam_ssa.hrl"). + +-record(func_info, + {%% Local calls going in/out of this function. + in = ordsets:new() :: ordsets:ordset(func_id()), + out = ordsets:new() :: ordsets:ordset(func_id()), + + %% Whether the function is exported or not; some optimizations may + %% need to be suppressed if it is. + exported = true :: boolean(), + + %% The inferred types of each argument (as opposed to parameter), + %% indexed by call site. + %% + %% This is more effective than the naive approach of joining into a + %% "parameter_type" as we go as it lets us narrow parameter types + %% without having to visit all callers on each pass, which helps a lot + %% when dealing with co-recursive functions. 
+ arg_types = [] :: list(arg_type_map()), + + %% The inferred return type of this function, this is either [type()] + %% or [] to note absence. + ret_type = [] :: list()}). + +-type arg_key() :: {CallerId :: func_id(), + CallDst :: beam_ssa:b_var()}. +-type arg_type_map() :: #{ arg_key() => term() }. + +%% Per-function metadata used by various optimization passes to perform +%% module-level optimization. If a function is absent it means that +%% module-level optimization has been turned off for said function. +-type func_id() :: beam_ssa:b_local(). +-type func_info_db() :: #{ func_id() => #func_info{} }. diff --git a/lib/compiler/src/beam_ssa_pre_codegen.erl b/lib/compiler/src/beam_ssa_pre_codegen.erl index 32232e9b9f..fde1118c29 100644 --- a/lib/compiler/src/beam_ssa_pre_codegen.erl +++ b/lib/compiler/src/beam_ssa_pre_codegen.erl @@ -72,7 +72,7 @@ -import(lists, [all/2,any/2,append/1,duplicate/2, foldl/3,last/1,map/2,member/2,partition/2, - reverse/1,reverse/2,sort/1,zip/2]). + reverse/1,reverse/2,sort/1,splitwith/2,zip/2]). -spec module(beam_ssa:b_module(), [compile:option()]) -> {'ok',beam_ssa:b_module()}. @@ -1031,7 +1031,7 @@ need_frame_1([#b_set{op=call,args=[Func|_]}|Is], Context) -> case Func of #b_remote{mod=#b_literal{val=Mod}, name=#b_literal{val=Name}, - arity=Arity} -> + arity=Arity} when is_atom(Mod), is_atom(Name) -> case erl_bifs:is_exit_bif(Mod, Name, Arity) of true -> false; @@ -1993,19 +1993,35 @@ reserve_zregs(Blocks, Intervals, Res) -> end, beam_ssa:fold_rpo(F, [0], Res, Blocks). +reserve_zreg([#b_set{op=call,dst=Dst}], + #b_br{bool=Dst}, _ShortLived, A) -> + %% If type optimization has determined that the result of a call can be + %% used directly in a branch, we must avoid reserving a z register or code + %% generation will fail. + A; reserve_zreg([#b_set{op={bif,tuple_size},dst=Dst}, - #b_set{op={bif,'=:='},args=[Dst,Val]}], _Last, ShortLived, A0) -> - case Val of - #b_literal{val=Arity} when Arity bsr 32 =:= 0 -> + #b_set{op={bif,'=:='},args=[Dst,Val]}], Last, ShortLived, A0) -> + case {Val,Last} of + {#b_literal{val=Arity},#b_br{bool=#b_var{}}} when Arity bsr 32 =:= 0 -> %% These two instructions can be combined to a test_arity %% instruction provided that the arity variable is short-lived. reserve_zreg_1(Dst, ShortLived, A0); - _ -> + {_,_} -> + %% Either the arity is too big, or the boolean value is not + %% used in a conditional branch. A0 end; reserve_zreg([#b_set{op={bif,tuple_size},dst=Dst}], #b_switch{}, ShortLived, A) -> reserve_zreg_1(Dst, ShortLived, A); +reserve_zreg([#b_set{op={bif,'xor'}}], _Last, _ShortLived, A) -> + %% There is no short, easy way to rewrite 'xor' to a series of + %% test instructions. + A; +reserve_zreg([#b_set{op={bif,is_record}}], _Last, _ShortLived, A) -> + %% There is no short, easy way to rewrite is_record/2 to a series of + %% test instructions. + A; reserve_zreg([#b_set{op=Op,dst=Dst}|Is], Last, ShortLived, A0) -> IsZReg = case Op of bs_match_string -> true; @@ -2070,23 +2086,95 @@ reserve_freg([], Res) -> Res. %% will allocate the lowest free X register for the variable. reserve_xregs(Blocks, Res) -> - F = fun(L, #b_blk{is=Is,last=Last}, R) -> - {Xs0,Used0} = reserve_terminator(L, Last, Blocks, R), - reserve_xregs_is(reverse(Is), R, Xs0, Used0) - end, - beam_ssa:fold_po(F, Res, Blocks). - + Ls = reverse(beam_ssa:rpo(Blocks)), + reserve_xregs(Ls, Blocks, #{}, Res). 
+
+reserve_xregs([L|Ls], Blocks, XsMap0, Res0) ->
+ #b_blk{anno=Anno,is=Is0,last=Last} = map_get(L, Blocks),
+
+ %% Calculate mapping from variable name to the preferred
+ %% register.
+ Xs0 = reserve_terminator(L, Is0, Last, Blocks, XsMap0, Res0),
+
+ %% We need to figure out where the code generator will
+ %% place instructions that will do a garbage collection.
+ %% Insert 'gc' markers as pseudo-instructions in the
+ %% instruction sequence.
+ Is1 = reverse(Is0),
+ Is2 = res_place_gc_instrs(Is1, []),
+ Is = res_place_allocate(Anno, Is2),
+
+ %% Add register hints for variables that are defined
+ %% in the (reversed) instruction sequence.
+ {Res,Xs} = reserve_xregs_is(Is, Res0, Xs0, []),
+
+ XsMap = XsMap0#{L=>Xs},
+ reserve_xregs(Ls, Blocks, XsMap, Res);
+reserve_xregs([], _, _, Res) -> Res.
+
+%% Insert explicit 'gc' markers at the points where there will
+%% be a garbage collection. (Note that the instruction
+%% sequence passed to this function is reversed.)
+
+res_place_gc_instrs([#b_set{op=phi}=I|Is], Acc) ->
+ res_place_gc_instrs(Is, [I|Acc]);
+res_place_gc_instrs([#b_set{op=Op}=I|Is], Acc)
+ when Op =:= call; Op =:= make_fun ->
+ case Acc of
+ [] ->
+ res_place_gc_instrs(Is, [I|Acc]);
+ [GC|_] when GC =:= gc; GC =:= test_heap ->
+ res_place_gc_instrs(Is, [I,gc|Acc]);
+ [_|_] ->
+ res_place_gc_instrs(Is, [I,gc|Acc])
+ end;
+res_place_gc_instrs([#b_set{op=Op,args=Args}=I|Is], Acc0) ->
+ case beam_ssa_codegen:classify_heap_need(Op, Args) of
+ neutral ->
+ case Acc0 of
+ [test_heap|Acc] ->
+ res_place_gc_instrs(Is, [test_heap,I|Acc]);
+ Acc ->
+ res_place_gc_instrs(Is, [I|Acc])
+ end;
+ {put,_} ->
+ case Acc0 of
+ [test_heap|Acc] ->
+ res_place_gc_instrs(Is, [test_heap,I|Acc]);
+ Acc ->
+ res_place_gc_instrs(Is, [test_heap,I|Acc])
+ end;
+ _ ->
+ res_place_gc_instrs(Is, [gc,I|Acc0])
+ end;
+res_place_gc_instrs([], Acc) ->
+ %% Reverse and replace 'test_heap' markers with 'gc'.
+ %% (The distinction is no longer useful.)
+ res_place_gc_instrs_rev(Acc, []).
+
+res_place_gc_instrs_rev([test_heap|Is], [gc|_]=Acc) ->
+ res_place_gc_instrs_rev(Is, Acc);
+res_place_gc_instrs_rev([test_heap|Is], Acc) ->
+ res_place_gc_instrs_rev(Is, [gc|Acc]);
+res_place_gc_instrs_rev([gc|Is], [gc|_]=Acc) ->
+ res_place_gc_instrs_rev(Is, Acc);
+res_place_gc_instrs_rev([I|Is], Acc) ->
+ res_place_gc_instrs_rev(Is, [I|Acc]);
+res_place_gc_instrs_rev([], Acc) -> Acc.
+
+res_place_allocate(#{yregs:=_}, Is) ->
+ %% There will be an 'allocate' instruction inserted here.
+ Is ++ [gc];
+res_place_allocate(#{}, Is) -> Is.
+
+reserve_xregs_is([gc|Is], Res, Xs0, Used) ->
+ %% At this point, the code generator will place an instruction
+ %% that does a garbage collection. We must prune the remembered
+ %% registers.
+ Xs = res_xregs_prune(Xs0, Used, Res),
+ reserve_xregs_is(Is, Res, Xs, Used);
 reserve_xregs_is([#b_set{op=Op,dst=Dst,args=Args}=I|Is], Res0, Xs0, Used0) ->
- Xs1 = case is_gc_safe(I) of
- true ->
- Xs0;
- false ->
- %% There may be a garbage collection after executing this
- %% instruction. We will need prune the list of preferred
- %% X registers.
- res_xregs_prune(Xs0, Used0, Res0)
- end,
- Res = reserve_xreg(Dst, Xs1, Res0),
+ Res = reserve_xreg(Dst, Xs0, Res0),
 Used1 = ordsets:union(Used0, beam_ssa:used(I)),
 Used = ordsets:del_element(Dst, Used1),
 case Op of
@@ -2097,28 +2185,74 @@ reserve_xregs_is([#b_set{op=Op,dst=Dst,args=Args}=I|Is], Res0, Xs0, Used0) ->
 Xs = reserve_call_args(tl(Args)),
 reserve_xregs_is(Is, Res, Xs, Used);
 _ ->
- reserve_xregs_is(Is, Res, Xs1, Used)
+ reserve_xregs_is(Is, Res, Xs0, Used)
 end;
-reserve_xregs_is([], Res, _Xs, _Used) -> Res.
-
-reserve_terminator(L, #b_br{bool=#b_literal{val=true},succ=Succ}, Blocks, Res) ->
- case maps:get(Succ, Blocks) of
+reserve_xregs_is([], Res, Xs, _Used) ->
+ {Res,Xs}.
+
+%% Pick up register hints from the successors of this block.
+reserve_terminator(_L, _Is, #b_br{bool=#b_var{},succ=Succ,fail=?BADARG_BLOCK},
+ _Blocks, XsMap, _Res) ->
+ %% We know that no variables are used at ?BADARG_BLOCK, so
+ %% any register hints from the success blocks are safe to use.
+ map_get(Succ, XsMap);
+reserve_terminator(L, Is, #b_br{bool=#b_var{},succ=Succ,fail=Fail},
+ Blocks, XsMap, Res) when Succ =/= Fail ->
+ #{Succ:=SuccBlk,Fail:=FailBlk} = Blocks,
+ case {SuccBlk,FailBlk} of
+ {#b_blk{is=[],last=#b_br{succ=PhiL,fail=PhiL}},
+ #b_blk{is=[],last=#b_br{succ=PhiL,fail=PhiL}}} ->
+ %% Both branches ultimately transfer to the same
+ %% block (via two blocks with no instructions).
+ %% Pick up register hints from the phi nodes
+ %% in the common block.
+ #{PhiL:=#b_blk{is=PhiIs}} = Blocks,
+ Xs = res_xregs_from_phi(PhiIs, Succ, Res, #{}),
+ res_xregs_from_phi(PhiIs, Fail, Res, Xs);
+ {_,_} when Is =/= [] ->
+ case last(Is) of
+ #b_set{op=succeeded,args=[Arg]} ->
+ %% We know that Arg will not be used at the failure
+ %% label, so we can pick up register hints from the
+ %% success label.
+ Br = #b_br{bool=#b_literal{val=true},succ=Succ,fail=Succ},
+ case reserve_terminator(L, [], Br, Blocks, XsMap, Res) of
+ #{Arg:=Reg} -> #{Arg=>Reg};
+ #{} -> #{}
+ end;
+ _ ->
+ %% Register hints from the success block may not
+ %% be safe at the failure block, and vice versa.
+ #{}
+ end;
+ {_,_} ->
+ %% Register hints from the success block may not
+ %% be safe at the failure block, and vice versa.
+ #{}
+ end;
+reserve_terminator(L, Is, #b_br{bool=#b_literal{val=true},succ=Succ},
+ Blocks, XsMap, Res) ->
+ case map_get(Succ, Blocks) of
 #b_blk{is=[],last=Last} ->
- reserve_terminator(Succ, Last, Blocks, Res);
- #b_blk{is=[_|_]=Is} ->
- {res_xregs_from_phi(Is, L, Res, #{}),[]}
+ reserve_terminator(Succ, Is, Last, Blocks, XsMap, Res);
+ #b_blk{is=[_|_]=PhiIs} ->
+ res_xregs_from_phi(PhiIs, L, Res, #{})
 end;
-reserve_terminator(_, Last, _, _) ->
- {#{},beam_ssa:used(Last)}.
+reserve_terminator(_, _, _, _, _, _) -> #{}.
 
+%% Pick up a reservation from a phi node.
 res_xregs_from_phi([#b_set{op=phi,dst=Dst,args=Args}|Is], Pred, Res, Acc) ->
 case [V || {#b_var{}=V,L} <- Args, L =:= Pred] of
 [] ->
+ %% The value of the phi node for this predecessor
+ %% is a literal. Nothing to do here.
 res_xregs_from_phi(Is, Pred, Res, Acc);
 [V] ->
 case Res of
 #{Dst:={prefer,Reg}} ->
+ %% Try placing V in the same register as for
+ %% the phi node.
 res_xregs_from_phi(Is, Pred, Res, Acc#{V=>Reg});
 #{Dst:=_} ->
 res_xregs_from_phi(Is, Pred, Res, Acc)
@@ -2138,12 +2272,12 @@ reserve_call_args([], _, Xs) -> Xs.
 reserve_xreg(V, Xs, Res) ->
 case Res of
 #{V:=_} ->
- %% Already reserved.
+ %% Already reserved (but not as an X register).
Res;
 #{} ->
 case Xs of
 #{V:=X} ->
- %% Add a hint that a specific X register is
+ %% Add a hint that this specific X register is
 %% preferred, unless it is already in use.
 Res#{V=>{prefer,X}};
 #{} ->
@@ -2152,23 +2286,15 @@ reserve_xreg(V, Xs, Res) ->
 end
 end.
 
-is_gc_safe(#b_set{op=phi}) ->
- false;
-is_gc_safe(#b_set{op=Op,args=Args}) ->
- case beam_ssa_codegen:classify_heap_need(Op, Args) of
- neutral -> true;
- {put,_} -> true;
- _ -> false
- end.
-
 %% res_xregs_prune(PreferredRegs, Used, Res) -> PreferredRegs.
-%% Prune the list of preferred to only include X registers that
-%% are guaranteed to survice a garbage collection.
+%% Prune the list of preferred registers, to make sure that
+%% there are no "holes" (uninitialized X registers) when
+%% invoking the garbage collector.
 
-res_xregs_prune(Xs, Used, Res) ->
+res_xregs_prune(Xs, Used, Res) when map_size(Xs) =/= 0 ->
 %% The number of safe registers is the number of the X registers
 %% used after this point. The actual number of safe registers may
- %% be highter than this number, but this is a conservative safe
+ %% be higher than this number, but this is a conservative safe
 %% estimate.
 NumSafe = foldl(fun(V, N) ->
 case Res of
@@ -2180,7 +2306,8 @@ res_xregs_prune(Xs, Used, Res) ->
 
 %% Remove unsafe registers from the list of potential
 %% preferred registers.
- maps:filter(fun(_, {x,X}) -> X < NumSafe end, Xs).
+ maps:filter(fun(_, {x,X}) -> X < NumSafe end, Xs);
+res_xregs_prune(Xs, _Used, _Res) -> Xs.
 
 %%%
 %%% Register allocation using linear scan.
@@ -2486,7 +2613,7 @@ rel2fam(S0) ->
 sofs:to_external(S).
 
 split_phis(Is) ->
- partition(fun(#b_set{op=Op}) -> Op =:= phi end, Is).
+ splitwith(fun(#b_set{op=Op}) -> Op =:= phi end, Is).
 
 is_yreg({y,_}) -> true;
 is_yreg({x,_}) -> false;
diff --git a/lib/compiler/src/beam_ssa_share.erl b/lib/compiler/src/beam_ssa_share.erl
new file mode 100644
index 0000000000..426efa2cc9
--- /dev/null
+++ b/lib/compiler/src/beam_ssa_share.erl
@@ -0,0 +1,370 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+%% Share code for semantically equivalent blocks referred to
+%% by `br` and `switch` instructions.
+%%
+%% A similar optimization is done in beam_jump, but doing it here as
+%% well is beneficial as it may enable other optimizations. If there
+%% are many semantically equivalent clauses, this optimization can
+%% substantially decrease compilation times.
+%%
+%% block/2 is called from the liveness optimization pass in
+%% beam_ssa_opt, as code sharing helps the liveness pass and vice
+%% versa.
+%%
+
+-module(beam_ssa_share).
+-export([module/2,block/2]).
+
+-include("beam_ssa.hrl").
+
+-import(lists, [keyfind/3,reverse/1,sort/1]).
+
+-spec module(beam_ssa:b_module(), [compile:option()]) ->
+ {'ok',beam_ssa:b_module()}.
+
+module(#b_module{body=Fs0}=Module, _Opts) ->
+ Fs = [function(F) || F <- Fs0],
+ {ok,Module#b_module{body=Fs}}.
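As a concrete illustration of the kind of source this pass helps with (editorial example, not from the patch): the first three clauses of kind/1 below compile to semantically equivalent return blocks, so the switch generated for the function can funnel a, b and c to a single shared label instead of three identical ones.

    -module(share_demo).
    -export([kind/1]).

    %% Three clauses with identical bodies; after sharing, the switch
    %% branches to one label for all of them.
    kind(a) -> small;
    kind(b) -> small;
    kind(c) -> small;
    kind(_) -> big.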
+ +-spec block(Blk0, Blocks0) -> Blk when + Blk0 :: beam_ssa:b_blk(), + Blocks0 :: beam_ssa:block_map(), + Blk :: beam_ssa:b_blk(). + +block(#b_blk{last=Last0}=Blk, Blocks) -> + case share_terminator(Last0, Blocks) of + none -> Blk; + Last -> Blk#b_blk{last=beam_ssa:normalize(Last)} + end. + +%%% +%%% Local functions. +%%% + +function(#b_function{anno=Anno,bs=Blocks0}=F) -> + try + PO = reverse(beam_ssa:rpo(Blocks0)), + {Blocks1,Changed} = blocks(PO, Blocks0, false), + Blocks = case Changed of + true -> + beam_ssa:trim_unreachable(Blocks1); + false -> + Blocks0 + end, + F#b_function{bs=Blocks} + catch + Class:Error:Stack -> + #{func_info:={_,Name,Arity}} = Anno, + io:fwrite("Function: ~w/~w\n", [Name,Arity]), + erlang:raise(Class, Error, Stack) + end. + +blocks([L|Ls], Blocks, Changed) -> + #b_blk{last=Last0} = Blk0 = map_get(L, Blocks), + case block(Blk0, Blocks) of + #b_blk{last=Last0} -> + blocks(Ls, Blocks, Changed); + #b_blk{}=Blk -> + blocks(Ls, Blocks#{L:=Blk}, true) + end; +blocks([], Blocks, Changed) -> + {Blocks,Changed}. + +share_terminator(#b_br{bool=#b_var{},succ=Succ0,fail=Fail0}=Br, Blocks) -> + {Succ,SuccBlk} = shortcut_nonempty_block(Succ0, Blocks), + {Fail,FailBlk} = shortcut_nonempty_block(Fail0, Blocks), + case are_equivalent(Succ, SuccBlk, Fail, FailBlk, Blocks) of + true -> + %% The blocks are semantically equivalent. + Br#b_br{succ=Succ,fail=Succ}; + false -> + if + Succ =:= Succ0, Fail =:= Fail0 -> + %% None of blocks were cut short. + none; + true -> + %% One or both labels were cut short + %% to avoid jumping to an empty block. + Br#b_br{succ=Succ,fail=Fail} + end + end; +share_terminator(#b_switch{}=Sw, Blocks) -> + share_switch(Sw, Blocks); +share_terminator(_Last, _Blocks) -> none. + +%% Test whether the two blocks are semantically equivalent. This +%% function is specially optimized to return `false` as fast as +%% possible if the blocks are not equivalent, as that is the common +%% case. + +are_equivalent(_Succ, _, ?BADARG_BLOCK, _, _Blocks) -> + %% ?BADARG_BLOCK is special. Sharing could be incorrect. + false; +are_equivalent(_Succ, #b_blk{is=Is1,last=#b_ret{arg=RetVal1}=Ret1}, + _Fail, #b_blk{is=Is2,last=#b_ret{arg=RetVal2}=Ret2}, _Blocks) -> + case {RetVal1,RetVal2} of + {#b_literal{},#b_literal{}} -> + case RetVal1 =:= RetVal2 of + true -> + %% The return values are identical literals. We + %% only need to compare the canonicalized bodies. + Can1 = canonical_is(Is1), + Can2 = canonical_is(Is2), + Can1 =:= Can2; + false -> + %% Non-equal literals. + false + end; + {#b_var{},#b_var{}} -> + %% The return values are varibles. We must canonicalize + %% the blocks (including returns) and compare them. + Can1 = canonical_is(Is1 ++ [Ret1]), + Can2 = canonical_is(Is2 ++ [Ret2]), + Can1 =:= Can2; + {_,_} -> + %% One literal and one variable. + false + end; +are_equivalent(Succ, + #b_blk{is=Is1, + last=#b_br{bool=#b_literal{val=true}, + succ=Target}}, + Fail, + #b_blk{is=Is2, + last=#b_br{bool=#b_literal{val=true}, + succ=Target}}, + Blocks) -> + %% Both blocks end with an unconditional branch to the + %% same target block. If the target block has phi nodes, + %% we must pick up the values from the phi nodes and + %% compare them. + #b_blk{is=Is} = map_get(Target, Blocks), + Phis1 = canonical_terminator_phis(Is, Succ), + Phis2 = canonical_terminator_phis(Is, Fail), + case {Phis1,Phis2} of + {[#b_set{args=[#b_literal{}]}|_],_} when Phis1 =/= Phis2 -> + %% Different values are used in the phi nodes. 
+ false; + {_,[#b_set{args=[#b_literal{}]}|_]} when Phis1 =/= Phis2 -> + %% Different values are used in the phi nodes. + false; + {_,_} -> + %% The values in the phi nodes are variables or identical + %% literals. We must canonicalize the blocks and compare + %% them. + Can1 = canonical_is(Is1 ++ Phis1), + Can2 = canonical_is(Is2 ++ Phis2), + Can1 =:= Can2 + end; +are_equivalent(Succ0, #b_blk{is=Is1,last=#b_br{bool=#b_var{},fail=Same}}, + Fail0, #b_blk{is=Is2,last=#b_br{bool=#b_var{},fail=Same}}, + Blocks) -> + %% Two-way branches with identical failure labels. First compare the + %% canonicalized bodies of the blocks. + case canonical_is(Is1) =:= canonical_is(Is2) of + false -> + %% Different bodies. + false; + true -> + %% Bodies were equal. That is fairly uncommon, so to keep + %% the code simple we will rewrite the `br` to a `switch` + %% and let share_switch/2 do the work of following the + %% branches. + Sw = #b_switch{arg=#b_var{name=not_used},fail=Fail0, + list=[{#b_literal{},Succ0}]}, + #b_switch{fail=Fail,list=[{_,Succ}]} = share_switch(Sw, Blocks), + Fail =:= Succ + end; +are_equivalent(_, _, _, _, _) -> false. + +share_switch(#b_switch{fail=Fail0,list=List0}=Sw, Blocks) -> + Prep = share_prepare_sw([{value,Fail0}|List0], Blocks, 0, []), + Res = do_share_switch(Prep, Blocks, []), + [{_,Fail}|List] = [VL || {_,VL} <- sort(Res)], + Sw#b_switch{fail=Fail,list=List}. + +share_prepare_sw([{V,L0}|T], Blocks, N, Acc) -> + {L,_Blk} = shortcut_nonempty_block(L0, Blocks), + share_prepare_sw(T, Blocks, N+1, [{{L,#{}},{N,{V,L}}}|Acc]); +share_prepare_sw([], _, _, Acc) -> Acc. + +do_share_switch(Prep, Blocks, Acc) -> + Map = share_switch_1(Prep, Blocks, #{}), + share_switch_2(maps:values(Map), Blocks, Acc). + +share_switch_1([{Next0,Res}|T], Blocks, Map) -> + {Can,Next} = canonical_block(Next0, Blocks), + case Map of + #{Can:=Ls} -> + share_switch_1(T, Blocks, Map#{Can:=[{Next,Res}|Ls]}); + #{} -> + share_switch_1(T, Blocks, Map#{Can=>[{Next,Res}]}) + end; +share_switch_1([], _Blocks, Map) -> Map. + +share_switch_2([[{_,{N,Res}}]|T], Blocks, Acc) -> + %% This block is not equivalent to any other block. + share_switch_2(T, Blocks, [{N,Res}|Acc]); +share_switch_2([[{done,{_,{_,Common}}}|_]=Eqs|T], Blocks, Acc0) -> + %% Two or more blocks are semantically equivalent, and all blocks + %% are either terminated with a `ret` or a `br` to the same target + %% block. Replace the labels in the `switch` for all of those + %% blocks with the label for the first of the blocks. + Acc = [{N,{V,Common}} || {done,{N,{V,_}}} <- Eqs] ++ Acc0, + share_switch_2(T, Blocks, Acc); +share_switch_2([[{_,_}|_]=Prep|T], Blocks, Acc0) -> + %% Two or more blocks are semantically equivalent, but they have + %% different successful successor blocks. Now we must check + %% recursively whether the successor blocks are equivalent too. + Acc = do_share_switch(Prep, Blocks, Acc0), + share_switch_2(T, Blocks, Acc); +share_switch_2([], _, Acc) -> Acc. + +canonical_block({L,VarMap0}, Blocks) -> + #b_blk{is=Is,last=Last0} = map_get(L, Blocks), + case canonical_terminator(L, Last0, Blocks) of + none -> + %% The block has a terminator that we don't handle. + {{none,L},done}; + {Last,done} -> + %% The block ends with a `ret` or an unconditional `br` to + %% another block. + {Can,_VarMap} = canonical_is(Is ++ Last, VarMap0, []), + {Can,done}; + {Last,Next} -> + %% The block ends with a conditional branch. + {Can,VarMap} = canonical_is(Is ++ Last, VarMap0, []), + {Can,{Next,VarMap}} + end. 
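A hedged sketch of the recursive case handled by do_share_switch/3 above (hypothetical example module): the a and b branches are not syntactically the same block, but following their successor blocks recursively shows they are equivalent, so they can share a label.

    -module(switch_share_demo).
    -export([f/1]).

    f(X) ->
        case X of
            a -> g(1);   %% equivalent to the b branch only after the
            b -> g(1);   %% successor blocks are compared recursively
            c -> g(2)
        end.

    g(N) -> N * 2.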
+
+%% Translate a sequence of instructions to a canonical representation. If the
+%% canonical representation of two blocks compare equal, the blocks are
+%% semantically equivalent. The following translations are done:
+%%
+%% * Variables defined in the instruction sequence are replaced with
+%% {var,0}, {var,1}, and so on. Free variables are not changed.
+%%
+%% * `location` annotations that would produce a `line` instruction are
+%% kept. All other annotations are cleared.
+%%
+%% * Instructions are repackaged into tuples instead of into the
+%% usual records. The main reason is to avoid violating the types for
+%% the SSA records. We can simplify things a little by linking the
+%% instructions directly instead of putting them into a list.
+
+canonical_is(Is) ->
+ {Can,_} = canonical_is(Is, #{}, []),
+ Can.
+
+canonical_is([#b_set{op=Op,dst=Dst,args=Args0}=I|Is], VarMap0, Acc) ->
+ Args = [canonical_arg(Arg, VarMap0) || Arg <- Args0],
+ Var = {var,map_size(VarMap0)},
+ VarMap = VarMap0#{Dst=>Var},
+ LineAnno = case Op of
+ bs_match ->
+ %% The location annotation for a bs_match instruction
+ %% is only used in warnings, never to emit a `line`
+ %% instruction. Therefore, it should not be included.
+ [];
+ _ ->
+ %% The location annotation will be used in a `line`
+ %% instruction. It must be included.
+ beam_ssa:get_anno(location, I, none)
+ end,
+ canonical_is(Is, VarMap, {Op,LineAnno,Var,Args,Acc});
+canonical_is([#b_ret{arg=Arg}], VarMap, Acc0) ->
+ Acc1 = case Acc0 of
+ {call,_Anno,Var,[#b_local{}|_]=Args,PrevAcc} ->
+ %% This is a tail-recursive call to a local function.
+ %% There will be no line instruction generated;
+ %% thus, the annotation is not significant.
+ {call,[],Var,Args,PrevAcc};
+ _ ->
+ Acc0
+ end,
+ {{ret,canonical_arg(Arg, VarMap),Acc1},VarMap};
+canonical_is([#b_br{bool=#b_var{},fail=Fail}], VarMap, Acc) ->
+ {{br,succ,Fail,Acc},VarMap};
+canonical_is([#b_br{succ=Succ}], VarMap, Acc) ->
+ {{br,Succ,Acc},VarMap};
+canonical_is([], VarMap, Acc) ->
+ {Acc,VarMap}.
+
+canonical_terminator(_L, #b_ret{}=Ret, _Blocks) ->
+ {[Ret],done};
+canonical_terminator(L, #b_br{bool=#b_literal{val=true},succ=Succ}=Br, Blocks) ->
+ #b_blk{is=Is} = map_get(Succ, Blocks),
+ case canonical_terminator_phis(Is, L) of
+ [] ->
+ {[],Succ};
+ [_|_]=Phis ->
+ {Phis ++ [Br],done}
+ end;
+canonical_terminator(_L, #b_br{bool=#b_var{},succ=Succ}=Br, _Blocks) ->
+ {[Br],Succ};
+canonical_terminator(_, _, _) -> none.
+
+canonical_terminator_phis([#b_set{op=phi,args=PhiArgs}=Phi|Is], L) ->
+ {Value,L} = keyfind(L, 2, PhiArgs),
+ [Phi#b_set{op=copy,args=[Value]}|canonical_terminator_phis(Is, L)];
+canonical_terminator_phis([#b_set{op=peek_message}=I|_], L) ->
+ %% We could get stuck in an infinite loop if we allowed the
+ %% comparisons to continue into this block. Force an unequal
+ %% compare with all other predecessors of this block.
+ [I#b_set{op=copy,args=[#b_literal{val=L}]}];
+canonical_terminator_phis(_, _) -> [].
+
+canonical_arg(#b_var{}=Var, VarMap) ->
+ case VarMap of
+ #{Var:=CanonicalVar} ->
+ CanonicalVar;
+ #{} ->
+ Var
+ end;
+canonical_arg(#b_remote{mod=Mod,name=Name}, VarMap) ->
+ {remote,canonical_arg(Mod, VarMap),
+ canonical_arg(Name, VarMap)};
+canonical_arg(Other, _VarMap) -> Other.
+
+%% Shortcut branches to empty blocks if safe.
+
+shortcut_nonempty_block(L, Blocks) ->
+ case map_get(L, Blocks) of
+ #b_blk{is=[],last=#b_br{bool=#b_literal{val=true},succ=Succ}}=Blk ->
+ %% This block is empty.
+ case is_forbidden(Succ, Blocks) of + false -> + shortcut_nonempty_block(Succ, Blocks); + true -> + {L,Blk} + end; + #b_blk{}=Blk -> + {L,Blk} + end. + +is_forbidden(L, Blocks) -> + case map_get(L, Blocks) of + #b_blk{is=[#b_set{op=phi}|_]} -> true; + #b_blk{is=[#b_set{op=peek_message}|_]} -> true; + #b_blk{} -> false + end. diff --git a/lib/compiler/src/beam_ssa_type.erl b/lib/compiler/src/beam_ssa_type.erl index 95fc3bb0e9..38ea5e6914 100644 --- a/lib/compiler/src/beam_ssa_type.erl +++ b/lib/compiler/src/beam_ssa_type.erl @@ -19,19 +19,22 @@ %% -module(beam_ssa_type). --export([opt/2]). +-export([opt_start/4, opt_continue/4, opt_finish/3]). --include("beam_ssa.hrl"). +-include("beam_ssa_opt.hrl"). -import(lists, [all/2,any/2,droplast/1,foldl/3,last/1,member/2, - reverse/1,sort/1]). + partition/2,reverse/1,sort/1]). -define(UNICODE_INT, #t_integer{elements={0,16#10FFFF}}). --record(d, {ds :: #{beam_ssa:b_var():=beam_ssa:b_set()}, - ls :: #{beam_ssa:label():=type_db()}, - once :: cerl_sets:set(beam_ssa:b_var()), - sub :: #{beam_ssa:b_var():=beam_ssa:value()} - }). +-record(d, + {ds :: #{beam_ssa:b_var():=beam_ssa:b_set()}, + ls :: #{beam_ssa:label():=type_db()}, + once :: cerl_sets:set(beam_ssa:b_var()), + func_id :: func_id(), + func_db :: func_info_db(), + sub = #{} :: #{beam_ssa:b_var():=beam_ssa:value()}, + ret_type = [] :: [type()]}). -define(ATOM_SET_SIZE, 5). @@ -49,36 +52,155 @@ {'binary',pos_integer()} | 'cons' | 'float' | 'list' | 'map' | 'nil' |'number'. -type type_db() :: #{beam_ssa:var_name():=type()}. --spec opt([{Label0,Block0}], Args) -> [{Label,Block}] when - Label0 :: beam_ssa:label(), - Block0 :: beam_ssa:b_blk(), +-spec opt_start(Linear, Args, Anno, FuncDb) -> {Linear, FuncDb} when + Linear :: [{non_neg_integer(), beam_ssa:b_blk()}], Args :: [beam_ssa:b_var()], - Label :: beam_ssa:label(), - Block :: beam_ssa:b_blk(). - -opt(Linear, Args) -> - UsedOnce = used_once(Linear, Args), + Anno :: beam_ssa:anno(), + FuncDb :: func_info_db(). +opt_start(Linear, Args, Anno, FuncDb) -> + %% This is the first run through the module, so our arg_types can be + %% incomplete as we may not have visited all call sites at least once. Ts = maps:from_list([{V,any} || #b_var{}=V <- Args]), + opt_continue_1(Linear, Args, get_func_id(Anno), Ts, FuncDb). + +-spec opt_continue(Linear, Args, Anno, FuncDb) -> {Linear, FuncDb} when + Linear :: [{non_neg_integer(), beam_ssa:b_blk()}], + Args :: [beam_ssa:b_var()], + Anno :: beam_ssa:anno(), + FuncDb :: func_info_db(). +opt_continue(Linear, Args, Anno, FuncDb) -> + Id = get_func_id(Anno), + case FuncDb of + #{ Id := #func_info{exported=false,arg_types=ArgTypes} } -> + %% This is a local function and we're guaranteed to have visited + %% every call site at least once, so we know that the parameter + %% types are at least as narrow as the join of all argument types. + Ts = join_arg_types(Args, ArgTypes, Anno), + opt_continue_1(Linear, Args, Id, Ts, FuncDb); + #{} -> + %% We can't infer the parameter types of exported functions, nor + %% the ones where module-level optimization is disabled, but + %% running the pass again could still help other functions. + Ts = maps:from_list([{V,any} || #b_var{}=V <- Args]), + opt_continue_1(Linear, Args, Id, Ts, FuncDb) + end. + +join_arg_types(Args, ArgTypes, Anno) -> + %% We suppress type optimization for parameters that have already been + %% optimized by another pass, as they may have done things we have no idea + %% how to interpret and running them over could generate incorrect code. 
+ ParamTypes = maps:get(parameter_type_info, Anno, #{}), + Ts0 = join_arg_types_1(Args, ArgTypes, #{}), + maps:fold(fun(Arg, _V, Ts) -> + maps:put(Arg, any, Ts) + end, Ts0, ParamTypes). + +join_arg_types_1([Arg | Args], [TM | TMs], Ts) when map_size(TM) =/= 0 -> + join_arg_types_1(Args, TMs, Ts#{ Arg => join(maps:values(TM))}); +join_arg_types_1([Arg | Args], [_TM | TMs], Ts) -> + join_arg_types_1(Args, TMs, Ts#{ Arg => any }); +join_arg_types_1([], [], Ts) -> + Ts. + +-spec opt_continue_1(Linear, Args, Id, Ts, FuncDb) -> Result when + Linear :: [{non_neg_integer(), beam_ssa:b_blk()}], + Args :: [beam_ssa:b_var()], + Id :: func_id(), + Ts :: type_db(), + FuncDb :: func_info_db(), + Result :: {Linear, FuncDb}. +opt_continue_1(Linear0, Args, Id, Ts, FuncDb0) -> + UsedOnce = used_once(Linear0, Args), FakeCall = #b_set{op=call,args=[#b_remote{mod=#b_literal{val=unknown}, name=#b_literal{val=unknown}, arity=0}]}, Defs = maps:from_list([{Var,FakeCall#b_set{dst=Var}} || #b_var{}=Var <- Args]), - D = #d{ds=Defs,ls=#{0=>Ts,?BADARG_BLOCK=>#{}}, - once=UsedOnce,sub=#{}}, - opt_1(Linear, D). -opt_1([{L,Blk}|Bs], #d{ls=Ls}=D) -> + D = #d{ func_db=FuncDb0, + func_id=Id, + ds=Defs, + ls=#{0=>Ts,?BADARG_BLOCK=>#{}}, + once=UsedOnce }, + + {Linear, FuncDb, NewRet} = opt_1(Linear0, D, []), + + case FuncDb of + #{ Id := Entry0 } -> + Entry = Entry0#func_info{ret_type=NewRet}, + {Linear, FuncDb#{ Id := Entry }}; + #{} -> + %% Module-level optimizations have been turned off for this + %% function. + {Linear, FuncDb} + end. + +-spec opt_finish(Args, Anno, FuncDb) -> {Anno, FuncDb} when + Args :: [beam_ssa:b_var()], + Anno :: beam_ssa:anno(), + FuncDb :: func_info_db(). +opt_finish(Args, Anno, FuncDb) -> + Id = get_func_id(Anno), + case FuncDb of + #{ Id := #func_info{exported=false,arg_types=ArgTypes} } -> + ParamInfo0 = maps:get(parameter_type_info, Anno, #{}), + ParamInfo = opt_finish_1(Args, ArgTypes, ParamInfo0), + {Anno#{ parameter_type_info => ParamInfo }, FuncDb}; + #{} -> + {Anno, FuncDb} + end. + +opt_finish_1([Arg | Args], [TypeMap | TypeMaps], ParamInfo) + when is_map_key(Arg, ParamInfo); %% See join_arg_types/3 + map_size(TypeMap) =:= 0 -> + opt_finish_1(Args, TypeMaps, ParamInfo); +opt_finish_1([Arg | Args], [TypeMap | TypeMaps], ParamInfo0) -> + case join(maps:values(TypeMap)) of + any -> + opt_finish_1(Args, TypeMaps, ParamInfo0); + JoinedType -> + JoinedType = verified_type(JoinedType), + ParamInfo = ParamInfo0#{ Arg => validator_anno(JoinedType) }, + opt_finish_1(Args, TypeMaps, ParamInfo) + end; +opt_finish_1([], [], ParamInfo) -> + ParamInfo. + +validator_anno(#t_tuple{size=Size,exact=Exact}) -> + beam_validator:type_anno(tuple, Size, Exact); +validator_anno(#t_integer{elements={Same,Same}}) -> + beam_validator:type_anno(integer, Same); +validator_anno(#t_integer{}) -> + beam_validator:type_anno(integer); +validator_anno(float) -> + beam_validator:type_anno(float); +validator_anno(#t_atom{elements=[Val]}) -> + beam_validator:type_anno(atom, Val); +validator_anno(#t_atom{}=A) -> + case t_is_boolean(A) of + true -> beam_validator:type_anno(bool); + false -> beam_validator:type_anno(atom) + end; +validator_anno(T) -> + beam_validator:type_anno(T). + +get_func_id(Anno) -> + #{func_info:={_Mod, Name, Arity}} = Anno, + #b_local{name=#b_literal{val=Name}, arity=Arity}. + +opt_1([{L,Blk}|Bs], #d{ls=Ls}=D, Acc) -> case Ls of #{L:=Ts} -> - opt_2(L, Blk, Bs, Ts, D); + opt_2(L, Blk, Bs, Ts, D, Acc); #{} -> %% This block is never reached. Discard it. 
- opt_1(Bs, D)
+ opt_1(Bs, D, Acc)
 end;
-opt_1([], #d{}) -> [].
+opt_1([], D, Acc) ->
+ #d{func_db=FuncDb,ret_type=NewRet} = D,
+ {reverse(Acc), FuncDb, NewRet}.
 
-opt_2(L, #b_blk{is=Is0}=Blk0, Bs, Ts, #d{sub=Sub}=D0) ->
+opt_2(L, #b_blk{is=Is0}=Blk0, Bs, Ts, #d{sub=Sub}=D0, Acc) ->
 case Is0 of
 [#b_set{op=call,dst=Dst,
 args=[#b_remote{mod=#b_literal{val=Mod},
@@ -94,34 +216,43 @@ opt_2(L, #b_blk{is=Is0}=Blk0, Bs, Ts, #d{sub=Sub}=D0) ->
 Ret = #b_ret{arg=Dst},
 Blk = Blk0#b_blk{is=[I],last=Ret},
 Ls = maps:remove(L, D0#d.ls),
- D = D0#d{ls=Ls},
- [{L,Blk}|opt_1(Bs, D)];
+
+ %% We potentially lack a return value.
+ RetType = join([none | D0#d.ret_type]),
+
+ D = D0#d{ls=Ls,ret_type=[RetType]},
+ opt_1(Bs, D, [{L,Blk} | Acc]);
 false ->
- opt_3(L, Blk0, Bs, Ts, D0)
+ opt_3(L, Blk0, Bs, Ts, D0, Acc)
 end;
 _ ->
- opt_3(L, Blk0, Bs, Ts, D0)
+ opt_3(L, Blk0, Bs, Ts, D0, Acc)
 end.
 
 opt_3(L, #b_blk{is=Is0,last=Last0}=Blk0, Bs, Ts0,
- #d{ds=Ds0,ls=Ls0,sub=Sub0}=D0) ->
- {Is,Ts,Ds,Sub} = opt_is(Is0, Ts0, Ds0, Ls0, Sub0, []),
- D1 = D0#d{ds=Ds,sub=Sub},
- Last1 = simplify_terminator(Last0, Sub, Ts),
+ #d{ds=Ds0,ls=Ls0,sub=Sub0,func_db=Fdb0}=D0, Acc) ->
+ {Is,Ts,Ds,Fdb,Sub} = opt_is(Is0, Ts0, Ds0, Fdb0, Ls0, D0, Sub0, []),
+ D1 = D0#d{ds=Ds,sub=Sub,func_db=Fdb},
+ Last1 = simplify_terminator(Last0, Sub, Ts, Ds),
 Last = opt_terminator(Last1, Ts, Ds),
 D = update_successors(Last, Ts, D1),
 Blk = Blk0#b_blk{is=Is,last=Last},
- [{L,Blk}|opt_1(Bs, D)].
+ opt_1(Bs, D, [{L,Blk} | Acc]).
 
-simplify_terminator(#b_br{bool=Bool}=Br, Sub, Ts) ->
+simplify_terminator(#b_br{bool=Bool}=Br, Sub, Ts, _Ds) ->
 Br#b_br{bool=simplify_arg(Bool, Sub, Ts)};
-simplify_terminator(#b_switch{arg=Arg}=Sw, Sub, Ts) ->
+simplify_terminator(#b_switch{arg=Arg}=Sw, Sub, Ts, _Ds) ->
 Sw#b_switch{arg=simplify_arg(Arg, Sub, Ts)};
-simplify_terminator(#b_ret{arg=Arg}=Ret, Sub, Ts) ->
- Ret#b_ret{arg=simplify_arg(Arg, Sub, Ts)}.
+simplify_terminator(#b_ret{arg=Arg}=Ret, Sub, Ts, Ds) ->
+ %% Reducing the result of a call to a literal (fairly common for 'ok')
+ %% breaks tail call optimization.
+ case Ds of
+ #{ Arg := #b_set{op=call}} -> Ret;
+ #{} -> Ret#b_ret{arg=simplify_arg(Arg, Sub, Ts)}
+ end.
 
 opt_is([#b_set{op=phi,dst=Dst,args=Args0}=I0|Is],
- Ts0, Ds0, Ls, Sub0, Acc) ->
+ Ts0, Ds0, Fdb, Ls, D, Sub0, Acc) ->
 %% Simplify the phi node by removing all predecessor blocks that no
 %% longer exists or no longer branches to this block.
 Args = [{simplify_arg(Arg, Sub0, Ts0),From} ||
@@ -132,15 +263,61 @@ opt_is([#b_set{op=phi,dst=Dst,args=Args0}=I0|Is],
 %% value or if the values are identical.
 [{Val,_}|_] = Args,
 Sub = Sub0#{Dst=>Val},
- opt_is(Is, Ts0, Ds0, Ls, Sub, Acc);
+ opt_is(Is, Ts0, Ds0, Fdb, Ls, D, Sub, Acc);
 false ->
 I = I0#b_set{args=Args},
 Ts = update_types(I, Ts0, Ds0),
 Ds = Ds0#{Dst=>I},
- opt_is(Is, Ts, Ds, Ls, Sub0, [I|Acc])
+ opt_is(Is, Ts, Ds, Fdb, Ls, D, Sub0, [I|Acc])
+ end;
+opt_is([#b_set{op=call,args=Args0,dst=Dst}=I0 | Is],
+ Ts0, Ds0, Fdb0, Ls, D, Sub, Acc) ->
+ Args = simplify_args(Args0, Sub, Ts0),
+ I1 = beam_ssa:normalize(I0#b_set{args=Args}),
+
+ %% This is a bit of a kludge; we know that any instruction whose return
+ %% type is 'none' will fail at runtime, but we don't yet have a way to cut
+ %% a block short so we move on like nothing happened.
+ %%
+ %% This complicates argument type optimization as unreachable calls can
+ %% add types that will never occur, so we skip optimizing this call if
+ %% the type of any of its arguments is 'none'.
+ [_Callee | Rest] = Args, + case all(fun(Arg) -> get_type(Arg, Ts0) =/= none end, Rest) of + true -> + {Ts, Ds, Fdb, I} = opt_call(I1, D, Ts0, Ds0, Fdb0), + opt_is(Is, Ts, Ds, Fdb, Ls, D, Sub, [I|Acc]); + false -> + Ts = Ts0#{ Dst => any }, + Ds = Ds0#{ Dst => I1 }, + opt_is(Is, Ts, Ds, Fdb0, Ls, D, Sub, [I1|Acc]) + end; +opt_is([#b_set{op=succeeded,args=[Arg],dst=Dst}=I], + Ts0, Ds0, Fdb, Ls, D, Sub0, Acc) -> + case Ds0 of + #{ Arg := #b_set{op=call} } -> + %% The success check of a call is part of exception handling and + %% must not be optimized away. We still have to update its type + %% though. + Ts = update_types(I, Ts0, Ds0), + Ds = Ds0#{Dst=>I}, + + opt_is([], Ts, Ds, Fdb, Ls, D, Sub0, [I|Acc]); + #{} -> + Args = simplify_args([Arg], Sub0, Ts0), + Type = type(succeeded, Args, Ts0, Ds0), + case get_literal_from_type(Type) of + #b_literal{}=Lit -> + Sub = Sub0#{Dst=>Lit}, + opt_is([], Ts0, Ds0, Fdb, Ls, D, Sub, Acc); + none -> + Ts = Ts0#{Dst=>Type}, + Ds = Ds0#{Dst=>I}, + opt_is([], Ts, Ds, Fdb, Ls, D, Sub0, [I|Acc]) + end end; opt_is([#b_set{args=Args0,dst=Dst}=I0|Is], - Ts0, Ds0, Ls, Sub0, Acc) -> + Ts0, Ds0, Fdb, Ls, D, Sub0, Acc) -> Args = simplify_args(Args0, Sub0, Ts0), I1 = beam_ssa:normalize(I0#b_set{args=Args}), case simplify(I1, Ts0) of @@ -148,16 +325,76 @@ opt_is([#b_set{args=Args0,dst=Dst}=I0|Is], I = beam_ssa:normalize(I2), Ts = update_types(I, Ts0, Ds0), Ds = Ds0#{Dst=>I}, - opt_is(Is, Ts, Ds, Ls, Sub0, [I|Acc]); + opt_is(Is, Ts, Ds, Fdb, Ls, D, Sub0, [I|Acc]); #b_literal{}=Lit -> Sub = Sub0#{Dst=>Lit}, - opt_is(Is, Ts0, Ds0, Ls, Sub, Acc); + opt_is(Is, Ts0, Ds0, Fdb, Ls, D, Sub, Acc); #b_var{}=Var -> - Sub = Sub0#{Dst=>Var}, - opt_is(Is, Ts0, Ds0, Ls, Sub, Acc) + case Is of + [#b_set{op=succeeded,dst=SuccDst,args=[Dst]}] -> + %% We must remove this 'succeeded' instruction. + Sub = Sub0#{Dst=>Var,SuccDst=>#b_literal{val=true}}, + opt_is([], Ts0, Ds0, Fdb, Ls, D, Sub, Acc); + _ -> + Sub = Sub0#{Dst=>Var}, + opt_is(Is, Ts0, Ds0, Fdb, Ls, D, Sub, Acc) + end end; -opt_is([], Ts, Ds, _Ls, Sub, Acc) -> - {reverse(Acc),Ts,Ds,Sub}. +opt_is([], Ts, Ds, Fdb, _Ls, _D, Sub, Acc) -> + {reverse(Acc), Ts, Ds, Fdb, Sub}. + +opt_call(#b_set{dst=Dst,args=[#b_local{}=Callee|Args]}=I0, D, Ts0, Ds0, Fdb0) -> + {Ts, Ds, I} = opt_local_call(I0, Ts0, Ds0, Fdb0), + case Fdb0 of + #{ Callee := #func_info{exported=false,arg_types=ArgTypes0}=Info } -> + %% Update the argument types of *this exact call*, the types + %% will be joined later when the callee is optimized. + CallId = {D#d.func_id, Dst}, + ArgTypes = update_arg_types(Args, ArgTypes0, CallId, Ts0), + + Fdb = Fdb0#{ Callee => Info#func_info{arg_types=ArgTypes} }, + {Ts, Ds, Fdb, I}; + #{} -> + %% We can't narrow the argument types of exported functions as they + %% can receive anything as part of an external call. + {Ts, Ds, Fdb0, I} + end; +opt_call(#b_set{dst=Dst}=I, _D, Ts0, Ds0, Fdb) -> + Ts = update_types(I, Ts0, Ds0), + Ds = Ds0#{ Dst => I }, + {Ts, Ds, Fdb, I}. + +opt_local_call(#b_set{dst=Dst,args=[Id|_]}=I0, Ts0, Ds0, Fdb) -> + %% We skip propagating 'none' as we don't yet have a good way to cut a + %% block short. + Type = case Fdb of + #{ Id := #func_info{ret_type=[T]} } when T =/= none -> T; + #{} -> any + end, + I = case Type of + any -> I0; + _ -> beam_ssa:add_anno(result_type, validator_anno(Type), I0) + end, + Ts = Ts0#{ Dst => Type }, + Ds = Ds0#{ Dst => I }, + {Ts, Ds, I}. 
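To illustrate what opt_local_call/4 above makes possible (editorial sketch; module and function names hypothetical): g/0 is local and always returns the integer 42, so its ret_type narrows to an integer type, the call in f/1 can carry a result_type annotation, and the addition can be compiled knowing Y is an integer.

    -module(ret_demo).
    -export([f/1]).

    f(X) ->
        Y = g(),    %% Y's type comes from g/0's inferred ret_type
        X + Y.

    g() -> 42.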
+ +update_arg_types([Arg | Args], [TypeMap0 | TypeMaps], CallId, Ts) -> + %% Match contexts are treated as bitstrings when optimizing arguments, as + %% we don't yet support removing the "bs_start_match3" instruction. + NewType = case get_type(Arg, Ts) of + #t_bs_match{} -> {binary, 1}; + Type -> Type + end, + PrevType = maps:get(CallId, TypeMap0, NewType), + + %% The new type must be narrower than the old one. + true = meet(NewType, PrevType) =/= none, %Assertion. + + TypeMap = TypeMap0#{ CallId => NewType }, + [TypeMap | update_arg_types(Args, TypeMaps, CallId, Ts)]; +update_arg_types([], [], _CallId, _Ts) -> + []. simplify(#b_set{op={bif,'and'},args=Args}=I, Ts) -> case is_safe_bool_op(Args, Ts) of @@ -289,7 +526,7 @@ simplify(#b_set{op=put_tuple,args=Args}=I, _Ts) -> none -> I; List -> #b_literal{val=list_to_tuple(List)} end; -simplify(#b_set{op=succeeded,args=[#b_literal{}]}, _Ts) -> +simplify(#b_set{op=wait_timeout,args=[#b_literal{val=0}]}, _Ts) -> #b_literal{val=true}; simplify(#b_set{op=wait_timeout,args=[#b_literal{val=infinity}]}=I, _Ts) -> I#b_set{op=wait,args=[]}; @@ -436,12 +673,12 @@ update_successors(#b_br{bool=#b_var{}=Bool,succ=Succ,fail=Fail}, Ts0, D0) -> %% no need to include the type database passed on to the %% successors of this block. Ts = maps:remove(Bool, Ts0), - D = update_successor(Fail, Ts, D0), - SuccTs = infer_types(Bool, Ts, D0), + {SuccTs,FailTs} = infer_types(Bool, Ts, D0), + D = update_successor(Fail, FailTs, D0), update_successor(Succ, SuccTs, D); false -> - D = update_successor_bool(Bool, false, Fail, Ts0, D0), - SuccTs = infer_types(Bool, Ts0, D0), + {SuccTs,FailTs} = infer_types(Bool, Ts0, D0), + D = update_successor_bool(Bool, false, Fail, FailTs, D0), update_successor_bool(Bool, true, Succ, SuccTs, D) end; update_successors(#b_switch{arg=#b_var{}=V,fail=Fail,list=List}, Ts0, D0) -> @@ -458,14 +695,36 @@ update_successors(#b_switch{arg=#b_var{}=V,fail=Fail,list=List}, Ts0, D0) -> end, foldl(F, D, List); false -> - D = update_successor(Fail, Ts0, D0), + %% V can not be equal to any of the values in List at the fail + %% block. + FailTs = subtract_sw_list(V, List, Ts0), + D = update_successor(Fail, FailTs, D0), F = fun({Val,S}, A) -> T = get_type(Val, Ts0), update_successor(S, Ts0#{V=>T}, A) end, foldl(F, D, List) - end; -update_successors(#b_ret{}, _Ts, D) -> D. + end; +update_successors(#b_ret{arg=Arg}, Ts, D) -> + FuncId = D#d.func_id, + case D#d.ds of + #{ Arg := #b_set{op=call,args=[FuncId | _]} } -> + %% Returning a call to ourselves doesn't affect our own return + %% type. + D; + #{} -> + RetType = join([get_type(Arg, Ts) | D#d.ret_type]), + D#d{ret_type=[RetType]} + end. + +subtract_sw_list(V, List, Ts) -> + Ts#{ V := sub_sw_list_1(get_type(V, Ts), List, Ts) }. + +sub_sw_list_1(Type, [{Val,_}|T], Ts) -> + ValType = get_type(Val, Ts), + sub_sw_list_1(subtract(Type, ValType), T, Ts); +sub_sw_list_1(Type, [], _Ts) -> + Type. update_successor_bool(#b_var{}=Var, BoolValue, S, Ts, D) -> case t_is_boolean(get_type(Var, Ts)) of @@ -542,6 +801,14 @@ type(call, [#b_remote{mod=#b_literal{val=Mod}, {_,_} -> #t_tuple{} end; + {erlang,'++',[List1,List2]} -> + case get_type(List1, Ts) =:= cons orelse + get_type(List2, Ts) =:= cons of + true -> cons; + false -> list + end; + {erlang,'--',[_,_]} -> + list; {math,_,_} -> case is_math_bif(Name, length(Args)) of false -> any; @@ -607,6 +874,8 @@ type(succeeded, [#b_var{}=Src], Ts, Ds) -> #b_set{} -> t_boolean() end; +type(succeeded, [#b_literal{}], _Ts, _Ds) -> + t_atom(true); type(_, _, _, _) -> any. 
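A quick sanity check of the {erlang,'++'} typing rule added above: the result of A ++ B is nonempty (a cons) whenever either operand is known to be a cons, and otherwise only known to be a list (editorial example, plain OTP):

    -module(append_type_demo).
    -export([demo/0]).

    demo() ->
        [a] = [a] ++ [],    %% left operand is a cons, result is a cons
        [b] = [] ++ [b],    %% right operand is a cons, result is a cons
        []  = [] ++ [],     %% neither is known to be a cons: only 'list'
        ok.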
arith_op_type(Args, Ts) ->
@@ -873,10 +1142,84 @@ get_type(#b_literal{val=Val}, _Ts) ->
 any
 end.
 
-infer_types(#b_var{}=V, Ts, #d{ds=Ds}) ->
+%% infer_types(Var, Types, #d{}) -> {SuccTypes,FailTypes}
+%% Looking at the expression that defines the variable Var, infer
+%% the types for the variables in the arguments. Return the updated
+%% type database for the case that the expression evaluates to
+%% true, and for the case that it evaluates to false.
+%%
+%% Here is an example. The variable being asked about is
+%% the variable Bool, which is defined like this:
+%%
+%% Bool = is_nonempty_list L
+%%
+%% If 'is_nonempty_list L' evaluates to 'true', L
+%% must be cons. The meet of the previously known type of L and 'cons'
+%% will be added to SuccTypes.
+%%
+%% On the other hand, if 'is_nonempty_list L' evaluates to false, L
+%% is not cons and cons can be subtracted from the previously known
+%% type for L. For example, if L was known to be 'list', subtracting
+%% 'cons' would give 'nil' as the only possible type. The result of the
+%% subtraction for L will be added to FailTypes.
+%%
+%% Here is another example, asking about the variable Bool:
+%%
+%% Head = bif:hd L
+%% Bool = succeeded Head
+%%
+%% 'succeeded Head' will evaluate to 'true' if the instruction that
+%% defined Head succeeded. In this case, it is the 'bif:hd L'
+%% instruction, which will succeed if L is 'cons'. Thus, the meet of
+%% the previous type for L and 'cons' will be added to SuccTypes.
+%%
+%% If 'succeeded Head' evaluates to 'false', it means that 'bif:hd L'
+%% failed and that L is not 'cons'. 'cons' can be subtracted from the
+%% previously known type for L and the result put in FailTypes.
+
+infer_types(#b_var{}=V, Ts, #d{ds=Ds,once=Once}) ->
 #{V:=#b_set{op=Op,args=Args}} = Ds,
- Types = infer_type(Op, Args, Ds),
- meet_types(Types, Ts).
+ Types0 = infer_type(Op, Args, Ds),
+
+ %% We must be careful with types inferred from '=:='.
+ %%
+ %% If we have seen L =:= [a], we know that L is 'cons' if the
+ %% comparison succeeds. However, if the comparison fails, L could
+ %% still be 'cons'. Therefore, we must not subtract 'cons' from the
+ %% previous type of L.
+ %%
+ %% However, it is safe to subtract a type inferred from '=:=' if
+ %% it is single-valued, e.g. if it is [] or the atom 'true'.
+ EqTypes0 = infer_eq_type(Op, Args, Ts, Ds),
+ {Types1,EqTypes} = partition(fun({_,T}) ->
+ is_singleton_type(T)
+ end, EqTypes0),
+
+ %% Don't bother updating the types for variables that
+ %% are never used again.
+ Types2 = Types1 ++ Types0,
+ Types = [P || {InfV,_}=P <- Types2, not cerl_sets:is_element(InfV, Once)],
+
+ {meet_types(EqTypes++Types, Ts),subtract_types(Types, Ts)}.
+
+infer_eq_type({bif,'=:='}, [#b_var{}=Src,#b_literal{}=Lit], Ts, Ds) ->
+ Def = maps:get(Src, Ds),
+ Type = get_type(Lit, Ts),
+ [{Src,Type}|infer_tuple_size(Def, Lit) ++
+ infer_first_element(Def, Lit)];
+infer_eq_type({bif,'=:='}, [#b_var{}=Arg0,#b_var{}=Arg1], Ts, _Ds) ->
+ %% As an example, assume that L1 is known to be 'list', and L2 is
+ %% known to be 'cons'. Then if 'L1 =:= L2' evaluates to 'true', it can
+ %% be inferred that L1 is 'cons' (the meet of 'cons' and 'list').
+ Type0 = get_type(Arg0, Ts),
+ Type1 = get_type(Arg1, Ts),
+ Type = meet(Type0, Type1),
+ [{V,MeetType} ||
+ {V,OrigType,MeetType} <-
+ [{Arg0,Type0,Type},{Arg1,Type1,Type}],
+ OrigType =/= MeetType];
+infer_eq_type(_Op, _Args, _Ts, _Ds) ->
+ [].
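The restriction to single-valued types when subtracting '=:='-inferred types can be seen with a small counterexample (editorial, runnable): a failed comparison against the non-singleton cons literal [b] does not make L any less of a cons, so 'cons' must not be subtracted on the failure path.

    -module(eq_infer_demo).
    -export([demo/0]).

    demo() ->
        L = id([a]),                         %% L is a cons
        false = (L =:= [b]),                 %% compare with a cons literal fails,
        true = is_list(L) andalso L =/= [],  %% yet L is still a cons
        ok.

    id(X) -> X.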
infer_type({bif,element}, [#b_literal{val=Pos},#b_var{}=Tuple], _Ds) -> if @@ -885,20 +1228,27 @@ infer_type({bif,element}, [#b_literal{val=Pos},#b_var{}=Tuple], _Ds) -> true -> [] end; -infer_type({bif,'=:='}, [#b_var{}=Src,#b_literal{}=Lit], Ds) -> - Def = maps:get(Src, Ds), - Type = get_type(Lit, #{}), - [{Src,Type}|infer_tuple_size(Def, Lit) ++ - infer_first_element(Def, Lit)]; +infer_type({bif,element}, [#b_var{}=Position,#b_var{}=Tuple], _Ds) -> + [{Position,t_integer()},{Tuple,#t_tuple{}}]; infer_type({bif,Bif}, [#b_var{}=Src]=Args, _Ds) -> case inferred_bif_type(Bif, Args) of any -> []; T -> [{Src,T}] end; +infer_type({bif,binary_part}, [#b_var{}=Src,_], _Ds) -> + [{Src,{binary,8}}]; infer_type({bif,is_map_key}, [_,#b_var{}=Src], _Ds) -> [{Src,map}]; infer_type({bif,map_get}, [_,#b_var{}=Src], _Ds) -> [{Src,map}]; +infer_type({bif,Bif}, [_,_]=Args, _Ds) -> + case inferred_bif_type(Bif, Args) of + any -> []; + T -> [{A,T} || #b_var{}=A <- Args] + end; +infer_type({bif,binary_part}, [#b_var{}=Src,Pos,Len], _Ds) -> + [{Src,{binary,8}}| + [{V,t_integer()} || #b_var{}=V <- [Pos,Len]]]; infer_type(bs_start_match, [#b_var{}=Bin], _Ds) -> [{Bin,{binary,1}}]; infer_type(is_nonempty_list, [#b_var{}=Src], _Ds) -> @@ -969,6 +1319,7 @@ inferred_bif_type(is_number, [_]) -> number; inferred_bif_type(is_tuple, [_]) -> #t_tuple{}; inferred_bif_type(abs, [_]) -> number; inferred_bif_type(bit_size, [_]) -> {binary,1}; +inferred_bif_type('bnot', [_]) -> t_integer(); inferred_bif_type(byte_size, [_]) -> {binary,1}; inferred_bif_type(ceil, [_]) -> number; inferred_bif_type(float, [_]) -> number; @@ -976,10 +1327,25 @@ inferred_bif_type(floor, [_]) -> number; inferred_bif_type(hd, [_]) -> cons; inferred_bif_type(length, [_]) -> list; inferred_bif_type(map_size, [_]) -> map; +inferred_bif_type('not', [_]) -> t_boolean(); inferred_bif_type(round, [_]) -> number; inferred_bif_type(trunc, [_]) -> number; inferred_bif_type(tl, [_]) -> cons; inferred_bif_type(tuple_size, [_]) -> #t_tuple{}; +inferred_bif_type('and', [_,_]) -> t_boolean(); +inferred_bif_type('or', [_,_]) -> t_boolean(); +inferred_bif_type('xor', [_,_]) -> t_boolean(); +inferred_bif_type('band', [_,_]) -> t_integer(); +inferred_bif_type('bor', [_,_]) -> t_integer(); +inferred_bif_type('bsl', [_,_]) -> t_integer(); +inferred_bif_type('bsr', [_,_]) -> t_integer(); +inferred_bif_type('bxor', [_,_]) -> t_integer(); +inferred_bif_type('div', [_,_]) -> t_integer(); +inferred_bif_type('rem', [_,_]) -> t_integer(); +inferred_bif_type('+', [_,_]) -> number; +inferred_bif_type('-', [_,_]) -> number; +inferred_bif_type('*', [_,_]) -> number; +inferred_bif_type('/', [_,_]) -> number; inferred_bif_type(_, _) -> any. infer_tuple_size(#b_set{op={bif,tuple_size},args=[#b_var{}=Tuple]}, @@ -1088,6 +1454,9 @@ t_tuple_size(#t_tuple{size=Size,exact=true}) -> t_tuple_size(_) -> none. +is_singleton_type(Type) -> + get_literal_from_type(Type) =/= none. + %% join(Type1, Type2) -> Type %% Return the "join" of Type1 and Type2. The join is a more general %% type than Type1 and Type2. For example: @@ -1152,14 +1521,40 @@ gcd(A, B) -> meet_types([{V,T0}|Vs], Ts) -> #{V:=T1} = Ts, - T = meet(T0, T1), - meet_types(Vs, Ts#{V:=T}); + case meet(T0, T1) of + T1 -> meet_types(Vs, Ts); + T -> meet_types(Vs, Ts#{V:=T}) + end; meet_types([], Ts) -> Ts. meet([T1,T2|Ts]) -> meet([meet(T1, T2)|Ts]); meet([T]) -> T. 
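The new two-argument clauses of inferred_bif_type/2 encode facts like the one below: if a 'band' succeeds, both operands must have been integers, so both can be narrowed on the success path (editorial example; id/1 only hides the values from compile-time evaluation):

    -module(bif_infer_demo).
    -export([demo/0]).

    demo() ->
        X = id(5),
        Y = id(3),
        _ = X band Y,   %% succeeding implies X and Y are integers
        true = is_integer(X) andalso is_integer(Y),
        ok.

    id(I) -> I.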
+subtract_types([{V,T0}|Vs], Ts) -> + #{V:=T1} = Ts, + case subtract(T1, T0) of + T1 -> subtract_types(Vs, Ts); + T -> subtract_types(Vs, Ts#{V:=T}) + end; +subtract_types([], Ts) -> Ts. + +%% subtract(Type1, Type2) -> Type. +%% Subtract Type2 from Type1. Example: +%% +%% subtract(list, cons) -> nil + +subtract(#t_atom{elements=[_|_]=Set0}, #t_atom{elements=[_|_]=Set1}) -> + case ordsets:subtract(Set0, Set1) of + [] -> none; + [_|_]=Set -> #t_atom{elements=Set} + end; +subtract(number, float) -> #t_integer{}; +subtract(number, #t_integer{elements=any}) -> float; +subtract(list, cons) -> nil; +subtract(list, nil) -> cons; +subtract(T, _) -> T. + %% meet(Type1, Type2) -> Type %% Return the "meet" of Type1 and Type2. The meet is a narrower %% type than Type1 and Type2. For example: diff --git a/lib/compiler/src/beam_trim.erl b/lib/compiler/src/beam_trim.erl index 51ff580a7a..acf3838da4 100644 --- a/lib/compiler/src/beam_trim.erl +++ b/lib/compiler/src/beam_trim.erl @@ -200,6 +200,8 @@ create_map(Trim, Moves) -> (Any) -> Any end. +remap([{'%',_}=I|Is], Map, Acc) -> + remap(Is, Map, [I|Acc]); remap([{block,Bl0}|Is], Map, Acc) -> Bl = remap_block(Bl0, Map, []), remap(Is, Map, [{block,Bl}|Acc]); @@ -279,6 +281,8 @@ safe_labels([_|Is], Acc) -> safe_labels(Is, Acc); safe_labels([], Acc) -> cerl_sets:from_list(Acc). +is_safe_label([{'%',_}|Is]) -> + is_safe_label(Is); is_safe_label([{line,_}|Is]) -> is_safe_label(Is); is_safe_label([{badmatch,{Tag,_}}|_]) -> @@ -337,6 +341,8 @@ frame_layout_2(Is) -> reverse(Is). %% to safe labels (i.e., the code at those labels don't depend %% on the contents of any Y register). +frame_size([{'%',_}|Is], Safe) -> + frame_size(Is, Safe); frame_size([{block,_}|Is], Safe) -> frame_size(Is, Safe); frame_size([{call_fun,_}|Is], Safe) -> @@ -393,6 +399,8 @@ frame_size_branch(L, Is, Safe) -> %% This function handles the same instructions as frame_size/2. It %% assumes that any labels in the instructions are safe labels. +is_not_used(Y, [{'%',_}|Is]) -> + is_not_used(Y, Is); is_not_used(Y, [{apply,_}|Is]) -> is_not_used(Y, Is); is_not_used(Y, [{bif,_,{f,_},Ss,Dst}|Is]) -> diff --git a/lib/compiler/src/beam_validator.erl b/lib/compiler/src/beam_validator.erl index 7d908df3bf..4081e366a5 100644 --- a/lib/compiler/src/beam_validator.erl +++ b/lib/compiler/src/beam_validator.erl @@ -26,6 +26,7 @@ %% Interface for compiler. -export([module/2, format_error/1]). +-export([type_anno/1, type_anno/2, type_anno/3]). -import(lists, [any/2,dropwhile/2,foldl/3,map/2,foreach/2,reverse/1]). @@ -44,6 +45,33 @@ module({Mod,Exp,Attr,Fs,Lc}=Code, _Opts) {error,[{atom_to_list(Mod),Es}]} end. +%% Provides a stable interface for type annotations, used by certain passes to +%% indicate that we can safely assume that a register has a given type. +-spec type_anno(term()) -> term(). +type_anno(atom) -> {atom,[]}; +type_anno(bool) -> bool; +type_anno({binary,_}) -> term; +type_anno(cons) -> cons; +type_anno(float) -> {float,[]}; +type_anno(integer) -> {integer,[]}; +type_anno(list) -> list; +type_anno(map) -> map; +type_anno(match_context) -> match_context; +type_anno(number) -> number; +type_anno(nil) -> nil. + +-spec type_anno(term(), term()) -> term(). +type_anno(atom, Value) -> {atom, Value}; +type_anno(float, Value) -> {float, Value}; +type_anno(integer, Value) -> {integer, Value}. + +-spec type_anno(term(), term(), term()) -> term(). +type_anno(tuple, Size, Exact) when is_integer(Size) -> + case Exact of + true -> {tuple, Size}; + false -> {tuple, [Size]} + end. 
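Given the clauses above, the stable annotation interface behaves as follows in the shell (assuming a compiler built with this patch):

    1> beam_validator:type_anno(integer, 42).
    {integer,42}
    2> beam_validator:type_anno(atom, ok).
    {atom,ok}
    3> beam_validator:type_anno(tuple, 3, false).
    {tuple,[3]}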
+ -spec format_error(term()) -> iolist(). format_error({{_M,F,A},{I,Off,limit}}) -> @@ -93,28 +121,6 @@ validate(Module, Fs) -> Ft = index_parameter_types(Fs, []), validate_0(Module, Fs, Ft). -index_parameter_types([{function,_,_,Entry,Code0}|Fs], Acc0) -> - Code = dropwhile(fun({label,L}) when L =:= Entry -> false; - (_) -> true - end, Code0), - case Code of - [{label,Entry}|Is] -> - Acc = index_parameter_types_1(Is, Entry, Acc0), - index_parameter_types(Fs, Acc); - _ -> - %% Something serious is wrong. Ignore it for now. - %% It will be detected and diagnosed later. - index_parameter_types(Fs, Acc0) - end; -index_parameter_types([], Acc) -> - gb_trees:from_orddict(lists:sort(Acc)). - -index_parameter_types_1([{'%', {type_info, Reg, Type}} | Is], Entry, Acc) -> - Key = {Entry, Reg}, - index_parameter_types_1(Is, Entry, [{Key, Type} | Acc]); -index_parameter_types_1(_, _, Acc) -> - Acc. - validate_0(_Module, [], _) -> []; validate_0(Module, [{function,Name,Ar,Entry,Code}|Fs], Ft) -> try validate_1(Code, Name, Ar, Entry, Ft) of @@ -167,6 +173,32 @@ validate_0(Module, [{function,Name,Ar,Entry,Code}|Fs], Ft) -> slots=0 :: non_neg_integer() %Number of slots }). +index_parameter_types([{function,_,_,Entry,Code0}|Fs], Acc0) -> + Code = dropwhile(fun({label,L}) when L =:= Entry -> false; + (_) -> true + end, Code0), + case Code of + [{label,Entry}|Is] -> + Acc = index_parameter_types_1(Is, Entry, Acc0), + index_parameter_types(Fs, Acc); + _ -> + %% Something serious is wrong. Ignore it for now. + %% It will be detected and diagnosed later. + index_parameter_types(Fs, Acc0) + end; +index_parameter_types([], Acc) -> + gb_trees:from_orddict(lists:sort(Acc)). + +index_parameter_types_1([{'%', {type_info, Reg, Type0}} | Is], Entry, Acc) -> + Type = case Type0 of + match_context -> #ms{}; + _ -> Type0 + end, + Key = {Entry, Reg}, + index_parameter_types_1(Is, Entry, [{Key, Type} | Acc]); +index_parameter_types_1(_, _, Acc) -> + Acc. + validate_1(Is, Name, Arity, Entry, Ft) -> validate_2(labels(Is), Name, Arity, Entry, Ft). @@ -386,25 +418,19 @@ valfun_1(remove_message, Vst) -> %% The message term is no longer fragile. It can be used %% without restrictions. remove_fragility(Vst); -valfun_1({'%', {type_info, Reg, Info0}}, Vst0) -> +valfun_1({'%', {type_info, Reg, match_context}}, Vst0) -> + set_aliased_type(#ms{}, Reg, Vst0); +valfun_1({'%', {type_info, Reg, NewType0}}, Vst0) -> %% Explicit type information inserted by optimization passes to indicate %% that Reg has a certain type, so that we can accept cross-function type %% optimizations. - %% - %% At the moment we only allow this when narrowing from 'term' which is - %% what to expect with function parameters, but in theory any narrowing - %% conversion should be legal. 
- case get_move_term_type(Reg, Vst0) of - term -> - Type0 = case Info0 of - match_context -> #ms{}; - _ -> Info0 - end, - Type = propagate_fragility(Type0, [Reg], Vst0), - set_type_reg(Type, Reg, Vst0); - _ -> - error(bad_type_info) - end; + OldType = get_durable_term_type(Reg, Vst0), + NewType = case meet(NewType0, OldType) of + none -> error({bad_type_info, Reg, NewType0, OldType}); + T -> T + end, + Type = propagate_fragility(NewType, [Reg], Vst0), + set_aliased_type(Type, Reg, Vst0); valfun_1({'%',_}, Vst) -> Vst; valfun_1({line,_}, Vst) -> @@ -643,7 +669,12 @@ valfun_4({gc_bif,Op,{f,Fail},Live,Src,Dst}, #vst{current=St0}=Vst0) -> Vst1 = Vst0#vst{current=St}, Vst2 = branch_state(Fail, Vst1), Vst3 = prune_x_regs(Live, Vst2), + SrcType = get_term_type(hd(Src), Vst3), Vst = case Op of + length when SrcType =/= cons, SrcType =/= nil -> + %% If we already know we have a cons cell or nil, it + %% shouldn't be demoted to list. + set_type(list, hd(Src), Vst3); map_size -> set_type(map, hd(Src), Vst3); _ -> @@ -786,6 +817,12 @@ valfun_4({bs_set_position, Ctx, Pos}, Vst) -> Vst; %% Other test instructions. +valfun_4({test,is_atom,{f,Lbl},[Src]}, Vst) -> + assert_term(Src, Vst), + set_aliased_type({atom,[]}, Src, branch_state(Lbl, Vst)); +valfun_4({test,is_boolean,{f,Lbl},[Src]}, Vst) -> + assert_term(Src, Vst), + set_aliased_type(bool, Src, branch_state(Lbl, Vst)); valfun_4({test,is_float,{f,Lbl},[Float]}, Vst) -> assert_term(Float, Vst), set_type({float,[]}, Float, branch_state(Lbl, Vst)); @@ -793,6 +830,9 @@ valfun_4({test,is_tuple,{f,Lbl},[Tuple]}, Vst) -> Type0 = get_term_type(Tuple, Vst), Type = upgrade_tuple_type({tuple,[0]}, Type0), set_aliased_type(Type, Tuple, branch_state(Lbl, Vst)); +valfun_4({test,is_integer,{f,Lbl},[Src]}, Vst) -> + assert_term(Src, Vst), + set_aliased_type({integer,[]}, Src, branch_state(Lbl, Vst)); valfun_4({test,is_nonempty_list,{f,Lbl},[Cons]}, Vst) -> assert_term(Cons, Vst), Type = cons, @@ -809,6 +849,14 @@ valfun_4({test,has_map_fields,{f,Lbl},Src,{list,List}}, Vst) -> assert_type(map, Src, Vst), assert_unique_map_keys(List), branch_state(Lbl, Vst); +valfun_4({test,is_list,{f,Lbl},[Src]}, Vst) -> + validate_src([Src], Vst), + Type = case get_term_type(Src, Vst) of + cons -> cons; + nil -> nil; + _ -> list + end, + set_aliased_type(Type, Src, branch_state(Lbl, Vst)); valfun_4({test,is_map,{f,Lbl},[Src]}, Vst0) -> Vst = branch_state(Lbl, Vst0), case Src of @@ -820,20 +868,28 @@ valfun_4({test,is_map,{f,Lbl},[Src]}, Vst0) -> _ -> kill_state(Vst0) end; +valfun_4({test,is_nil,{f,Lbl},[Src]}, Vst0) -> + Vst = case get_term_type(Src, Vst0) of + list -> + branch_state(Lbl, set_aliased_type(cons, Src, Vst0)); + _ -> + branch_state(Lbl, Vst0) + end, + set_aliased_type(nil, Src, Vst); valfun_4({test,is_eq_exact,{f,Lbl},[Src,Val]=Ss}, Vst0) -> validate_src(Ss, Vst0), Infer = infer_types(Src, Vst0), Vst1 = Infer(Val, Vst0), - Vst = branch_state(Lbl, Vst1), - case Val of - {literal,Tuple} when is_tuple(Tuple) -> - Type0 = get_term_type(Val, Vst), - Type = upgrade_tuple_type({tuple,tuple_size(Tuple)}, - Type0), - set_aliased_type(Type, Src, Vst); - _ -> - Vst - end; + Vst2 = upgrade_ne_types(Src, Val, Vst1), + Vst3 = branch_state(Lbl, Vst2), + Vst = Vst3#vst{current=Vst1#vst.current}, + upgrade_eq_types(Src, Val, Vst); +valfun_4({test,is_ne_exact,{f,Lbl},[Src,Val]=Ss}, Vst0) -> + validate_src(Ss, Vst0), + Vst1 = upgrade_eq_types(Src, Val, Vst0), + Vst2 = branch_state(Lbl, Vst1), + Vst = Vst2#vst{current=Vst0#vst.current}, + upgrade_ne_types(Src, Val, Vst); 
valfun_4({test,_Op,{f,Lbl},Src}, Vst) -> validate_src(Src, Vst), branch_state(Lbl, Vst); @@ -920,6 +976,25 @@ valfun_4({get_map_elements,{f,Fail},Src,{list,List}}, Vst) -> valfun_4(_, _) -> error(unknown_instruction). +upgrade_ne_types(Src1, Src2, Vst0) -> + T1 = get_durable_term_type(Src1, Vst0), + T2 = get_durable_term_type(Src2, Vst0), + Type = subtract(T1, T2), + set_aliased_type(Type, Src1, Vst0). + +upgrade_eq_types(Src1, Src2, Vst0) -> + T1 = get_durable_term_type(Src1, Vst0), + T2 = get_durable_term_type(Src2, Vst0), + Meet = meet(T1, T2), + Vst = case T1 =/= Meet of + true -> set_aliased_type(Meet, Src1, Vst0); + false -> Vst0 + end, + case T2 =/= Meet of + true -> set_aliased_type(Meet, Src2, Vst); + false -> Vst + end. + verify_get_map(Fail, Src, List, Vst0) -> assert_not_literal(Src), %OTP 22. assert_type(map, Src, Vst0), @@ -1046,39 +1121,58 @@ verify_call_args_1(N, Vst) -> verify_call_args_1(X, Vst). verify_local_call(Lbl, Live, Vst) -> - F = fun({R, _Ctx}) -> - verify_call_match_context(Lbl, R, Vst) - end, - MsRegs = all_ms_in_x_regs(Live, Vst), - verify_no_ms_aliases(MsRegs), - foreach(F, MsRegs). + F = fun({R, Type}) -> + verify_arg_type(Lbl, R, Type, Vst) + end, + TRegs = typed_call_regs(Live, Vst), + verify_no_ms_aliases(TRegs), + foreach(F, TRegs). -all_ms_in_x_regs(0, _Vst) -> +typed_call_regs(0, _Vst) -> []; -all_ms_in_x_regs(Live0, Vst) -> +typed_call_regs(Live0, Vst) -> Live = Live0 - 1, R = {x,Live}, - case get_move_term_type(R, Vst) of - #ms{}=M -> [{R,M} | all_ms_in_x_regs(Live, Vst)]; - _ -> all_ms_in_x_regs(Live, Vst) - end. + [{R, get_move_term_type(R, Vst)} | typed_call_regs(Live, Vst)]. %% Verifies that the same match context isn't present twice. -verify_no_ms_aliases(MsRegs) -> - CtxIds = [Id || {_, #ms{id=Id}} <- MsRegs], +verify_no_ms_aliases(Regs) -> + CtxIds = [Id || {_, #ms{id=Id}} <- Regs], UniqueCtxIds = ordsets:from_list(CtxIds), if length(UniqueCtxIds) < length(CtxIds) -> - error({multiple_match_contexts, MsRegs}); + error({multiple_match_contexts, Regs}); length(UniqueCtxIds) =:= length(CtxIds) -> ok end. -%% Verifies that the target label accepts match contexts in the given register. -verify_call_match_context(Lbl, Ctx, #vst{ft=Ft}) -> - case gb_trees:lookup({Lbl, Ctx}, Ft) of - {value, match_context} -> ok; - none -> error(no_bs_start_match2) +%% Verifies that the given argument narrows to what the function expects. +verify_arg_type(Lbl, Reg, #ms{}, #vst{ft=Ft}) -> + %% Match contexts require explicit support, and may not be passed to a + %% function that accepts arbitrary terms. + case gb_trees:lookup({Lbl, Reg}, Ft) of + {value, #ms{}} -> ok; + _ -> error(no_bs_start_match2) + end; +verify_arg_type(Lbl, Reg, GivenType, #vst{ft=Ft}) -> + case gb_trees:lookup({Lbl, Reg}, Ft) of + {value, bool} when GivenType =:= {atom, true}; + GivenType =:= {atom, false}; + GivenType =:= {atom, []} -> + %% We don't yet support upgrading true/false to bool, so we + %% assume unknown atoms can be bools when validating calls. + ok; + {value, #ms{}} -> + %% Functions that accept match contexts also accept all other + %% terms. This will change once we support union types. + ok; + {value, RequiredType} -> + case meet(GivenType, RequiredType) of + none -> error({bad_arg_type, Reg, GivenType, RequiredType}); + _ -> ok + end; + none -> + ok end. allocate(Zero, Stk, Heap, Live, #vst{current=#st{numy=none}}=Vst0) -> @@ -1298,27 +1392,27 @@ bsm_restore(Reg, SavePoint, Vst) -> _ -> error({illegal_restore,SavePoint,range}) end. 
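A minimal sketch of what upgrade_eq_types/3 above buys on the success edge of is_eq_exact (editorial; the one-clause meet below is a simplified stand-in for the validator's meet/2):

    -module(eq_meet_demo).
    -export([demo/0]).

    %% Simplified meet over a few of the validator's type tags.
    meet(T, T) -> T;
    meet(term, T) -> T;
    meet(T, term) -> T;
    meet(number, {integer,_}=T) -> T;
    meet({integer,_}=T, number) -> T;
    meet(_, _) -> none.

    demo() ->
        %% If X =:= Y succeeds with X known as 'number' and Y as {integer,[]},
        %% both sides can be narrowed to the meet afterwards.
        {integer,[]} = meet(number, {integer,[]}),
        ok.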
- select_val_branches(Src, Choices, Vst) -> Infer = infer_types(Src, Vst),
- select_val_branches_1(Choices, Infer, Vst).
+ select_val_branches_1(Choices, Src, Infer, Vst).
-select_val_branches_1([Val,{f,L}|T], Infer, Vst0) ->
- Vst = branch_state(L, Infer(Val, Vst0)),
- select_val_branches_1(T, Infer, Vst);
-select_val_branches_1([], _, Vst) -> Vst.
+select_val_branches_1([Val,{f,L}|T], Src, Infer, Vst0) ->
+ Vst1 = set_aliased_type(Val, Src, Infer(Val, Vst0)),
+ Vst = branch_state(L, Vst1),
+ select_val_branches_1(T, Src, Infer, Vst);
+select_val_branches_1([], _, _, Vst) -> Vst.
infer_types(Src, Vst) -> case get_def(Src, Vst) of {bif,is_map,{f,_},[Map],_} ->
- fun({atom,true}, S) -> set_type_reg(map, Map, S);
+ fun({atom,true}, S) -> set_aliased_type(map, Map, S);
(_, S) -> S end; {bif,tuple_size,{f,_},[Tuple],_} -> fun({integer,Arity}, S) -> Type0 = get_term_type(Tuple, S), Type = upgrade_tuple_type({tuple,Arity}, Type0),
- set_type(Type, Tuple, S);
+ set_aliased_type(Type, Tuple, S);
(_, S) -> S end; {bif,'=:=',{f,_},[ArityReg,{integer,_}=Val],_} when ArityReg =/= Src ->
@@ -1366,8 +1460,10 @@ kill_aliases(Reg, #st{aliases=Aliases0}=St) -> St end.
-set_type(Type, {x,_}=Reg, Vst) -> set_type_reg(Type, Reg, Vst);
-set_type(Type, {y,_}=Reg, Vst) -> set_type_y(Type, Reg, Vst);
+set_type(Type, {x,_}=Reg, Vst) ->
+ set_type_reg(Type, Reg, Reg, Vst);
+set_type(Type, {y,_}=Reg, Vst) ->
+ set_type_reg(Type, Reg, Reg, Vst);
set_type(_, _, #vst{}=Vst) -> Vst. set_type_reg(Type, Src, Dst, Vst) ->
@@ -1507,12 +1603,16 @@ assert_not_literal(Literal) -> error({literal_not_allowed,Literal}). %% %% term Any valid Erlang (but not of the special types above). %%
+%% binary Binary or bitstring.
+%%
%% bool The atom 'true' or the atom 'false'. %% %% cons Cons cell: [_|_] %% %% nil Empty list: [] %%
+%% list List: [] or [_|_]
+%%
%% {tuple,[Sz]} Tuple. An element has been accessed using %% element/2 or setelement/3 so that it is known that %% the type is a tuple of size at least Sz.
@@ -1533,7 +1633,7 @@ assert_not_literal(Literal) -> error({literal_not_allowed,Literal}). %% %% map Map. %%
-%%
+%% none A conflict in types. There will be an exception at runtime.
%% %% FRAGILITY %% ---------
@@ -1546,6 +1646,51 @@ assert_not_literal(Literal) -> error({literal_not_allowed,Literal}). %% Such terms are wrapped in a {fragile,Type} tuple, where Type is one %% of the types described above.
+%% meet(Type1, Type2) -> Type
+%% Return the meet of two types. The meet is a more specific type.
+%% It will be 'none' if the types are in conflict.
+
+meet(Same, Same) ->
+ Same;
+meet(term, Other) ->
+ Other;
+meet(Other, term) ->
+ Other;
+meet(T1, T2) ->
+ case {erlang:min(T1, T2),erlang:max(T1, T2)} of
+ {{atom,_}=A,{atom,[]}} -> A;
+ {bool,{atom,B}=Atom} when is_boolean(B) -> Atom;
+ {bool,{atom,[]}} -> bool;
+ {cons,list} -> cons;
+ {{float,_}=T,{float,[]}} -> T;
+ {{integer,_}=T,{integer,[]}} -> T;
+ {list,nil} -> nil;
+ {number,{integer,_}=T} -> T;
+ {number,{float,_}=T} -> T;
+ {{tuple,Size1},{tuple,Size2}} ->
+ case {Size1,Size2} of
+ {[Sz1],[Sz2]} ->
+ {tuple,[erlang:max(Sz1, Sz2)]};
+ {Sz1,[Sz2]} when Sz2 =< Sz1 ->
+ {tuple,Sz1};
+ {_,_} ->
+ none
+ end;
+ {_,_} -> none
+ end.
+
+%% subtract(Type1, Type2) -> Type
+%% Subtract Type2 from Type1.
Example: +%% subtract(list, nil) -> cons + +subtract(list, nil) -> cons; +subtract(list, cons) -> nil; +subtract(number, {integer,[]}) -> {float,[]}; +subtract(number, {float,[]}) -> {integer,[]}; +subtract(bool, {atom,false}) -> {atom, true}; +subtract(bool, {atom,true}) -> {atom, false}; +subtract(Type, _) -> Type. + assert_type(WantedType, Term, Vst) -> case get_term_type(Term, Vst) of {fragile,Type} -> @@ -1579,25 +1724,27 @@ assert_type(Needed, Actual) -> %% be executed at run-time. upgrade_tuple_type(NewType, {fragile,OldType}) -> - make_fragile(upgrade_tuple_type_1(NewType, OldType)); + Type = upgrade_tuple_type_1(NewType, OldType), + make_fragile(Type); upgrade_tuple_type(NewType, OldType) -> upgrade_tuple_type_1(NewType, OldType). -upgrade_tuple_type_1({tuple,[Sz]}, {tuple,[OldSz]}=T) when Sz < OldSz -> - %% The old type has a higher value for the least tuple size. - T; -upgrade_tuple_type_1({tuple,[Sz]}, {tuple,OldSz}=T) - when is_integer(Sz), is_integer(OldSz), Sz =< OldSz -> - %% The old size is exact, and the new size is smaller than the old size. - T; -upgrade_tuple_type_1({tuple,_}=T, _) -> - %% The new type information is exact or has a higher value for - %% the least tuple size. - %% Note that inconsistencies are also handled in this - %% clause, e.g. if the old type was an integer or a tuple accessed - %% outside its size; inconsistences will generally cause an exception - %% at run-time but are safe from our point of view. - T. +upgrade_tuple_type_1(NewType, OldType) -> + case meet(NewType, OldType) of + none -> + %% Unoptimized code may look like this: + %% + %% {test,is_list,Fail,[Reg]}. + %% {test,is_tuple,Fail,[Reg]}. + %% {test,test_arity,Fail,[Reg,5]}. + %% + %% Note that the test_arity instruction can never be reached. + %% To make sure it's not rejected, set the type of Reg to + %% NewType instead of 'none'. + NewType; + Type -> + Type + end. get_tuple_size({integer,[]}) -> 0; get_tuple_size({integer,Sz}) -> Sz; @@ -1606,6 +1753,17 @@ get_tuple_size(_) -> 0. validate_src(Ss, Vst) when is_list(Ss) -> foreach(fun(S) -> get_term_type(S, Vst) end, Ss). +%% get_durable_term_type(Src, ValidatorState) -> Type +%% Get the type of the source Src. The returned type Type will be +%% a standard Erlang type (no catch/try tags or match contexts). +%% Fragility will be stripped. + +get_durable_term_type(Src, Vst) -> + case get_term_type(Src, Vst) of + {fragile,Type} -> Type; + Type -> Type + end. + %% get_move_term_type(Src, ValidatorState) -> Type %% Get the type of the source Src. The returned type Type will be %% a standard Erlang type (no catch/try tags). Match contexts are OK. @@ -1639,6 +1797,8 @@ get_term_type_1(nil=T, _) -> T; get_term_type_1({atom,A}=T, _) when is_atom(A) -> T; get_term_type_1({float,F}=T, _) when is_float(F) -> T; get_term_type_1({integer,I}=T, _) when is_integer(I) -> T; +get_term_type_1({literal,[_|_]}, _) -> cons; +get_term_type_1({literal,Bitstring}, _) when is_bitstring(Bitstring) -> binary; get_term_type_1({literal,Map}, _) when is_map(Map) -> map; get_term_type_1({literal,Tuple}, _) when is_tuple(Tuple) -> {tuple,tuple_size(Tuple)}; @@ -1744,7 +1904,7 @@ merge_regs_1([{R1,_}|Rs1], [{R2,_}|_]=Rs2) when R1 < R2 -> merge_regs_1([{R1,_}|_]=Rs1, [{R2,_}|Rs2]) when R1 > R2 -> merge_regs_1(Rs1, Rs2); merge_regs_1([{R,Type1}|Rs1], [{R,Type2}|Rs2]) -> - [{R,merge_types(Type1, Type2)}|merge_regs_1(Rs1, Rs2)]; + [{R,join(Type1, Type2)}|merge_regs_1(Rs1, Rs2)]; merge_regs_1([], []) -> []; merge_regs_1([], [_|_]) -> []; merge_regs_1([_|_], []) -> []. 
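The new meet/2 and subtract/2 shown above are the core of the validator's type narrowing. As an illustration only (the module name lattice_demo is made up; the real clauses live in beam_validator.erl and handle more cases), the following standalone sketch copies a few of the clauses from this patch so the lattice operations can be tried in isolation:

-module(lattice_demo).
-export([demo/0]).

%% A subset of the meet/2 clauses from the patch. Atoms such as
%% 'list', 'cons' and 'nil' stand for the validator's type tags.
meet(Same, Same) -> Same;
meet(term, Other) -> Other;
meet(Other, term) -> Other;
meet(T1, T2) ->
    case {erlang:min(T1, T2),erlang:max(T1, T2)} of
        {bool,{atom,B}=Atom} when is_boolean(B) -> Atom;
        {cons,list} -> cons;
        {list,nil} -> nil;
        {_,_} -> none                        %% conflicting types
    end.

%% A subset of the subtract/2 clauses from the patch.
subtract(list, nil) -> cons;
subtract(list, cons) -> nil;
subtract(bool, {atom,false}) -> {atom,true};
subtract(bool, {atom,true}) -> {atom,false};
subtract(Type, _) -> Type.

demo() ->
    cons = meet(list, cons),                 %% is_nonempty_list succeeded
    nil = meet(list, nil),                   %% is_nil succeeded
    none = meet(nil, cons),                  %% cannot be both; dead code
    cons = subtract(list, nil),              %% is_nil failed on a list
    {atom,true} = subtract(bool, {atom,false}),
    ok.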
@@ -1763,63 +1923,90 @@ merge_y_regs_1(Y, S, Regs0) when Y >= 0 -> Type0 -> merge_y_regs_1(Y-1, S, Regs0); Type1 ->
- Type = merge_types(Type0, Type1),
+ Type = join(Type0, Type1),
Regs = gb_trees:update(Y, Type, Regs0), merge_y_regs_1(Y-1, S, Regs) end; merge_y_regs_1(_, _, Regs) -> Regs.
-%% merge_types(Type1, Type2) -> Type
+%% join(Type1, Type2) -> Type
%% Return the most specific type possible. %% Note: Type1 must NOT be the same as Type2.
-merge_types({fragile,Same}=Type, Same) ->
+join({literal,_}=T1, T2) ->
+ join_literal(T1, T2);
+join(T1, {literal,_}=T2) ->
+ join_literal(T2, T1);
+join({fragile,Same}=Type, Same) ->
Type;
-merge_types({fragile,T1}, T2) ->
- make_fragile(merge_types(T1, T2));
-merge_types(Same, {fragile,Same}=Type) ->
+join({fragile,T1}, T2) ->
+ make_fragile(join(T1, T2));
+join(Same, {fragile,Same}=Type) ->
Type;
-merge_types(T1, {fragile,T2}) ->
- make_fragile(merge_types(T1, T2));
-merge_types(uninitialized=I, _) -> I;
-merge_types(_, uninitialized=I) -> I;
-merge_types(initialized=I, _) -> I;
-merge_types(_, initialized=I) -> I;
-merge_types({catchtag,T0},{catchtag,T1}) ->
+join(T1, {fragile,T2}) ->
+ make_fragile(join(T1, T2));
+join(uninitialized=I, _) -> I;
+join(_, uninitialized=I) -> I;
+join(initialized=I, _) -> I;
+join(_, initialized=I) -> I;
+join({catchtag,T0},{catchtag,T1}) ->
{catchtag,ordsets:from_list(T0++T1)};
-merge_types({trytag,T0},{trytag,T1}) ->
+join({trytag,T0},{trytag,T1}) ->
{trytag,ordsets:from_list(T0++T1)};
-merge_types({tuple,A}, {tuple,B}) ->
+join({tuple,A}, {tuple,B}) ->
{tuple,[min(tuple_sz(A), tuple_sz(B))]};
-merge_types({Type,A}, {Type,B})
+join({Type,A}, {Type,B})
when Type =:= atom; Type =:= integer; Type =:= float -> if A =:= B -> {Type,A}; true -> {Type,[]} end;
-merge_types({Type,_}, number)
+join({Type,_}, number)
when Type =:= integer; Type =:= float -> number;
-merge_types(number, {Type,_})
+join(number, {Type,_})
when Type =:= integer; Type =:= float -> number;
-merge_types(bool, {atom,A}) ->
+join(bool, {atom,A}) ->
merge_bool(A);
-merge_types({atom,A}, bool) ->
+join({atom,A}, bool) ->
merge_bool(A);
-merge_types(cons, {literal,[_|_]}) ->
- cons;
-merge_types({literal,[_|_]}, cons) ->
- cons;
-merge_types({literal,[_|_]}, {literal,[_|_]}) ->
- cons;
-merge_types(#ms{id=Id1,valid=B1,slots=Slots1},
+join({atom,_}, {atom,_}) ->
+ {atom,[]};
+join(#ms{id=Id1,valid=B1,slots=Slots1},
#ms{id=Id2,valid=B2,slots=Slots2}) -> Id = if Id1 =:= Id2 -> Id1; true -> make_ref() end, #ms{id=Id,valid=B1 band B2,slots=min(Slots1, Slots2)};
-merge_types(T1, T2) when T1 =/= T2 ->
- %% Too different. All we know is that the type is a 'term'.
+join(T1, T2) when T1 =/= T2 ->
+ %% We've exhausted all other options, so the type must either be a list or
+ %% a 'term'.
+ join_list(T1, T2).
+
+%% Merges types of literals. Note that the left argument must either be a
+%% literal or exactly equal to the second argument.
+join_literal(Same, Same) ->
+ Same;
+join_literal({literal,[_|_]}, T) ->
+ join_literal(T, cons);
+join_literal({literal,#{}}, T) ->
+ join_literal(T, map);
+join_literal({literal,Tuple}, T) when is_tuple(Tuple) ->
+ join_literal(T, {tuple, tuple_size(Tuple)});
+join_literal({literal,_}, T) ->
+ %% Bitstring, fun, or similar.
+ join_literal(T, term);
+join_literal(T1, T2) ->
+ %% We're done extracting the types, try merging them again.
+ join(T1, T2).
+ +join_list(nil, cons) -> list; +join_list(nil, list) -> list; +join_list(cons, list) -> list; +join_list(T, nil) -> join_list(nil, T); +join_list(T, cons) -> join_list(cons, T); +join_list(_, _) -> + %% Not a list, so it must be a term. term. tuple_sz([Sz]) -> Sz; @@ -1930,6 +2117,9 @@ bif_type(abs, [Num], Vst) -> end; bif_type(float, _, _) -> {float,[]}; bif_type('/', _, _) -> {float,[]}; +%% Binary operations +bif_type('byte_size', _, _) -> {integer,[]}; +bif_type('bit_size', _, _) -> {integer,[]}; %% Integer operations. bif_type(ceil, [_], _) -> {integer,[]}; bif_type('div', [_,_], _) -> {integer,[]}; @@ -2009,11 +2199,13 @@ is_bif_safe(_, _) -> false. arith_type([A], Vst) -> %% Unary '+' or '-'. case get_term_type(A, Vst) of + {integer,_} -> {integer,[]}; {float,_} -> {float,[]}; _ -> number end; arith_type([A,B], Vst) -> case {get_term_type(A, Vst),get_term_type(B, Vst)} of + {{integer,_},{integer,_}} -> {integer,[]}; {{float,_},_} -> {float,[]}; {_,{float,_}} -> {float,[]}; {_,_} -> number @@ -2039,6 +2231,14 @@ return_type_1(erlang, setelement, 3, Vst) -> {integer,I} -> upgrade_tuple_type({tuple,[I]}, TupleType); _ -> TupleType end; +return_type_1(erlang, '++', 2, Vst) -> + case get_term_type({x,0}, Vst) =:= cons orelse + get_term_type({x,1}, Vst) =:= cons of + true -> cons; + false -> list + end; +return_type_1(erlang, '--', 2, _Vst) -> + list; return_type_1(erlang, F, A, _) -> return_type_erl(F, A); return_type_1(math, F, A, _) -> diff --git a/lib/compiler/src/beam_z.erl b/lib/compiler/src/beam_z.erl index 677094b3cd..415b579240 100644 --- a/lib/compiler/src/beam_z.erl +++ b/lib/compiler/src/beam_z.erl @@ -71,6 +71,31 @@ undo_renames([{get_hd,Src,Dst1},{get_tl,Src,Dst2}|Is]) -> [{get_list,Src,Dst1,Dst2}|undo_renames(Is)]; undo_renames([{get_tl,Src,Dst2},{get_hd,Src,Dst1}|Is]) -> [{get_list,Src,Dst1,Dst2}|undo_renames(Is)]; +undo_renames([{bs_put,_,{bs_put_binary,1,_}, + [{atom,all},{literal,<<>>}]}|Is]) -> + undo_renames(Is); +undo_renames([{bs_put,Fail,{bs_put_binary,1,_Flags}, + [{atom,all},{literal,BinString}]}|Is0]) -> + Bits = bit_size(BinString), + Bytes = Bits div 8, + case Bits rem 8 of + 0 -> + I = {bs_put_string,byte_size(BinString), + {string,BinString}}, + [undo_rename(I)|undo_renames(Is0)]; + Rem -> + <<Binary:Bytes/bytes,Int:Rem>> = BinString, + PutInt = {bs_put_integer,Fail,{integer,Rem},1, + {field_flags,[unsigned,big]},{integer,Int}}, + Is = [PutInt|undo_renames(Is0)], + case Binary of + <<>> -> + Is; + _ -> + [{bs_put_string,byte_size(Binary), + {string,Binary}}|Is] + end + end; undo_renames([I|Is]) -> [undo_rename(I)|undo_renames(Is)]; undo_renames([]) -> []. 
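The new beam_z clause above splits a literal binary into whole bytes, emitted as a bs_put_string, plus up to seven trailing bits, emitted as a bs_put_integer. A minimal standalone sketch of just that byte/bit split (the module name split_demo and the result tuples are invented for illustration; the real code builds the instructions shown in the diff):

-module(split_demo).
-export([split/1]).

%% Split a literal bitstring the way the new undo_renames/1 clause
%% does: whole bytes first, then any remaining bits as an integer.
split(BinString) when is_bitstring(BinString) ->
    Bits = bit_size(BinString),
    Bytes = Bits div 8,
    case Bits rem 8 of
        0 ->
            %% A whole number of bytes: one bs_put_string suffices.
            {string,BinString};
        Rem ->
            %% Whole bytes plus Rem trailing bits.
            <<Binary:Bytes/bytes,Int:Rem>> = BinString,
            {string,Binary,{trailing_bits,Rem,Int}}
    end.

For example, split(<<1,2,3>>) returns {string,<<1,2,3>>}, while split(<<255,1:3>>) returns {string,<<255>>,{trailing_bits,3,1}}, mirroring the bs_put_string plus bs_put_integer pair generated by the pass.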
@@ -79,8 +104,6 @@ undo_rename({bs_put,F,{I,U,Fl},[Sz,Src]}) -> {I,F,Sz,U,Fl,Src}; undo_rename({bs_put,F,{I,Fl},[Src]}) -> {I,F,Fl,Src}; -undo_rename({bs_put,{f,0},{bs_put_string,_,_}=I,[]}) -> - I; undo_rename({bif,bs_add=I,F,[Src1,Src2,{integer,U}],Dst}) -> {I,F,[Src1,Src2,U],Dst}; undo_rename({bif,bs_utf8_size=I,F,[Src],Dst}) -> @@ -101,7 +124,7 @@ undo_rename({test,bs_match_string=Op,F,[Ctx,Bin0]}) -> 0 -> Bin0; Rem -> <<Bin0/bitstring,0:(8-Rem)>> end, - {test,Op,F,[Ctx,Bits,{string,binary_to_list(Bin)}]}; + {test,Op,F,[Ctx,Bits,{string,Bin}]}; undo_rename({put_map,Fail,assoc,S,D,R,L}) -> {put_map_assoc,Fail,S,D,R,L}; undo_rename({put_map,Fail,exact,S,D,R,L}) -> diff --git a/lib/compiler/src/compile.erl b/lib/compiler/src/compile.erl index 65c4f140c9..53d3cec2d7 100644 --- a/lib/compiler/src/compile.erl +++ b/lib/compiler/src/compile.erl @@ -268,6 +268,10 @@ expand_opt(r21, Os) -> [no_put_tuple2 | expand_opt(no_bsm3, Os)]; expand_opt({debug_info_key,_}=O, Os) -> [encrypt_debug_info,O|Os]; +expand_opt(no_type_opt, Os) -> + [no_ssa_opt_type_start, + no_ssa_opt_type_continue, + no_ssa_opt_type_finish | Os]; expand_opt(O, Os) -> [O|Os]. expand_opt_before_21(Os) -> @@ -823,6 +827,9 @@ kernel_passes() -> {pass,beam_kernel_to_ssa}, {iff,dssa,{listing,"ssa"}}, {iff,ssalint,{pass,beam_ssa_lint}}, + {unless,no_share_opt,{pass,beam_ssa_share}}, + {iff,dssashare,{listing,"ssashare"}}, + {iff,ssalint,{pass,beam_ssa_lint}}, {unless,no_bsm_opt,{pass,beam_ssa_bsm}}, {iff,dssabsm,{listing,"ssabsm"}}, {iff,ssalint,{pass,beam_ssa_lint}}, @@ -852,8 +859,6 @@ asm_passes() -> {iff,dblk,{listing,"block"}}, {unless,no_except,{pass,beam_except}}, {iff,dexcept,{listing,"except"}}, - {unless,no_bs_opt,{pass,beam_bs}}, - {iff,dbs,{listing,"bs"}}, {unless,no_jopt,{pass,beam_jump}}, {iff,djmp,{listing,"jump"}}, {unless,no_peep_opt,{pass,beam_peep}}, @@ -2081,7 +2086,6 @@ pre_load() -> L = [beam_a, beam_asm, beam_block, - beam_bs, beam_clean, beam_dict, beam_except, @@ -2098,6 +2102,7 @@ pre_load() -> beam_ssa_opt, beam_ssa_pre_codegen, beam_ssa_recv, + beam_ssa_share, beam_ssa_type, beam_trim, beam_utils, diff --git a/lib/compiler/src/compiler.app.src b/lib/compiler/src/compiler.app.src index 86259270bd..108a0ca100 100644 --- a/lib/compiler/src/compiler.app.src +++ b/lib/compiler/src/compiler.app.src @@ -24,7 +24,6 @@ beam_a, beam_asm, beam_block, - beam_bs, beam_clean, beam_dict, beam_disasm, @@ -45,6 +44,7 @@ beam_ssa_pp, beam_ssa_pre_codegen, beam_ssa_recv, + beam_ssa_share, beam_ssa_type, beam_trim, beam_utils, diff --git a/lib/compiler/src/erl_bifs.erl b/lib/compiler/src/erl_bifs.erl index ce9762899e..d925decce6 100644 --- a/lib/compiler/src/erl_bifs.erl +++ b/lib/compiler/src/erl_bifs.erl @@ -195,6 +195,7 @@ is_safe(erlang, is_float, 1) -> true; is_safe(erlang, is_function, 1) -> true; is_safe(erlang, is_integer, 1) -> true; is_safe(erlang, is_list, 1) -> true; +is_safe(erlang, is_map, 1) -> true; is_safe(erlang, is_number, 1) -> true; is_safe(erlang, is_pid, 1) -> true; is_safe(erlang, is_port, 1) -> true; diff --git a/lib/compiler/src/sys_core_fold.erl b/lib/compiler/src/sys_core_fold.erl index d848cd8f19..43c99be982 100644 --- a/lib/compiler/src/sys_core_fold.erl +++ b/lib/compiler/src/sys_core_fold.erl @@ -2667,12 +2667,20 @@ opt_build_stacktrace(#c_let{vars=[#c_var{name=Cooked}], #c_call{module=#c_literal{val=erlang}, name=#c_literal{val=raise}, args=[Class,Exp,#c_var{name=Cooked}]} -> - %% The stacktrace is only used in a call to erlang:raise/3. - %% There is no need to build the stacktrace. 
Replace the
- %% call to erlang:raise/3 with the the raw_raise/3 instruction,
- %% which will use a raw stacktrace.
- #c_primop{name=#c_literal{val=raw_raise},
- args=[Class,Exp,RawStk]};
+ case core_lib:is_var_used(Cooked, #c_cons{hd=Class,tl=Exp}) of
+ true ->
+ %% Not safe. The stacktrace is used in the class or
+ %% reason.
+ Let;
+ false ->
+ %% The stacktrace is only used in the last
+ %% argument for erlang:raise/3. There is no need
+ %% to build the stacktrace. Replace the call to
+ %% erlang:raise/3 with the raw_raise/3
+ %% instruction, which will use a raw stacktrace.
+ #c_primop{name=#c_literal{val=raw_raise},
+ args=[Class,Exp,RawStk]}
+ end;
#c_let{vars=[#c_var{name=V}],arg=Arg,body=B0} when V =/= Cooked -> case core_lib:is_var_used(Cooked, Arg) of false ->
diff --git a/lib/compiler/src/sys_core_inline.erl b/lib/compiler/src/sys_core_inline.erl index 5a6cc45e4a..3380e3f1bd 100644
--- a/lib/compiler/src/sys_core_inline.erl
+++ b/lib/compiler/src/sys_core_inline.erl
@@ -195,6 +195,9 @@ kill_id_anns(Body) -> cerl_trees:map(fun(#c_fun{anno=A0}=CFun) -> A = kill_id_anns_1(A0), CFun#c_fun{anno=A};
+ (#c_var{anno=A0}=Var) ->
+ A = kill_id_anns_1(A0),
+ Var#c_var{anno=A};
(Expr) -> %% Mark everything as compiler generated to %% suppress bogus warnings.
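To make the sys_core_fold change above concrete, here is a hedged source-level example (the module name reraise_demo is invented; nothing below is taken from the patch). In the first function the stacktrace variable is used only as the third argument of erlang:raise/3, so the call can be rewritten to the raw_raise primop and no cooked stacktrace needs to be built. In the second, the stacktrace also appears in the reason term, so is_var_used/2 reports it as used and the rewrite must not apply:

-module(reraise_demo).
-export([reraise/1, reraise_wrapped/1]).

reraise(F) ->
    try
        F()
    catch
        C:R:Stk ->
            %% Stacktrace used only as the last argument:
            %% eligible for the raw_raise rewrite.
            erlang:raise(C, R, Stk)
    end.

reraise_wrapped(F) ->
    try
        F()
    catch
        C:R:Stk ->
            %% Stacktrace is also part of the reason:
            %% not safe to optimize, so the cooked
            %% stacktrace must still be built.
            erlang:raise(C, {R, Stk}, Stk)
    end.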