From cb4f08d8e6ebed95a0cec282a01e8b6c7972a2d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20H=C3=B6gberg?= Date: Tue, 15 Jan 2019 16:31:36 +0100 Subject: beam_validator: Handle aliased registers during type inference --- lib/compiler/src/beam_validator.erl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/lib/compiler/src/beam_validator.erl b/lib/compiler/src/beam_validator.erl index 3d53054f69..4943538809 100644 --- a/lib/compiler/src/beam_validator.erl +++ b/lib/compiler/src/beam_validator.erl @@ -1346,14 +1346,14 @@ select_val_branches_1([], _, Vst) -> Vst. infer_types(Src, Vst) -> case get_def(Src, Vst) of {bif,is_map,{f,_},[Map],_} -> - fun({atom,true}, S) -> set_type_reg(map, Map, S); + fun({atom,true}, S) -> set_aliased_type(map, Map, S); (_, S) -> S end; {bif,tuple_size,{f,_},[Tuple],_} -> fun({integer,Arity}, S) -> Type0 = get_term_type(Tuple, S), Type = upgrade_tuple_type({tuple,Arity}, Type0), - set_type(Type, Tuple, S); + set_aliased_type(Type, Tuple, S); (_, S) -> S end; {bif,'=:=',{f,_},[ArityReg,{integer,_}=Val],_} when ArityReg =/= Src -> -- cgit v1.2.3 From 5b031644e43196144a5824a6d2ade18153779cff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20H=C3=B6gberg?= Date: Mon, 14 Jan 2019 17:37:10 +0100 Subject: beam_validator: Add a stable interface for type annotations --- lib/compiler/src/beam_ssa_bsm.erl | 3 ++- lib/compiler/src/beam_validator.erl | 28 ++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+), 1 deletion(-) diff --git a/lib/compiler/src/beam_ssa_bsm.erl b/lib/compiler/src/beam_ssa_bsm.erl index 9631bf3334..466337db0e 100644 --- a/lib/compiler/src/beam_ssa_bsm.erl +++ b/lib/compiler/src/beam_ssa_bsm.erl @@ -877,7 +877,8 @@ annotate_context_parameters(F, ModInfo) -> %% Assertion. error(conflicting_parameter_types); (K, suitable_for_reuse, Acc) -> - Acc#{ K => match_context }; + T = beam_validator:type_anno(match_context), + Acc#{ K => T }; (_K, _V, Acc) -> Acc end, TypeAnno0, ParamInfo), diff --git a/lib/compiler/src/beam_validator.erl b/lib/compiler/src/beam_validator.erl index 4943538809..b3c09965cf 100644 --- a/lib/compiler/src/beam_validator.erl +++ b/lib/compiler/src/beam_validator.erl @@ -26,6 +26,7 @@ %% Interface for compiler. -export([module/2, format_error/1]). +-export([type_anno/1, type_anno/2, type_anno/3]). -import(lists, [any/2,dropwhile/2,foldl/3,map/2,foreach/2,reverse/1]). @@ -44,6 +45,33 @@ module({Mod,Exp,Attr,Fs,Lc}=Code, _Opts) {error,[{atom_to_list(Mod),Es}]} end. +%% Provides a stable interface for type annotations, used by certain passes to +%% indicate that we can safely assume that a register has a given type. +-spec type_anno(term()) -> term(). +type_anno(atom) -> {atom,[]}; +type_anno(bool) -> bool; +type_anno({binary,_}) -> term; +type_anno(cons) -> cons; +type_anno(float) -> {float,[]}; +type_anno(integer) -> {integer,[]}; +type_anno(list) -> list; +type_anno(map) -> map; +type_anno(match_context) -> match_context; +type_anno(number) -> number; +type_anno(nil) -> nil. + +-spec type_anno(term(), term()) -> term(). +type_anno(atom, Value) -> {atom, Value}; +type_anno(float, Value) -> {float, Value}; +type_anno(integer, Value) -> {integer, Value}. + +-spec type_anno(term(), term(), term()) -> term(). +type_anno(tuple, Size, Exact) when is_integer(Size) -> + case Exact of + true -> {tuple, Size}; + false -> {tuple, [Size]} + end. + -spec format_error(term()) -> iolist(). 
format_error({{_M,F,A},{I,Off,limit}}) -> -- cgit v1.2.3 From b46fe99e5fa3d96f0ae647f11956e5777d6bb1fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20H=C3=B6gberg?= Date: Wed, 16 Jan 2019 16:02:09 +0100 Subject: beam_validator: Validate types on local function calls --- lib/compiler/src/beam_validator.erl | 269 ++++++++++++++++++++++-------------- 1 file changed, 166 insertions(+), 103 deletions(-) diff --git a/lib/compiler/src/beam_validator.erl b/lib/compiler/src/beam_validator.erl index b3c09965cf..15ed267c54 100644 --- a/lib/compiler/src/beam_validator.erl +++ b/lib/compiler/src/beam_validator.erl @@ -121,28 +121,6 @@ validate(Module, Fs) -> Ft = index_parameter_types(Fs, []), validate_0(Module, Fs, Ft). -index_parameter_types([{function,_,_,Entry,Code0}|Fs], Acc0) -> - Code = dropwhile(fun({label,L}) when L =:= Entry -> false; - (_) -> true - end, Code0), - case Code of - [{label,Entry}|Is] -> - Acc = index_parameter_types_1(Is, Entry, Acc0), - index_parameter_types(Fs, Acc); - _ -> - %% Something serious is wrong. Ignore it for now. - %% It will be detected and diagnosed later. - index_parameter_types(Fs, Acc0) - end; -index_parameter_types([], Acc) -> - gb_trees:from_orddict(lists:sort(Acc)). - -index_parameter_types_1([{'%', {type_info, Reg, Type}} | Is], Entry, Acc) -> - Key = {Entry, Reg}, - index_parameter_types_1(Is, Entry, [{Key, Type} | Acc]); -index_parameter_types_1(_, _, Acc) -> - Acc. - validate_0(_Module, [], _) -> []; validate_0(Module, [{function,Name,Ar,Entry,Code}|Fs], Ft) -> try validate_1(Code, Name, Ar, Entry, Ft) of @@ -195,6 +173,32 @@ validate_0(Module, [{function,Name,Ar,Entry,Code}|Fs], Ft) -> slots=0 :: non_neg_integer() %Number of slots }). +index_parameter_types([{function,_,_,Entry,Code0}|Fs], Acc0) -> + Code = dropwhile(fun({label,L}) when L =:= Entry -> false; + (_) -> true + end, Code0), + case Code of + [{label,Entry}|Is] -> + Acc = index_parameter_types_1(Is, Entry, Acc0), + index_parameter_types(Fs, Acc); + _ -> + %% Something serious is wrong. Ignore it for now. + %% It will be detected and diagnosed later. + index_parameter_types(Fs, Acc0) + end; +index_parameter_types([], Acc) -> + gb_trees:from_orddict(lists:sort(Acc)). + +index_parameter_types_1([{'%', {type_info, Reg, Type0}} | Is], Entry, Acc) -> + Type = case Type0 of + match_context -> #ms{}; + _ -> Type0 + end, + Key = {Entry, Reg}, + index_parameter_types_1(Is, Entry, [{Key, Type} | Acc]); +index_parameter_types_1(_, _, Acc) -> + Acc. + validate_1(Is, Name, Arity, Entry, Ft) -> validate_2(labels(Is), Name, Arity, Entry, Ft). @@ -414,25 +418,19 @@ valfun_1(remove_message, Vst) -> %% The message term is no longer fragile. It can be used %% without restrictions. remove_fragility(Vst); -valfun_1({'%', {type_info, Reg, Info0}}, Vst0) -> +valfun_1({'%', {type_info, Reg, match_context}}, Vst0) -> + set_aliased_type(#ms{}, Reg, Vst0); +valfun_1({'%', {type_info, Reg, NewType0}}, Vst0) -> %% Explicit type information inserted by optimization passes to indicate %% that Reg has a certain type, so that we can accept cross-function type %% optimizations. - %% - %% At the moment we only allow this when narrowing from 'term' which is - %% what to expect with function parameters, but in theory any narrowing - %% conversion should be legal. 
- case get_move_term_type(Reg, Vst0) of - term -> - Type0 = case Info0 of - match_context -> #ms{}; - _ -> Info0 - end, - Type = propagate_fragility(Type0, [Reg], Vst0), - set_type_reg(Type, Reg, Vst0); - _ -> - error(bad_type_info) - end; + OldType = get_durable_term_type(Reg, Vst0), + NewType = case meet(NewType0, OldType) of + none -> error({bad_type_info, Reg, NewType0, OldType}); + T -> T + end, + Type = propagate_fragility(NewType, [Reg], Vst0), + set_aliased_type(Type, Reg, Vst0); valfun_1({'%',_}, Vst) -> Vst; valfun_1({line,_}, Vst) -> @@ -671,7 +669,12 @@ valfun_4({gc_bif,Op,{f,Fail},Live,Src,Dst}, #vst{current=St0}=Vst0) -> Vst1 = Vst0#vst{current=St}, Vst2 = branch_state(Fail, Vst1), Vst3 = prune_x_regs(Live, Vst2), + SrcType = get_term_type(hd(Src), Vst3), Vst = case Op of + length when SrcType =/= cons, SrcType =/= nil -> + %% If we already know we have a cons cell or nil, it + %% shouldn't be demoted to list. + set_type(list, hd(Src), Vst3); map_size -> set_type(map, hd(Src), Vst3); _ -> @@ -814,6 +817,12 @@ valfun_4({bs_set_position, Ctx, Pos}, Vst) -> Vst; %% Other test instructions. +valfun_4({test,is_atom,{f,Lbl},[Src]}, Vst) -> + assert_term(Src, Vst), + set_aliased_type({atom,[]}, Src, branch_state(Lbl, Vst)); +valfun_4({test,is_boolean,{f,Lbl},[Src]}, Vst) -> + assert_term(Src, Vst), + set_aliased_type(bool, Src, branch_state(Lbl, Vst)); valfun_4({test,is_float,{f,Lbl},[Float]}, Vst) -> assert_term(Float, Vst), set_type({float,[]}, Float, branch_state(Lbl, Vst)); @@ -821,6 +830,9 @@ valfun_4({test,is_tuple,{f,Lbl},[Tuple]}, Vst) -> Type0 = get_term_type(Tuple, Vst), Type = upgrade_tuple_type({tuple,[0]}, Type0), set_aliased_type(Type, Tuple, branch_state(Lbl, Vst)); +valfun_4({test,is_integer,{f,Lbl},[Src]}, Vst) -> + assert_term(Src, Vst), + set_aliased_type({integer,[]}, Src, branch_state(Lbl, Vst)); valfun_4({test,is_nonempty_list,{f,Lbl},[Cons]}, Vst) -> assert_term(Cons, Vst), Type = cons, @@ -858,11 +870,11 @@ valfun_4({test,is_map,{f,Lbl},[Src]}, Vst0) -> end; valfun_4({test,is_nil,{f,Lbl},[Src]}, Vst0) -> Vst = case get_term_type(Src, Vst0) of - list -> - branch_state(Lbl, set_type_reg(cons, Src, Vst0)); - _ -> - branch_state(Lbl, Vst0) - end, + list -> + branch_state(Lbl, set_type_reg(cons, Src, Vst0)); + _ -> + branch_state(Lbl, Vst0) + end, set_aliased_type(nil, Src, Vst); valfun_4({test,is_eq_exact,{f,Lbl},[Src,Val]=Ss}, Vst0) -> validate_src(Ss, Vst0), @@ -1109,39 +1121,58 @@ verify_call_args_1(N, Vst) -> verify_call_args_1(X, Vst). verify_local_call(Lbl, Live, Vst) -> - F = fun({R, _Ctx}) -> - verify_call_match_context(Lbl, R, Vst) - end, - MsRegs = all_ms_in_x_regs(Live, Vst), - verify_no_ms_aliases(MsRegs), - foreach(F, MsRegs). + F = fun({R, Type}) -> + verify_arg_type(Lbl, R, Type, Vst) + end, + TRegs = typed_call_regs(Live, Vst), + verify_no_ms_aliases(TRegs), + foreach(F, TRegs). -all_ms_in_x_regs(0, _Vst) -> +typed_call_regs(0, _Vst) -> []; -all_ms_in_x_regs(Live0, Vst) -> +typed_call_regs(Live0, Vst) -> Live = Live0 - 1, R = {x,Live}, - case get_move_term_type(R, Vst) of - #ms{}=M -> [{R,M} | all_ms_in_x_regs(Live, Vst)]; - _ -> all_ms_in_x_regs(Live, Vst) - end. + [{R, get_move_term_type(R, Vst)} | typed_call_regs(Live, Vst)]. %% Verifies that the same match context isn't present twice. 
-verify_no_ms_aliases(MsRegs) -> - CtxIds = [Id || {_, #ms{id=Id}} <- MsRegs], +verify_no_ms_aliases(Regs) -> + CtxIds = [Id || {_, #ms{id=Id}} <- Regs], UniqueCtxIds = ordsets:from_list(CtxIds), if length(UniqueCtxIds) < length(CtxIds) -> - error({multiple_match_contexts, MsRegs}); + error({multiple_match_contexts, Regs}); length(UniqueCtxIds) =:= length(CtxIds) -> ok end. -%% Verifies that the target label accepts match contexts in the given register. -verify_call_match_context(Lbl, Ctx, #vst{ft=Ft}) -> - case gb_trees:lookup({Lbl, Ctx}, Ft) of - {value, match_context} -> ok; - none -> error(no_bs_start_match2) +%% Verifies that the given argument narrows to what the function expects. +verify_arg_type(Lbl, Reg, #ms{}, #vst{ft=Ft}) -> + %% Match contexts require explicit support, and may not be passed to a + %% function that accepts arbitrary terms. + case gb_trees:lookup({Lbl, Reg}, Ft) of + {value, #ms{}} -> ok; + _ -> error(no_bs_start_match2) + end; +verify_arg_type(Lbl, Reg, GivenType, #vst{ft=Ft}) -> + case gb_trees:lookup({Lbl, Reg}, Ft) of + {value, bool} when GivenType =:= {atom, true}; + GivenType =:= {atom, false}; + GivenType =:= {atom, []} -> + %% We don't yet support upgrading true/false to bool, so we + %% assume unknown atoms can be bools when validating calls. + ok; + {value, #ms{}} -> + %% Functions that accept match contexts also accept all other + %% terms. This will change once we support union types. + ok; + {value, RequiredType} -> + case meet(GivenType, RequiredType) of + none -> error({bad_arg_type, Reg, GivenType, RequiredType}); + _ -> ok + end; + none -> + ok end. allocate(Zero, Stk, Heap, Live, #vst{current=#st{numy=none}}=Vst0) -> @@ -1361,15 +1392,15 @@ bsm_restore(Reg, SavePoint, Vst) -> _ -> error({illegal_restore,SavePoint,range}) end. - select_val_branches(Src, Choices, Vst) -> Infer = infer_types(Src, Vst), - select_val_branches_1(Choices, Infer, Vst). + select_val_branches_1(Choices, Src, Infer, Vst). -select_val_branches_1([Val,{f,L}|T], Infer, Vst0) -> - Vst = branch_state(L, Infer(Val, Vst0)), - select_val_branches_1(T, Infer, Vst); -select_val_branches_1([], _, Vst) -> Vst. +select_val_branches_1([Val,{f,L}|T], Src, Infer, Vst0) -> + Vst1 = set_aliased_type(Val, Src, Infer(Val, Vst0)), + Vst = branch_state(L, Vst1), + select_val_branches_1(T, Src, Infer, Vst); +select_val_branches_1([], _, _, Vst) -> Vst. infer_types(Src, Vst) -> case get_def(Src, Vst) of @@ -1654,6 +1685,10 @@ meet(T1, T2) -> subtract(list, nil) -> cons; subtract(list, cons) -> nil; +subtract(number, {integer,[]}) -> {float,[]}; +subtract(number, {float,[]}) -> {integer,[]}; +subtract(bool, {atom,false}) -> {atom, true}; +subtract(bool, {atom,true}) -> {atom, false}; subtract(Type, _) -> Type. assert_type(WantedType, Term, Vst) -> @@ -1869,7 +1904,7 @@ merge_regs_1([{R1,_}|Rs1], [{R2,_}|_]=Rs2) when R1 < R2 -> merge_regs_1([{R1,_}|_]=Rs1, [{R2,_}|Rs2]) when R1 > R2 -> merge_regs_1(Rs1, Rs2); merge_regs_1([{R,Type1}|Rs1], [{R,Type2}|Rs2]) -> - [{R,merge_types(Type1, Type2)}|merge_regs_1(Rs1, Rs2)]; + [{R,join(Type1, Type2)}|merge_regs_1(Rs1, Rs2)]; merge_regs_1([], []) -> []; merge_regs_1([], [_|_]) -> []; merge_regs_1([_|_], []) -> []. @@ -1888,67 +1923,90 @@ merge_y_regs_1(Y, S, Regs0) when Y >= 0 -> Type0 -> merge_y_regs_1(Y-1, S, Regs0); Type1 -> - Type = merge_types(Type0, Type1), + Type = join(Type0, Type1), Regs = gb_trees:update(Y, Type, Regs0), merge_y_regs_1(Y-1, S, Regs) end; merge_y_regs_1(_, _, Regs) -> Regs. 
-%% merge_types(Type1, Type2) -> Type +%% join(Type1, Type2) -> Type %% Return the most specific type possible. %% Note: Type1 must NOT be the same as Type2. -merge_types({fragile,Same}=Type, Same) -> +join({literal,_}=T1, T2) -> + join_literal(T1, T2); +join(T1, {literal,_}=T2) -> + join_literal(T2, T1); +join({fragile,Same}=Type, Same) -> Type; -merge_types({fragile,T1}, T2) -> - make_fragile(merge_types(T1, T2)); -merge_types(Same, {fragile,Same}=Type) -> +join({fragile,T1}, T2) -> + make_fragile(join(T1, T2)); +join(Same, {fragile,Same}=Type) -> Type; -merge_types(T1, {fragile,T2}) -> - make_fragile(merge_types(T1, T2)); -merge_types(uninitialized=I, _) -> I; -merge_types(_, uninitialized=I) -> I; -merge_types(initialized=I, _) -> I; -merge_types(_, initialized=I) -> I; -merge_types({catchtag,T0},{catchtag,T1}) -> +join(T1, {fragile,T2}) -> + make_fragile(join(T1, T2)); +join(uninitialized=I, _) -> I; +join(_, uninitialized=I) -> I; +join(initialized=I, _) -> I; +join(_, initialized=I) -> I; +join({catchtag,T0},{catchtag,T1}) -> {catchtag,ordsets:from_list(T0++T1)}; -merge_types({trytag,T0},{trytag,T1}) -> +join({trytag,T0},{trytag,T1}) -> {trytag,ordsets:from_list(T0++T1)}; -merge_types({tuple,A}, {tuple,B}) -> +join({tuple,A}, {tuple,B}) -> {tuple,[min(tuple_sz(A), tuple_sz(B))]}; -merge_types({Type,A}, {Type,B}) +join({Type,A}, {Type,B}) when Type =:= atom; Type =:= integer; Type =:= float -> if A =:= B -> {Type,A}; true -> {Type,[]} end; -merge_types({Type,_}, number) +join({Type,_}, number) when Type =:= integer; Type =:= float -> number; -merge_types(number, {Type,_}) +join(number, {Type,_}) when Type =:= integer; Type =:= float -> number; -merge_types(bool, {atom,A}) -> +join(bool, {atom,A}) -> merge_bool(A); -merge_types({atom,A}, bool) -> +join({atom,A}, bool) -> merge_bool(A); -merge_types(cons, {literal,[_|_]}) -> - cons; -merge_types(cons, nil) -> - list; -merge_types(nil, cons) -> - list; -merge_types({literal,[_|_]}, cons) -> - cons; -merge_types({literal,[_|_]}, {literal,[_|_]}) -> - cons; -merge_types(#ms{id=Id1,valid=B1,slots=Slots1}, +join({atom,_}, {atom,_}) -> + {atom,[]}; +join(#ms{id=Id1,valid=B1,slots=Slots1}, #ms{id=Id2,valid=B2,slots=Slots2}) -> Id = if Id1 =:= Id2 -> Id1; true -> make_ref() end, #ms{id=Id,valid=B1 band B2,slots=min(Slots1, Slots2)}; -merge_types(T1, T2) when T1 =/= T2 -> - %% Too different. All we know is that the type is a 'term'. +join(T1, T2) when T1 =/= T2 -> + %% We've exhaused all other options, so the type must either be a list or + %% a 'term'. + join_list(T1, T2). + +%% Merges types of literals. Note that the left argument must either be a +%% literal or exactly equal to the second argument. +join_literal(Same, Same) -> + Same; +join_literal({literal,[_|_]}, T) -> + join_literal(T, cons); +join_literal({literal,#{}}, T) -> + join_literal(T, map); +join_literal({literal,Tuple}, T) when is_tuple(Tuple) -> + join_literal(T, {tuple, tuple_size(Tuple)}); +join_literal({literal,_}, T) -> + %% Bitstring, fun, or similar. + join_literal(T, term); +join_literal(T1, T2) -> + %% We're done extracting the types, try merging them again. + join(T1, T2). + +join_list(nil, cons) -> list; +join_list(nil, list) -> list; +join_list(cons, list) -> list; +join_list(T, nil) -> join_list(nil, T); +join_list(T, cons) -> join_list(cons, T); +join_list(_, _) -> + %% Not a list, so it must be a term. term. 
tuple_sz([Sz]) -> Sz; @@ -2059,6 +2117,9 @@ bif_type(abs, [Num], Vst) -> end; bif_type(float, _, _) -> {float,[]}; bif_type('/', _, _) -> {float,[]}; +%% Binary operations +bif_type('byte_size', _, _) -> {integer,[]}; +bif_type('bit_size', _, _) -> {integer,[]}; %% Integer operations. bif_type(ceil, [_], _) -> {integer,[]}; bif_type('div', [_,_], _) -> {integer,[]}; @@ -2138,11 +2199,13 @@ is_bif_safe(_, _) -> false. arith_type([A], Vst) -> %% Unary '+' or '-'. case get_term_type(A, Vst) of + {integer,_} -> {integer,[]}; {float,_} -> {float,[]}; _ -> number end; arith_type([A,B], Vst) -> case {get_term_type(A, Vst),get_term_type(B, Vst)} of + {{integer,_},{integer,_}} -> {integer,[]}; {{float,_},_} -> {float,[]}; {_,{float,_}} -> {float,[]}; {_,_} -> number -- cgit v1.2.3 From 2d96971a96868d70a750422788d1493a5cbed605 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20H=C3=B6gberg?= Date: Mon, 21 Jan 2019 07:35:55 +0100 Subject: beam_ssa_type: Remove wait_timeout instructions with a timeout of 0 --- lib/compiler/src/beam_ssa_type.erl | 2 ++ lib/compiler/test/receive_SUITE.erl | 27 +++++++++++++++++++++++++-- 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/lib/compiler/src/beam_ssa_type.erl b/lib/compiler/src/beam_ssa_type.erl index ede57875e2..7ef49fee64 100644 --- a/lib/compiler/src/beam_ssa_type.erl +++ b/lib/compiler/src/beam_ssa_type.erl @@ -309,6 +309,8 @@ simplify(#b_set{op=put_tuple,args=Args}=I, _Ts) -> none -> I; List -> #b_literal{val=list_to_tuple(List)} end; +simplify(#b_set{op=wait_timeout,args=[#b_literal{val=0}]}, _Ts) -> + #b_literal{val=true}; simplify(#b_set{op=wait_timeout,args=[#b_literal{val=infinity}]}=I, _Ts) -> I#b_set{op=wait,args=[]}; simplify(I, _Ts) -> I. diff --git a/lib/compiler/test/receive_SUITE.erl b/lib/compiler/test/receive_SUITE.erl index 4219768d6f..12108445f0 100644 --- a/lib/compiler/test/receive_SUITE.erl +++ b/lib/compiler/test/receive_SUITE.erl @@ -25,7 +25,7 @@ init_per_group/2,end_per_group/2, init_per_testcase/2,end_per_testcase/2, export/1,recv/1,coverage/1,otp_7980/1,ref_opt/1, - wait/1,recv_in_try/1,double_recv/1]). + wait/1,recv_in_try/1,double_recv/1,receive_var_zero/1]). -include_lib("common_test/include/ct.hrl"). @@ -45,7 +45,7 @@ all() -> groups() -> [{p,test_lib:parallel(), [recv,coverage,otp_7980,ref_opt,export,wait, - recv_in_try,double_recv]}]. + recv_in_try,double_recv,receive_var_zero]}]. init_per_suite(Config) -> @@ -378,4 +378,27 @@ do_double_recv(_, Msg) -> error end. +%% Test 'after Z', when Z =:= 0 been propagated as an immediate by the type +%% optimization pass. +receive_var_zero(Config) when is_list(Config) -> + self() ! x, + self() ! y, + Z = zero(), + timeout = receive + z -> ok + after Z -> timeout + end, + timeout = receive + after Z -> timeout + end, + self() ! w, + receive + x -> ok; + Other -> + ct:fail({bad_message,Other}) + end. + +zero() -> 0. + + id(I) -> I. -- cgit v1.2.3 From 53b46c186051e256a4238f4c4b75e3dcbf61a1b0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20H=C3=B6gberg?= Date: Thu, 17 Jan 2019 14:45:55 +0100 Subject: beam_ssa_type: Fix type subtraction in #b_switch{} A switch is equivalent to a series of '=:=', so we have to subtract each value individually from the type. Subtracting a join risks removing too much type information, and managed to narrow "number" into "float" in the attached test case. 
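As an illustration, here is a sketch of the failure mode based on the fact/2 function from the attached test case (subtract_number_type/1 in float_SUITE):

    fact(0, P) -> P;
    fact(1, P) -> P;
    fact(N, P) -> fact(N-1, P*N).

The switch on N covers the values 0 and 1. Subtracting their join (an integer type) from N's inferred type 'number' in the fail block narrowed it to 'float', even though N is an integer on that path. Subtracting 0 and 1 one at a time leaves the type as 'number', which is exactly what a chain of '=:=' tests would have inferred.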
--- lib/compiler/src/beam_ssa_type.erl | 15 +++++++++++---- lib/compiler/test/float_SUITE.erl | 15 +++++++++++++-- 2 files changed, 24 insertions(+), 6 deletions(-) diff --git a/lib/compiler/src/beam_ssa_type.erl b/lib/compiler/src/beam_ssa_type.erl index 7ef49fee64..fcfb7b86f6 100644 --- a/lib/compiler/src/beam_ssa_type.erl +++ b/lib/compiler/src/beam_ssa_type.erl @@ -478,7 +478,9 @@ update_successors(#b_switch{arg=#b_var{}=V,fail=Fail,list=List}, Ts0, D0) -> end, foldl(F, D, List); false -> - FailTs = subtract_types([{V,join_sw_list(List, Ts0, none)}], Ts0), + %% V can not be equal to any of the values in List at the fail + %% block. + FailTs = subtract_sw_list(V, List, Ts0), D = update_successor(Fail, FailTs, D0), F = fun({Val,S}, A) -> T = get_type(Val, Ts0), @@ -488,9 +490,14 @@ update_successors(#b_switch{arg=#b_var{}=V,fail=Fail,list=List}, Ts0, D0) -> end; update_successors(#b_ret{}, _Ts, D) -> D. -join_sw_list([{Val,_}|T], Ts, Type) -> - join_sw_list(T, Ts, join(Type, get_type(Val, Ts))); -join_sw_list([], _, Type) -> Type. +subtract_sw_list(V, List, Ts) -> + Ts#{ V := sub_sw_list_1(get_type(V, Ts), List, Ts) }. + +sub_sw_list_1(Type, [{Val,_}|T], Ts) -> + ValType = get_type(Val, Ts), + sub_sw_list_1(subtract(Type, ValType), T, Ts); +sub_sw_list_1(Type, [], _Ts) -> + Type. update_successor_bool(#b_var{}=Var, BoolValue, S, Ts, D) -> case t_is_boolean(get_type(Var, Ts)) of diff --git a/lib/compiler/test/float_SUITE.erl b/lib/compiler/test/float_SUITE.erl index 012810aba2..831e8279aa 100644 --- a/lib/compiler/test/float_SUITE.erl +++ b/lib/compiler/test/float_SUITE.erl @@ -20,7 +20,8 @@ -module(float_SUITE). -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2, - pending/1,bif_calls/1,math_functions/1,mixed_float_and_int/1]). + pending/1,bif_calls/1,math_functions/1,mixed_float_and_int/1, + subtract_number_type/1]). -include_lib("common_test/include/ct.hrl"). @@ -28,7 +29,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}]. all() -> [pending, bif_calls, math_functions, - mixed_float_and_int]. + mixed_float_and_int, subtract_number_type]. groups() -> []. @@ -176,5 +177,15 @@ mixed_float_and_int(Config) when is_list(Config) -> pc(Cov, NotCov, X) -> round(Cov/(Cov+NotCov)*100) + 42 + 2.0*X. +subtract_number_type(Config) when is_list(Config) -> + 120 = fact(5). + +fact(N) -> + fact(N, 1). + +fact(0, P) -> P; +fact(1, P) -> P; +fact(N, P) -> fact(N-1, P*N). + id(I) -> I. -- cgit v1.2.3 From b7db13e2f3ef6990367fb2cc08263da8a57e7414 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20H=C3=B6gberg?= Date: Mon, 21 Jan 2019 10:40:27 +0100 Subject: beam_ssa_opt: Don't ruin arguments of bs_match/skip If the match instruction was already marked as a skip, we'd ruin its argument list. --- lib/compiler/src/beam_ssa_opt.erl | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/lib/compiler/src/beam_ssa_opt.erl b/lib/compiler/src/beam_ssa_opt.erl index 6f7044f006..c587f9fb44 100644 --- a/lib/compiler/src/beam_ssa_opt.erl +++ b/lib/compiler/src/beam_ssa_opt.erl @@ -924,9 +924,10 @@ bsm_skip([], _) -> []. 
bsm_skip_is([I0|Is], Extracted) -> case I0 of - #b_set{op=bs_match,args=[#b_literal{val=string}|_]} -> - [I0|bsm_skip_is(Is, Extracted)]; - #b_set{op=bs_match,dst=Ctx,args=[Type,PrevCtx|Args0]} -> + #b_set{op=bs_match, + dst=Ctx, + args=[#b_literal{val=T}=Type,PrevCtx|Args0]} + when T =/= string, T =/= skip -> I = case cerl_sets:is_element(Ctx, Extracted) of true -> I0; -- cgit v1.2.3 From 1c73a313e72909d054f55e321c1929d2be55ff11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20H=C3=B6gberg?= Date: Thu, 10 Jan 2019 17:06:31 +0100 Subject: beam_ssa_opt: Add a scaffold for module-level optimizations This serves as a base for the upcoming module-level type optimization, but may come in handy for other passes like beam_ssa_funs and beam_ssa_bsm that have their own ad-hoc implementations. --- .gitignore | 1 + lib/compiler/src/Makefile | 1 + lib/compiler/src/beam_ssa_opt.erl | 353 +++++++++++++++++++++++++++++--------- lib/compiler/src/beam_ssa_opt.hrl | 37 ++++ lib/compiler/test/Makefile | 14 +- lib/compiler/test/test_lib.erl | 7 +- 6 files changed, 327 insertions(+), 86 deletions(-) create mode 100644 lib/compiler/src/beam_ssa_opt.hrl diff --git a/.gitignore b/.gitignore index 02894c4786..e426c042a2 100644 --- a/.gitignore +++ b/.gitignore @@ -247,6 +247,7 @@ JAVADOC-GENERATED /lib/compiler/test/*_post_opt_SUITE.erl /lib/compiler/test/*_inline_SUITE.erl /lib/compiler/test/*_r21_SUITE.erl +/lib/compiler/test/*_no_module_opt_SUITE.erl # crypto /lib/crypto/test/crypto_SUITE_data/*.rsp diff --git a/lib/compiler/src/Makefile b/lib/compiler/src/Makefile index 074d9b881b..97c73d0e07 100644 --- a/lib/compiler/src/Makefile +++ b/lib/compiler/src/Makefile @@ -103,6 +103,7 @@ BEAM_H = $(wildcard ../priv/beam_h/*.h) HRL_FILES= \ beam_disasm.hrl \ + beam_ssa_opt.hrl \ beam_ssa.hrl \ core_parse.hrl \ v3_kernel.hrl diff --git a/lib/compiler/src/beam_ssa_opt.erl b/lib/compiler/src/beam_ssa_opt.erl index c587f9fb44..93c5922547 100644 --- a/lib/compiler/src/beam_ssa_opt.erl +++ b/lib/compiler/src/beam_ssa_opt.erl @@ -18,61 +18,153 @@ %% %CopyrightEnd% %% +%%% +%%% This is a collection of various optimizations that don't need a separate +%%% pass by themselves and/or are mutually beneficial to other passes. +%%% +%%% The optimizations are applied in "phases," each with a list of sub-passes +%%% to run. These sub-passes are applied on all functions in a module before +%%% moving on to the next phase, which lets us gather module-level information +%%% in one phase and then apply it in the next without having to risk working +%%% with incomplete information. +%%% +%%% Each sub-pass operates on a #st{} record and a func_info_db(), where the +%%% former is just a #b_function{} whose blocks can be represented either in +%%% linear or map form, and the latter is a map with information about all +%%% functions in the module (see beam_ssa_opt.hrl for more details). +%%% + -module(beam_ssa_opt). -export([module/2]). --include("beam_ssa.hrl"). +-include("beam_ssa_opt.hrl"). + -import(lists, [append/1,foldl/3,keyfind/3,member/2, reverse/1,reverse/2, - splitwith/2,takewhile/2,unzip/1]). + splitwith/2,sort/1,takewhile/2,unzip/1]). + +-define(DEFAULT_REPETITIONS, 2). -spec module(beam_ssa:b_module(), [compile:option()]) -> {'ok',beam_ssa:b_module()}. -module(#b_module{body=Fs0}=Module, Opts) -> - Ps = passes(Opts), - Fs = functions(Fs0, Ps), - {ok,Module#b_module{body=Fs}}. 
+-record(st, {ssa :: [{beam_ssa:label(),beam_ssa:b_blk()}] | + beam_ssa:block_map(), + args :: [beam_ssa:b_var()], + cnt :: beam_ssa:label(), + anno :: beam_ssa:anno()}). +-type st_map() :: #{ func_id() => #st{} }. + +module(Module, Opts) -> + FuncDb0 = case proplists:get_value(no_module_opt, Opts, false) of + false -> build_func_db(Module); + true -> #{} + end, + + %% Passes that perform module-level optimizations are often aided by + %% optimizing callers before callees and vice versa, so we optimize all + %% functions in call order, flipping it as required. + StMap0 = build_st_map(Module), + Order = get_call_order_po(StMap0, FuncDb0), -functions([F|Fs], Ps) -> - [function(F, Ps)|functions(Fs, Ps)]; -functions([], _Ps) -> []. + Phases = + [{Order, prologue_passes(Opts)}] ++ + repeat(Opts, repeated_passes(Opts), Order) ++ + [{Order, epilogue_passes(Opts)}], --type b_blk() :: beam_ssa:b_blk(). --type b_var() :: beam_ssa:b_var(). --type label() :: beam_ssa:label(). + {StMap, _FuncDb} = foldl(fun({FuncIds, Ps}, {StMap, FuncDb}) -> + phase(FuncIds, Ps, StMap, FuncDb) + end, {StMap0, FuncDb0}, Phases), + + {ok, finish(Module, StMap)}. + +phase([FuncId | Ids], Ps, StMap, FuncDb0) -> + try + {St, FuncDb} = + compile:run_sub_passes(Ps, {map_get(FuncId, StMap), FuncDb0}), + + phase(Ids, Ps, StMap#{ FuncId => St }, FuncDb) + catch + Class:Error:Stack -> + #b_local{name=Name,arity=Arity} = FuncId, + io:fwrite("Function: ~w/~w\n", [Name,Arity]), + erlang:raise(Class, Error, Stack) + end; +phase([], _Ps, StMap, FuncDb) -> + {StMap, FuncDb}. + +%% Repeats the given passes, alternating the order between runs to make the +%% type pass more efficient. +repeat(Opts, Ps, OrderA) -> + Repeat = proplists:get_value(ssa_opt_repeat, Opts, ?DEFAULT_REPETITIONS), + OrderB = reverse(OrderA), + repeat_1(Repeat, Ps, OrderA, OrderB). + +repeat_1(0, _Opts, _OrderA, _OrderB) -> + []; +repeat_1(N, Ps, OrderA, OrderB) when N > 0, N rem 2 =:= 0 -> + [{OrderA, Ps} | repeat_1(N - 1, Ps, OrderA, OrderB)]; +repeat_1(N, Ps, OrderA, OrderB) when N > 0, N rem 2 =:= 1 -> + [{OrderB, Ps} | repeat_1(N - 1, Ps, OrderA, OrderB)]. + +%% + +get_func_id(F) -> + {_Mod, Name, Arity} = beam_ssa:get_anno(func_info, F), + #b_local{name=#b_literal{val=Name}, arity=Arity}. + +-spec build_st_map(#b_module{}) -> st_map(). +build_st_map(#b_module{body=Fs}) -> + build_st_map_1(Fs, #{}). + +build_st_map_1([F | Fs], Map) -> + #b_function{anno=Anno,args=Args,cnt=Counter,bs=Bs} = F, + St = #st{anno=Anno,args=Args,cnt=Counter,ssa=Bs}, + build_st_map_1(Fs, Map#{ get_func_id(F) => St }); +build_st_map_1([], Map) -> + Map. + +-spec finish(#b_module{}, st_map()) -> #b_module{}. +finish(#b_module{body=Fs0}=Module, StMap) -> + Module#b_module{body=finish_1(Fs0, StMap)}. + +finish_1([F0 | Fs], StMap) -> + #st{anno=Anno,cnt=Counter,ssa=Blocks} = map_get(get_func_id(F0), StMap), + F = F0#b_function{anno=Anno,bs=Blocks,cnt=Counter}, + [F | finish_1(Fs, StMap)]; +finish_1([], _StMap) -> + []. + +%% --record(st, {ssa :: beam_ssa:block_map() | [{label(),b_blk()}], - args :: [b_var()], - cnt :: label()}). -define(PASS(N), {N,fun N/1}). -passes(Opts0) -> +prologue_passes(Opts) -> Ps = [?PASS(ssa_opt_split_blocks), ?PASS(ssa_opt_coalesce_phis), ?PASS(ssa_opt_element), ?PASS(ssa_opt_linearize), ?PASS(ssa_opt_tuple_size), ?PASS(ssa_opt_record), - - %% Run ssa_opt_cse twice, because it will help ssa_opt_dead, - %% and ssa_opt_dead will help ssa_opt_cse. - %% - %% Run ssa_opt_live twice, because it will help ssa_opt_dead - %% and ssa_opt_dead will help ssa_opt_live. 
- %% - %% Run beam_ssa_type twice, because there will be more - %% opportunities for optimizations after running beam_ssa_dead. - ?PASS(ssa_opt_cse), - ?PASS(ssa_opt_type), - ?PASS(ssa_opt_live), + ?PASS(ssa_opt_cse) %Helps the first type pass. + ], + passes_1(Ps, Opts). + +%% These passes all benefit from each other (in roughly this order), so they +%% are repeated as required. +repeated_passes(Opts) -> + Ps = [?PASS(ssa_opt_live), ?PASS(ssa_opt_bs_puts), ?PASS(ssa_opt_dead), - ?PASS(ssa_opt_cse), %Second time. - ?PASS(ssa_opt_float), - ?PASS(ssa_opt_type), %Second time. - ?PASS(ssa_opt_live), %Second time. - + ?PASS(ssa_opt_cse), + ?PASS(ssa_opt_type)], %Must run after ssa_opt_dead to + %clean up phi nodes. + passes_1(Ps, Opts). + +epilogue_passes(Opts) -> + Ps = [?PASS(ssa_opt_float), + ?PASS(ssa_opt_live), %One last time to clean up the + %mess left by the float pass. ?PASS(ssa_opt_bsm), ?PASS(ssa_opt_bsm_units), ?PASS(ssa_opt_bsm_shortcut), @@ -81,6 +173,9 @@ passes(Opts0) -> ?PASS(ssa_opt_sink), ?PASS(ssa_opt_merge_blocks), ?PASS(ssa_opt_trim_unreachable)], + passes_1(Ps, Opts). + +passes_1(Ps, Opts0) -> Negations = [{list_to_atom("no_"++atom_to_list(N)),N} || {N,_} <- Ps], Opts = proplists:substitute_negations(Negations, Opts0), @@ -92,36 +187,121 @@ passes(Opts0) -> {NoName,fun(S) -> S end} end || {Name,_}=P <- Ps]. -function(#b_function{anno=Anno,bs=Blocks0,args=Args,cnt=Count0}=F, Ps) -> +%% Builds a function information map with basic information about incoming and +%% outgoing local calls, as well as whether the function is exported. +-spec build_func_db(#b_module{}) -> func_info_db(). +build_func_db(#b_module{body=Fs,exports=Exports}) -> try - St = #st{ssa=Blocks0,args=Args,cnt=Count0}, - #st{ssa=Blocks,cnt=Count} = compile:run_sub_passes(Ps, St), - F#b_function{bs=Blocks,cnt=Count} + fdb_1(Fs, gb_sets:from_list(Exports), #{}) catch - Class:Error:Stack -> - #{func_info:={_,Name,Arity}} = Anno, - io:fwrite("Function: ~w/~w\n", [Name,Arity]), - erlang:raise(Class, Error, Stack) + %% All module-level optimizations are invalid when a NIF can override a + %% function, so we have to bail out. + throw:load_nif -> #{} end. +fdb_1([#b_function{ bs=Bs }=F | Fs], Exports, FuncDb0) -> + Id = get_func_id(F), + + #b_local{name=#b_literal{val=Name}, arity=Arity} = Id, + Exported = gb_sets:is_element({Name, Arity}, Exports), + + FuncDb1 = case FuncDb0 of + %% We may have an entry already if someone's called us. + #{ Id := Info } -> + FuncDb0#{ Id := Info#func_info{ exported=Exported }}; + #{} -> + FuncDb0#{ Id => #func_info{ exported=Exported }} + end, + + FuncDb = beam_ssa:fold_rpo(fun(_L, #b_blk{is=Is}, FuncDb) -> + fdb_is(Is, Id, FuncDb) + end, FuncDb1, Bs), + + fdb_1(Fs, Exports, FuncDb); +fdb_1([], _Exports, FuncDb) -> + FuncDb. + +fdb_is([#b_set{op=call, + args=[#b_local{}=Callee | _]} | Is], + Caller, FuncDb) -> + fdb_is(Is, Caller, fdb_update(Caller, Callee, FuncDb)); +fdb_is([#b_set{op=call, + args=[#b_remote{mod=#b_literal{val=erlang}, + name=#b_literal{val=load_nif}}, + _Path, _LoadInfo]} | _Is], _Caller, _FuncDb) -> + throw(load_nif); +fdb_is([_ | Is], Caller, FuncDb) -> + fdb_is(Is, Caller, FuncDb); +fdb_is([], _Caller, FuncDb) -> + FuncDb. 
+ +fdb_update(Caller, Callee, FuncDb) -> + CallerVertex = maps:get(Caller, FuncDb, #func_info{}), + CalleeVertex = maps:get(Callee, FuncDb, #func_info{}), + + Calls = ordsets:add_element(Callee, CallerVertex#func_info.out), + CalledBy = ordsets:add_element(Caller, CalleeVertex#func_info.in), + + FuncDb#{ Caller => CallerVertex#func_info{out=Calls}, + Callee => CalleeVertex#func_info{in=CalledBy} }. + +%% Returns the post-order of all local calls in this module. That is, it starts +%% with the functions that don't call any others and then walks up the call +%% chain. +%% +%% Functions where module-level optimization is disabled are added last in +%% arbitrary order. + +get_call_order_po(StMap, FuncDb) -> + Leaves = maps:fold(fun(Id, #func_info{out=[]}, Acc) -> + [Id | Acc]; + (_, _, Acc) -> + Acc + end, [], FuncDb), + + Order = gco_po_1(sort(Leaves), FuncDb, [], #{}), + + Order ++ maps:fold(fun(K, _V, Acc) -> + case is_map_key(K, FuncDb) of + false -> [K | Acc]; + true -> Acc + end + end, [], StMap). + +gco_po_1([Id | Ids], FuncDb, Children, Seen) when not is_map_key(Id, Seen) -> + [Id | gco_po_1(Ids, FuncDb, [Id | Children], Seen#{ Id => true })]; +gco_po_1([_Id | Ids], FuncDb, Children, Seen) -> + gco_po_1(Ids, FuncDb, Children, Seen); +gco_po_1([], FuncDb, [_|_]=Children, Seen) -> + gco_po_1(gco_po_parents(Children, FuncDb), FuncDb, [], Seen); +gco_po_1([], _FuncDb, [], _Seen) -> + []. + +gco_po_parents([Child | Children], FuncDb) -> + #{ Child := #func_info{in=Parents}} = FuncDb, + Parents ++ gco_po_parents(Children, FuncDb); +gco_po_parents([], _FuncDb) -> + []. + %%% %%% Trivial sub passes. %%% -ssa_opt_dead(#st{ssa=Linear}=St) -> - St#st{ssa=beam_ssa_dead:opt(Linear)}. +ssa_opt_dead({#st{ssa=Linear}=St, FuncDb}) -> + {St#st{ssa=beam_ssa_dead:opt(Linear)}, FuncDb}. -ssa_opt_linearize(#st{ssa=Blocks}=St) -> - St#st{ssa=beam_ssa:linearize(Blocks)}. +ssa_opt_linearize({#st{ssa=Blocks}=St, FuncDb}) -> + {St#st{ssa=beam_ssa:linearize(Blocks)}, FuncDb}. -ssa_opt_type(#st{ssa=Linear,args=Args}=St) -> - St#st{ssa=beam_ssa_type:opt(Linear, Args)}. +ssa_opt_type({#st{ssa=Linear0,args=Args}=St0, FuncDb}) -> + Linear = beam_ssa_type:opt(Linear0, Args), + {St0#st{ssa=Linear}, FuncDb}. -ssa_opt_blockify(#st{ssa=Linear}=St) -> - St#st{ssa=maps:from_list(Linear)}. +ssa_opt_blockify({#st{ssa=Linear}=St, FuncDb}) -> + {St#st{ssa=maps:from_list(Linear)}, FuncDb}. -ssa_opt_trim_unreachable(#st{ssa=Blocks}=St) -> - St#st{ssa=beam_ssa:trim_unreachable(Blocks)}. +ssa_opt_trim_unreachable({#st{ssa=Blocks}=St, FuncDb}) -> + {St#st{ssa=beam_ssa:trim_unreachable(Blocks)}, FuncDb}. %%% %%% Split blocks before certain instructions to enable more optimizations. @@ -133,14 +313,14 @@ ssa_opt_trim_unreachable(#st{ssa=Blocks}=St) -> %%% for sinking get_tuple_element instructions. %%% -ssa_opt_split_blocks(#st{ssa=Blocks0,cnt=Count0}=St) -> +ssa_opt_split_blocks({#st{ssa=Blocks0,cnt=Count0}=St, FuncDb}) -> P = fun(#b_set{op={bif,element}}) -> true; (#b_set{op=call}) -> true; (#b_set{op=make_fun}) -> true; (_) -> false end, {Blocks,Count} = beam_ssa:split_blocks(P, Blocks0, Count0), - St#st{ssa=Blocks,cnt=Count}. + {St#st{ssa=Blocks,cnt=Count}, FuncDb}. %%% %%% Coalesce phi nodes. @@ -164,10 +344,10 @@ ssa_opt_split_blocks(#st{ssa=Blocks0,cnt=Count0}=St) -> %%% different registers). %%% -ssa_opt_coalesce_phis(#st{ssa=Blocks0}=St) -> +ssa_opt_coalesce_phis({#st{ssa=Blocks0}=St, FuncDb}) -> Ls = beam_ssa:rpo(Blocks0), Blocks = c_phis_1(Ls, Blocks0), - St#st{ssa=Blocks}. + {St#st{ssa=Blocks}, FuncDb}. 
c_phis_1([L|Ls], Blocks0) -> case maps:get(L, Blocks0) of @@ -247,7 +427,7 @@ c_fix_branches([], _, Blocks) -> Blocks. %%% be replaced with get_tuple_element/3 instructions. %%% -ssa_opt_element(#st{ssa=Blocks}=St) -> +ssa_opt_element({#st{ssa=Blocks}=St, FuncDb}) -> %% Collect the information about element instructions in this %% function. GetEls = collect_element_calls(beam_ssa:linearize(Blocks)), @@ -259,7 +439,7 @@ ssa_opt_element(#st{ssa=Blocks}=St) -> %% For each chain, swap the first element call with the %% element call with the highest index. - St#st{ssa=swap_element_calls(Chains, Blocks)}. + {St#st{ssa=swap_element_calls(Chains, Blocks)}, FuncDb}. collect_element_calls([{L,#b_blk{is=Is0,last=Last}}|Bs]) -> case {Is0,Last} of @@ -320,9 +500,9 @@ swap_element_calls_1([], _, Blocks) -> %%% when applicable. %%% -ssa_opt_record(#st{ssa=Linear}=St) -> +ssa_opt_record({#st{ssa=Linear}=St, FuncDb}) -> Blocks = maps:from_list(Linear), - St#st{ssa=record_opt(Linear, Blocks)}. + {St#st{ssa=record_opt(Linear, Blocks)}, FuncDb}. record_opt([{L,#b_blk{is=Is0,last=Last}=Blk0}|Bs], Blocks) -> Is = record_opt_is(Is0, Last, Blocks), @@ -406,9 +586,9 @@ is_tagged_tuple_4([], _, _) -> no. %%% subexpressions across instructions that clobber the X registers. %%% -ssa_opt_cse(#st{ssa=Linear}=St) -> +ssa_opt_cse({#st{ssa=Linear}=St, FuncDb}) -> M = #{0=>#{}}, - St#st{ssa=cse(Linear, #{}, M)}. + {St#st{ssa=cse(Linear, #{}, M)}, FuncDb}. cse([{L,#b_blk{is=Is0,last=Last0}=Blk}|Bs], Sub0, M0) -> Es0 = maps:get(L, M0), @@ -549,13 +729,13 @@ cse_suitable(#b_set{}) -> false. bs :: beam_ssa:block_map() }). -ssa_opt_float(#st{ssa=Linear0,cnt=Count0}=St) -> +ssa_opt_float({#st{ssa=Linear0,cnt=Count0}=St, FuncDb}) -> NonGuards0 = float_non_guards(Linear0), NonGuards = gb_sets:from_list(NonGuards0), Blocks = maps:from_list(Linear0), Fs = #fs{non_guards=NonGuards,bs=Blocks}, {Linear,Count} = float_opt(Linear0, Count0, Fs), - St#st{ssa=Linear,cnt=Count}. + {St#st{ssa=Linear,cnt=Count}, FuncDb}. float_non_guards([{L,#b_blk{is=Is}}|Bs]) -> case Is of @@ -793,12 +973,12 @@ float_flush_regs(#fs{regs=Rs}) -> %%% with a cheaper instructions %%% -ssa_opt_live(#st{ssa=Linear0}=St) -> +ssa_opt_live({#st{ssa=Linear0}=St, FuncDb}) -> RevLinear = reverse(Linear0), Blocks0 = maps:from_list(RevLinear), Blocks = live_opt(RevLinear, #{}, Blocks0), Linear = beam_ssa:linearize(Blocks), - St#st{ssa=Linear}. + {St#st{ssa=Linear}, FuncDb}. live_opt([{L,Blk0}|Bs], LiveMap0, Blocks) -> Blk1 = beam_ssa_share:block(Blk0, Blocks), @@ -911,10 +1091,10 @@ live_opt_unused(_) -> keep. %%% with bs_test_tail. %%% -ssa_opt_bsm(#st{ssa=Linear}=St) -> +ssa_opt_bsm({#st{ssa=Linear}=St, FuncDb}) -> Extracted0 = bsm_extracted(Linear), Extracted = cerl_sets:from_list(Extracted0), - St#st{ssa=bsm_skip(Linear, Extracted)}. + {St#st{ssa=bsm_skip(Linear, Extracted)}, FuncDb}. bsm_skip([{L,#b_blk{is=Is0}=Blk}|Bs0], Extracted) -> Bs = bsm_skip(Bs0, Extracted), @@ -1012,14 +1192,14 @@ coalesce_skips_is(_, _, _) -> %%% Short-cutting binary matching instructions. %%% -ssa_opt_bsm_shortcut(#st{ssa=Linear}=St) -> +ssa_opt_bsm_shortcut({#st{ssa=Linear}=St, FuncDb}) -> Positions = bsm_positions(Linear, #{}), case map_size(Positions) of 0 -> %% No binary matching instructions. - St; + {St, FuncDb}; _ -> - St#st{ssa=bsm_shortcut(Linear, Positions)} + {St#st{ssa=bsm_shortcut(Linear, Positions)}, FuncDb} end. bsm_positions([{L,#b_blk{is=Is,last=Last}}|Bs], PosMap0) -> @@ -1081,8 +1261,8 @@ bsm_shortcut([], _PosMap) -> []. 
%%% Eliminate redundant bs_test_unit2 instructions. %%% -ssa_opt_bsm_units(#st{ssa=Linear}=St) -> - St#st{ssa=bsm_units(Linear, #{})}. +ssa_opt_bsm_units({#st{ssa=Linear}=St, FuncDb}) -> + {St#st{ssa=bsm_units(Linear, #{})}, FuncDb}. bsm_units([{L,#b_blk{last=#b_br{succ=Succ,fail=Fail}}=Block0} | Bs], UnitMaps0) -> UnitsIn = maps:get(L, UnitMaps0, #{}), @@ -1190,9 +1370,9 @@ bsm_units_join_1([], _MapA, Right) -> %%% to bs_put_string instructions in later pass. %%% -ssa_opt_bs_puts(#st{ssa=Linear0,cnt=Count0}=St) -> +ssa_opt_bs_puts({#st{ssa=Linear0,cnt=Count0}=St, FuncDb}) -> {Linear,Count} = opt_bs_puts(Linear0, Count0, []), - St#st{ssa=Linear,cnt=Count}. + {St#st{ssa=Linear,cnt=Count}, FuncDb}. opt_bs_puts([{L,#b_blk{is=Is}=Blk0}|Bs], Count0, Acc0) -> case Is of @@ -1410,9 +1590,9 @@ opt_bs_put_split_int_1(Int, L, R) -> %%% is_tuple_of_arity instruction by the loader. %%% -ssa_opt_tuple_size(#st{ssa=Linear0,cnt=Count0}=St) -> +ssa_opt_tuple_size({#st{ssa=Linear0,cnt=Count0}=St, FuncDb}) -> {Linear,Count} = opt_tup_size(Linear0, Count0, []), - St#st{ssa=Linear,cnt=Count}. + {St#st{ssa=Linear,cnt=Count}, FuncDb}. opt_tup_size([{L,#b_blk{is=Is,last=Last}=Blk}|Bs], Count0, Acc0) -> case {Is,Last} of @@ -1485,9 +1665,9 @@ opt_tup_size_is([], _, _, _Acc) -> none. %%% is 'true' or 'false' can be rewritten to a is_boolean test. %%% -ssa_opt_sw(#st{ssa=Linear0,cnt=Count0}=St) -> +ssa_opt_sw({#st{ssa=Linear0,cnt=Count0}=St, FuncDb}) -> {Linear,Count} = opt_sw(Linear0, #{}, Count0, []), - St#st{ssa=Linear,cnt=Count}. + {St#st{ssa=Linear,cnt=Count}, FuncDb}. opt_sw([{L,#b_blk{is=Is,last=#b_switch{}=Last0}=Blk0}|Bs], Phis0, Count0, Acc) -> Phis = opt_sw_phis(Is, Phis0), @@ -1578,9 +1758,10 @@ opt_sw_literals([], Acc) -> Acc. %%% Merge blocks. %%% -ssa_opt_merge_blocks(#st{ssa=Blocks}=St) -> +ssa_opt_merge_blocks({#st{ssa=Blocks}=St, FuncDb}) -> Preds = beam_ssa:predecessors(Blocks), - St#st{ssa=merge_blocks_1(beam_ssa:rpo(Blocks), Preds, Blocks)}. + Merged = merge_blocks_1(beam_ssa:rpo(Blocks), Preds, Blocks), + {St#st{ssa=Merged}, FuncDb}. merge_blocks_1([L|Ls], Preds0, Blocks0) -> case Preds0 of @@ -1590,6 +1771,7 @@ merge_blocks_1([L|Ls], Preds0, Blocks0) -> true -> #b_blk{is=Is0} = Blk0, #b_blk{is=Is1} = Blk1, + verify_merge_is(Is1), Is = Is0 ++ Is1, Blk = Blk1#b_blk{is=Is}, Blocks1 = maps:remove(L, Blocks0), @@ -1615,6 +1797,13 @@ merge_update_preds([], _, _, Preds) -> Preds. rename_label(From, From, To) -> To; rename_label(Lbl, _, _) -> Lbl. +verify_merge_is([#b_set{op=Op}|_]) -> + %% The merged block has only one predecessor, so it should not have any phi + %% nodes. + true = Op =/= phi; %Assertion. +verify_merge_is(_) -> + ok. + is_merge_allowed(_, _, #b_blk{is=[#b_set{op=peek_message}|_]}) -> false; is_merge_allowed(L, Blk0, #b_blk{}) -> @@ -1639,7 +1828,7 @@ is_merge_allowed(L, Blk0, #b_blk{}) -> %%% extracted values. %%% -ssa_opt_sink(#st{ssa=Blocks0}=St) -> +ssa_opt_sink({#st{ssa=Blocks0}=St, FuncDb}) -> Linear = beam_ssa:linearize(Blocks0), %% Create a map with all variables that define get_tuple_element @@ -1680,7 +1869,7 @@ ssa_opt_sink(#st{ssa=Blocks0}=St) -> From = maps:get(V, Defs), move_defs(V, From, To, A) end, Blocks0, DefLoc), - St#st{ssa=Blocks}. + {St#st{ssa=Blocks}, FuncDb}. 
def_blocks([{L,#b_blk{is=Is}}|Bs]) -> def_blocks_is(Is, L, def_blocks(Bs)); diff --git a/lib/compiler/src/beam_ssa_opt.hrl b/lib/compiler/src/beam_ssa_opt.hrl new file mode 100644 index 0000000000..21a45f562a --- /dev/null +++ b/lib/compiler/src/beam_ssa_opt.hrl @@ -0,0 +1,37 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2019. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% + +-include("beam_ssa.hrl"). + +-record(func_info, + {%% Local calls going in/out of this function. + in = ordsets:new() :: ordsets:ordset(func_id()), + out = ordsets:new() :: ordsets:ordset(func_id()), + + %% Whether the function is exported or not; some optimizations may + %% need to be suppressed if it is. + exported = true :: boolean()}). + +%% Per-function metadata used by various optimization passes to perform +%% module-level optimization. If a function is absent it means that +%% module-level optimization has been turned off for said function. + +-type func_id() :: beam_ssa:b_local(). +-type func_info_db() :: #{ func_id() => #func_info{} }. diff --git a/lib/compiler/test/Makefile b/lib/compiler/test/Makefile index 40428b7f2d..f042a5cb51 100644 --- a/lib/compiler/test/Makefile +++ b/lib/compiler/test/Makefile @@ -105,6 +105,8 @@ CORE_MODULES = \ lfe_andor_SUITE \ lfe_guard_SUITE +NO_MOD_OPT = $(NO_OPT) + NO_OPT_MODULES= $(NO_OPT:%=%_no_opt_SUITE) NO_OPT_ERL_FILES= $(NO_OPT_MODULES:%=%.erl) POST_OPT_MODULES= $(NO_OPT:%=%_post_opt_SUITE) @@ -113,6 +115,8 @@ INLINE_MODULES= $(INLINE:%=%_inline_SUITE) INLINE_ERL_FILES= $(INLINE_MODULES:%=%.erl) R21_MODULES= $(R21:%=%_r21_SUITE) R21_ERL_FILES= $(R21_MODULES:%=%.erl) +NO_MOD_OPT_MODULES= $(NO_MOD_OPT:%=%_no_module_opt_SUITE) +NO_MOD_OPT_ERL_FILES= $(NO_MOD_OPT_MODULES:%=%.erl) ERL_FILES= $(MODULES:%=%.erl) CORE_FILES= $(CORE_MODULES:%=%.core) @@ -142,7 +146,7 @@ EBIN = . 
# ---------------------------------------------------- make_emakefile: $(NO_OPT_ERL_FILES) $(POST_OPT_ERL_FILES) \ - $(INLINE_ERL_FILES) $(R21_ERL_FILES) + $(INLINE_ERL_FILES) $(R21_ERL_FILES) $(NO_MOD_OPT_ERL_FILES) $(ERL_TOP)/make/make_emakefile $(ERL_COMPILE_FLAGS) -o$(EBIN) $(MODULES) \ > $(EMAKEFILE) $(ERL_TOP)/make/make_emakefile +no_copt +no_postopt \ @@ -154,6 +158,8 @@ make_emakefile: $(NO_OPT_ERL_FILES) $(POST_OPT_ERL_FILES) \ -o$(EBIN) $(INLINE_MODULES) >> $(EMAKEFILE) $(ERL_TOP)/make/make_emakefile +r21 $(ERL_COMPILE_FLAGS) \ -o$(EBIN) $(R21_MODULES) >> $(EMAKEFILE) + $(ERL_TOP)/make/make_emakefile +no_module_opt $(ERL_COMPILE_FLAGS) \ + -o$(EBIN) $(NO_MOD_OPT_MODULES) >> $(EMAKEFILE) $(ERL_TOP)/make/make_emakefile +from_core $(ERL_COMPILE_FLAGS) \ -o$(EBIN) $(CORE_MODULES) >> $(EMAKEFILE) @@ -183,6 +189,9 @@ docs: %_r21_SUITE.erl: %_SUITE.erl sed -e 's;-module($(basename $<));-module($(basename $@));' $< > $@ +%_no_module_opt_SUITE.erl: %_SUITE.erl + sed -e 's;-module($(basename $<));-module($(basename $@));' $< > $@ + # ---------------------------------------------------- # Release Target # ---------------------------------------------------- @@ -195,7 +204,8 @@ release_tests_spec: make_emakefile $(INSTALL_DATA) compiler.spec compiler.cover \ $(EMAKEFILE) $(ERL_FILES) "$(RELSYSDIR)" $(INSTALL_DATA) $(NO_OPT_ERL_FILES) $(POST_OPT_ERL_FILES) \ - $(INLINE_ERL_FILES) $(R21_ERL_FILES) "$(RELSYSDIR)" + $(INLINE_ERL_FILES) $(R21_ERL_FILES) \ + $(NO_MOD_OPT_ERL_FILES) "$(RELSYSDIR)" $(INSTALL_DATA) $(CORE_FILES) "$(RELSYSDIR)" for file in $(ERL_DUMMY_FILES); do \ module=`basename $$file .erl`; \ diff --git a/lib/compiler/test/test_lib.erl b/lib/compiler/test/test_lib.erl index 4502f5b68a..08f2e8fae9 100644 --- a/lib/compiler/test/test_lib.erl +++ b/lib/compiler/test/test_lib.erl @@ -81,6 +81,7 @@ opt_opts(Mod) -> (no_put_tuple2) -> true; (no_bsm3) -> true; (no_bsm_opt) -> true; + (no_module_opt) -> true; (_) -> false end, Opts). @@ -93,8 +94,9 @@ get_data_dir(Config) -> Opts = [{return,list}], Data1 = re:replace(Data0, "_no_opt_SUITE", "_SUITE", Opts), Data2 = re:replace(Data1, "_post_opt_SUITE", "_SUITE", Opts), - Data = re:replace(Data2, "_inline_SUITE", "_SUITE", Opts), - re:replace(Data, "_r21_SUITE", "_SUITE", Opts). + Data3 = re:replace(Data2, "_inline_SUITE", "_SUITE", Opts), + Data4 = re:replace(Data3, "_r21_SUITE", "_SUITE", Opts), + re:replace(Data4, "_no_module_opt_SUITE", "_SUITE", Opts). is_cloned_mod(Mod) -> is_cloned_mod_1(atom_to_list(Mod)). @@ -105,6 +107,7 @@ is_cloned_mod_1("no_opt_SUITE") -> true; is_cloned_mod_1("post_opt_SUITE") -> true; is_cloned_mod_1("inline_SUITE") -> true; is_cloned_mod_1("21_SUITE") -> true; +is_cloned_mod_1("no_module_opt_SUITE") -> true; is_cloned_mod_1([_|T]) -> is_cloned_mod_1(T); is_cloned_mod_1([]) -> false. -- cgit v1.2.3 From 294d66a295f6c2101fe3c2da630979ad4e736c08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?John=20H=C3=B6gberg?= Date: Mon, 21 Jan 2019 07:25:47 +0100 Subject: compiler: Introduce module-level type optimization This commit lets the type optimization pass work across functions, tracking return and argument types to eliminate redundant tests. 
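As a rough illustration of the idea (a hypothetical module, not part of this patch):

    -module(example).
    -export([f/0, g/0]).

    f() -> inc(1).
    g() -> inc(2).

    inc(N) when is_integer(N) -> N + 1.

Since inc/1 is not exported and both local call sites pass integer literals, the recorded argument types let the pass drop the now-redundant is_integer/1 test in inc/1, and the integer return type of inc/1 is in turn known at the call sites in f/0 and g/0.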
--- erts/emulator/test/exception_SUITE.erl | 5 + lib/compiler/src/beam_ssa_codegen.erl | 17 +- lib/compiler/src/beam_ssa_opt.erl | 32 ++- lib/compiler/src/beam_ssa_opt.hrl | 20 +- lib/compiler/src/beam_ssa_pre_codegen.erl | 8 +- lib/compiler/src/beam_ssa_type.erl | 343 +++++++++++++++++++++++++----- lib/compiler/src/compile.erl | 4 + lib/compiler/test/apply_SUITE.erl | 8 + lib/compiler/test/compile_SUITE.erl | 3 +- lib/compiler/test/test_lib.erl | 1 + 10 files changed, 367 insertions(+), 74 deletions(-) diff --git a/erts/emulator/test/exception_SUITE.erl b/erts/emulator/test/exception_SUITE.erl index aec66cb9a3..c4d9ea515a 100644 --- a/erts/emulator/test/exception_SUITE.erl +++ b/erts/emulator/test/exception_SUITE.erl @@ -36,6 +36,11 @@ %% during compilation instead of at runtime, so do not perform this analysis. -compile([{hipe, [no_icode_range]}]). +%% Module-level type optimization propagates the constants used when testing +%% increment1/1 and increment2/1, which makes it test something completely +%% different, so we're turning it off. +-compile(no_module_opt). + suite() -> [{ct_hooks,[ts_install_cth]}, {timetrap, {minutes, 1}}]. diff --git a/lib/compiler/src/beam_ssa_codegen.erl b/lib/compiler/src/beam_ssa_codegen.erl index fe1a0c8480..c2d5035b19 100644 --- a/lib/compiler/src/beam_ssa_codegen.erl +++ b/lib/compiler/src/beam_ssa_codegen.erl @@ -161,7 +161,7 @@ add_parameter_annos([{label, _}=Entry | Body], Anno) -> (_K, _V, Acc) -> Acc end, [], maps:get(registers, Anno)), - [Entry | Annos] ++ Body. + [Entry | sort(Annos)] ++ Body. cg_fun(Blocks, St0) -> Linear0 = linearize(Blocks), @@ -1449,7 +1449,12 @@ cg_call(#cg_set{anno=Anno,op=call,dst=Dst0,args=[#b_local{}=Func0|Args0]}, Line = call_line(Where, local, Anno), Call = build_call(call, Arity, {f,FuncLbl}, Context, Dst), Is = setup_args(Args, Anno, Context, St) ++ Line ++ Call, - {Is,St}; + case Anno of + #{ result_type := Info } -> + {Is ++ [{'%', {type_info, Dst, Info}}], St}; + #{} -> + {Is, St} + end; cg_call(#cg_set{anno=Anno0,op=call,dst=Dst0,args=[#b_remote{}=Func0|Args0]}, Where, Context, St) -> [Dst|Args] = beam_args([Dst0|Args0], St), @@ -1725,6 +1730,14 @@ copy(Src, Dst) -> [{move,Src,Dst}]. force_reg({literal,_}=Lit, Reg) -> {Reg,[{move,Lit,Reg}]}; +force_reg({integer,_}=Lit, Reg) -> + {Reg,[{move,Lit,Reg}]}; +force_reg({atom,_}=Lit, Reg) -> + {Reg,[{move,Lit,Reg}]}; +force_reg({float,_}=Lit, Reg) -> + {Reg,[{move,Lit,Reg}]}; +force_reg(nil=Lit, Reg) -> + {Reg,[{move,Lit,Reg}]}; force_reg({Kind,_}=R, _) when Kind =:= x; Kind =:= y -> {R,[]}. diff --git a/lib/compiler/src/beam_ssa_opt.erl b/lib/compiler/src/beam_ssa_opt.erl index 93c5922547..3685c09a2b 100644 --- a/lib/compiler/src/beam_ssa_opt.erl +++ b/lib/compiler/src/beam_ssa_opt.erl @@ -39,7 +39,7 @@ -include("beam_ssa_opt.hrl"). --import(lists, [append/1,foldl/3,keyfind/3,member/2, +-import(lists, [append/1,duplicate/2,foldl/3,keyfind/3,member/2, reverse/1,reverse/2, splitwith/2,sort/1,takewhile/2,unzip/1]). @@ -146,8 +146,8 @@ prologue_passes(Opts) -> ?PASS(ssa_opt_linearize), ?PASS(ssa_opt_tuple_size), ?PASS(ssa_opt_record), - ?PASS(ssa_opt_cse) %Helps the first type pass. - ], + ?PASS(ssa_opt_cse), %Helps the first type pass. + ?PASS(ssa_opt_type_start)], passes_1(Ps, Opts). 
%% These passes all benefit from each other (in roughly this order), so they @@ -157,12 +157,13 @@ repeated_passes(Opts) -> ?PASS(ssa_opt_bs_puts), ?PASS(ssa_opt_dead), ?PASS(ssa_opt_cse), - ?PASS(ssa_opt_type)], %Must run after ssa_opt_dead to + ?PASS(ssa_opt_type_continue)], %Must run after ssa_opt_dead to %clean up phi nodes. passes_1(Ps, Opts). epilogue_passes(Opts) -> - Ps = [?PASS(ssa_opt_float), + Ps = [?PASS(ssa_opt_type_finish), + ?PASS(ssa_opt_float), ?PASS(ssa_opt_live), %One last time to clean up the %mess left by the float pass. ?PASS(ssa_opt_bsm), @@ -199,18 +200,21 @@ build_func_db(#b_module{body=Fs,exports=Exports}) -> throw:load_nif -> #{} end. -fdb_1([#b_function{ bs=Bs }=F | Fs], Exports, FuncDb0) -> +fdb_1([#b_function{ args=Args,bs=Bs }=F | Fs], Exports, FuncDb0) -> Id = get_func_id(F), #b_local{name=#b_literal{val=Name}, arity=Arity} = Id, Exported = gb_sets:is_element({Name, Arity}, Exports), + ArgTypes = duplicate(length(Args), #{}), FuncDb1 = case FuncDb0 of %% We may have an entry already if someone's called us. #{ Id := Info } -> - FuncDb0#{ Id := Info#func_info{ exported=Exported }}; + FuncDb0#{ Id := Info#func_info{ exported=Exported, + arg_types=ArgTypes }}; #{} -> - FuncDb0#{ Id => #func_info{ exported=Exported }} + FuncDb0#{ Id => #func_info{ exported=Exported, + arg_types=ArgTypes }} end, FuncDb = beam_ssa:fold_rpo(fun(_L, #b_blk{is=Is}, FuncDb) -> @@ -293,10 +297,18 @@ ssa_opt_dead({#st{ssa=Linear}=St, FuncDb}) -> ssa_opt_linearize({#st{ssa=Blocks}=St, FuncDb}) -> {St#st{ssa=beam_ssa:linearize(Blocks)}, FuncDb}. -ssa_opt_type({#st{ssa=Linear0,args=Args}=St0, FuncDb}) -> - Linear = beam_ssa_type:opt(Linear0, Args), +ssa_opt_type_start({#st{ssa=Linear0,args=Args,anno=Anno}=St0, FuncDb0}) -> + {Linear, FuncDb} = beam_ssa_type:opt_start(Linear0, Args, Anno, FuncDb0), {St0#st{ssa=Linear}, FuncDb}. +ssa_opt_type_continue({#st{ssa=Linear0,args=Args,anno=Anno}=St0, FuncDb0}) -> + {Linear, FuncDb} = beam_ssa_type:opt_continue(Linear0, Args, Anno, FuncDb0), + {St0#st{ssa=Linear}, FuncDb}. + +ssa_opt_type_finish({#st{args=Args,anno=Anno0}=St0, FuncDb0}) -> + {Anno, FuncDb} = beam_ssa_type:opt_finish(Args, Anno0, FuncDb0), + {St0#st{anno=Anno}, FuncDb}. + ssa_opt_blockify({#st{ssa=Linear}=St, FuncDb}) -> {St#st{ssa=maps:from_list(Linear)}, FuncDb}. diff --git a/lib/compiler/src/beam_ssa_opt.hrl b/lib/compiler/src/beam_ssa_opt.hrl index 21a45f562a..37711a6f48 100644 --- a/lib/compiler/src/beam_ssa_opt.hrl +++ b/lib/compiler/src/beam_ssa_opt.hrl @@ -27,11 +27,27 @@ %% Whether the function is exported or not; some optimizations may %% need to be suppressed if it is. - exported = true :: boolean()}). + exported = true :: boolean(), + + %% The inferred types of each argument (as opposed to parameter), + %% indexed by call site. + %% + %% This is more effective than the naive approach of joining into a + %% "parameter_type" as we go as it lets us narrow parameter types + %% without having to visit all callers on each pass, which helps a lot + %% when dealing with co-recursive functions. + arg_types = [] :: list(arg_type_map()), + + %% The inferred return type of this function, this is either [type()] + %% or [] to note absence. + ret_type = [] :: list()}). + +-type arg_key() :: {CallerId :: func_id(), + CallDst :: beam_ssa:b_var()}. +-type arg_type_map() :: #{ arg_key() => term() }. %% Per-function metadata used by various optimization passes to perform %% module-level optimization. 
If a function is absent it means that %% module-level optimization has been turned off for said function. - -type func_id() :: beam_ssa:b_local(). -type func_info_db() :: #{ func_id() => #func_info{} }. diff --git a/lib/compiler/src/beam_ssa_pre_codegen.erl b/lib/compiler/src/beam_ssa_pre_codegen.erl index fa1b7bb71e..03d193db4d 100644 --- a/lib/compiler/src/beam_ssa_pre_codegen.erl +++ b/lib/compiler/src/beam_ssa_pre_codegen.erl @@ -1031,7 +1031,7 @@ need_frame_1([#b_set{op=call,args=[Func|_]}|Is], Context) -> case Func of #b_remote{mod=#b_literal{val=Mod}, name=#b_literal{val=Name}, - arity=Arity} -> + arity=Arity} when is_atom(Mod), is_atom(Name) -> case erl_bifs:is_exit_bif(Mod, Name, Arity) of true -> false; @@ -1993,6 +1993,12 @@ reserve_zregs(Blocks, Intervals, Res) -> end, beam_ssa:fold_rpo(F, [0], Res, Blocks). +reserve_zreg([#b_set{op=call,dst=Dst}], + #b_br{bool=Dst}, _ShortLived, A) -> + %% If type optimization has determined that the result of a call can be + %% used directly in a branch, we must avoid reserving a z register or code + %% generation will fail. + A; reserve_zreg([#b_set{op={bif,tuple_size},dst=Dst}, #b_set{op={bif,'=:='},args=[Dst,Val]}], Last, ShortLived, A0) -> case {Val,Last} of diff --git a/lib/compiler/src/beam_ssa_type.erl b/lib/compiler/src/beam_ssa_type.erl index fcfb7b86f6..38ea5e6914 100644 --- a/lib/compiler/src/beam_ssa_type.erl +++ b/lib/compiler/src/beam_ssa_type.erl @@ -19,19 +19,22 @@ %% -module(beam_ssa_type). --export([opt/2]). +-export([opt_start/4, opt_continue/4, opt_finish/3]). --include("beam_ssa.hrl"). +-include("beam_ssa_opt.hrl"). -import(lists, [all/2,any/2,droplast/1,foldl/3,last/1,member/2, partition/2,reverse/1,sort/1]). -define(UNICODE_INT, #t_integer{elements={0,16#10FFFF}}). --record(d, {ds :: #{beam_ssa:b_var():=beam_ssa:b_set()}, - ls :: #{beam_ssa:label():=type_db()}, - once :: cerl_sets:set(beam_ssa:b_var()), - sub :: #{beam_ssa:b_var():=beam_ssa:value()} - }). +-record(d, + {ds :: #{beam_ssa:b_var():=beam_ssa:b_set()}, + ls :: #{beam_ssa:label():=type_db()}, + once :: cerl_sets:set(beam_ssa:b_var()), + func_id :: func_id(), + func_db :: func_info_db(), + sub = #{} :: #{beam_ssa:b_var():=beam_ssa:value()}, + ret_type = [] :: [type()]}). -define(ATOM_SET_SIZE, 5). @@ -49,36 +52,155 @@ {'binary',pos_integer()} | 'cons' | 'float' | 'list' | 'map' | 'nil' |'number'. -type type_db() :: #{beam_ssa:var_name():=type()}. --spec opt([{Label0,Block0}], Args) -> [{Label,Block}] when - Label0 :: beam_ssa:label(), - Block0 :: beam_ssa:b_blk(), +-spec opt_start(Linear, Args, Anno, FuncDb) -> {Linear, FuncDb} when + Linear :: [{non_neg_integer(), beam_ssa:b_blk()}], Args :: [beam_ssa:b_var()], - Label :: beam_ssa:label(), - Block :: beam_ssa:b_blk(). - -opt(Linear, Args) -> - UsedOnce = used_once(Linear, Args), + Anno :: beam_ssa:anno(), + FuncDb :: func_info_db(). +opt_start(Linear, Args, Anno, FuncDb) -> + %% This is the first run through the module, so our arg_types can be + %% incomplete as we may not have visited all call sites at least once. Ts = maps:from_list([{V,any} || #b_var{}=V <- Args]), + opt_continue_1(Linear, Args, get_func_id(Anno), Ts, FuncDb). + +-spec opt_continue(Linear, Args, Anno, FuncDb) -> {Linear, FuncDb} when + Linear :: [{non_neg_integer(), beam_ssa:b_blk()}], + Args :: [beam_ssa:b_var()], + Anno :: beam_ssa:anno(), + FuncDb :: func_info_db(). 
+opt_continue(Linear, Args, Anno, FuncDb) -> + Id = get_func_id(Anno), + case FuncDb of + #{ Id := #func_info{exported=false,arg_types=ArgTypes} } -> + %% This is a local function and we're guaranteed to have visited + %% every call site at least once, so we know that the parameter + %% types are at least as narrow as the join of all argument types. + Ts = join_arg_types(Args, ArgTypes, Anno), + opt_continue_1(Linear, Args, Id, Ts, FuncDb); + #{} -> + %% We can't infer the parameter types of exported functions, nor + %% the ones where module-level optimization is disabled, but + %% running the pass again could still help other functions. + Ts = maps:from_list([{V,any} || #b_var{}=V <- Args]), + opt_continue_1(Linear, Args, Id, Ts, FuncDb) + end. + +join_arg_types(Args, ArgTypes, Anno) -> + %% We suppress type optimization for parameters that have already been + %% optimized by another pass, as they may have done things we have no idea + %% how to interpret and running them over could generate incorrect code. + ParamTypes = maps:get(parameter_type_info, Anno, #{}), + Ts0 = join_arg_types_1(Args, ArgTypes, #{}), + maps:fold(fun(Arg, _V, Ts) -> + maps:put(Arg, any, Ts) + end, Ts0, ParamTypes). + +join_arg_types_1([Arg | Args], [TM | TMs], Ts) when map_size(TM) =/= 0 -> + join_arg_types_1(Args, TMs, Ts#{ Arg => join(maps:values(TM))}); +join_arg_types_1([Arg | Args], [_TM | TMs], Ts) -> + join_arg_types_1(Args, TMs, Ts#{ Arg => any }); +join_arg_types_1([], [], Ts) -> + Ts. + +-spec opt_continue_1(Linear, Args, Id, Ts, FuncDb) -> Result when + Linear :: [{non_neg_integer(), beam_ssa:b_blk()}], + Args :: [beam_ssa:b_var()], + Id :: func_id(), + Ts :: type_db(), + FuncDb :: func_info_db(), + Result :: {Linear, FuncDb}. +opt_continue_1(Linear0, Args, Id, Ts, FuncDb0) -> + UsedOnce = used_once(Linear0, Args), FakeCall = #b_set{op=call,args=[#b_remote{mod=#b_literal{val=unknown}, name=#b_literal{val=unknown}, arity=0}]}, Defs = maps:from_list([{Var,FakeCall#b_set{dst=Var}} || #b_var{}=Var <- Args]), - D = #d{ds=Defs,ls=#{0=>Ts,?BADARG_BLOCK=>#{}}, - once=UsedOnce,sub=#{}}, - opt_1(Linear, D). -opt_1([{L,Blk}|Bs], #d{ls=Ls}=D) -> + D = #d{ func_db=FuncDb0, + func_id=Id, + ds=Defs, + ls=#{0=>Ts,?BADARG_BLOCK=>#{}}, + once=UsedOnce }, + + {Linear, FuncDb, NewRet} = opt_1(Linear0, D, []), + + case FuncDb of + #{ Id := Entry0 } -> + Entry = Entry0#func_info{ret_type=NewRet}, + {Linear, FuncDb#{ Id := Entry }}; + #{} -> + %% Module-level optimizations have been turned off for this + %% function. + {Linear, FuncDb} + end. + +-spec opt_finish(Args, Anno, FuncDb) -> {Anno, FuncDb} when + Args :: [beam_ssa:b_var()], + Anno :: beam_ssa:anno(), + FuncDb :: func_info_db(). +opt_finish(Args, Anno, FuncDb) -> + Id = get_func_id(Anno), + case FuncDb of + #{ Id := #func_info{exported=false,arg_types=ArgTypes} } -> + ParamInfo0 = maps:get(parameter_type_info, Anno, #{}), + ParamInfo = opt_finish_1(Args, ArgTypes, ParamInfo0), + {Anno#{ parameter_type_info => ParamInfo }, FuncDb}; + #{} -> + {Anno, FuncDb} + end. 
+ +opt_finish_1([Arg | Args], [TypeMap | TypeMaps], ParamInfo) + when is_map_key(Arg, ParamInfo); %% See join_arg_types/3 + map_size(TypeMap) =:= 0 -> + opt_finish_1(Args, TypeMaps, ParamInfo); +opt_finish_1([Arg | Args], [TypeMap | TypeMaps], ParamInfo0) -> + case join(maps:values(TypeMap)) of + any -> + opt_finish_1(Args, TypeMaps, ParamInfo0); + JoinedType -> + JoinedType = verified_type(JoinedType), + ParamInfo = ParamInfo0#{ Arg => validator_anno(JoinedType) }, + opt_finish_1(Args, TypeMaps, ParamInfo) + end; +opt_finish_1([], [], ParamInfo) -> + ParamInfo. + +validator_anno(#t_tuple{size=Size,exact=Exact}) -> + beam_validator:type_anno(tuple, Size, Exact); +validator_anno(#t_integer{elements={Same,Same}}) -> + beam_validator:type_anno(integer, Same); +validator_anno(#t_integer{}) -> + beam_validator:type_anno(integer); +validator_anno(float) -> + beam_validator:type_anno(float); +validator_anno(#t_atom{elements=[Val]}) -> + beam_validator:type_anno(atom, Val); +validator_anno(#t_atom{}=A) -> + case t_is_boolean(A) of + true -> beam_validator:type_anno(bool); + false -> beam_validator:type_anno(atom) + end; +validator_anno(T) -> + beam_validator:type_anno(T). + +get_func_id(Anno) -> + #{func_info:={_Mod, Name, Arity}} = Anno, + #b_local{name=#b_literal{val=Name}, arity=Arity}. + +opt_1([{L,Blk}|Bs], #d{ls=Ls}=D, Acc) -> case Ls of #{L:=Ts} -> - opt_2(L, Blk, Bs, Ts, D); + opt_2(L, Blk, Bs, Ts, D, Acc); #{} -> %% This block is never reached. Discard it. - opt_1(Bs, D) + opt_1(Bs, D, Acc) end; -opt_1([], #d{}) -> []. +opt_1([], D, Acc) -> + #d{func_db=FuncDb,ret_type=NewRet} = D, + {reverse(Acc), FuncDb, NewRet}. -opt_2(L, #b_blk{is=Is0}=Blk0, Bs, Ts, #d{sub=Sub}=D0) -> +opt_2(L, #b_blk{is=Is0}=Blk0, Bs, Ts, #d{sub=Sub}=D0, Acc) -> case Is0 of [#b_set{op=call,dst=Dst, args=[#b_remote{mod=#b_literal{val=Mod}, @@ -94,34 +216,43 @@ opt_2(L, #b_blk{is=Is0}=Blk0, Bs, Ts, #d{sub=Sub}=D0) -> Ret = #b_ret{arg=Dst}, Blk = Blk0#b_blk{is=[I],last=Ret}, Ls = maps:remove(L, D0#d.ls), - D = D0#d{ls=Ls}, - [{L,Blk}|opt_1(Bs, D)]; + + %% We potentially lack a return value. + RetType = join([none | D0#d.ret_type]), + + D = D0#d{ls=Ls,ret_type=[RetType]}, + opt_1(Bs, D, [{L,Blk} | Acc]); false -> - opt_3(L, Blk0, Bs, Ts, D0) + opt_3(L, Blk0, Bs, Ts, D0, Acc) end; _ -> - opt_3(L, Blk0, Bs, Ts, D0) + opt_3(L, Blk0, Bs, Ts, D0, Acc) end. opt_3(L, #b_blk{is=Is0,last=Last0}=Blk0, Bs, Ts0, - #d{ds=Ds0,ls=Ls0,sub=Sub0}=D0) -> - {Is,Ts,Ds,Sub} = opt_is(Is0, Ts0, Ds0, Ls0, Sub0, []), - D1 = D0#d{ds=Ds,sub=Sub}, - Last1 = simplify_terminator(Last0, Sub, Ts), + #d{ds=Ds0,ls=Ls0,sub=Sub0,func_db=Fdb0}=D0, Acc) -> + {Is,Ts,Ds,Fdb,Sub} = opt_is(Is0, Ts0, Ds0, Fdb0, Ls0, D0, Sub0, []), + D1 = D0#d{ds=Ds,sub=Sub,func_db=Fdb}, + Last1 = simplify_terminator(Last0, Sub, Ts, Ds), Last = opt_terminator(Last1, Ts, Ds), D = update_successors(Last, Ts, D1), Blk = Blk0#b_blk{is=Is,last=Last}, - [{L,Blk}|opt_1(Bs, D)]. + opt_1(Bs, D, [{L,Blk} | Acc]). -simplify_terminator(#b_br{bool=Bool}=Br, Sub, Ts) -> +simplify_terminator(#b_br{bool=Bool}=Br, Sub, Ts, _Ds) -> Br#b_br{bool=simplify_arg(Bool, Sub, Ts)}; -simplify_terminator(#b_switch{arg=Arg}=Sw, Sub, Ts) -> +simplify_terminator(#b_switch{arg=Arg}=Sw, Sub, Ts, _Ds) -> Sw#b_switch{arg=simplify_arg(Arg, Sub, Ts)}; -simplify_terminator(#b_ret{arg=Arg}=Ret, Sub, Ts) -> - Ret#b_ret{arg=simplify_arg(Arg, Sub, Ts)}. +simplify_terminator(#b_ret{arg=Arg}=Ret, Sub, Ts, Ds) -> + %% Reducing the result of a call to a literal (fairly common for 'ok') + %% breaks tail call optimization. 
+ case Ds of + #{ Arg := #b_set{op=call}} -> Ret; + #{} -> Ret#b_ret{arg=simplify_arg(Arg, Sub, Ts)} + end. opt_is([#b_set{op=phi,dst=Dst,args=Args0}=I0|Is], - Ts0, Ds0, Ls, Sub0, Acc) -> + Ts0, Ds0, Fdb, Ls, D, Sub0, Acc) -> %% Simplify the phi node by removing all predecessor blocks that no %% longer exists or no longer branches to this block. Args = [{simplify_arg(Arg, Sub0, Ts0),From} || @@ -132,28 +263,61 @@ opt_is([#b_set{op=phi,dst=Dst,args=Args0}=I0|Is], %% value or if the values are identical. [{Val,_}|_] = Args, Sub = Sub0#{Dst=>Val}, - opt_is(Is, Ts0, Ds0, Ls, Sub, Acc); + opt_is(Is, Ts0, Ds0, Fdb, Ls, D, Sub, Acc); false -> I = I0#b_set{args=Args}, Ts = update_types(I, Ts0, Ds0), Ds = Ds0#{Dst=>I}, - opt_is(Is, Ts, Ds, Ls, Sub0, [I|Acc]) + opt_is(Is, Ts, Ds, Fdb, Ls, D, Sub0, [I|Acc]) end; -opt_is([#b_set{op=succeeded,args=Args0,dst=Dst}=I], - Ts0, Ds0, Ls, Sub0, Acc) -> - Args = simplify_args(Args0, Sub0, Ts0), - Type = type(succeeded, Args, Ts0, Ds0), - case get_literal_from_type(Type) of - #b_literal{}=Lit -> - Sub = Sub0#{Dst=>Lit}, - opt_is([], Ts0, Ds0, Ls, Sub, Acc); - none -> +opt_is([#b_set{op=call,args=Args0,dst=Dst}=I0 | Is], + Ts0, Ds0, Fdb0, Ls, D, Sub, Acc) -> + Args = simplify_args(Args0, Sub, Ts0), + I1 = beam_ssa:normalize(I0#b_set{args=Args}), + + %% This is a bit of a kludge; we know that any instruction whose return + %% type is 'none' will fail at runtime, but we don't yet have a way to cut + %% a block short so we move on like nothing nothing happened. + %% + %% This complicates argument type optimization as unreachable calls can + %% add types that will never occur, so we skip optimizing this call if + %% the type of any of its arguments is 'none'. + [_Callee | Rest] = Args, + case all(fun(Arg) -> get_type(Arg, Ts0) =/= none end, Rest) of + true -> + {Ts, Ds, Fdb, I} = opt_call(I1, D, Ts0, Ds0, Fdb0), + opt_is(Is, Ts, Ds, Fdb, Ls, D, Sub, [I|Acc]); + false -> + Ts = Ts0#{ Dst => any }, + Ds = Ds0#{ Dst => I1 }, + opt_is(Is, Ts, Ds, Fdb0, Ls, D, Sub, [I1|Acc]) + end; +opt_is([#b_set{op=succeeded,args=[Arg],dst=Dst}=I], + Ts0, Ds0, Fdb, Ls, D, Sub0, Acc) -> + case Ds0 of + #{ Arg := #b_set{op=call} } -> + %% The success check of a call is part of exception handling and + %% must not be optimized away. We still have to update its type + %% though. Ts = update_types(I, Ts0, Ds0), Ds = Ds0#{Dst=>I}, - opt_is([], Ts, Ds, Ls, Sub0, [I|Acc]) + + opt_is([], Ts, Ds, Fdb, Ls, D, Sub0, [I|Acc]); + #{} -> + Args = simplify_args([Arg], Sub0, Ts0), + Type = type(succeeded, Args, Ts0, Ds0), + case get_literal_from_type(Type) of + #b_literal{}=Lit -> + Sub = Sub0#{Dst=>Lit}, + opt_is([], Ts0, Ds0, Fdb, Ls, D, Sub, Acc); + none -> + Ts = Ts0#{Dst=>Type}, + Ds = Ds0#{Dst=>I}, + opt_is([], Ts, Ds, Fdb, Ls, D, Sub0, [I|Acc]) + end end; opt_is([#b_set{args=Args0,dst=Dst}=I0|Is], - Ts0, Ds0, Ls, Sub0, Acc) -> + Ts0, Ds0, Fdb, Ls, D, Sub0, Acc) -> Args = simplify_args(Args0, Sub0, Ts0), I1 = beam_ssa:normalize(I0#b_set{args=Args}), case simplify(I1, Ts0) of @@ -161,23 +325,76 @@ opt_is([#b_set{args=Args0,dst=Dst}=I0|Is], I = beam_ssa:normalize(I2), Ts = update_types(I, Ts0, Ds0), Ds = Ds0#{Dst=>I}, - opt_is(Is, Ts, Ds, Ls, Sub0, [I|Acc]); + opt_is(Is, Ts, Ds, Fdb, Ls, D, Sub0, [I|Acc]); #b_literal{}=Lit -> Sub = Sub0#{Dst=>Lit}, - opt_is(Is, Ts0, Ds0, Ls, Sub, Acc); + opt_is(Is, Ts0, Ds0, Fdb, Ls, D, Sub, Acc); #b_var{}=Var -> case Is of [#b_set{op=succeeded,dst=SuccDst,args=[Dst]}] -> %% We must remove this 'succeeded' instruction. 
Sub = Sub0#{Dst=>Var,SuccDst=>#b_literal{val=true}}, - opt_is([], Ts0, Ds0, Ls, Sub, Acc); + opt_is([], Ts0, Ds0, Fdb, Ls, D, Sub, Acc); _ -> Sub = Sub0#{Dst=>Var}, - opt_is(Is, Ts0, Ds0, Ls, Sub, Acc) + opt_is(Is, Ts0, Ds0, Fdb, Ls, D, Sub, Acc) end end; -opt_is([], Ts, Ds, _Ls, Sub, Acc) -> - {reverse(Acc),Ts,Ds,Sub}. +opt_is([], Ts, Ds, Fdb, _Ls, _D, Sub, Acc) -> + {reverse(Acc), Ts, Ds, Fdb, Sub}. + +opt_call(#b_set{dst=Dst,args=[#b_local{}=Callee|Args]}=I0, D, Ts0, Ds0, Fdb0) -> + {Ts, Ds, I} = opt_local_call(I0, Ts0, Ds0, Fdb0), + case Fdb0 of + #{ Callee := #func_info{exported=false,arg_types=ArgTypes0}=Info } -> + %% Update the argument types of *this exact call*, the types + %% will be joined later when the callee is optimized. + CallId = {D#d.func_id, Dst}, + ArgTypes = update_arg_types(Args, ArgTypes0, CallId, Ts0), + + Fdb = Fdb0#{ Callee => Info#func_info{arg_types=ArgTypes} }, + {Ts, Ds, Fdb, I}; + #{} -> + %% We can't narrow the argument types of exported functions as they + %% can receive anything as part of an external call. + {Ts, Ds, Fdb0, I} + end; +opt_call(#b_set{dst=Dst}=I, _D, Ts0, Ds0, Fdb) -> + Ts = update_types(I, Ts0, Ds0), + Ds = Ds0#{ Dst => I }, + {Ts, Ds, Fdb, I}. + +opt_local_call(#b_set{dst=Dst,args=[Id|_]}=I0, Ts0, Ds0, Fdb) -> + %% We skip propagating 'none' as we don't yet have a good way to cut a + %% block short. + Type = case Fdb of + #{ Id := #func_info{ret_type=[T]} } when T =/= none -> T; + #{} -> any + end, + I = case Type of + any -> I0; + _ -> beam_ssa:add_anno(result_type, validator_anno(Type), I0) + end, + Ts = Ts0#{ Dst => Type }, + Ds = Ds0#{ Dst => I }, + {Ts, Ds, I}. + +update_arg_types([Arg | Args], [TypeMap0 | TypeMaps], CallId, Ts) -> + %% Match contexts are treated as bitstrings when optimizing arguments, as + %% we don't yet support removing the "bs_start_match3" instruction. + NewType = case get_type(Arg, Ts) of + #t_bs_match{} -> {binary, 1}; + Type -> Type + end, + PrevType = maps:get(CallId, TypeMap0, NewType), + + %% The new type must be narrower than the old one. + true = meet(NewType, PrevType) =/= none, %Assertion. + + TypeMap = TypeMap0#{ CallId => NewType }, + [TypeMap | update_arg_types(Args, TypeMaps, CallId, Ts)]; +update_arg_types([], [], _CallId, _Ts) -> + []. simplify(#b_set{op={bif,'and'},args=Args}=I, Ts) -> case is_safe_bool_op(Args, Ts) of @@ -487,8 +704,18 @@ update_successors(#b_switch{arg=#b_var{}=V,fail=Fail,list=List}, Ts0, D0) -> update_successor(S, Ts0#{V=>T}, A) end, foldl(F, D, List) - end; -update_successors(#b_ret{}, _Ts, D) -> D. + end; +update_successors(#b_ret{arg=Arg}, Ts, D) -> + FuncId = D#d.func_id, + case D#d.ds of + #{ Arg := #b_set{op=call,args=[FuncId | _]} } -> + %% Returning a call to ourselves doesn't affect our own return + %% type. + D; + #{} -> + RetType = join([get_type(Arg, Ts) | D#d.ret_type]), + D#d{ret_type=[RetType]} + end. subtract_sw_list(V, List, Ts) -> Ts#{ V := sub_sw_list_1(get_type(V, Ts), List, Ts) }. diff --git a/lib/compiler/src/compile.erl b/lib/compiler/src/compile.erl index 73c66e6efc..53d3cec2d7 100644 --- a/lib/compiler/src/compile.erl +++ b/lib/compiler/src/compile.erl @@ -268,6 +268,10 @@ expand_opt(r21, Os) -> [no_put_tuple2 | expand_opt(no_bsm3, Os)]; expand_opt({debug_info_key,_}=O, Os) -> [encrypt_debug_info,O|Os]; +expand_opt(no_type_opt, Os) -> + [no_ssa_opt_type_start, + no_ssa_opt_type_continue, + no_ssa_opt_type_finish | Os]; expand_opt(O, Os) -> [O|Os]. 
expand_opt_before_21(Os) -> diff --git a/lib/compiler/test/apply_SUITE.erl b/lib/compiler/test/apply_SUITE.erl index 0f82a56fb7..2ee518b1a0 100644 --- a/lib/compiler/test/apply_SUITE.erl +++ b/lib/compiler/test/apply_SUITE.erl @@ -73,6 +73,7 @@ mfa(Config) when is_list(Config) -> {'EXIT',_} = (catch ?APPLY2(Mod, (id(bazzzzzz)), a, b)), {'EXIT',_} = (catch ?APPLY2({}, baz, a, b)), {'EXIT',_} = (catch ?APPLY2(?MODULE, [], a, b)), + {'EXIT',_} = (catch bad_literal_call(1)), ok = apply(Mod, foo, id([])), {[a,b|c]} = apply(Mod, bar, id([[a,b|c]])), @@ -92,6 +93,13 @@ mfa(Config) when is_list(Config) -> apply(Mod, foo, []). +%% The single call to this function with a literal argument caused type +%% optimization to swap out the 'mod' field of a #b_remote{}, which was +%% mishandled during code generation as it assumed that the module would always +%% be an atom. +bad_literal_call(I) -> + I:foo(). + foo() -> ok. diff --git a/lib/compiler/test/compile_SUITE.erl b/lib/compiler/test/compile_SUITE.erl index c17d63cd60..7452466666 100644 --- a/lib/compiler/test/compile_SUITE.erl +++ b/lib/compiler/test/compile_SUITE.erl @@ -1249,7 +1249,8 @@ do_opt_guards_fun([]) -> []. is_exception(guard_SUITE, {'-complex_not/1-fun-4-',1}) -> true; is_exception(guard_SUITE, {'-complex_not/1-fun-5-',1}) -> true; is_exception(guard_SUITE, {bad_guards,1}) -> true; -is_exception(guard_SUITE, {nested_not_2b,4}) -> true; +is_exception(guard_SUITE, {nested_not_2b,6}) -> true; %% w/o type optimization +is_exception(guard_SUITE, {nested_not_2b,2}) -> true; %% with type optimization is_exception(_, _) -> false. sys_pre_attributes(Config) -> diff --git a/lib/compiler/test/test_lib.erl b/lib/compiler/test/test_lib.erl index 08f2e8fae9..26149e11e6 100644 --- a/lib/compiler/test/test_lib.erl +++ b/lib/compiler/test/test_lib.erl @@ -82,6 +82,7 @@ opt_opts(Mod) -> (no_bsm3) -> true; (no_bsm_opt) -> true; (no_module_opt) -> true; + (no_type_opt) -> true; (_) -> false end, Opts). -- cgit v1.2.3
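The no_type_opt option added to compile.erl above is a convenience that expands to the three pass-specific options, so the new type optimization passes can be disabled together when debugging a suspected mis-optimization. A minimal sketch of how it might be used (the module name foo is hypothetical and only for illustration):

    %% Disable the SSA type optimization passes for one compilation.
    {ok, foo} = compile:file(foo, [no_type_opt, report]).

    %% Equivalent, since no_type_opt expands to the individual options:
    {ok, foo} = compile:file(foo, [no_ssa_opt_type_start,
                                   no_ssa_opt_type_continue,
                                   no_ssa_opt_type_finish]).

    %% From the command line:
    %%   erlc +no_type_opt foo.erl

compile:file/2 returns {ok, ModuleName} on success; the report option is only included so that any warnings or errors are printed.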