Diffstat (limited to 'lib/compiler/src')
-rw-r--r-- | lib/compiler/src/beam_except.erl          | 32
-rw-r--r-- | lib/compiler/src/beam_ssa_codegen.erl     | 18
-rw-r--r-- | lib/compiler/src/beam_ssa_dead.erl        | 87
-rw-r--r-- | lib/compiler/src/beam_ssa_opt.erl         | 18
-rw-r--r-- | lib/compiler/src/beam_ssa_pre_codegen.erl | 62
-rw-r--r-- | lib/compiler/src/beam_ssa_share.erl       |  8
-rw-r--r-- | lib/compiler/src/beam_ssa_type.erl        |  4
-rw-r--r-- | lib/compiler/src/beam_validator.erl       | 73
-rw-r--r-- | lib/compiler/src/compile.erl              |  7
9 files changed, 211 insertions, 98 deletions
diff --git a/lib/compiler/src/beam_except.erl b/lib/compiler/src/beam_except.erl
index 2305502800..2b9c1b0cf5 100644
--- a/lib/compiler/src/beam_except.erl
+++ b/lib/compiler/src/beam_except.erl
@@ -152,7 +152,7 @@ dig_out_fc(Arity, Is0) ->
                              ({test,_,_,_}) -> false;
                              (_) -> true
                           end, Is0),
-    {Regs,Acc} = dig_out_fc_1(reverse(Is), Regs0, Acc0),
+    {Regs,Acc} = dig_out_fc_1(reverse(Is), Arity, Regs0, Acc0),
     case Regs of
         #{{x,0}:={atom,function_clause},{x,1}:=Args} ->
             case moves_from_stack(Args, 0, []) of
@@ -165,19 +165,27 @@ dig_out_fc(Arity, Is0) ->
             no
     end.
 
-dig_out_fc_1([{block,Bl}|Is], Regs0, Acc) ->
+dig_out_fc_1([{block,Bl}|Is], Arity, Regs0, Acc) ->
     Regs = dig_out_fc_block(Bl, Regs0),
-    dig_out_fc_1(Is, Regs, Acc);
-dig_out_fc_1([{bs_set_position,_,_}=I|Is], Regs, Acc) ->
-    dig_out_fc_1(Is, Regs, [I|Acc]);
-dig_out_fc_1([{bs_get_tail,Src,Dst,Live0}|Is], Regs0, Acc) ->
-    Regs = prune_xregs(Live0, Regs0),
-    Live = dig_out_stack_live(Regs, Live0),
-    I = {bs_get_tail,Src,Dst,Live},
-    dig_out_fc_1(Is, Regs, [I|Acc]);
-dig_out_fc_1([_|_], _Regs, _Acc) ->
+    dig_out_fc_1(Is, Arity, Regs, Acc);
+dig_out_fc_1([{bs_set_position,_,_}=I|Is], Arity, Regs, Acc) ->
+    dig_out_fc_1(Is, Arity, Regs, [I|Acc]);
+dig_out_fc_1([{bs_get_tail,Src,Dst,Live0}|Is], Arity, Regs0, Acc) ->
+    case Src of
+        {x,X} when X < Arity ->
+            %% The heuristic for determining the number of live
+            %% registers is likely to give an incorrect result.
+            %% Give up.
+            {#{},[]};
+        _ ->
+            Regs = prune_xregs(Live0, Regs0),
+            Live = dig_out_stack_live(Regs, Live0),
+            I = {bs_get_tail,Src,Dst,Live},
+            dig_out_fc_1(Is, Arity, Regs, [I|Acc])
+    end;
+dig_out_fc_1([_|_], _Arity, _Regs, _Acc) ->
     {#{},[]};
-dig_out_fc_1([], Regs, Acc) ->
+dig_out_fc_1([], _Arity, Regs, Acc) ->
     {Regs,Acc}.
 
 dig_out_fc_block([{set,[],[],{alloc,Live,_}}|Is], Regs0) ->
diff --git a/lib/compiler/src/beam_ssa_codegen.erl b/lib/compiler/src/beam_ssa_codegen.erl
index 07f4c8b461..08641e2abc 100644
--- a/lib/compiler/src/beam_ssa_codegen.erl
+++ b/lib/compiler/src/beam_ssa_codegen.erl
@@ -764,9 +764,8 @@ defined(Linear, #cg{regs=Regs}) ->
 
 def([{L,#cg_blk{is=Is0,last=Last}=Blk0}|Bs], DefMap0, Regs) ->
     Def0 = def_get(L, DefMap0),
-    {Is,Def} = def_is(Is0, Regs, Def0, []),
-    Successors = successors(Last),
-    DefMap = def_successors(Successors, Def, DefMap0),
+    {Is,Def,MaybeDef} = def_is(Is0, Regs, Def0, []),
+    DefMap = def_successors(Last, Def, MaybeDef, DefMap0),
     Blk = Blk0#cg_blk{is=Is},
     [{L,Blk}|def(Bs, DefMap, Regs)];
 def([], _, _) -> [].
@@ -780,6 +779,11 @@ def_get(L, DefMap) ->
 def_is([#cg_alloc{anno=Anno0}=I0|Is], Regs, Def, Acc) ->
     I = I0#cg_alloc{anno=Anno0#{def_yregs=>Def}},
     def_is(Is, Regs, Def, [I|Acc]);
+def_is([#cg_set{op=succeeded,args=[Var]}=I], Regs, Def, Acc) ->
+    %% Var will only be defined on the success branch of the `br`
+    %% for this block.
+    MaybeDef = def_add_yreg(Var, [], Regs),
+    {reverse(Acc, [I]),Def,MaybeDef};
 def_is([#cg_set{op=kill_try_tag,args=[#b_var{}=Tag]}=I|Is], Regs, Def0, Acc) ->
     Def = ordsets:del_element(Tag, Def0),
     def_is(Is, Regs, Def, [I|Acc]);
@@ -822,7 +826,7 @@ def_is([#cg_set{anno=Anno0,dst=Dst}=I0|Is], Regs, Def0, Acc) ->
     Def = def_add_yreg(Dst, Def0, Regs),
     def_is(Is, Regs, Def, [I|Acc]);
 def_is([], _, Def, Acc) ->
-    {reverse(Acc),Def}.
+    {reverse(Acc),Def,[]}.
 
 def_add_yreg(Dst, Def, Regs) ->
     case is_yreg(Dst, Regs) of
         true -> ordsets:add_element(Dst, Def);
         false -> Def
     end.
+def_successors(#cg_br{bool=#b_var{},succ=Succ,fail=Fail}, Def, MaybeDef, DefMap0) ->
+    DefMap = def_successors([Fail], ordsets:subtract(Def, MaybeDef), DefMap0),
+    def_successors([Succ], Def, DefMap);
+def_successors(Last, Def, [], DefMap) ->
+    def_successors(successors(Last), Def, DefMap).
+
 def_successors([S|Ss], Def0, DefMap) ->
     case DefMap of
         #{S:=Def1} ->
diff --git a/lib/compiler/src/beam_ssa_dead.erl b/lib/compiler/src/beam_ssa_dead.erl
index 64b9b3e222..e78e4647a8 100644
--- a/lib/compiler/src/beam_ssa_dead.erl
+++ b/lib/compiler/src/beam_ssa_dead.erl
@@ -30,7 +30,7 @@
 -import(lists, [append/1,keymember/3,last/1,member/2,
                 takewhile/2,reverse/1]).
 
--type used_vars() :: #{beam_ssa:label():=ordsets:ordset(beam_ssa:var_name())}.
+-type used_vars() :: #{beam_ssa:label():=cerl_sets:set(beam_ssa:var_name())}.
 
 -type basic_type_test() :: atom() | {'is_tagged_tuple',pos_integer(),atom()}.
 -type type_test() :: basic_type_test() | {'not',basic_type_test()}.
@@ -90,13 +90,11 @@ shortcut_opt(#st{bs=Blocks}=St) ->
     %% the diff.)
     %%
     %% Unfortunately, processing the blocks in reverse post order
-    %% potentially makes the time complexity quadratic or even cubic if
-    %% the ordset of unset variables grows large, instead of
-    %% linear for post order processing. We try to still get reasonable
-    %% compilation times by optimizations that will keep the constant
-    %% factor as low as possible, and we try to avoid the cubic time
-    %% complexity by trying to keep the set of unset variables as small
-    %% as possible.
+    %% potentially makes the time complexity quadratic, instead of
+    %% linear for post order processing. We avoid drastic slowdowns by
+    %% limiting how far we search forward to a common block that
+    %% both the success and failure label will reach (see the comment
+    %% in the first clause of shortcut_2/5).
 
     Ls = beam_ssa:rpo(Blocks),
     shortcut_opt(Ls, #{}, St).
@@ -124,10 +122,15 @@ shortcut_terminator(#b_br{bool=#b_var{}=Bool,succ=Succ0,fail=Fail0}=Br,
                     Is, From, Bs, St0) ->
     St = St0#st{target=one_way},
     RelOp = get_rel_op(Bool, Is),
-    SuccBs = bind_var(Bool, #b_literal{val=true}, Bs),
+
+    %% The boolean in a `br` is seldom used by the successors. By
+    %% not binding its value unless it is actually used we might be able
+    %% to skip some work in shortcut/4 and sub/2.
+    SuccBs = bind_var_if_used(Succ0, Bool, #b_literal{val=true}, Bs, St),
     BrSucc = shortcut(Succ0, From, SuccBs, St#st{rel_op=RelOp}),
-    FailBs = bind_var(Bool, #b_literal{val=false}, Bs),
+    FailBs = bind_var_if_used(Fail0, Bool, #b_literal{val=false}, Bs, St),
     BrFail = shortcut(Fail0, From, FailBs, St#st{rel_op=invert_op(RelOp)}),
+
     case {BrSucc,BrFail} of
         {#b_br{bool=#b_literal{val=true},succ=Succ},
          #b_br{bool=#b_literal{val=true},succ=Fail}}
@@ -152,8 +155,14 @@ shortcut_switch([{Lit,L0}|T], Bool, From, Bs, St0) ->
     [{Lit,L}|shortcut_switch(T, Bool, From, Bs, St0)];
 shortcut_switch([], _, _, _, _) -> [].
 
+shortcut(L, _From, Bs, #st{rel_op=none,target=one_way}) when map_size(Bs) =:= 0 ->
+    %% There is no way that we can find a suitable branch, because there is no
+    %% relational operator stored, there are no bindings, and the block L can't
+    %% have any phi nodes from which we could pick bindings because when the target
+    %% is `one_way`, it implies the From block has a two-way `br` terminator.
+    #b_br{bool=#b_literal{val=true},succ=L,fail=L};
 shortcut(L, From, Bs, St) ->
-    shortcut_1(L, From, Bs, ordsets:new(), St).
+    shortcut_1(L, From, Bs, cerl_sets:new(), St).
 
 shortcut_1(L, From, Bs0, UnsetVars0, St) ->
     case shortcut_2(L, From, Bs0, UnsetVars0, St) of
@@ -170,7 +179,19 @@ shortcut_1(L, From, Bs0, UnsetVars0, St) ->
     end.
 
 %% Try to shortcut this block, branching to a successor.
-shortcut_2(L, From, Bs0, UnsetVars0, St) ->
+shortcut_2(L, From, Bs, UnsetVars, St) ->
+    case cerl_sets:size(UnsetVars) of
+        SetSize when SetSize > 128 ->
+            %% This is a heuristic to limit the search for a forced label
+            %% before it drastically slows down the compiler. Experiments
+            %% with scripts/diffable showed that limits larger than 31 did not
+            %% find any more opportunities for optimization.
+            none;
+        _SetSize ->
+            shortcut_3(L, From, Bs, UnsetVars, St)
+    end.
+
+shortcut_3(L, From, Bs0, UnsetVars0, St) ->
     #b_blk{is=Is,last=Last} = get_block(L, St),
     case eval_is(Is, From, Bs0, St) of
         none ->
@@ -347,7 +368,7 @@ update_unset_vars(L, Is, Br, UnsetVars, #st{skippable=Skippable}) ->
             %% Some variables defined in this block are used by
             %% successors. We must update the set of unset variables.
             SetInThisBlock = [V || #b_set{dst=V} <- Is],
-            ordsets:union(UnsetVars, ordsets:from_list(SetInThisBlock))
+            cerl_sets:union(UnsetVars, cerl_sets:from_list(SetInThisBlock))
     end.
 
 shortcut_two_way(#b_br{succ=Succ,fail=Fail}, From, Bs0, UnsetVars0, St0) ->
@@ -376,14 +397,14 @@ is_br_safe(UnsetVars, Br, #st{us=Us}=St) ->
 
             %% A two-way branch never branches to a phi node, so there
             %% is no need to check for phi nodes here.
-            not member(V, UnsetVars) andalso
-                ordsets:is_disjoint(Used0, UnsetVars) andalso
-                ordsets:is_disjoint(Used1, UnsetVars);
+            not cerl_sets:is_element(V, UnsetVars) andalso
+                cerl_sets:is_disjoint(Used0, UnsetVars) andalso
+                cerl_sets:is_disjoint(Used1, UnsetVars);
         #b_br{succ=Same,fail=Same} ->
             %% An unconditional branch must not jump to
             %% a phi node.
             not is_forbidden(Same, St) andalso
-                ordsets:is_disjoint(map_get(Same, Us), UnsetVars)
+                cerl_sets:is_disjoint(map_get(Same, Us), UnsetVars)
     end.
 
 is_forbidden(L, St) ->
@@ -500,6 +521,15 @@ eval_switch_1([], _Arg, _PrevOp, Fail) ->
     %% Fail is now either the failure label or 'none'.
     Fail.
 
+bind_var_if_used(L, Var, Val0, Bs, #st{us=Us}) ->
+    case cerl_sets:is_element(Var, map_get(L, Us)) of
+        true ->
+            Val = get_value(Val0, Bs),
+            Bs#{Var=>Val};
+        false ->
+            Bs
+    end.
+
 bind_var(Var, Val0, Bs) ->
     Val = get_value(Val0, Bs),
     Bs#{Var=>Val}.
@@ -989,7 +1019,7 @@ used_vars([{L,#b_blk{is=Is}=Blk}|Bs], UsedVars0, Skip0) ->
     %% shortcut_opt/1.
 
     Successors = beam_ssa:successors(Blk),
-    Used0 = used_vars_succ(Successors, L, UsedVars0, []),
+    Used0 = used_vars_succ(Successors, L, UsedVars0, cerl_sets:new()),
     Used = used_vars_blk(Blk, Used0),
     UsedVars = used_vars_phis(Is, L, Used, UsedVars0),
 
@@ -1000,8 +1030,8 @@ used_vars([{L,#b_blk{is=Is}=Blk}|Bs], UsedVars0, Skip0) ->
     %% shortcut_opt/1.
 
     Defined0 = [Def || #b_set{dst=Def} <- Is],
-    Defined = ordsets:from_list(Defined0),
-    MaySkip = ordsets:is_disjoint(Defined, Used0),
+    Defined = cerl_sets:from_list(Defined0),
+    MaySkip = cerl_sets:is_disjoint(Defined, Used0),
     case MaySkip of
         true ->
             Skip = Skip0#{L=>true},
@@ -1018,11 +1048,11 @@ used_vars_succ([S|Ss], L, LiveMap, Live0) ->
         #{Key:=Live} ->
             %% The successor has a phi node, and the value for
             %% this block in the phi node is a variable.
-            used_vars_succ(Ss, L, LiveMap, ordsets:union(Live, Live0));
+            used_vars_succ(Ss, L, LiveMap, cerl_sets:union(Live, Live0));
         #{S:=Live} ->
            %% No phi node in the successor, or the value for
            %% this block in the phi node is a literal.
-            used_vars_succ(Ss, L, LiveMap, ordsets:union(Live, Live0));
+            used_vars_succ(Ss, L, LiveMap, cerl_sets:union(Live, Live0));
         #{} ->
             %% A peek_message block which has not been processed yet.
             used_vars_succ(Ss, L, LiveMap, Live0)
     end
@@ -1040,7 +1070,7 @@ used_vars_phis(Is, L, Live0, UsedVars0) ->
     case [{P,V} || {#b_var{}=V,P} <- PhiArgs] of
         [_|_]=PhiVars ->
             PhiLive0 = rel2fam(PhiVars),
-            PhiLive = [{{L,P},ordsets:union(ordsets:from_list(Vs), Live0)} ||
+            PhiLive = [{{L,P},cerl_sets:union(cerl_sets:from_list(Vs), Live0)} ||
                           {P,Vs} <- PhiLive0],
             maps:merge(UsedVars, maps:from_list(PhiLive));
         [] ->
@@ -1050,14 +1080,14 @@ used_vars_phis(Is, L, Live0, UsedVars0) ->
     end.
 
 used_vars_blk(#b_blk{is=Is,last=Last}, Used0) ->
-    Used = ordsets:union(Used0, beam_ssa:used(Last)),
+    Used = cerl_sets:union(Used0, cerl_sets:from_list(beam_ssa:used(Last))),
     used_vars_is(reverse(Is), Used).
 
 used_vars_is([#b_set{op=phi}|Is], Used) ->
     used_vars_is(Is, Used);
 used_vars_is([#b_set{dst=Dst}=I|Is], Used0) ->
-    Used1 = ordsets:union(Used0, beam_ssa:used(I)),
-    Used = ordsets:del_element(Dst, Used1),
+    Used1 = cerl_sets:union(Used0, cerl_sets:from_list(beam_ssa:used(I))),
+    Used = cerl_sets:del_element(Dst, Used1),
     used_vars_is(Is, Used);
 used_vars_is([], Used) ->
     Used.
@@ -1066,8 +1096,9 @@ used_vars_is([], Used) ->
 %%% Common utilities.
 %%%
 
-sub(#b_set{args=Args}=I, Sub) ->
-    I#b_set{args=[sub_arg(A, Sub) || A <- Args]}.
+sub(#b_set{args=Args}=I, Sub) when map_size(Sub) =/= 0 ->
+    I#b_set{args=[sub_arg(A, Sub) || A <- Args]};
+sub(I, _Sub) -> I.
 
 sub_arg(#b_var{}=Old, Sub) ->
     case Sub of
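For illustration only (not part of the commit): the beam_ssa_dead.erl changes above replace ordsets, which are sorted lists, with cerl_sets, the map-backed set module that ships with the compiler application, so that membership and disjointness checks on a potentially large set of unset variables no longer turn into repeated list scans. The sketch below merely exercises the cerl_sets calls that appear in the patch; the module name and the variable atoms are made up.

    %% Illustrative sketch, not part of the patch. Mirrors the pattern used
    %% in update_unset_vars/5 and is_br_safe/3 above.
    -module(unset_vars_sketch).
    -export([demo/0]).

    demo() ->
        %% Variables defined in the current block (cf. SetInThisBlock).
        Defined = cerl_sets:from_list([v1, v2, v3]),
        %% Variables used by the block's successors.
        Used = cerl_sets:from_list([v7, v8]),
        %% Accumulated set of variables known not to be set on this path.
        UnsetVars0 = cerl_sets:new(),
        UnsetVars = cerl_sets:union(UnsetVars0, Defined),
        %% is_element/2 is a map lookup here; ordsets:is_element/2 would
        %% have to walk a list.
        true = cerl_sets:is_element(v1, UnsetVars),
        true = cerl_sets:is_disjoint(Used, UnsetVars),
        ok.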
diff --git a/lib/compiler/src/beam_ssa_opt.erl b/lib/compiler/src/beam_ssa_opt.erl
index 229edc6a1d..d87c66c272 100644
--- a/lib/compiler/src/beam_ssa_opt.erl
+++ b/lib/compiler/src/beam_ssa_opt.erl
@@ -1939,12 +1939,24 @@ verify_merge_is(_) ->
 
 is_merge_allowed(_, #b_blk{}, #b_blk{is=[#b_set{op=peek_message}|_]}) ->
     false;
-is_merge_allowed(L, #b_blk{last=#b_br{}}=Blk, #b_blk{}) ->
+is_merge_allowed(L, #b_blk{last=#b_br{}}=Blk, #b_blk{is=Is}) ->
     %% The predecessor block must have exactly one successor (L) for
     %% the merge to be safe.
     case beam_ssa:successors(Blk) of
-        [L] -> true;
-        [_|_] -> false
+        [L] ->
+            case Is of
+                [#b_set{op=phi,args=[_]}|_] ->
+                    %% The type optimizer pass must have been
+                    %% turned off, since it would have removed this
+                    %% redundant phi node. Refuse to merge the blocks
+                    %% to ensure that this phi node remains at the
+                    %% beginning of a block.
+                    false;
+                _ ->
+                    true
+            end;
+        [_|_] ->
+            false
     end;
 is_merge_allowed(_, #b_blk{last=#b_switch{}}, #b_blk{}) ->
     false.
diff --git a/lib/compiler/src/beam_ssa_pre_codegen.erl b/lib/compiler/src/beam_ssa_pre_codegen.erl
index 7ef604d444..7f816b9802 100644
--- a/lib/compiler/src/beam_ssa_pre_codegen.erl
+++ b/lib/compiler/src/beam_ssa_pre_codegen.erl
@@ -1463,25 +1463,51 @@ fix_receive([], _Defs, Blocks, Count) ->
     {Blocks,Count}.
 
 %% find_loop_exit([Label], Blocks) -> Label | none.
-%% Find the block to which control is transferred when the
-%% the receive loop is exited.
-
-find_loop_exit([L1,L2|_Ls], Blocks) ->
-    Path1 = beam_ssa:rpo([L1], Blocks),
-    Path2 = beam_ssa:rpo([L2], Blocks),
-    find_loop_exit_1(Path1, cerl_sets:from_list(Path2));
-find_loop_exit(_, _) -> none.
-
-find_loop_exit_1([?BADARG_BLOCK | T], OtherPath) ->
-    %% ?BADARG_BLOCK is a marker and not an actual block, so we can't consider
-    %% it to be a common block even if both paths cross it.
-    find_loop_exit_1(T, OtherPath);
-find_loop_exit_1([H|T], OtherPath) ->
-    case cerl_sets:is_element(H, OtherPath) of
-        true -> H;
-        false -> find_loop_exit_1(T, OtherPath)
+%% Given the list of all blocks with the remove_message instructions
+%% for this receive, find the block to which control is transferred
+%% when the receive loop is exited (if any).
+
+find_loop_exit([_,_|_]=RmBlocks, Blocks) ->
+    %% We used to only analyze the path from two of the remove_message
+    %% blocks. That would fail to find a common block if one or both
+    %% of the blocks happened to raise an exception. To be sure that
+    %% we always find a common block if there is one (shared by at
+    %% least two clauses), we must analyze the path from all
+    %% remove_message blocks.
+    {Dominators,_} = beam_ssa:dominators(Blocks),
+    RmSet = cerl_sets:from_list(RmBlocks),
+    Rpo = beam_ssa:rpo(RmBlocks, Blocks),
+    find_loop_exit_1(Rpo, RmSet, Dominators);
+find_loop_exit(_, _) ->
+    %% There is (at most) a single clause. There is no common
+    %% loop exit block.
+    none.
+
+find_loop_exit_1([?BADARG_BLOCK|Ls], RmSet, Dominators) ->
+    %% ?BADARG_BLOCK is a marker and not an actual block, so it is not
+    %% the block we are looking for.
+    find_loop_exit_1(Ls, RmSet, Dominators);
+find_loop_exit_1([L|Ls], RmSet, Dominators) ->
+    DomBy = map_get(L, Dominators),
+    case any(fun(E) -> cerl_sets:is_element(E, RmSet) end, DomBy) of
+        true ->
+            %% This block is dominated by one of the remove_message blocks,
+            %% which means that the block is part of only one clause.
+            %% It is not the block we are looking for.
+            find_loop_exit_1(Ls, RmSet, Dominators);
+        false ->
+            %% This block is the first block that is not dominated by
+            %% any of the blocks with remove_message instructions,
+            %% which means that at least two of the receive clauses
+            %% will ultimately transfer control to it. It is the block
+            %% we are looking for.
+            L
     end;
-find_loop_exit_1([], _) -> none.
+find_loop_exit_1([], _, _) ->
+    %% None of the clauses transfers control to a common block after the
+    %% receive statement. That means that the receive statement is at the
+    %% end of a function (or that all clauses raise exceptions).
+    none.
 
 %% find_rm_blocks(StartLabel, Blocks) -> [Label].
 %% Find all blocks that start with remove_message within the receive
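For illustration only (not part of the commit): the new find_loop_exit/2 looks at all blocks containing remove_message instructions and, using dominators, picks the first block in reverse post order that is not dominated by any single one of them, that is, the code that runs after the receive no matter which clause matched. The ordinary Erlang function below (names made up) shows the shape of code this targets: both clauses fall through to a common continuation. If every clause instead raised an exception, or the receive ended the function, there would be no such block and the new last clause of find_loop_exit_1/3 would return none.

    %% Illustrative source-level example, not part of the patch.
    -module(receive_exit_sketch).
    -export([wait_for_reply/1]).

    wait_for_reply(Ref) ->
        Result = receive
                     {Ref, ok} -> ok;
                     {Ref, {error, Reason}} -> {error, Reason}
                 end,
        %% Both receive clauses continue here, so the blocks holding their
        %% remove_message instructions share this block as a common
        %% successor -- the "loop exit" that find_loop_exit/2 returns.
        log_result(Result),
        Result.

    log_result(_Result) ->
        ok.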
diff --git a/lib/compiler/src/beam_ssa_share.erl b/lib/compiler/src/beam_ssa_share.erl
index 426efa2cc9..73983bd34a 100644
--- a/lib/compiler/src/beam_ssa_share.erl
+++ b/lib/compiler/src/beam_ssa_share.erl
@@ -303,8 +303,12 @@ canonical_is([#b_ret{arg=Arg}], VarMap, Acc0) ->
                   Acc0
           end,
     {{ret,canonical_arg(Arg, VarMap),Acc1},VarMap};
-canonical_is([#b_br{bool=#b_var{},fail=Fail}], VarMap, Acc) ->
-    {{br,succ,Fail,Acc},VarMap};
+canonical_is([#b_br{bool=#b_var{}=Arg,fail=Fail}], VarMap, Acc) ->
+    %% A previous buggy version of this code omitted the canonicalized
+    %% argument in the return value. Unfortunately, that worked most
+    %% of the time, except when the `br` terminator referenced a variable
+    %% defined in a previous block instead of in the same block.
+    {{br,canonical_arg(Arg, VarMap),succ,Fail,Acc},VarMap};
 canonical_is([#b_br{succ=Succ}], VarMap, Acc) ->
     {{br,Succ,Acc},VarMap};
 canonical_is([], VarMap, Acc) ->
diff --git a/lib/compiler/src/beam_ssa_type.erl b/lib/compiler/src/beam_ssa_type.erl
index 68920e7dd3..3c06c83e2e 100644
--- a/lib/compiler/src/beam_ssa_type.erl
+++ b/lib/compiler/src/beam_ssa_type.erl
@@ -160,6 +160,10 @@ opt_finish_1([Arg | Args], [TypeMap | TypeMaps], ParamInfo0) ->
     case join(maps:values(TypeMap)) of
         any ->
             opt_finish_1(Args, TypeMaps, ParamInfo0);
+        none ->
+            %% This function will never be called. Pretend that we don't
+            %% know the type for this argument.
+            opt_finish_1(Args, TypeMaps, ParamInfo0);
         JoinedType ->
             JoinedType = verified_type(JoinedType),
             ParamInfo = ParamInfo0#{ Arg => validator_anno(JoinedType) },
diff --git a/lib/compiler/src/beam_validator.erl b/lib/compiler/src/beam_validator.erl
index ebe9631e09..349d74eb58 100644
--- a/lib/compiler/src/beam_validator.erl
+++ b/lib/compiler/src/beam_validator.erl
@@ -1068,8 +1068,11 @@ verify_get_map(Fail, Src, List, Vst0) ->
 %%   {get_map_elements,{f,7},{x,1},{list,[{atom,a},{x,1},{atom,b},{x,2}]}}.
 %%
 %% If 'a' exists but not 'b', {x,1} is overwritten when we jump to {f,7}.
+%%
+%% We must be careful to preserve the uninitialized status for Y registers
+%% that have been allocated but not yet defined.
 clobber_map_vals([Key,Dst|T], Map, Vst0) ->
-    case is_reg_defined(Dst, Vst0) of
+    case is_reg_initialized(Dst, Vst0) of
         true ->
             Vst = extract_term(term, {bif,map_get}, [Key, Map], Dst, Vst0),
             clobber_map_vals(T, Map, Vst);
@@ -1079,6 +1082,17 @@ clobber_map_vals([], _Map, Vst) ->
     Vst.
 
+is_reg_initialized({x,_}=Reg, #vst{current=#st{xs=Xs}}) ->
+    is_map_key(Reg, Xs);
+is_reg_initialized({y,_}=Reg, #vst{current=#st{ys=Ys}}) ->
+    case Ys of
+        #{Reg:=Val} ->
+            Val =/= uninitialized;
+        #{} ->
+            false
+    end;
+is_reg_initialized(V, #vst{}) -> error({not_a_register, V}).
+
 extract_map_keys([Key,_Val|T]) ->
     [Key|extract_map_keys(T)];
 extract_map_keys([]) -> [].
@@ -1604,13 +1618,8 @@ infer_types_1(#value{op={bif,'=:='},args=[LHS,RHS]}) ->
     end;
 infer_types_1(#value{op={bif,element},args=[{integer,Index}=Key,Tuple]}) ->
     fun(Val, S) ->
-            case is_value_alive(Tuple, S) of
-                true ->
-                    Type = {tuple,[Index], #{ Key => get_term_type(Val, S) }},
-                    update_type(fun meet/2, Type, Tuple, S);
-                false ->
-                    S
-            end
+            Type = {tuple,[Index], #{ Key => get_term_type(Val, S) }},
+            update_type(fun meet/2, Type, Tuple, S)
     end;
 infer_types_1(#value{op={bif,is_atom},args=[Src]}) ->
     infer_type_test_bif({atom,[]}, Src);
@@ -1634,10 +1643,7 @@ infer_types_1(#value{op={bif,is_tuple},args=[Src]}) ->
     infer_type_test_bif({tuple,[0],#{}}, Src);
 infer_types_1(#value{op={bif,tuple_size}, args=[Tuple]}) ->
     fun({integer,Arity}, S) ->
-            case is_value_alive(Tuple, S) of
-                true -> update_type(fun meet/2, {tuple,Arity,#{}}, Tuple, S);
-                false -> S
-            end;
+            update_type(fun meet/2, {tuple,Arity,#{}}, Tuple, S);
        (_, S) -> S
     end;
 infer_types_1(_) ->
@@ -1645,10 +1651,7 @@ infer_types_1(_) ->
 
 infer_type_test_bif(Type, Src) ->
     fun({atom,true}, S) ->
-            case is_value_alive(Src, S) of
-                true -> update_type(fun meet/2, Type, Src, S);
-                false -> S
-            end;
+            update_type(fun meet/2, Type, Src, S);
        (_, S) -> S
     end.
 
@@ -1885,10 +1888,6 @@ check_try_catch_tags(Type, {y,N}=Reg, Vst) ->
             ok
     end.
 
-is_reg_defined({x,_}=Reg, #vst{current=#st{xs=Xs}}) -> is_map_key(Reg, Xs);
-is_reg_defined({y,_}=Reg, #vst{current=#st{ys=Ys}}) -> is_map_key(Reg, Ys);
-is_reg_defined(V, #vst{}) -> error({not_a_register, V}).
-
 assert_term(Src, Vst) ->
     _ = get_term_type(Src, Vst),
     ok.
 
@@ -2285,9 +2284,6 @@ get_raw_type(#value_ref{}=Ref, #vst{current=#st{vs=Vs}}) ->
 get_raw_type(Src, #vst{}) ->
     get_literal_type(Src).
 
-is_value_alive(#value_ref{}=Ref, #vst{current=#st{vs=Vs}}) ->
-    is_map_key(Ref, Vs).
-
 get_literal_type(nil=T) -> T;
 get_literal_type({atom,A}=T) when is_atom(A) -> T;
 get_literal_type({float,F}=T) when is_float(F) -> T;
@@ -2469,25 +2465,44 @@ merge_vrefs(RefA, RefB, Merge, Counter) ->
 
 merge_values(Merge, VsA, VsB) ->
     maps:fold(fun(Spec, New, Acc) ->
-                      merge_values_1(Spec, New, VsA, VsB, Acc)
+                      mv_1(Spec, New, VsA, VsB, Acc)
               end, #{}, Merge).
 
-merge_values_1(Same, Same, VsA, VsB, Acc) ->
+mv_1(Same, Same, VsA, VsB, Acc0) ->
     %% We're merging different versions of the same value, so it's safe to
     %% reuse old entries if the type's unchanged.
-    #value{type=TypeA}=EntryA = map_get(Same, VsA),
-    #value{type=TypeB}=EntryB = map_get(Same, VsB),
+    #value{type=TypeA,args=Args}=EntryA = map_get(Same, VsA),
+    #value{type=TypeB,args=Args}=EntryB = map_get(Same, VsB),
+
     Entry = case join(TypeA, TypeB) of
                 TypeA -> EntryA;
                 TypeB -> EntryB;
                 JoinedType -> EntryA#value{type=JoinedType}
            end,
-    Acc#{ Same => Entry };
-merge_values_1({RefA, RefB}, New, VsA, VsB, Acc) ->
+
+    Acc = Acc0#{ Same => Entry },
+
+    %% Type inference may depend on values that are no longer reachable from a
+    %% register, so all arguments must be merged into the new state.
+    mv_args(Args, VsA, VsB, Acc);
+mv_1({RefA, RefB}, New, VsA, VsB, Acc) ->
     #value{type=TypeA} = map_get(RefA, VsA),
     #value{type=TypeB} = map_get(RefB, VsB),
     Acc#{ New => #value{op=join,args=[],type=join(TypeA, TypeB)} }.
 
+mv_args([#value_ref{}=Arg | Args], VsA, VsB, Acc0) ->
+    case Acc0 of
+        #{ Arg := _ } ->
+            mv_args(Args, VsA, VsB, Acc0);
+        #{} ->
+            Acc = mv_1(Arg, Arg, VsA, VsB, Acc0),
+            mv_args(Args, VsA, VsB, Acc)
+    end;
+mv_args([_ | Args], VsA, VsB, Acc) ->
+    mv_args(Args, VsA, VsB, Acc);
+mv_args([], _VsA, _VsB, Acc) ->
+    Acc.
+
 merge_fragility(FragileA, FragileB) ->
     cerl_sets:union(FragileA, FragileB).
diff --git a/lib/compiler/src/compile.erl b/lib/compiler/src/compile.erl
index 28db8986ff..0325c714d0 100644
--- a/lib/compiler/src/compile.erl
+++ b/lib/compiler/src/compile.erl
@@ -268,8 +268,11 @@ expand_opt(r21, Os) ->
     [no_put_tuple2 | expand_opt(no_bsm3, Os)];
 expand_opt({debug_info_key,_}=O, Os) ->
     [encrypt_debug_info,O|Os];
-expand_opt(no_type_opt, Os) ->
-    [no_ssa_opt_type_start,
+expand_opt(no_type_opt=O, Os) ->
+    %% Be sure to keep the no_type_opt option so that it will
+    %% be recorded in the BEAM file, allowing the test suites
+    %% to recompile the file with this option.
+    [O,no_ssa_opt_type_start,
      no_ssa_opt_type_continue,
      no_ssa_opt_type_finish | Os];
 expand_opt(O, Os) -> [O|Os].
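For illustration only (not part of the commit): a minimal sketch of how the preserved no_type_opt option could be observed after the expand_opt/2 change above. compile:file/2 and beam_lib:chunks/2 are standard APIs; the module and file names are made up, and the exact contents of the recorded option list may vary.

    %% Illustrative sketch, not part of the patch.
    -module(no_type_opt_sketch).
    -export([check/0]).

    check() ->
        {ok, mymod} = compile:file("mymod.erl", [no_type_opt, debug_info]),
        {ok, {mymod, [{compile_info, Info}]}} =
            beam_lib:chunks("mymod.beam", [compile_info]),
        Opts = proplists:get_value(options, Info),
        %% With the change to expand_opt/2, no_type_opt stays in the option
        %% list, so it should be recorded here alongside the expanded
        %% no_ssa_opt_type_* options.
        lists:member(no_type_opt, Opts).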