From 60d4e673149aaa34e977e42aabeef3efd4b7c34e Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn=20Gustavsson?=
Date: Thu, 20 Sep 2018 04:23:12 +0200
Subject: Move optimization of 'move' from beam_flatten to beam_ssa_codegen

The purpose of beam_flatten is to eliminate the blocks created by
beam_block, but it also does a few optimizations because at the time
the optimizations were added, beam_flatten was the most convenient
place.

Move the optimization that places `{move,Something,{x,0}}` before
`call` instructions from beam_flatten to beam_ssa_codegen.

This change will very slightly improve compilation times, and it will
also apply the optimization in more places. In particular, a
`{move,Literal,{x,0}}` would never be moved past a `trim` instruction
before this change. Now it will.
---
 lib/compiler/src/beam_ssa_codegen.erl | 68 +++++++++++++++++++++++++++++++++--
 1 file changed, 66 insertions(+), 2 deletions(-)

(limited to 'lib/compiler/src/beam_ssa_codegen.erl')

diff --git a/lib/compiler/src/beam_ssa_codegen.erl b/lib/compiler/src/beam_ssa_codegen.erl
index 5085c950b5..cae9920f40 100644
--- a/lib/compiler/src/beam_ssa_codegen.erl
+++ b/lib/compiler/src/beam_ssa_codegen.erl
@@ -1196,11 +1196,40 @@ bif_to_test(Name, Args, Fail) ->
 
 opt_call_moves(Is0, Arity) ->
     {Moves0,Is} = splitwith(fun({move,_,_}) -> true;
+                               ({kill,_}) -> true;
                                (_) -> false
                             end, Is0),
     Moves = opt_call_moves_1(Moves0, Arity),
     Moves ++ Is.
 
+opt_call_moves_1([{move,Src,{x,_}=Tmp}=M1|[{kill,_}|_]=Is], Arity) ->
+    %% There could be a {move,Tmp,{x,0}} instruction after the
+    %% kill/1 instructions (moved to there by opt_move_to_x0/1).
+    case splitwith(fun({kill,_}) -> true;
+                      (_) -> false
+                   end, Is) of
+        {Kills,[{move,{x,_}=Tmp,{x,0}}=M2]} ->
+            %% The two move/2 instructions (M1 and M2) can be combined
+            %% to one. The question is, though, is it safe to place
+            %% them after the kill/1 instructions?
+            case is_killed(Src, Kills, Arity) of
+                true ->
+                    %% Src (a Y register) is killed by one of the
+                    %% kill/1 instructions. Thus M1 and M2
+                    %% must be placed before the kill/1 instructions
+                    %% (essentially undoing what opt_move_to_x0/1
+                    %% did, which turned out to be a pessimization
+                    %% in this case).
+                    opt_call_moves_1([M1,M2|Kills], Arity);
+                false ->
+                    %% Src is not killed by any of the kill/1
+                    %% instructions. Thus it is safe to place
+                    %% M1 and M2 after the kill/1 instructions.
+                    opt_call_moves_1(Kills++[M1,M2], Arity)
+            end;
+        {_,_} ->
+            [M1|Is]
+    end;
 opt_call_moves_1([{move,Src,{x,_}=Tmp}=M1,{move,Tmp,Dst}=M2|Is], Arity) ->
     case is_killed(Tmp, Is, Arity) of
         true ->
@@ -1214,6 +1243,10 @@ opt_call_moves_1([M|Ms], Arity) ->
     [M|opt_call_moves_1(Ms, Arity)];
 opt_call_moves_1([], _Arity) -> [].
 
+is_killed(Y, [{kill,Y}|_], _) ->
+    true;
+is_killed(R, [{kill,_}|Is], Arity) ->
+    is_killed(R, Is, Arity);
 is_killed(R, [{move,R,_}|_], _) ->
     false;
 is_killed(R, [{move,_,R}|_], _) ->
@@ -1221,7 +1254,9 @@ is_killed(R, [{move,_,R}|_], _) ->
 is_killed(R, [{move,_,_}|Is], Arity) ->
     is_killed(R, Is, Arity);
 is_killed({x,X}, [], Arity) ->
-    X >= Arity.
+    X >= Arity;
+is_killed({y,_}, [], _) ->
+    false.
 
 cg_alloc(#cg_alloc{stack=none,words=#need{h=0,f=0}}, _St) ->
     [];
@@ -1621,12 +1656,41 @@ phi_copies([#b_set{dst=Dst,args=PhiArgs}|Sets], L) ->
     [#cg_set{op=copy,dst=Dst,args=CopyArgs}|phi_copies(Sets, L)];
 phi_copies([], _) -> [].
 
+%% opt_move_to_x0([Instruction]) -> [Instruction].
+%%  Simple peep-hole optimization to move a {move,Any,{x,0}} past
+%%  any kill up to the next call instruction. (To give the loader
+%%  an opportunity to combine the 'move' and the 'call' instructions.)
+
+opt_move_to_x0(Moves) ->
+    opt_move_to_x0(Moves, []).
+
+opt_move_to_x0([{move,_,{x,0}}=I|Is0], Acc0) ->
+    case move_past_kill(Is0, I, Acc0) of
+        impossible -> opt_move_to_x0(Is0, [I|Acc0]);
+        {Is,Acc} -> opt_move_to_x0(Is, Acc)
+    end;
+opt_move_to_x0([I|Is], Acc) ->
+    opt_move_to_x0(Is, [I|Acc]);
+opt_move_to_x0([], Acc) -> reverse(Acc).
+
+move_past_kill([{kill,Src}|_], {move,Src,_}, _) ->
+    impossible;
+move_past_kill([{kill,_}=I|Is], Move, Acc) ->
+    move_past_kill(Is, Move, [I|Acc]);
+move_past_kill(Is, Move, Acc) ->
+    {Is,[Move|Acc]}.
+
 %% setup_args(Args, Anno, Context) -> [Instruction].
 %% setup_args(Args) -> [Instruction].
 %%  Set up X registers for a call.
 
 setup_args(Args, Anno, none, St) ->
-    setup_args(Args) ++ kill_yregs(Anno, St);
+    case {setup_args(Args),kill_yregs(Anno, St)} of
+        {Moves,[]} ->
+            Moves;
+        {Moves,Kills} ->
+            opt_move_to_x0(Moves ++ Kills)
+    end;
 setup_args(Args, _, _, _) ->
     setup_args(Args).
 
--
cgit v1.2.3


From af632b4a9f259f5b8779d23d7aea6ebab7196724 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn=20Gustavsson?=
Date: Thu, 20 Sep 2018 07:57:19 +0200
Subject: Move allocation combining from beam_flatten to beam_ssa_codegen

Continuing the simplification of beam_flatten, move the optimization
that eliminates a test_heap instruction following a binary
construction by incorporating the allocation of the heap space into
the bs_init* instruction itself.

This change does not change the generated code in any way.

Also remove beam_utils:combine_heap_needs/2, because beam_flatten was
the last user of it.
---
 lib/compiler/src/beam_ssa_codegen.erl | 44 ++++++++++++++++++++++++++++++++++++--------
 1 file changed, 36 insertions(+), 8 deletions(-)

(limited to 'lib/compiler/src/beam_ssa_codegen.erl')

diff --git a/lib/compiler/src/beam_ssa_codegen.erl b/lib/compiler/src/beam_ssa_codegen.erl
index cae9920f40..142df5192f 100644
--- a/lib/compiler/src/beam_ssa_codegen.erl
+++ b/lib/compiler/src/beam_ssa_codegen.erl
@@ -218,7 +218,7 @@ need_heap_never(_) -> false.
 
 need_heap_blks([{L,#cg_blk{is=Is0}=Blk0}|Bs], H0, Acc) ->
     {Is1,H1} = need_heap_is(reverse(Is0), H0, []),
-    {Ns,H} = need_heap_terminator(Bs, H1),
+    {Ns,H} = need_heap_terminator(Bs, L, H1),
     Is = Ns ++ Is1,
     Blk = Blk0#cg_blk{is=Is},
     need_heap_blks(Bs, H, [{L,Blk}|Acc]);
@@ -228,6 +228,13 @@ need_heap_blks([], H, Acc) ->
 need_heap_is([#cg_alloc{words=Words}=Alloc0|Is], N, Acc) ->
     Alloc = Alloc0#cg_alloc{words=add_heap_words(N, Words)},
     need_heap_is(Is, #need{}, [Alloc|Acc]);
+need_heap_is([#cg_set{anno=Anno,op=bs_init}=I0|Is], N, Acc) ->
+    Alloc = case need_heap_need(N) of
+                [#cg_alloc{words=Need}] -> alloc(Need);
+                [] -> 0
+            end,
+    I = I0#cg_set{anno=Anno#{alloc=>Alloc}},
+    need_heap_is(Is, #need{}, [I|Acc]);
 need_heap_is([#cg_set{op=Op,args=Args}=I|Is], N, Acc) ->
     case classify_heap_need(Op, Args) of
         {put,Words} ->
@@ -243,11 +250,31 @@ need_heap_is([], N, Acc) ->
     {Acc,N}.
 
-need_heap_terminator([{_,#cg_blk{last=#cg_br{succ=Same,fail=Same}}}|_], N) ->
+need_heap_terminator([{_,#cg_blk{last=#cg_br{succ=L,fail=L}}}|_], L, N) ->
+    %% Fallthrough.
     {[],N};
-need_heap_terminator([{_,#cg_blk{}}|_], N) ->
+need_heap_terminator([{_,#cg_blk{is=Is,last=#cg_br{succ=L}}}|_], L, N) ->
+    case need_heap_need(N) of
+        [] ->
+            {[],#need{}};
+        [_|_]=Alloc ->
+            %% If the preceding instructions are a binary construction,
+            %% hoist the allocation and incorporate it into the bs_init
+            %% instruction.
+            case reverse(Is) of
+                [#cg_set{op=succeeded},#cg_set{op=bs_init}|_] ->
+                    {[],N};
+                [#cg_set{op=bs_put}|_] ->
+                    {[],N};
+                _ ->
+                    %% Not binary construction. Must emit an allocation
+                    %% instruction in this block.
+                    {Alloc,#need{}}
+            end
+    end;
+need_heap_terminator([{_,#cg_blk{}}|_], _, N) ->
     {need_heap_need(N),#need{}};
-need_heap_terminator([], H) ->
+need_heap_terminator([], _, H) ->
     {need_heap_need(H),#need{}}.
 
 need_heap_need(#need{h=0,f=0}) -> [];
@@ -1022,12 +1049,13 @@ cg_block([#cg_set{op=bs_init,dst=Dst0,args=Args0,anno=Anno}=I,
           #cg_set{op=succeeded,dst=Bool}], {Bool,Fail0}, St) ->
     Fail = bif_fail(Fail0),
     Line = line(Anno),
+    Alloc = map_get(alloc, Anno),
     [#b_literal{val=Kind}|Args1] = Args0,
     case Kind of
         new ->
             [Dst,Size,{integer,Unit}] = beam_args([Dst0|Args1], St),
             Live = get_live(I),
-            {[Line|cg_bs_init(Dst, Size, Unit, Live, Fail)],St};
+            {[Line|cg_bs_init(Dst, Size, Alloc, Unit, Live, Fail)],St};
         private_append ->
             [Dst,Src,Bits,{integer,Unit}] = beam_args([Dst0|Args1], St),
             Flags = {field_flags,[]},
@@ -1037,7 +1065,7 @@ cg_block([#cg_set{op=bs_init,dst=Dst0,args=Args0,anno=Anno}=I,
             [Dst,Src,Bits,{integer,Unit}] = beam_args([Dst0|Args1], St),
             Flags = {field_flags,[]},
             Live = get_live(I),
-            Is = [Line,{bs_append,Fail,Bits,0,Live,Unit,Src,Flags,Dst}],
+            Is = [Line,{bs_append,Fail,Bits,Alloc,Live,Unit,Src,Flags,Dst}],
             {Is,St}
     end;
 cg_block([#cg_set{anno=Anno,op=bs_start_match,dst=Ctx0,args=[Bin0]}=I,
@@ -1531,13 +1559,13 @@ cg_bs_put(Fail, [{atom,Type},{literal,Flags}|Args]) ->
             [{Op,Fail,{field_flags,Flags},Src}]
     end.
 
-cg_bs_init(Dst, Size0, Unit, Live, Fail) ->
+cg_bs_init(Dst, Size0, Alloc, Unit, Live, Fail) ->
     Op = case Unit of
              1 -> bs_init_bits;
             8 -> bs_init2
         end,
     Size = cg_bs_init_size(Size0),
-    [{Op,Fail,Size,0,Live,{field_flags,[]},Dst}].
+    [{Op,Fail,Size,Alloc,Live,{field_flags,[]},Dst}].
 
 cg_bs_init_size({x,_}=R) -> R;
 cg_bs_init_size({y,_}=R) -> R;
--
cgit v1.2.3


From 13dd57dd63fee7593f809f1fc77ec91e4646c90c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bj=C3=B6rn=20Gustavsson?=
Date: Mon, 24 Sep 2018 15:23:40 +0200
Subject: Move bif_to_test/3 from beam_utils to beam_ssa_codegen

The only caller of bif_to_test/3 is beam_ssa_codegen.
---
 lib/compiler/src/beam_ssa_codegen.erl | 63 ++++++++++++++++++++++++++++++++++-
 1 file changed, 62 insertions(+), 1 deletion(-)

(limited to 'lib/compiler/src/beam_ssa_codegen.erl')

diff --git a/lib/compiler/src/beam_ssa_codegen.erl b/lib/compiler/src/beam_ssa_codegen.erl
index 142df5192f..163553d2c9 100644
--- a/lib/compiler/src/beam_ssa_codegen.erl
+++ b/lib/compiler/src/beam_ssa_codegen.erl
@@ -1207,6 +1207,12 @@ cg_copy_1([#cg_set{dst=Dst0,args=Args}|T], St) ->
     end;
 cg_copy_1([], _St) -> [].
 
+-define(IS_LITERAL(Val), (Val =:= nil orelse
+                          element(1, Val) =:= integer orelse
+                          element(1, Val) =:= float orelse
+                          element(1, Val) =:= atom orelse
+                          element(1, Val) =:= literal)).
+
 bif_to_test('and', [V1,V2], Fail) ->
     [{test,is_eq_exact,Fail,[V1,{atom,true}]},
      {test,is_eq_exact,Fail,[V2,{atom,true}]}];
@@ -1220,7 +1226,62 @@ bif_to_test('or', [V1,V2], {f,Lbl}=Fail) when Lbl =/= 0 ->
 bif_to_test('not', [Var], Fail) ->
     [{test,is_eq_exact,Fail,[Var,{atom,false}]}];
 bif_to_test(Name, Args, Fail) ->
-    [beam_utils:bif_to_test(Name, Args, Fail)].
+    [bif_to_test_1(Name, Args, Fail)].
+
+bif_to_test_1(is_atom, [_]=Ops, Fail) ->
+    {test,is_atom,Fail,Ops};
+bif_to_test_1(is_boolean, [_]=Ops, Fail) ->
+    {test,is_boolean,Fail,Ops};
+bif_to_test_1(is_binary, [_]=Ops, Fail) ->
+    {test,is_binary,Fail,Ops};
+bif_to_test_1(is_bitstring,[_]=Ops, Fail) ->
+    {test,is_bitstr,Fail,Ops};
+bif_to_test_1(is_float, [_]=Ops, Fail) ->
+    {test,is_float,Fail,Ops};
+bif_to_test_1(is_function, [_]=Ops, Fail) ->
+    {test,is_function,Fail,Ops};
+bif_to_test_1(is_function, [_,_]=Ops, Fail) ->
+    {test,is_function2,Fail,Ops};
+bif_to_test_1(is_integer, [_]=Ops, Fail) ->
+    {test,is_integer,Fail,Ops};
+bif_to_test_1(is_list, [_]=Ops, Fail) ->
+    {test,is_list,Fail,Ops};
+bif_to_test_1(is_map, [_]=Ops, Fail) ->
+    {test,is_map,Fail,Ops};
+bif_to_test_1(is_number, [_]=Ops, Fail) ->
+    {test,is_number,Fail,Ops};
+bif_to_test_1(is_pid, [_]=Ops, Fail) ->
+    {test,is_pid,Fail,Ops};
+bif_to_test_1(is_port, [_]=Ops, Fail) ->
+    {test,is_port,Fail,Ops};
+bif_to_test_1(is_reference, [_]=Ops, Fail) ->
+    {test,is_reference,Fail,Ops};
+bif_to_test_1(is_tuple, [_]=Ops, Fail) ->
+    {test,is_tuple,Fail,Ops};
+bif_to_test_1('=<', [A,B], Fail) ->
+    {test,is_ge,Fail,[B,A]};
+bif_to_test_1('>', [A,B], Fail) ->
+    {test,is_lt,Fail,[B,A]};
+bif_to_test_1('<', [_,_]=Ops, Fail) ->
+    {test,is_lt,Fail,Ops};
+bif_to_test_1('>=', [_,_]=Ops, Fail) ->
+    {test,is_ge,Fail,Ops};
+bif_to_test_1('==', [C,A], Fail) when ?IS_LITERAL(C) ->
+    {test,is_eq,Fail,[A,C]};
+bif_to_test_1('==', [_,_]=Ops, Fail) ->
+    {test,is_eq,Fail,Ops};
+bif_to_test_1('/=', [C,A], Fail) when ?IS_LITERAL(C) ->
+    {test,is_ne,Fail,[A,C]};
+bif_to_test_1('/=', [_,_]=Ops, Fail) ->
+    {test,is_ne,Fail,Ops};
+bif_to_test_1('=:=', [C,A], Fail) when ?IS_LITERAL(C) ->
+    {test,is_eq_exact,Fail,[A,C]};
+bif_to_test_1('=:=', [_,_]=Ops, Fail) ->
+    {test,is_eq_exact,Fail,Ops};
+bif_to_test_1('=/=', [C,A], Fail) when ?IS_LITERAL(C) ->
+    {test,is_ne_exact,Fail,[A,C]};
+bif_to_test_1('=/=', [_,_]=Ops, Fail) ->
+    {test,is_ne_exact,Fail,Ops}.
 
 opt_call_moves(Is0, Arity) ->
     {Moves0,Is} = splitwith(fun({move,_,_}) -> true;
--
cgit v1.2.3
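As a standalone illustration (not part of the patches above) of the peephole
introduced in the first commit, the sketch below mirrors opt_move_to_x0/1 and
move_past_kill/3 and shows when a {move,_,{x,0}} may drift past kill/1
instructions so that it ends up directly in front of the call, and when it may
not. The module name move_to_x0_sketch, the demo/0 function, and the sample
instruction lists are invented for this illustration.

-module(move_to_x0_sketch).
-export([demo/0]).

%% Move a {move,_,{x,0}} forward past kill/1 instructions, as long as
%% the source of the move is not one of the registers being killed.
opt_move_to_x0(Is) ->
    opt_move_to_x0(Is, []).

opt_move_to_x0([{move,_,{x,0}}=I|Is0], Acc0) ->
    case move_past_kill(Is0, I, Acc0) of
        impossible -> opt_move_to_x0(Is0, [I|Acc0]);
        {Is,Acc} -> opt_move_to_x0(Is, Acc)
    end;
opt_move_to_x0([I|Is], Acc) ->
    opt_move_to_x0(Is, [I|Acc]);
opt_move_to_x0([], Acc) ->
    lists:reverse(Acc).

%% Moving the move past a kill of its own source register would read a
%% dead register, so give up in that case.
move_past_kill([{kill,Src}|_], {move,Src,_}, _) ->
    impossible;
move_past_kill([{kill,_}=I|Is], Move, Acc) ->
    move_past_kill(Is, Move, [I|Acc]);
move_past_kill(Is, Move, Acc) ->
    {Is,[Move|Acc]}.

demo() ->
    %% The move of {y,1} may pass {kill,{y,2}} and land next to the call...
    [{kill,{y,2}},{move,{y,1},{x,0}},{call,1,{f,5}}] =
        opt_move_to_x0([{move,{y,1},{x,0}},{kill,{y,2}},{call,1,{f,5}}]),
    %% ...but it may not pass a kill of its own source register.
    [{move,{y,1},{x,0}},{kill,{y,1}},{call,1,{f,5}}] =
        opt_move_to_x0([{move,{y,1},{x,0}},{kill,{y,1}},{call,1,{f,5}}]),
    ok.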