author     Magnus Lång <[email protected]>  2016-03-17 21:37:26 +0100
committer  Magnus Lång <[email protected]>  2016-08-30 17:17:45 +0200
commit     dc4e9384c6f2f733ab0d12727ab53238bda3fbca (patch)
tree       55c4d84effc98ae0bffbd9b7bc185d2d0a602ba6 /lib/hipe/amd64
parent     4b8c17b0901dfa356792c36f01d78de827cb6645 (diff)
hipe_x86: Minimise CFG<->linear conversions
Most x86 passes either had the form linearise(pass(to_cfg(Code))) or were trivially rewritable to process a CFG directly. Converting them to operate on the CFG saves a great deal of time and memory churn when compiling large programs. There is now only ever a single Linear->CFG conversion, just after lowering from RTL, and only ever a single CFG->Linear conversion, just before the finalise pass. Both of these now happen in hipe_x86_main.
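
For orientation, here is a minimal sketch (not part of the patch) of the per-basic-block shape the converted passes now share. It uses only CFG and basic-block accessors that appear in the diff below; the module name cfg_pass_sketch and transform_insn/1 are hypothetical stand-ins for a real pass and its per-instruction rewrite.

%% A minimal sketch of a pass that walks the CFG block by block,
%% replacing the old linearise(pass(to_cfg(Code))) round trip.
-module(cfg_pass_sketch).
-export([pass/1]).

pass(CFG) ->
  do_bbs(hipe_x86_cfg:labels(CFG), CFG).

do_bbs([], CFG) -> CFG;
do_bbs([Lbl|Lbls], CFG0) ->
  BB   = hipe_x86_cfg:bb(CFG0, Lbl),
  Code = [transform_insn(I) || I <- hipe_bb:code(BB)],
  CFG  = hipe_x86_cfg:bb_add(CFG0, Lbl, hipe_bb:code_update(BB, Code)),
  do_bbs(Lbls, CFG).

%% Identity placeholder; a real pass would rewrite the instruction here.
transform_insn(I) -> I.

Passes that need no extra threaded state can use hipe_x86_cfg:map_bbs/2 instead, as the hipe_amd64_sse2 change below does.
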
Diffstat (limited to 'lib/hipe/amd64')
-rw-r--r--  lib/hipe/amd64/hipe_amd64_ra_sse2_postconditions.erl  107
-rw-r--r--  lib/hipe/amd64/hipe_amd64_sse2.erl                      9
2 files changed, 38 insertions, 78 deletions
diff --git a/lib/hipe/amd64/hipe_amd64_ra_sse2_postconditions.erl b/lib/hipe/amd64/hipe_amd64_ra_sse2_postconditions.erl
index 8483d2d0d5..bbf9170bc3 100644
--- a/lib/hipe/amd64/hipe_amd64_ra_sse2_postconditions.erl
+++ b/lib/hipe/amd64/hipe_amd64_ra_sse2_postconditions.erl
@@ -29,17 +29,21 @@
-define(count_temp(T), ?cons_counter(counter_mfa_mem_temps, T)).
-check_and_rewrite(AMD64Defun, Coloring) ->
- check_and_rewrite(AMD64Defun, Coloring, 'normal').
+check_and_rewrite(AMD64CFG, Coloring) ->
+ check_and_rewrite(AMD64CFG, Coloring, 'normal').
-check_and_rewrite(AMD64Defun, Coloring, Strategy) ->
+check_and_rewrite(AMD64CFG, Coloring, Strategy) ->
%%io:format("Converting\n"),
TempMap = hipe_temp_map:cols2tuple(Coloring,hipe_amd64_specific_sse2),
%%io:format("Rewriting\n"),
- #defun{code=Code0} = AMD64Defun,
- {Code1, DidSpill} = do_insns(Code0, TempMap, Strategy, [], false),
- {AMD64Defun#defun{code=Code1, var_range={0, hipe_gensym:get_var(x86)}},
- DidSpill}.
+ do_bbs(hipe_x86_cfg:labels(AMD64CFG), TempMap, Strategy, AMD64CFG, false).
+
+do_bbs([], _, _, CFG, DidSpill) -> {CFG, DidSpill};
+do_bbs([Lbl|Lbls], TempMap, Strategy, CFG0, DidSpill0) ->
+ Code0 = hipe_bb:code(BB = hipe_x86_cfg:bb(CFG0, Lbl)),
+ {Code, DidSpill} = do_insns(Code0, TempMap, Strategy, [], DidSpill0),
+ CFG = hipe_x86_cfg:bb_add(CFG0, Lbl, hipe_bb:code_update(BB, Code)),
+ do_bbs(Lbls, TempMap, Strategy, CFG, DidSpill).
do_insns([I|Insns], TempMap, Strategy, Accum, DidSpill0) ->
{NewIs, DidSpill1} = do_insn(I, TempMap, Strategy),
@@ -110,74 +114,27 @@ is_float_temp(#x86_mem{}) -> false.
%%% Check if an operand denotes a memory cell (mem or pseudo).
is_mem_opnd(Opnd, TempMap) ->
- R =
- case Opnd of
- #x86_mem{} -> true;
- #x86_temp{type=double} ->
- Reg = hipe_x86:temp_reg(Opnd),
- case hipe_x86:temp_is_allocatable(Opnd) of
- true ->
- case tuple_size(TempMap) > Reg of
- true ->
- case
- hipe_temp_map:is_spilled(Reg, TempMap) of
- true ->
- ?count_temp(Reg),
- true;
- false -> false
- end;
- _ -> false
- end;
- false -> true
- end;
- _ -> false
- end,
- %% io:format("Op ~w mem: ~w\n",[Opnd,R]),
- R.
-
-%%% Check if an operand is a spilled Temp.
-
-%%src_is_spilled(Src, TempMap) ->
-%% case hipe_x86:is_temp(Src) of
-%% true ->
-%% Reg = hipe_x86:temp_reg(Src),
-%% case hipe_x86:temp_is_allocatable(Src) of
-%% true ->
-%% case tuple_size(TempMap) > Reg of
-%% true ->
-%% case hipe_temp_map:is_spilled(Reg, TempMap) of
-%% true ->
-%% ?count_temp(Reg),
-%% true;
-%% false ->
-%% false
-%% end;
-%% false ->
-%% false
-%% end;
-%% false -> true
-%% end;
-%% false -> false
-%% end.
-
-%% is_spilled(Temp, TempMap) ->
-%% case hipe_x86:temp_is_allocatable(Temp) of
-%% true ->
-%% Reg = hipe_x86:temp_reg(Temp),
-%% case tuple_size(TempMap) > Reg of
-%% true ->
-%% case hipe_temp_map:is_spilled(Reg, TempMap) of
-%% true ->
-%% ?count_temp(Reg),
-%% true;
-%% false ->
-%% false
-%% end;
-%% false ->
-%% false
-%% end;
-%% false -> true
-%% end.
+ case Opnd of
+ #x86_mem{} -> true;
+ #x86_temp{type=double} ->
+ Reg = hipe_x86:temp_reg(Opnd),
+ case hipe_x86:temp_is_allocatable(Opnd) of
+ true ->
+ case tuple_size(TempMap) > Reg of
+ true ->
+ case
+ hipe_temp_map:is_spilled(Reg, TempMap) of
+ true ->
+ ?count_temp(Reg),
+ true;
+ false -> false
+ end;
+ _ -> false
+ end;
+ false -> true
+ end;
+ _ -> false
+ end.
%%% Make Reg a clone of Dst (attach Dst's type to Reg).
diff --git a/lib/hipe/amd64/hipe_amd64_sse2.erl b/lib/hipe/amd64/hipe_amd64_sse2.erl
index df78941be5..ea6b6cb9ba 100644
--- a/lib/hipe/amd64/hipe_amd64_sse2.erl
+++ b/lib/hipe/amd64/hipe_amd64_sse2.erl
@@ -30,9 +30,12 @@
%%----------------------------------------------------------------------
-map(Defun = #defun{code=Code0}) ->
- Code1 = do_insns(Code0, []),
- Defun#defun{code=Code1}.
+map(CFG) ->
+ hipe_x86_cfg:map_bbs(fun do_bb/2, CFG).
+
+do_bb(_Lbl, BB) ->
+ Code = do_insns(hipe_bb:code(BB), []),
+ hipe_bb:code_update(BB, Code).
do_insns([I|Insns], Accum) ->
NewIs = do_insn(I),