path: root/lib/hipe/x86/hipe_x86_ra_postconditions.erl
author     Magnus Lång <[email protected]>  2017-03-16 15:30:00 +0100
committer  Magnus Lång <[email protected]>  2017-03-16 20:49:42 +0100
commit     c52b2cf226cb3f1bb1b16bee28d47785506adff3 (patch)
tree       f016829f93e26db7fe7e2329ace868f4b2442510 /lib/hipe/x86/hipe_x86_ra_postconditions.erl
parent     d1d26f4bf9da3cc5eab4e918df771d67fe9e6bb5 (diff)
hipe: Add pseudo_spill_f?move instructions
These pseudo instructions are added to all backends and allow spill-slot to spill-slot move coalescing in a clean way. They have regular move semantics, but contain an additional scratch register to be used if both source and destination are spilled and the move thus cannot be coalesced. Additionally, a register allocator callback Target:is_spill_move(Instr, Context) is added, which allows the spill-slot allocators to check for these instructions and try to coalesce the spill slots that the two temporaries are allocated to.
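
For illustration only (a minimal sketch, not the actual OTP definitions: the record field names beyond temp are assumptions, and only mk_pseudo_spill_move/3, the temp field and the is_spill_move callback are visible in this commit), the new pseudo instruction and the allocator callback amount to roughly the following:

%% Assumed record layout for the new pseudo instruction (illustrative only).
-record(pseudo_spill_move, {src, temp, dst}).

%% A backend's implementation of the new allocator callback can be a plain
%% record check, letting spill-slot allocators spot coalescing opportunities.
is_spill_move(Instr, _Context) ->
  is_record(Instr, pseudo_spill_move).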
Diffstat (limited to 'lib/hipe/x86/hipe_x86_ra_postconditions.erl')
-rw-r--r--  lib/hipe/x86/hipe_x86_ra_postconditions.erl  26
1 file changed, 22 insertions, 4 deletions
diff --git a/lib/hipe/x86/hipe_x86_ra_postconditions.erl b/lib/hipe/x86/hipe_x86_ra_postconditions.erl
index 28ec9c4277..db6391d5c1 100644
--- a/lib/hipe/x86/hipe_x86_ra_postconditions.erl
+++ b/lib/hipe/x86/hipe_x86_ra_postconditions.erl
@@ -74,6 +74,8 @@ do_insn(I, TempMap, Strategy) -> % Insn -> {Insn list, DidSpill}
do_movx(I, TempMap, Strategy);
#fmove{} ->
do_fmove(I, TempMap, Strategy);
+ #pseudo_spill_move{} ->
+ do_pseudo_spill_move(I, TempMap, Strategy);
#shift{} ->
do_shift(I, TempMap, Strategy);
#test{} ->
@@ -190,10 +192,19 @@ do_lea(I, TempMap, Strategy) ->
do_move(I, TempMap, Strategy) ->
#move{src=Src0,dst=Dst0} = I,
- {FixSrc, Src, FixDst, Dst, DidSpill} =
- do_check_byte_move(Src0, Dst0, TempMap, Strategy),
- {FixSrc ++ FixDst ++ [I#move{src=Src,dst=Dst}],
- DidSpill}.
+ case
+ is_record(Src0, x86_temp) andalso is_record(Dst0, x86_temp)
+ andalso is_spilled(Src0, TempMap) andalso is_spilled(Dst0, TempMap)
+ of
+ true ->
+ Tmp = clone(Src0, Strategy),
+ {[hipe_x86:mk_pseudo_spill_move(Src0, Tmp, Dst0)], true};
+ false ->
+ {FixSrc, Src, FixDst, Dst, DidSpill} =
+ do_check_byte_move(Src0, Dst0, TempMap, Strategy),
+ {FixSrc ++ FixDst ++ [I#move{src=Src,dst=Dst}],
+ DidSpill}
+ end.
-ifdef(HIPE_AMD64).
@@ -287,6 +298,13 @@ do_fmove(I, TempMap, Strategy) ->
{FixSrc ++ FixDst ++ [I#fmove{src=Src,dst=Dst}],
DidSpill1 or DidSpill2}.
+%%% Fix a pseudo_spill_move op.
+
+do_pseudo_spill_move(I = #pseudo_spill_move{temp=Temp}, TempMap, _Strategy) ->
+ %% Temp is above the low water mark and must not have been spilled
+ false = is_spilled(Temp, TempMap),
+ {[I], false}. % nothing to do
+
%%% Fix a shift operation.
%%% 1. remove pseudos from any explicit memory operands
%%% 2. if the source is a register or memory position
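
As a usage note, a spill-slot allocator could consult the new callback along the following lines (a hedged sketch, not OTP code; visit_instr/4 and try_coalesce_spill_slots/3 are hypothetical helper names):

%% Hedged sketch: when the target reports a spill move, try to place the
%% source and destination temporaries in the same spill slot so the move can
%% later be dropped; otherwise leave the slot assignment unchanged.
visit_instr(Instr, Context, Target, SlotMap) ->
  case Target:is_spill_move(Instr, Context) of
    true  -> try_coalesce_spill_slots(Instr, Context, SlotMap);  %% hypothetical helper
    false -> SlotMap
  end.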