path: root/lib/hipe/x86/hipe_rtl_to_x86.erl
author     bhuztez <[email protected]>            2017-12-31 14:07:49 +0800
committer  Sverker Eriksson <[email protected]>   2018-01-29 16:39:21 +0100
commit     38a99af36f044459db40b76be2cc72c638eb6d98 (patch)
tree       e8c656436ed0d934aab93daa5c3bc8ef33a05cd4 /lib/hipe/x86/hipe_rtl_to_x86.erl
parent     b73e87f5de895860337c0d30473edd1e59667baa (diff)
make HiPE work on x86_64 when PIE is enabled
Currently HiPE amd64 assumes the runtime system code is loaded into the low 2G of the address space. This is not the case when PIE is enabled: the code is loaded at a random location. Trampolines are therefore required to call BIFs, and the address of sse2_fnegate_mask has to be loaded into a register before the xorpd in fchs.
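
For context, fchs negates a double by flipping its IEEE 754 sign bit, which is exactly what xorpd against sse2_fnegate_mask does; under PIE that mask may live above the low 2G, so its full 64-bit address must be loaded into a register first. Below is a minimal Erlang sketch of the same sign-flip, for illustration only (negate_double is a hypothetical helper, not part of this patch):

%% Negate a double by XOR-ing its bit pattern with the sign-bit mask,
%% the software analogue of xorpd with sse2_fnegate_mask.
negate_double(F) when is_float(F) ->
    <<Bits:64>> = <<F/float>>,                    %% reinterpret the double as 64 bits
    SignMask = 16#8000000000000000,               %% bit 63 = IEEE 754 sign bit
    <<Neg/float>> = <<(Bits bxor SignMask):64>>,  %% flip the sign bit, decode back
    Neg.

For example, negate_double(1.5) returns -1.5.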
Diffstat (limited to 'lib/hipe/x86/hipe_rtl_to_x86.erl')
-rw-r--r--  lib/hipe/x86/hipe_rtl_to_x86.erl  24
1 file changed, 16 insertions(+), 8 deletions(-)
diff --git a/lib/hipe/x86/hipe_rtl_to_x86.erl b/lib/hipe/x86/hipe_rtl_to_x86.erl
index 31e4f6e4ac..22947da148 100644
--- a/lib/hipe/x86/hipe_rtl_to_x86.erl
+++ b/lib/hipe/x86/hipe_rtl_to_x86.erl
@@ -646,7 +646,7 @@ conv_imm(Opnd, Map) ->
is_imm64(Value) when is_integer(Value) ->
(Value < -(1 bsl (32 - 1))) or (Value > (1 bsl (32 - 1)) - 1);
is_imm64({_,atom}) -> false; % Atoms are 32 bits.
-is_imm64({_,c_const}) -> false; % c_consts are 32 bits.
+is_imm64({_,c_const}) -> true; % c_consts are 64 bits.
is_imm64({_,_}) -> true . % Other relocs are 64 bits.
-else.
conv_imm(Opnd, Map) ->
@@ -777,6 +777,18 @@ conv_fconv(Dst, Src) ->
%%% Finalise the conversion of a 2-address FP operation.
+-ifdef(HIPE_AMD64).
+conv_fp_unary(Dst, Src, 'fchs') ->
+ Tmp = new_untagged_temp(),
+ case same_opnd(Dst, Src) of
+ true ->
+ [];
+ _ ->
+ [hipe_x86:mk_fmove(Src, Dst)]
+ end ++
+ mk_load_address(c_const, hipe_x86:mk_imm({sse2_fnegate_mask, c_const}), Tmp) ++
+ [hipe_x86:mk_fp_binop('xorpd', hipe_x86:mk_mem(Tmp, hipe_x86:mk_imm(0), double), Dst)].
+-else.
conv_fp_unary(Dst, Src, FpUnOp) ->
case same_opnd(Dst, Src) of
true ->
@@ -785,6 +797,7 @@ conv_fp_unary(Dst, Src, FpUnOp) ->
[hipe_x86:mk_fmove(Src, Dst),
hipe_x86:mk_fp_unop(FpUnOp, Dst)]
end.
+-endif.
conv_fp_unop(RtlFpUnOp) ->
case RtlFpUnOp of
@@ -854,13 +867,8 @@ mk_jmp_switch(Index, JTabLab, Labels) ->
%%% Finalise the translation of a load_address instruction.
-ifdef(HIPE_AMD64).
-mk_load_address(Type, Src, Dst) ->
- case Type of
- c_const -> % 32 bits
- [hipe_x86:mk_move(Src, Dst)];
- _ ->
- [hipe_x86:mk_move64(Src, Dst)]
- end.
+mk_load_address(_Type, Src, Dst) ->
+ [hipe_x86:mk_move64(Src, Dst)].
-else.
mk_load_address(_Type, Src, Dst) ->
[hipe_x86:mk_move(Src, Dst)].