Index: src/x64/macro-assembler-x64.cc
===================================================================
--- src/x64/macro-assembler-x64.cc	(revision 15486)
+++ src/x64/macro-assembler-x64.cc	(working copy)
@@ -56,8 +56,11 @@
 static const int kInvalidRootRegisterDelta = -1;
+#define __k
+#define __q
+#define __n
-intptr_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
+int64_t MacroAssembler::RootRegisterDelta(ExternalReference other) {
   if (predictable_code_size() &&
       (other.address() < reinterpret_cast<Address>(isolate()) ||
        other.address() >= reinterpret_cast<Address>(isolate() + 1))) {
@@ -65,7 +68,13 @@
   }
   Address roots_register_value = kRootRegisterBias +
       reinterpret_cast<Address>(isolate()->heap()->roots_array_start());
+#ifndef V8_TARGET_ARCH_X32
   intptr_t delta = other.address() - roots_register_value;
+#else
+  uint64_t o = reinterpret_cast<uint32_t>(other.address());
+  uint64_t r = reinterpret_cast<uint32_t>(roots_register_value);
+  int64_t delta = o - r;
+#endif
   return delta;
 }
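
Note on the X32 branch above: Address is only 32 bits wide there, so both
addresses are zero-extended to 64 bits and subtracted in 64-bit arithmetic;
the unsigned wrap-around then reads back as the correct signed difference in
(-2^32, 2^32), keeping the is_int32() guard and the kInvalidRootRegisterDelta
sentinel meaningful. A minimal standalone sketch (not V8 code):

    #include <cstdint>

    int64_t RootDelta32(uint32_t other, uint32_t roots_register_value) {
      uint64_t o = other;                  // zero-extend, as in the patch
      uint64_t r = roots_register_value;
      return static_cast<int64_t>(o - r);  // wraps to the signed difference
    }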
@@ -73,7 +82,7 @@
 Operand MacroAssembler::ExternalOperand(ExternalReference target,
                                         Register scratch) {
   if (root_array_available_ && !Serializer::enabled()) {
-    intptr_t delta = RootRegisterDelta(target);
+    int64_t delta = RootRegisterDelta(target);
     if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
       Serializer::TooLateToEnableNow();
       return Operand(kRootRegister, static_cast<int32_t>(delta));
@@ -86,7 +95,7 @@
 void MacroAssembler::Load(Register destination, ExternalReference source) {
   if (root_array_available_ && !Serializer::enabled()) {
-    intptr_t delta = RootRegisterDelta(source);
+    int64_t delta = RootRegisterDelta(source);
     if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
       Serializer::TooLateToEnableNow();
       movq(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
@@ -105,7 +114,7 @@
 void MacroAssembler::Store(ExternalReference destination, Register source) {
   if (root_array_available_ && !Serializer::enabled()) {
-    intptr_t delta = RootRegisterDelta(destination);
+    int64_t delta = RootRegisterDelta(destination);
     if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
       Serializer::TooLateToEnableNow();
       movq(Operand(kRootRegister, static_cast<int32_t>(delta)), source);
@@ -125,7 +134,7 @@
 void MacroAssembler::LoadAddress(Register destination,
                                  ExternalReference source) {
   if (root_array_available_ && !Serializer::enabled()) {
-    intptr_t delta = RootRegisterDelta(source);
+    int64_t delta = RootRegisterDelta(source);
     if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
       Serializer::TooLateToEnableNow();
       lea(destination, Operand(kRootRegister, static_cast<int32_t>(delta)));
@@ -142,7 +151,7 @@
     // This calculation depends on the internals of LoadAddress.
    // Its correctness is ensured by the asserts in the Call
    // instruction below.
-    intptr_t delta = RootRegisterDelta(source);
+    int64_t delta = RootRegisterDelta(source);
     if (delta != kInvalidRootRegisterDelta && is_int32(delta)) {
       Serializer::TooLateToEnableNow();
       // Operand is lea(scratch, Operand(kRootRegister, delta));
@@ -154,8 +163,13 @@
       return size;
     }
   }
+#ifndef V8_TARGET_ARCH_X32
   // Size of movq(destination, src);
   return 10;
+#else
+  // Size of movl(destination, src);
+  return 6;
+#endif
 }
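
The 10- and 6-byte sizes correspond to the mov-immediate encodings, assuming
the worst case of a REX prefix being needed for an r8-r15 destination:

    // movq r64, imm64: REX.W (1) + opcode B8+r (1) + imm64 (8) = 10 bytes
    // movl r32, imm32: REX   (1) + opcode B8+r (1) + imm32 (4) =  6 bytes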
@@ -285,10 +299,12 @@
     cmpq(scratch, kScratchRegister);
     j(cc, branch, distance);
   } else {
+#ifndef V8_TARGET_ARCH_X32
     ASSERT(is_int32(static_cast<int64_t>(HEAP->NewSpaceMask())));
+#endif
     intptr_t new_space_start =
         reinterpret_cast<intptr_t>(HEAP->NewSpaceStart());
-    movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
+    __n movq(kScratchRegister, -new_space_start, RelocInfo::NONE64);
     if (scratch.is(object)) {
       addq(scratch, kScratchRegister);
     } else {
@@ -525,11 +541,17 @@
   }
 #endif
   push(rax);
-  movq(kScratchRegister, p0, RelocInfo::NONE64);
+  __n movq(kScratchRegister, p0, RelocInfo::NONE64);
   push(kScratchRegister);
+#ifndef V8_TARGET_ARCH_X32
   movq(kScratchRegister,
        reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
        RelocInfo::NONE64);
+#else
+  movl(kScratchRegister,
+       reinterpret_cast<intptr_t>(Smi::FromInt(static_cast<int>(p1 - p0))),
+       RelocInfo::NONE32);
+#endif
   push(kScratchRegister);
   if (!has_frame_) {
@@ -757,8 +779,13 @@
   bind(&profiler_disabled);
   // Call the api function!
+#ifndef V8_TARGET_ARCH_X32
   movq(rax, reinterpret_cast<int64_t>(function_address),
        RelocInfo::EXTERNAL_REFERENCE);
+#else
+  movl(rax, reinterpret_cast<uint32_t>(function_address),
+       RelocInfo::EXTERNAL_REFERENCE);
+#endif
   bind(&end_profiler_check);
@@ -792,7 +819,12 @@
     bind(&empty_result);
   }
   // Load the value from ReturnValue
+#ifndef V8_TARGET_ARCH_X32
   movq(rax, Operand(rbp, return_value_offset * kPointerSize));
+#else
+  movl(rax,
+       Operand(rbp, 2 * kHWRegSize + (return_value_offset - 2) * kPointerSize));
+#endif
   bind(&prologue);
   // No more valid handles (the result handle was the last one). Restore
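
The split offset in the X32 branch reflects the frame layout assumed by this
port: the two slots nearest rbp (the saved frame pointer and the return
address) are written by hardware push/call and are kHWRegSize (8 bytes) each,
while the slots above them are 4-byte pointers. A sketch of the arithmetic
under those assumptions:

    // Byte offset of pointer slot n above rbp when the first two slots are
    // hardware-sized (8 bytes) and the rest are x32 pointers (4 bytes).
    int FrameSlotOffsetX32(int n) {
      const int kHWRegSize = 8;
      const int kPointerSize = 4;
      return 2 * kHWRegSize + (n - 2) * kPointerSize;
    }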
@@ -926,7 +958,7 @@
   for (int i = 0; i < kNumberOfSavedRegs; i++) {
     Register reg = saved_regs[i];
     if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
-      push(reg);
+      __k push(reg);
     }
   }
   // R12 to r15 are callee save on all platforms.
@@ -954,7 +986,7 @@
   for (int i = kNumberOfSavedRegs - 1; i >= 0; i--) {
     Register reg = saved_regs[i];
     if (!reg.is(exclusion1) && !reg.is(exclusion2) && !reg.is(exclusion3)) {
-      pop(reg);
+      __k pop(reg);
     }
   }
 }
@@ -966,12 +998,14 @@
   } else if (is_uint32(x)) {
     movl(dst, Immediate(static_cast<uint32_t>(x)));
   } else if (is_int32(x)) {
-    movq(dst, Immediate(static_cast<int32_t>(x)));
+    __k movq(dst, Immediate(static_cast<int32_t>(x)));
   } else {
-    movq(dst, x, RelocInfo::NONE64);
+    __k movq(dst, x, RelocInfo::NONE64);
   }
 }
+
+#ifndef V8_TARGET_ARCH_X32
 void MacroAssembler::Set(const Operand& dst, int64_t x) {
   if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
@@ -980,6 +1014,11 @@
     movq(dst, kScratchRegister);
   }
 }
+#else
+void MacroAssembler::Set(const Operand& dst, int32_t x) {
+  movl(dst, Immediate(x));
+}
+#endif
 bool MacroAssembler::IsUnsafeInt(const int x) {
@@ -988,6 +1027,7 @@
 }
+#ifndef V8_TARGET_ARCH_X32
 void MacroAssembler::SafeMove(Register dst, Smi* src) {
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(kSmiValueSize == 32);  // JIT cookie can be converted to Smi.
@@ -999,8 +1039,21 @@
     Move(dst, src);
   }
 }
+#else
+void MacroAssembler::SafeMove(Register dst, Smi* src) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(kSmiValueSize == 31);  // JIT cookie can be converted to Smi.
+  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
+    movl(dst, Immediate(reinterpret_cast<int32_t>(src) ^ jit_cookie()));
+    xorl(dst, Immediate(jit_cookie()));
+  } else {
+    Move(dst, src);
+  }
+}
+#endif
+#ifndef V8_TARGET_ARCH_X32
 void MacroAssembler::SafePush(Smi* src) {
   ASSERT(kSmiValueSize == 32);  // JIT cookie can be converted to Smi.
   if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
@@ -1011,6 +1064,17 @@
     Push(src);
   }
 }
+#else
+void MacroAssembler::SafePush(Smi* src) {
+  ASSERT(kSmiValueSize == 31);  // JIT cookie can be converted to Smi.
+  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
+    Push(Immediate(reinterpret_cast<int32_t>(src) ^ jit_cookie()));
+    xorl(Operand(rsp, 0), Immediate(jit_cookie()));
+  } else {
+    Push(src);
+  }
+}
+#endif
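
Both X32 variants keep the existing JIT-cookie hardening: the smi's raw bits
are XOR-ed with the per-isolate cookie before being emitted as an immediate
and XOR-ed back at run time, so an attacker-chosen value never appears
verbatim in the instruction stream. A standalone sketch of the round trip
(stand-in cookie value, not V8 code):

    #include <cassert>
    #include <cstdint>

    int32_t MaskForEmit(int32_t smi_bits, int32_t cookie) {
      return smi_bits ^ cookie;  // the immediate embedded in generated code
    }

    int32_t UnmaskAtRuntime(int32_t embedded, int32_t cookie) {
      return embedded ^ cookie;  // what the emitted xorl reconstructs
    }

    int main() {
      const int32_t cookie = 0x5a5a5a5a;  // stand-in for jit_cookie()
      const int32_t smi_bits = 42 << 1;   // x32 smi: value << 1, tag bit 0
      assert(UnmaskAtRuntime(MaskForEmit(smi_bits, cookie), cookie) == smi_bits);
      return 0;
    }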
 // ----------------------------------------------------------------------------
@@ -1030,6 +1094,7 @@
 }
 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
+#ifndef V8_TARGET_ARCH_X32
   if (emit_debug_code()) {
     movq(dst,
          reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
@@ -1044,6 +1109,9 @@
       bind(&ok);
     }
   }
+#else
+  // Disable the uninitialized-kSmiConstantRegister check for X32.
+#endif
   int value = source->value();
   if (value == 0) {
     xorl(dst, dst);
@@ -1054,7 +1122,8 @@
   switch (uvalue) {
     case 9:
-      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
+      lea(dst,
+          Operand(kSmiConstantRegister, kSmiConstantRegister, times_8, 0));
       break;
     case 8:
       xorl(dst, dst);
@@ -1065,13 +1134,16 @@
       lea(dst, Operand(dst, kSmiConstantRegister, times_4, 0));
       break;
     case 5:
-      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
+      lea(dst,
+          Operand(kSmiConstantRegister, kSmiConstantRegister, times_4, 0));
       break;
     case 3:
-      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
+      lea(dst,
+          Operand(kSmiConstantRegister, kSmiConstantRegister, times_2, 0));
       break;
     case 2:
-      lea(dst, Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
+      lea(dst,
+          Operand(kSmiConstantRegister, kSmiConstantRegister, times_1, 0));
       break;
     case 1:
       movq(dst, kSmiConstantRegister);
@@ -1080,7 +1152,7 @@
       UNREACHABLE();
       return;
     default:
-      movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64);
+      __k movq(dst, reinterpret_cast<uint64_t>(source), RelocInfo::NONE64);
       return;
   }
   if (negative) {
@@ -1110,8 +1182,13 @@
     }
     bind(&ok);
   }
+#ifndef V8_TARGET_ARCH_X32
   ASSERT(kSmiShift % kBitsPerByte == 0);
   movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+#else
+  Integer32ToSmi(kScratchRegister, src);
+  movl(dst, kScratchRegister);
+#endif
 }
@@ -1132,12 +1209,21 @@
   if (!dst.is(src)) {
     movq(dst, src);
   }
+#ifndef V8_TARGET_ARCH_X32
   shr(dst, Immediate(kSmiShift));
+#else
+  sarl(dst, Immediate(kSmiShift));
+#endif
 }
 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
+#ifndef V8_TARGET_ARCH_X32
   movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+#else
+  movl(dst, src);
+  sarl(dst, Immediate(kSmiShift));
+#endif
 }
@@ -1146,12 +1232,22 @@
   if (!dst.is(src)) {
     movq(dst, src);
   }
+#ifndef V8_TARGET_ARCH_X32
   sar(dst, Immediate(kSmiShift));
+#else
+  shl(dst, Immediate(32));
+  sar(dst, Immediate(32 + kSmiShift));
+#endif
 }
 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
+#ifndef V8_TARGET_ARCH_X32
   movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+#else
+  movl(dst, src);
+  SmiToInteger64(dst, dst);
+#endif
 }
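
All of the #ifdef pairs in this region come down to the two smi encodings:
x64 keeps the 32-bit payload in the upper half of a 64-bit word
(kSmiShift == 32), while this X32 port keeps a 31-bit payload shifted left by
one (kSmiShift == 1); the tag bit is 0 in both. A sketch of tag/untag under
that assumption:

    #include <cstdint>

    int64_t TagSmiX64(int32_t value) { return static_cast<int64_t>(value) << 32; }
    int32_t UntagSmiX64(int64_t smi) { return static_cast<int32_t>(smi >> 32); }

    // x32: the payload must fit in 31 bits, i.e. lie in [-2^30, 2^30).
    int32_t TagSmiX32(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    }
    int32_t UntagSmiX32(int32_t smi) { return smi >> 1; }  // arithmetic shift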
@@ -1200,7 +1296,11 @@
 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
   AssertSmi(dst);
+#ifndef V8_TARGET_ARCH_X32
   cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
+#else
+  cmpl(dst, Immediate(src));
+#endif
 }
@@ -1213,7 +1313,12 @@
 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
+#ifndef V8_TARGET_ARCH_X32
   cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+#else
+  SmiToInteger32(kScratchRegister, dst);
+  cmpl(kScratchRegister, src);
+#endif
 }
@@ -1296,8 +1401,14 @@
     return CheckSmi(first);
   }
   STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
+#ifndef V8_TARGET_ARCH_X32
   leal(kScratchRegister, Operand(first, second, times_1, 0));
   testb(kScratchRegister, Immediate(0x03));
+#else
+  movl(kScratchRegister, first);
+  orl(kScratchRegister, second);
+  testb(kScratchRegister, Immediate(kSmiTagMask));
+#endif
   return zero;
 }
@@ -1342,6 +1453,7 @@
 }
+#ifndef V8_TARGET_ARCH_X32
 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
   // A 32-bit integer value can always be converted to a smi.
   return always;
@@ -1354,8 +1466,20 @@
   testl(src, src);
   return positive;
 }
+#else
+Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
+  cmpl(src, Immediate(0xc0000000));
+  return positive;
+}
+Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
+  testl(src, Immediate(0xc0000000));
+  return zero;
+}
+#endif
+
+
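
A 31-bit smi can hold exactly the int32 values whose bits 31 and 30 agree,
i.e. the range [-2^30, 2^30); the cmpl/testl sequences above test precisely
that via the flags. A C++ rendering of both conditions (a sketch mirroring
the flag semantics):

    #include <cstdint>

    // cmpl(src, 0xc0000000); return positive;
    // src - 0xc0000000 == src + 0x40000000 (mod 2^32), whose sign bit is
    // clear exactly when src lies in [-2^30, 2^30).
    bool IsValidSmiX32(int32_t src) {
      return static_cast<int32_t>(static_cast<uint32_t>(src) + 0x40000000u) >= 0;
    }

    // testl(src, 0xc0000000); return zero;
    // An unsigned value fits when its top two bits are clear (below 2^30).
    bool IsValidUSmiX32(uint32_t src) {
      return (src & 0xc0000000u) == 0;
    }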
 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) {
   if (dst.is(src)) {
     andl(dst, Immediate(kSmiTagMask));
@@ -1517,7 +1641,11 @@
 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
   if (constant->value() != 0) {
+#ifndef V8_TARGET_ARCH_X32
     addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
+#else
+    addl(dst, Immediate(constant));
+#endif
   }
 }
@@ -1842,7 +1970,11 @@
   // We overshoot a little and go to slow case if we divide min-value
   // by any negative value, not just -1.
   Label safe_div;
+#ifndef V8_TARGET_ARCH_X32
   testl(rax, Immediate(0x7fffffff));
+#else
+  testl(rax, Immediate(0x3fffffff));
+#endif
   j(not_zero, &safe_div, Label::kNear);
   testq(src2, src2);
   if (src1.is(rax)) {
@@ -1937,7 +2069,11 @@
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src.is(kScratchRegister));
   // Set tag and padding bits before negating, so that they are zero afterwards.
+#ifndef V8_TARGET_ARCH_X32
   movl(kScratchRegister, Immediate(~0));
+#else
+  movl(kScratchRegister, Immediate(1));
+#endif
   if (dst.is(src)) {
     xor_(dst, kScratchRegister);
   } else {
@@ -2052,11 +2188,18 @@
       j(negative, on_not_smi_result, near_jump);
     }
     shr(dst, Immediate(shift_value + kSmiShift));
+#ifndef V8_TARGET_ARCH_X32
     shl(dst, Immediate(kSmiShift));
+#else
+    testl(dst, Immediate(0xc0000000));
+    j(not_zero, on_not_smi_result, near_jump);
+    shll(dst, Immediate(kSmiShift));
+#endif
   }
 }
+#ifndef V8_TARGET_ARCH_X32
 void MacroAssembler::SmiShiftLeft(Register dst,
                                   Register src1,
                                   Register src2) {
@@ -2070,13 +2213,51 @@
   and_(rcx, Immediate(0x1f));
   shl_cl(dst);
 }
+#else
+void MacroAssembler::SmiShiftLeft(Register dst,
+                                  Register src1,
+                                  Register src2,
+                                  Label* on_not_smi_result) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(rcx));
+  Label result_ok;
+  if (src1.is(rcx) || src2.is(rcx)) {
+    movl(kScratchRegister, rcx);
+  }
+  // Untag shift amount.
+  if (!dst.is(src1)) {
+    movl(dst, src1);
+  }
+  SmiToInteger32(dst, dst);
+  SmiToInteger32(rcx, src2);
+  // The shift amount is taken from the low 5 bits, not 6 as a 64-bit shl would.
+  andl(rcx, Immediate(0x1f));
+  shll_cl(dst);
+  cmpl(dst, Immediate(0xc0000000));
+  j(not_sign, &result_ok);
+  if (src1.is(rcx) || src2.is(rcx)) {
+    if (src1.is(rcx)) {
+      movl(src1, kScratchRegister);
+    } else {
+      movl(src2, kScratchRegister);
+    }
+  }
+  jmp(on_not_smi_result);
+  bind(&result_ok);
+  Integer32ToSmi(dst, dst);
+}
+#endif
+
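
Unlike x64, where any int32 survives tagging, an x32 left shift can leave the
31-bit range, so the routine range-checks the result before retagging and
bails out to on_not_smi_result otherwise. A C++ stand-in for the emitted data
flow (unsigned arithmetic used to avoid signed-shift overflow):

    #include <cstdint>
    #include <optional>

    std::optional<int32_t> SmiShiftLeftX32(int32_t smi1, int32_t smi2) {
      int32_t value = smi1 >> 1;           // SmiToInteger32(dst, dst)
      int32_t shift = (smi2 >> 1) & 0x1f;  // SmiToInteger32 + andl(rcx, 0x1f)
      int32_t result =
          static_cast<int32_t>(static_cast<uint32_t>(value) << shift);  // shll_cl
      // cmpl(dst, 0xc0000000); j(not_sign, &result_ok);
      if (static_cast<int32_t>(static_cast<uint32_t>(result) + 0x40000000u) < 0)
        return std::nullopt;               // jmp(on_not_smi_result)
      return static_cast<int32_t>(static_cast<uint32_t>(result) << 1);  // retag
    }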
 void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                           Register src1,
                                           Register src2,
                                           Label* on_not_smi_result,
                                           Label::Distance near_jump) {
+#ifndef V8_TARGET_ARCH_X32
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src1.is(kScratchRegister));
   ASSERT(!src2.is(kScratchRegister));
@@ -2108,6 +2289,37 @@
   // src2 was zero and src1 negative.
   j(negative, on_not_smi_result, near_jump);
 }
+#else
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(rcx));
+  Label result_ok;
+
+  // dst and src1 can be the same, because the one case that bails out
+  // is a shift by 0, which leaves dst, and therefore src1, unchanged.
+  if (src1.is(rcx) || src2.is(rcx)) {
+    movl(kScratchRegister, rcx);
+  }
+  if (!dst.is(src1)) {
+    movq(dst, src1);
+  }
+  SmiToInteger32(rcx, src2);
+  SmiToInteger32(dst, dst);
+  shrl_cl(dst);
+  testl(dst, Immediate(0xc0000000));
+  j(zero, &result_ok);
+  if (src1.is(rcx) || src2.is(rcx)) {
+    if (src1.is(rcx)) {
+      movl(src1, kScratchRegister);
+    } else {
+      movl(src2, kScratchRegister);
+    }
+  }
+  jmp(on_not_smi_result);
+  bind(&result_ok);
+  Integer32ToSmi(dst, dst);
+#endif
 }
@@ -2127,7 +2339,11 @@
     movq(dst, src1);
   }
   SmiToInteger32(rcx, src2);
+#ifndef V8_TARGET_ARCH_X32
   orl(rcx, Immediate(kSmiShift));
+#else
+  SmiToInteger32(dst, dst);
+#endif
   sar_cl(dst);  // Shift 32 + original rcx & 0x1f.
   shl(dst, Immediate(kSmiShift));
   if (src1.is(rcx)) {
@@ -2176,7 +2392,7 @@
   // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi.
 }
-
+#ifndef V8_TARGET_ARCH_X32
 SmiIndex MacroAssembler::SmiToIndex(Register dst,
                                     Register src,
                                     int shift) {
@@ -2193,7 +2409,24 @@
   }
   return SmiIndex(dst, times_1);
 }
+#else
+SmiIndex MacroAssembler::SmiToIndex(Register dst,
+                                    Register src,
+                                    int shift) {
+  ASSERT(shift >= times_1 && shift <= times_8);
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  if (shift == times_1) {
+    sarl(dst, Immediate(kSmiShift));
+    return SmiIndex(dst, times_1);
+  }
+  return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
+}
+#endif
+
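
The x32 SmiToIndex exploits the one-bit tag: a smi already equals value << 1,
so for any scale above times_1 the register can stay tagged and the scale
factor is simply reduced by one; only times_1 forces an untag. A sketch of
the equivalence:

    #include <cstdint>

    // For shift in 0..3 (times_1..times_8):
    // (value << 1) << (shift - 1) == value << shift.
    uint32_t ScaledIndexX32(int32_t smi, int shift) {
      if (shift == 0)                               // times_1: must untag
        return static_cast<uint32_t>(smi >> 1);
      return static_cast<uint32_t>(smi) << (shift - 1);  // reuse the tag shift
    }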
+#ifndef V8_TARGET_ARCH_X32
 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
                                             Register src,
                                             int shift) {
@@ -2210,11 +2443,33 @@
   }
   return SmiIndex(dst, times_1);
 }
+#else
+SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
+                                            Register src,
+                                            int shift) {
+  // Register src holds a positive smi.
+  ASSERT(shift >= times_1 && shift <= times_8);
+  if (!dst.is(src)) {
+    movl(dst, src);
+  }
+  neg(dst);
+  if (shift == times_1) {
+    sar(dst, Immediate(kSmiShift));
+    return SmiIndex(dst, times_1);
+  }
+  return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
+}
+#endif
 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
+#ifndef V8_TARGET_ARCH_X32
   ASSERT_EQ(0, kSmiShift % kBitsPerByte);
   addl(dst, Operand(src, kSmiShift / kBitsPerByte));
+#else
+  SmiToInteger32(kScratchRegister, src);
+  addl(dst, kScratchRegister);
+#endif
 }
@@ -2451,6 +2706,62 @@
 }
+#ifdef V8_TARGET_ARCH_X32
+void MacroAssembler::Push(Immediate value) {
+  leal(rsp, Operand(rsp, -4));
+  movl(Operand(rsp, 0), value);
+}
+
+
+void MacroAssembler::Push_imm32(int32_t imm32) {
+  leal(rsp, Operand(rsp, -4));
+  movl(Operand(rsp, 0), Immediate(imm32));
+}
+
+
+void MacroAssembler::Push(Register src) {
+  // We use a 64-bit push for rbp in the prologue.
+  ASSERT(src.code() != rbp.code());
+  leal(rsp, Operand(rsp, -4));
+  movl(Operand(rsp, 0), src);
+}
+
+
+void MacroAssembler::Push(const Operand& src) {
+  movl(kScratchRegister, src);
+  leal(rsp, Operand(rsp, -4));
+  movl(Operand(rsp, 0), kScratchRegister);
+}
+
+
+void MacroAssembler::Pop(Register dst) {
+  // We use a 64-bit pop for rbp in the epilogue.
+  ASSERT(dst.code() != rbp.code());
+  movl(dst, Operand(rsp, 0));
+  leal(rsp, Operand(rsp, 4));
+}
+
+
+void MacroAssembler::Pop(const Operand& dst) {
+  Register scratch = kScratchRegister;
+  bool need_extra_scratch = dst.AddressUsesRegister(kScratchRegister);
+  if (need_extra_scratch) {
+    scratch = kSmiConstantRegister;
+  }
+  movl(scratch, Operand(rsp, 0));
+  movl(dst, scratch);
+  if (need_extra_scratch) {
+    // Restore the value of kSmiConstantRegister.
+    // Should use InitializeSmiConstantRegister();
+    movl(kSmiConstantRegister,
+         reinterpret_cast<uint32_t>(Smi::FromInt(kSmiConstantRegisterValue)),
+         RelocInfo::NONE32);
+  }
+  leal(rsp, Operand(rsp, 4));
+}
+#endif
+
+
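
Hardware push/pop always move 8 bytes in long mode, while x32 stack slots
hold 4-byte pointers, so the helpers above emulate 4-byte pushes and pops
with an explicit rsp adjustment; rbp stays excluded because the prologue and
epilogue still use the real 8-byte push/pop for it. Roughly, the expansion is:

    // Push(reg):  leal rsp, [rsp-4]   // instead of: push reg  (moves 8 bytes)
    //             movl [rsp], reg
    // Pop(reg):   movl reg, [rsp]     // instead of: pop reg   (moves 8 bytes)
    //             leal rsp, [rsp+4]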
 void MacroAssembler::Push(Smi* source) {
   intptr_t smi = reinterpret_cast<intptr_t>(source);
   if (is_int32(smi)) {
@@ -2470,17 +2781,33 @@
 void MacroAssembler::Test(const Operand& src, Smi* source) {
+#ifndef V8_TARGET_ARCH_X32
   testl(Operand(src, kIntSize), Immediate(source->value()));
+#else
+  testl(src, Immediate(source));
+#endif
 }
 void MacroAssembler::TestBit(const Operand& src, int bits) {
+#ifdef V8_TARGET_ARCH_X32
+  // Pointer-sized fields in SharedFunctionInfo are stored as smis.
+  bits += kSmiTagSize + kSmiShiftSize;
+#endif
   int byte_offset = bits / kBitsPerByte;
   int bit_in_byte = bits & (kBitsPerByte - 1);
   testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
 }
+#ifdef V8_TARGET_ARCH_X32
+void MacroAssembler::Jump(const Operand& src) {
+  movl(kScratchRegister, src);
+  jmp(kScratchRegister);
+}
+#endif
+
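
The Operand overloads of Jump above (and Call further below) go through
kScratchRegister because a memory-indirect jmp/call reads a full 64-bit
pointer in long mode, while an x32 slot holds only 4 bytes; movl loads the
32-bit code pointer zero-extended, after which the register-indirect form
touches no memory. Roughly (kScratchRegister is r10 on this port):

    // movl r10, [slot]   // load the 32-bit code pointer, zero-extended
    // jmp  r10           // register-indirect, reads no out-of-bounds bytes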
 void MacroAssembler::Jump(ExternalReference ext) {
   LoadAddress(kScratchRegister, ext);
   jmp(kScratchRegister);
@@ -2518,6 +2845,14 @@
 }
+#ifdef V8_TARGET_ARCH_X32
+void MacroAssembler::Call(const Operand& op) {
+  movl(kScratchRegister, op);
+  call(kScratchRegister);
+}
+#endif
+
+
 void MacroAssembler::Call(Address destination, RelocInfo::Mode rmode) {
 #ifdef DEBUG
   int end_position = pc_offset() + CallSize(destination, rmode);
@@ -2639,7 +2974,11 @@
 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                     int handler_index) {
   // Adjust this code if not the case.
+#ifndef V8_TARGET_ARCH_X32
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+#else
+  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize + kHWRegSize);
+#endif
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2652,7 +2991,7 @@
     // The frame pointer does not point to a JS frame so we save NULL for
     // rbp. We expect the code throwing an exception to check rbp before
     // dereferencing it to restore the context.
-    push(Immediate(0));  // NULL frame pointer.
+    __k push(Immediate(0));  // NULL frame pointer.
     Push(Smi::FromInt(0));  // No context.
   } else {
     push(rbp);
@@ -2698,7 +3037,11 @@
 void MacroAssembler::Throw(Register value) {
   // Adjust this code if not the case.
+#ifndef V8_TARGET_ARCH_X32
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+#else
+  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize + kHWRegSize);
+#endif
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2738,7 +3081,11 @@
 void MacroAssembler::ThrowUncatchable(Register value) {
   // Adjust this code if not the case.
+#ifndef V8_TARGET_ARCH_X32
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+#else
+  STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize + kHWRegSize);
+#endif
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
   STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
@@ -2789,9 +3136,9 @@
   if (is_uint16(bytes_dropped)) {
     ret(bytes_dropped);
   } else {
-    pop(scratch);
+    __k pop(scratch);
     addq(rsp, Immediate(bytes_dropped));
-    push(scratch);
+    __k push(scratch);
     ret(0);
   }
 }
@@ -3057,8 +3404,8 @@
 void MacroAssembler::AssertZeroExtended(Register int32_register) {
   if (emit_debug_code()) {
     ASSERT(!int32_register.is(kScratchRegister));
-    movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
-    cmpq(kScratchRegister, int32_register);
+    __k movq(kScratchRegister, 0x100000000l, RelocInfo::NONE64);
+    __k cmpq(kScratchRegister, int32_register);
     Check(above_equal, "32 bit value in register is not zero-extended");
   }
 }
@@ -3328,8 +3675,14 @@
   ASSERT(function.is(rdi));
   movq(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   movq(rsi, FieldOperand(function, JSFunction::kContextOffset));
+#ifndef V8_TARGET_ARCH_X32
   movsxlq(rbx,
           FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+#else
+  movl(rbx,
+       FieldOperand(rdx, SharedFunctionInfo::kFormalParameterCountOffset));
+  SmiToInteger32(rbx, rbx);
+#endif
   // Advances rdx to the end of the Code object header, to the start of
   // the executable code.
   movq(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
@@ -3467,17 +3820,27 @@
 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
   // Set up the frame structure on the stack.
   // All constants are relative to the frame pointer of the exit frame.
+#ifndef V8_TARGET_ARCH_X32
   ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
   ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
+#else
+  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kHWRegSize);
+  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kHWRegSize);
+#endif
   ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
   push(rbp);
   movq(rbp, rsp);
   // Reserve room for entry stack pointer and push the code object.
   ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
+#ifndef V8_TARGET_ARCH_X32
   push(Immediate(0));  // Saved entry sp, patched before call.
   movq(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
   push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
+#else
+  movl(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
+  push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
+#endif
   // Save the frame pointer and the context in top.
   if (save_rax) {
@@ -3498,15 +3861,22 @@
   // Optionally save all XMM registers.
   if (save_doubles) {
     int space = XMMRegister::kMaxNumRegisters * kDoubleSize +
-        arg_stack_space * kPointerSize;
+        __q arg_stack_space * kPointerSize;
     subq(rsp, Immediate(space));
     int offset = -2 * kPointerSize;
+#ifndef V8_TARGET_ARCH_X32
     for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
       movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
     }
+#else
+    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg);
+    }
+#endif
   } else if (arg_stack_space > 0) {
-    subq(rsp, Immediate(arg_stack_space * kPointerSize));
+    __q subq(rsp, Immediate(arg_stack_space * kPointerSize));
   }
   // Get the required frame alignment for the OS.
@@ -3545,13 +3915,20 @@
   // r15 : argv
   if (save_doubles) {
     int offset = -2 * kPointerSize;
+#ifndef V8_TARGET_ARCH_X32
     for (int i = 0; i < XMMRegister::NumAllocatableRegisters(); i++) {
       XMMRegister reg = XMMRegister::FromAllocationIndex(i);
       movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
     }
+#else
+    for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
+      XMMRegister reg = XMMRegister::from_code(i);
+      movsd(reg, Operand(rbp, offset - ((i + 1) * kDoubleSize)));
+    }
+#endif
   }
   // Get the return address from the stack and restore the frame pointer.
-  movq(rcx, Operand(rbp, 1 * kPointerSize));
+  __q movq(rcx, Operand(rbp, 1 * kPointerSize));
   movq(rbp, Operand(rbp, 0 * kPointerSize));
   // Drop everything up to and including the arguments and the receiver
@@ -3559,7 +3936,7 @@
   lea(rsp, Operand(r15, 1 * kPointerSize));
   // Push the return address to get ready to return.
-  push(rcx);
+  __k push(rcx);
   LeaveExitFrameEpilogue();
 }
@@ -3850,10 +4227,24 @@
   // Align the next allocation. Storing the filler map without checking top is
   // always safe because the limit of the heap is always aligned.
+#ifndef V8_TARGET_ARCH_X32
   if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
     testq(result, Immediate(kDoubleAlignmentMask));
     Check(zero, "Allocation is not double aligned");
   }
+#else
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    Label aligned;
+    testl(result, Immediate(kDoubleAlignmentMask));
+    j(zero, &aligned, Label::kNear);
+    LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex);
+    movl(Operand(result, 0), kScratchRegister);
+    addl(result, Immediate(kDoubleSize / 2));
+    bind(&aligned);
+  }
+#endif
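
On x32 the allocation top is only guaranteed 4-byte alignment, so instead of
merely asserting (as the x64 debug path does), the port fixes alignment up by
planting a one-pointer filler object, which keeps the heap iterable for the
GC, and bumping the result by kDoubleSize / 2 (4 bytes). A sketch of the
fixup under those assumptions:

    #include <cstdint>

    // If top is not 8-byte aligned, plant a one-word filler there and
    // advance to the next 8-byte boundary.
    uint32_t AlignForDoubleX32(uint32_t top, uint32_t one_pointer_filler_map) {
      if (top & 7) {  // testl(result, Immediate(kDoubleAlignmentMask))
        *reinterpret_cast<uint32_t*>(static_cast<uintptr_t>(top)) =
            one_pointer_filler_map;  // movl(Operand(result, 0), filler map)
        top += 4;                    // addl(result, Immediate(kDoubleSize / 2))
      }
      return top;
    }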
// Calculate new top and bail out if new space is exhausted. |
ExternalReference allocation_limit = |
@@ -3917,10 +4308,24 @@ |
// Align the next allocation. Storing the filler map without checking top is |
// always safe because the limit of the heap is always aligned. |
+#ifndef V8_TARGET_ARCH_X32 |
if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { |
testq(result, Immediate(kDoubleAlignmentMask)); |
Check(zero, "Allocation is not double aligned"); |
} |
+#else |
+ if ((flags & DOUBLE_ALIGNMENT) != 0) { |
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment); |
+ Label aligned; |
+ testl(result, Immediate(kDoubleAlignmentMask)); |
+ j(zero, &aligned, Label::kNear); |
+ LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex); |
+ movl(Operand(result, 0), kScratchRegister); |
+ addl(result, Immediate(kDoubleSize / 2)); |
+ bind(&aligned); |
+ } |
+#endif |
// Calculate new top and bail out if new space is exhausted. |
ExternalReference allocation_limit = |
@@ -3971,6 +4376,27 @@ |
// Load address of new object into result. |
LoadAllocationTopHelper(result, scratch, flags); |
+ // Align the next allocation. Storing the filler map without checking top is |
+ // always safe because the limit of the heap is always aligned. |
+#ifndef V8_TARGET_ARCH_X32 |
+ if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { |
+ testq(result, Immediate(kDoubleAlignmentMask)); |
+ Check(zero, "Allocation is not double aligned"); |
+ } |
+#else |
+ if ((flags & DOUBLE_ALIGNMENT) != 0) { |
+ ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
+ ASSERT(kPointerAlignment * 2 == kDoubleAlignment); |
+ Label aligned; |
+ testl(result, Immediate(kDoubleAlignmentMask)); |
+ j(zero, &aligned, Label::kNear); |
+ LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex); |
+ movl(Operand(result, 0), kScratchRegister); |
+ addl(result, Immediate(kDoubleSize / 2)); |
+ bind(&aligned); |
+ } |
+#endif |
+ |
// Calculate new top and bail out if new space is exhausted. |
ExternalReference allocation_limit = |
AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
@@ -3986,13 +4412,6 @@ |
// Update allocation top. |
UpdateAllocationTopHelper(result_end, scratch, flags); |
- // Align the next allocation. Storing the filler map without checking top is |
- // always safe because the limit of the heap is always aligned. |
- if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) { |
- testq(result, Immediate(kDoubleAlignmentMask)); |
- Check(zero, "Allocation is not double aligned"); |
- } |
- |
// Tag the result if requested. |
if ((flags & TAG_OBJECT) != 0) { |
addq(result, Immediate(kHeapObjectTag)); |
@@ -4409,9 +4828,13 @@ |
ASSERT(IsPowerOf2(frame_alignment)); |
int argument_slots_on_stack = |
ArgumentStackSlotsForCFunctionCall(num_arguments); |
- subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize)); |
+ __q subq(rsp, Immediate((argument_slots_on_stack + 1) * kPointerSize)); |
and_(rsp, Immediate(-frame_alignment)); |
+#ifndef V8_TARGET_ARCH_X32 |
movq(Operand(rsp, argument_slots_on_stack * kPointerSize), kScratchRegister); |
+#else |
+ movq(Operand(rsp, argument_slots_on_stack * kHWRegSize), kScratchRegister); |
+#endif |
} |
@@ -4434,7 +4857,7 @@ |
ASSERT(num_arguments >= 0); |
int argument_slots_on_stack = |
ArgumentStackSlotsForCFunctionCall(num_arguments); |
- movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize)); |
+ __q movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize)); |
} |
@@ -4731,6 +5154,9 @@ |
bind(&no_info_available); |
} |
+#undef __n |
+#undef __q |
+#undef __k |
} } // namespace v8::internal |