Index: src/x64/macro-assembler-x64.cc
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 13d7ddaa685bf92e16a4adab1faa1dc17fee4aae..195b766bbe67ba2b7d98d3e6c30e7fa560e2d216 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -991,6 +991,7 @@ bool MacroAssembler::IsUnsafeInt(const int x) {
 void MacroAssembler::SafeMove(Register dst, Smi* src) {
   ASSERT(!dst.is(kScratchRegister));
+#if !V8_USE_31_BITS_SMI_VALUE
   ASSERT(kSmiValueSize == 32);  // JIT cookie can be converted to Smi.
   if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
     Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
@@ -999,10 +1000,22 @@ void MacroAssembler::SafeMove(Register dst, Smi* src) {
   } else {
     Move(dst, src);
   }
+#else
+  ASSERT(kSmiValueSize == 31);
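+  // With 31-bit smis the tagged value fits in 32 bits, so the raw smi bits
+  // can be combined with the JIT cookie directly.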
+  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
+    movq(dst, Immediate(static_cast<int>(reinterpret_cast<intptr_t>(src)) ^
+                        jit_cookie()));
+    movq(kScratchRegister, Immediate(jit_cookie()));
+    xor_(dst, kScratchRegister);
+  } else {
+    Move(dst, src);
+  }
+#endif
 }

 void MacroAssembler::SafePush(Smi* src) {
+#if !V8_USE_31_BITS_SMI_VALUE
   ASSERT(kSmiValueSize == 32);  // JIT cookie can be converted to Smi.
   if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
     Push(Smi::FromInt(src->value() ^ jit_cookie()));
@@ -1011,6 +1024,17 @@ void MacroAssembler::SafePush(Smi* src) {
   } else {
     Push(src);
   }
+#else
+  ASSERT(kSmiValueSize == 31);
+  if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
+    push(Immediate(static_cast<int>(reinterpret_cast<intptr_t>(src)) ^
+                   jit_cookie()));
+    movq(kScratchRegister, Immediate(jit_cookie()));
+    xor_(Operand(rsp, 0), kScratchRegister);
+  } else {
+    Push(src);
+  }
+#endif
 }
@@ -1096,7 +1120,12 @@ void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
   if (!dst.is(src)) {
     movl(dst, src);
   }
+#if !V8_USE_31_BITS_SMI_VALUE
   shl(dst, Immediate(kSmiShift));
+#else
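+  // Shift within the low 32 bits, then sign-extend so the tagged smi is
+  // canonical in all 64 bits of the register.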
+  shll(dst, Immediate(kSmiShift));
+  movsxlq(dst, dst);
+#endif
 }
@@ -1112,8 +1141,13 @@ void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
     }
     bind(&ok);
   }
+#if !V8_USE_31_BITS_SMI_VALUE
   ASSERT(kSmiShift % kBitsPerByte == 0);
   movl(Operand(dst, kSmiShift / kBitsPerByte), src);
+#else
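+  // With a one-bit smi shift the payload is not byte-aligned, so build the
+  // whole smi in a register and store all 64 bits.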
+  Integer32ToSmi(kScratchRegister, src);
+  movq(dst, kScratchRegister);
+#endif
 }
@@ -1125,7 +1159,7 @@ void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
   } else {
     leal(dst, Operand(src, constant));
   }
-  shl(dst, Immediate(kSmiShift));
+  Integer32ToSmi(dst, dst);
 }
@@ -1134,12 +1168,21 @@ void MacroAssembler::SmiToInteger32(Register dst, Register src) {
   if (!dst.is(src)) {
     movq(dst, src);
   }
+#if !V8_USE_31_BITS_SMI_VALUE
   shr(dst, Immediate(kSmiShift));
+#else
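+  // The tag occupies the low bit, so untag with an arithmetic shift to
+  // preserve the sign.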
+  sarl(dst, Immediate(kSmiShift));
+#endif
 }

 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
+#if !V8_USE_31_BITS_SMI_VALUE
   movl(dst, Operand(src, kSmiShift / kBitsPerByte));
+#else
+  movl(dst, src);
+  sarl(dst, Immediate(kSmiShift));
+#endif
 }
@@ -1153,20 +1196,33 @@ void MacroAssembler::SmiToInteger64(Register dst, Register src) {
 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
+#if !V8_USE_31_BITS_SMI_VALUE
   movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
+#else
+  movq(dst, src);
+  SmiToInteger64(dst, dst);
+#endif
 }

 void MacroAssembler::SmiTest(Register src) {
   AssertSmi(src);
+#if !V8_USE_31_BITS_SMI_VALUE
   testq(src, src);
+#else
+  testl(src, src);
+#endif
 }

 void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
   AssertSmi(smi1);
   AssertSmi(smi2);
+#if !V8_USE_31_BITS_SMI_VALUE
   cmpq(smi1, smi2);
+#else
+  cmpl(smi1, smi2);
+#endif
 }
@@ -1179,10 +1235,18 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
 void MacroAssembler::Cmp(Register dst, Smi* src) {
   ASSERT(!dst.is(kScratchRegister));
   if (src->value() == 0) {
+#if !V8_USE_31_BITS_SMI_VALUE
     testq(dst, dst);
+#else
+    testl(dst, dst);
+#endif
   } else {
     Register constant_reg = GetSmiConstant(src);
+#if !V8_USE_31_BITS_SMI_VALUE
     cmpq(dst, constant_reg);
+#else
+    cmpl(dst, constant_reg);
+#endif
   }
 }
@@ -1190,33 +1254,54 @@ void MacroAssembler::Cmp(Register dst, Smi* src) {
 void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
   AssertSmi(dst);
   AssertSmi(src);
+#if !V8_USE_31_BITS_SMI_VALUE
   cmpq(dst, src);
+#else
+  cmpl(dst, src);
+#endif
 }

 void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
   AssertSmi(dst);
   AssertSmi(src);
+#if !V8_USE_31_BITS_SMI_VALUE
   cmpq(dst, src);
+#else
+  cmpl(dst, src);
+#endif
 }

 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
   AssertSmi(dst);
+#if !V8_USE_31_BITS_SMI_VALUE
   cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
+#else
+  cmpl(dst, Immediate(src));
+#endif
 }

 void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
+#if !V8_USE_31_BITS_SMI_VALUE
   // The Operand cannot use the smi register.
   Register smi_reg = GetSmiConstant(src);
   ASSERT(!dst.AddressUsesRegister(smi_reg));
   cmpq(dst, smi_reg);
+#else
+  cmpl(dst, Immediate(src));
+#endif
 }

 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
+#if !V8_USE_31_BITS_SMI_VALUE
   cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
+#else
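+  // With kSmiShift == 1 the payload is not at a byte offset, so untag into
+  // the scratch register first.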
+  SmiToInteger32(kScratchRegister, dst);
+  cmpl(kScratchRegister, src);
+#endif
 }
@@ -1245,7 +1330,11 @@ void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                          int power) {
   ASSERT((0 <= power) && (power < 32));
   if (dst.is(src)) {
+#if !V8_USE_31_BITS_SMI_VALUE
     shr(dst, Immediate(power + kSmiShift));
+#else
+    shrl(dst, Immediate(power + kSmiShift));
+#endif
   } else {
     UNIMPLEMENTED();  // Not used.
   }
@@ -1286,9 +1375,14 @@ Condition MacroAssembler::CheckSmi(const Operand& src) {
 Condition MacroAssembler::CheckNonNegativeSmi(Register src) {
   STATIC_ASSERT(kSmiTag == 0);
+#if !V8_USE_31_BITS_SMI_VALUE
   // Test that both bits of the mask 0x8000000000000001 are zero.
   movq(kScratchRegister, src);
   rol(kScratchRegister, Immediate(1));
+#else
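+  // Test that both bits of the mask 0x80000001 are zero.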
+  movl(kScratchRegister, src);
+  roll(kScratchRegister, Immediate(1));
+#endif
   testb(kScratchRegister, Immediate(3));
   return zero;
 }
@@ -1299,8 +1393,14 @@ Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
     return CheckSmi(first);
   }
   STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
+#if !V8_USE_31_BITS_SMI_VALUE
   leal(kScratchRegister, Operand(first, second, times_1, 0));
   testb(kScratchRegister, Immediate(0x03));
+#else
+  movl(kScratchRegister, first);
+  orl(kScratchRegister, second);
+  testb(kScratchRegister, Immediate(kSmiTagMask));
+#endif
   return zero;
 }
@@ -1310,9 +1410,15 @@ Condition MacroAssembler::CheckBothNonNegativeSmi(Register first,
   if (first.is(second)) {
     return CheckNonNegativeSmi(first);
   }
+#if !V8_USE_31_BITS_SMI_VALUE
   movq(kScratchRegister, first);
   or_(kScratchRegister, second);
   rol(kScratchRegister, Immediate(1));
+#else
+  movl(kScratchRegister, first);
+  orl(kScratchRegister, second);
+  roll(kScratchRegister, Immediate(1));
+#endif
   testl(kScratchRegister, Immediate(3));
   return zero;
 }
@@ -1340,22 +1446,36 @@ Condition MacroAssembler::CheckEitherSmi(Register first,
 Condition MacroAssembler::CheckIsMinSmi(Register src) {
   ASSERT(!src.is(kScratchRegister));
   // If we overflow by subtracting one, it's the minimal smi value.
+#if !V8_USE_31_BITS_SMI_VALUE
   cmpq(src, kSmiConstantRegister);
+#else
+  cmpl(src, kSmiConstantRegister);
+#endif
   return overflow;
 }

 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
+#if !V8_USE_31_BITS_SMI_VALUE
   // A 32-bit integer value can always be converted to a smi.
   return always;
+#else
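+  // A 32-bit value is a valid smi only if it is in the range [-2^30, 2^30).
+  // Comparing against 0xc0000000 (-2^30) leaves the sign flag clear exactly
+  // for values in that range.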
+  cmpl(src, Immediate(0xc0000000));
+  return positive;
+#endif
 }

 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
+#if !V8_USE_31_BITS_SMI_VALUE
   // An unsigned 32-bit integer value is valid as long as the high bit
   // is not set.
   testl(src, src);
   return positive;
+#else
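+  // An unsigned value is a valid smi only if the top two bits are clear.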
+  testl(src, Immediate(0xc0000000));
+  return zero;
+#endif
 }
@@ -1461,11 +1581,18 @@ void MacroAssembler::SmiTryAddConstant(Register dst,
   JumpIfNotSmi(src, on_not_smi_result, near_jump);
   Register tmp = (dst.is(src) ? kScratchRegister : dst);
   LoadSmiConstant(tmp, constant);
+#if !V8_USE_31_BITS_SMI_VALUE
   addq(tmp, src);
+#else
+  addl(tmp, src);
+#endif
   j(overflow, on_not_smi_result, near_jump);
   if (dst.is(src)) {
     movq(dst, tmp);
   }
+#if V8_USE_31_BITS_SMI_VALUE
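+  // The 32-bit add zero-extends into the upper half of dst; re-sign-extend
+  // to restore a canonical smi.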
+  movsxlq(dst, dst);
+#endif
 }
@@ -1520,7 +1647,11 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
   if (constant->value() != 0) {
+#if !V8_USE_31_BITS_SMI_VALUE
     addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
+#else
+    addq(dst, Immediate(constant));
+#endif
   }
 }
@@ -1538,13 +1669,28 @@ void MacroAssembler::SmiAddConstant(Register dst,
     ASSERT(!dst.is(kScratchRegister));
     LoadSmiConstant(kScratchRegister, constant);
+#if !V8_USE_31_BITS_SMI_VALUE
     addq(kScratchRegister, src);
+#else
+    addl(kScratchRegister, src);
+#endif
     j(overflow, on_not_smi_result, near_jump);
+#if !V8_USE_31_BITS_SMI_VALUE
     movq(dst, kScratchRegister);
+#else
+    movsxlq(dst, kScratchRegister);
+#endif
   } else {
     LoadSmiConstant(dst, constant);
+#if !V8_USE_31_BITS_SMI_VALUE
     addq(dst, src);
+#else
+    addl(dst, src);
+#endif
     j(overflow, on_not_smi_result, near_jump);
+#if V8_USE_31_BITS_SMI_VALUE
+    movsxlq(dst, dst);
+#endif
   }
 }
@@ -1560,10 +1706,15 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
     subq(dst, constant_reg);
   } else {
     if (constant->value() == Smi::kMinValue) {
+#if !V8_USE_31_BITS_SMI_VALUE
       LoadSmiConstant(dst, constant);
       // Adding and subtracting the min-value gives the same result, it only
      // differs on the overflow bit, which we don't check here.
       addq(dst, src);
+#else
+      movq(dst, src);
+      subq(dst, Immediate(constant));
+#endif
     } else {
       // Subtract by adding the negation.
       LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
@@ -1587,32 +1738,58 @@ void MacroAssembler::SmiSubConstant(Register dst,
     if (constant->value() == Smi::kMinValue) {
       // Subtracting min-value from any non-negative value will overflow.
       // We test the non-negativeness before doing the subtraction.
+#if !V8_USE_31_BITS_SMI_VALUE
       testq(src, src);
+#else
+      testl(src, src);
+#endif
       j(not_sign, on_not_smi_result, near_jump);
       LoadSmiConstant(kScratchRegister, constant);
       subq(dst, kScratchRegister);
     } else {
       // Subtract by adding the negation.
       LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
+#if !V8_USE_31_BITS_SMI_VALUE
       addq(kScratchRegister, dst);
+#else
+      addl(kScratchRegister, dst);
+#endif
       j(overflow, on_not_smi_result, near_jump);
+#if !V8_USE_31_BITS_SMI_VALUE
       movq(dst, kScratchRegister);
+#else
+      movsxlq(dst, kScratchRegister);
+#endif
     }
   } else {
     if (constant->value() == Smi::kMinValue) {
      // Subtracting min-value from any non-negative value will overflow.
       // We test the non-negativeness before doing the subtraction.
+#if !V8_USE_31_BITS_SMI_VALUE
       testq(src, src);
       j(not_sign, on_not_smi_result, near_jump);
       LoadSmiConstant(dst, constant);
       // Adding and subtracting the min-value gives the same result, it only
       // differs on the overflow bit, which we don't check here.
       addq(dst, src);
+#else
+      testl(src, src);
+      j(not_sign, on_not_smi_result, near_jump);
+      movq(dst, src);
+      subq(dst, Immediate(constant));
+#endif
     } else {
       // Subtract by adding the negation.
       LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
+#if !V8_USE_31_BITS_SMI_VALUE
       addq(dst, src);
+#else
+      addl(dst, src);
+#endif
       j(overflow, on_not_smi_result, near_jump);
+#if V8_USE_31_BITS_SMI_VALUE
+      movsxlq(dst, dst);
+#endif
     }
   }
 }
@@ -1627,13 +1804,21 @@ void MacroAssembler::SmiNeg(Register dst,
     movq(kScratchRegister, src);
     neg(dst);  // Low 32 bits are retained as zero by negation.
     // Test if result is zero or Smi::kMinValue.
+#if !V8_USE_31_BITS_SMI_VALUE
     cmpq(dst, kScratchRegister);
+#else
+    cmpl(dst, kScratchRegister);
+#endif
     j(not_equal, on_smi_result, near_jump);
     movq(src, kScratchRegister);
   } else {
     movq(dst, src);
     neg(dst);
+#if !V8_USE_31_BITS_SMI_VALUE
     cmpq(dst, src);
+#else
+    cmpl(dst, src);
+#endif
     // If the result is zero or Smi::kMinValue, negation failed to create a smi.
     j(not_equal, on_smi_result, near_jump);
   }
@@ -1649,13 +1834,28 @@ void MacroAssembler::SmiAdd(Register dst,
   ASSERT(!dst.is(src2));
   if (dst.is(src1)) {
     movq(kScratchRegister, src1);
+#if !V8_USE_31_BITS_SMI_VALUE
     addq(kScratchRegister, src2);
+#else
+    addl(kScratchRegister, src2);
+#endif
     j(overflow, on_not_smi_result, near_jump);
+#if !V8_USE_31_BITS_SMI_VALUE
     movq(dst, kScratchRegister);
+#else
+    movsxlq(dst, kScratchRegister);
+#endif
   } else {
     movq(dst, src1);
+#if !V8_USE_31_BITS_SMI_VALUE
     addq(dst, src2);
+#else
+    addl(dst, src2);
+#endif
     j(overflow, on_not_smi_result, near_jump);
+#if V8_USE_31_BITS_SMI_VALUE
+    movsxlq(dst, dst);
+#endif
   }
 }
@@ -1668,14 +1868,29 @@ void MacroAssembler::SmiAdd(Register dst,
   ASSERT_NOT_NULL(on_not_smi_result);
   if (dst.is(src1)) {
     movq(kScratchRegister, src1);
+#if !V8_USE_31_BITS_SMI_VALUE
     addq(kScratchRegister, src2);
+#else
+    addl(kScratchRegister, src2);
+#endif
     j(overflow, on_not_smi_result, near_jump);
+#if !V8_USE_31_BITS_SMI_VALUE
     movq(dst, kScratchRegister);
+#else
+    movsxlq(dst, kScratchRegister);
+#endif
   } else {
     ASSERT(!src2.AddressUsesRegister(dst));
     movq(dst, src1);
+#if !V8_USE_31_BITS_SMI_VALUE
     addq(dst, src2);
+#else
+    addl(dst, src2);
+#endif
     j(overflow, on_not_smi_result, near_jump);
+#if V8_USE_31_BITS_SMI_VALUE
+    movsxlq(dst, dst);
+#endif
   }
 }
@@ -1707,13 +1922,25 @@ void MacroAssembler::SmiSub(Register dst,
   ASSERT_NOT_NULL(on_not_smi_result);
   ASSERT(!dst.is(src2));
   if (dst.is(src1)) {
+#if !V8_USE_31_BITS_SMI_VALUE
     cmpq(dst, src2);
+#else
+    cmpl(dst, src2);
+#endif
     j(overflow, on_not_smi_result, near_jump);
     subq(dst, src2);
   } else {
+#if !V8_USE_31_BITS_SMI_VALUE
     movq(dst, src1);
     subq(dst, src2);
+#else
+    movl(dst, src1);
+    subl(dst, src2);
+#endif
     j(overflow, on_not_smi_result, near_jump);
+#if V8_USE_31_BITS_SMI_VALUE
+    movsxlq(dst, dst);
+#endif
   }
 }
@@ -1737,14 +1964,31 @@ void MacroAssembler::SmiSub(Register dst,
                             Label::Distance near_jump) {
   ASSERT_NOT_NULL(on_not_smi_result);
   if (dst.is(src1)) {
+#if !V8_USE_31_BITS_SMI_VALUE
     movq(kScratchRegister, src2);
     cmpq(src1, kScratchRegister);
+#else
+    movl(kScratchRegister, src2);
+    cmpl(src1, kScratchRegister);
+#endif
     j(overflow, on_not_smi_result, near_jump);
+#if !V8_USE_31_BITS_SMI_VALUE
     subq(src1, kScratchRegister);
+#else
+    subl(src1, kScratchRegister);
+    movsxlq(src1, src1);
+#endif
   } else {
+#if !V8_USE_31_BITS_SMI_VALUE
     movq(dst, src1);
     subq(dst, src2);
+#else
+    movl(dst, src1);
+    subl(dst, src2);
+#endif
     j(overflow, on_not_smi_result, near_jump);
+#if V8_USE_31_BITS_SMI_VALUE
+    movsxlq(dst, dst);
+#endif
   }
 }
@@ -1775,8 +2019,13 @@ void MacroAssembler::SmiMul(Register dst,
   if (dst.is(src1)) {
     Label failure, zero_correct_result;
     movq(kScratchRegister, src1);  // Create backup for later testing.
+#if !V8_USE_31_BITS_SMI_VALUE
     SmiToInteger64(dst, src1);
     imul(dst, src2);
+#else
+    SmiToInteger32(dst, src1);
+    imull(dst, src2);
+#endif
     j(overflow, &failure, Label::kNear);
     // Check for negative zero result.  If product is zero, and one
@@ -1798,9 +2047,17 @@ void MacroAssembler::SmiMul(Register dst,
     Set(dst, 0);
     bind(&correct_result);
+#if V8_USE_31_BITS_SMI_VALUE
+    movsxlq(dst, dst);
+#endif
   } else {
+#if !V8_USE_31_BITS_SMI_VALUE
     SmiToInteger64(dst, src1);
     imul(dst, src2);
+#else
+    SmiToInteger32(dst, src1);
+    imull(dst, src2);
+#endif
     j(overflow, on_not_smi_result, near_jump);
     // Check for negative zero result.  If product is zero, and one
     // argument is negative, go to slow case.
@@ -1813,6 +2070,9 @@ void MacroAssembler::SmiMul(Register dst,
     xor_(kScratchRegister, src2);
     j(negative, on_not_smi_result, near_jump);
     bind(&correct_result);
+#if V8_USE_31_BITS_SMI_VALUE
+    movsxlq(dst, dst);
+#endif
   }
 }
@@ -1845,7 +2105,11 @@ void MacroAssembler::SmiDiv(Register dst,
   // We overshoot a little and go to slow case if we divide min-value
   // by any negative value, not just -1.
   Label safe_div;
+#if !V8_USE_31_BITS_SMI_VALUE
   testl(rax, Immediate(0x7fffffff));
+#else
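+  // Smi::kMinValue (-2^30) has all of its low 30 bits clear.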
+  testl(rax, Immediate(0x3fffffff));
+#endif
   j(not_zero, &safe_div, Label::kNear);
   testq(src2, src2);
   if (src1.is(rax)) {
@@ -1939,8 +2203,12 @@ void MacroAssembler::SmiMod(Register dst,
 void MacroAssembler::SmiNot(Register dst, Register src) {
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src.is(kScratchRegister));
+#if !V8_USE_31_BITS_SMI_VALUE
   // Set tag and padding bits before negating, so that they are zero afterwards.
   movl(kScratchRegister, Immediate(~0));
+#else
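+  // Set the tag bit before negating, so that it is zero afterwards.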
+  movl(kScratchRegister, Immediate(1));
+#endif
   if (dst.is(src)) {
     xor_(dst, kScratchRegister);
   } else {
@@ -2030,6 +2298,7 @@ void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
 }

+#if !V8_USE_31_BITS_SMI_VALUE
 void MacroAssembler::SmiShiftLeftConstant(Register dst,
                                           Register src,
                                           int shift_value) {
@@ -2040,6 +2309,27 @@ void MacroAssembler::SmiShiftLeftConstant(Register dst,
     shl(dst, Immediate(shift_value));
   }
 }
+#else
+void MacroAssembler::SmiShiftLeftConstant(Register dst,
+                                          Register src,
+                                          int shift_value,
+                                          Label* on_not_smi_result) {
+  if (!dst.is(src)) {
+    movq(dst, src);
+  }
+
+  if (shift_value > 0) {
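+    // Untag, shift as a 32-bit value, and check that the result still fits
+    // in the 31-bit smi range before retagging.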
+    Label result_ok;
+    SmiToInteger32(dst, dst);
+    shll(dst, Immediate(shift_value));
+    cmpl(dst, Immediate(0xc0000000));
+    j(not_sign, &result_ok);
+    jmp(on_not_smi_result);
+    bind(&result_ok);
+    Integer32ToSmi(dst, dst);
+  }
+}
+#endif

 void MacroAssembler::SmiShiftLogicalRightConstant(
@@ -2051,15 +2341,28 @@ void MacroAssembler::SmiShiftLogicalRightConstant(
   } else {
     movq(dst, src);
     if (shift_value == 0) {
+#if !V8_USE_31_BITS_SMI_VALUE
       testq(dst, dst);
+#else
+      testl(dst, dst);
+#endif
       j(negative, on_not_smi_result, near_jump);
     }
+#if !V8_USE_31_BITS_SMI_VALUE
     shr(dst, Immediate(shift_value + kSmiShift));
     shl(dst, Immediate(kSmiShift));
+#else
+    SmiToInteger32(dst, dst);
+    shrl(dst, Immediate(shift_value));
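+    // The result is a valid smi only if the top two bits are clear.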
+    testl(dst, Immediate(0xc0000000));
+    j(not_zero, on_not_smi_result, near_jump);
+    shll(dst, Immediate(kSmiShift));
+#endif
   }
 }

+#if !V8_USE_31_BITS_SMI_VALUE
 void MacroAssembler::SmiShiftLeft(Register dst,
                                   Register src1,
                                   Register src2) {
@@ -2073,6 +2376,43 @@ void MacroAssembler::SmiShiftLeft(Register dst,
   and_(rcx, Immediate(0x1f));
   shl_cl(dst);
 }
+#else
+void MacroAssembler::SmiShiftLeft(Register dst,
+                                  Register src1,
+                                  Register src2,
+                                  Label* on_not_smi_result) {
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(rcx));
+  Label result_ok;
+
+  if (src1.is(rcx) || src2.is(rcx)) {
+    movq(kScratchRegister, rcx);
+  }
+  if (!dst.is(src1)) {
+    movq(dst, src1);
+  }
+  // Untag both the value and the shift amount.
+  SmiToInteger32(dst, dst);
+  SmiToInteger32(rcx, src2);
+  // The shift amount is given by the lower 5 bits, not six as for the
+  // 64-bit shl opcode.
+  andl(rcx, Immediate(0x1f));
+  shll_cl(dst);
+  cmpl(dst, Immediate(0xc0000000));
+  j(not_sign, &result_ok);
+  if (src1.is(rcx) || src2.is(rcx)) {
+    if (src1.is(rcx)) {
+      movq(src1, kScratchRegister);
+    } else {
+      movq(src2, kScratchRegister);
+    }
+  }
+  jmp(on_not_smi_result);
+  bind(&result_ok);
+  Integer32ToSmi(dst, dst);
+}
+#endif

 void MacroAssembler::SmiShiftLogicalRight(Register dst,
@@ -2080,6 +2420,7 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst,
                                           Register src2,
                                           Label* on_not_smi_result,
                                           Label::Distance near_jump) {
+#if !V8_USE_31_BITS_SMI_VALUE
   ASSERT(!dst.is(kScratchRegister));
   ASSERT(!src1.is(kScratchRegister));
   ASSERT(!src2.is(kScratchRegister));
@@ -2111,6 +2452,37 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst,
     // src2 was zero and src1 negative.
     j(negative, on_not_smi_result, near_jump);
   }
+#else
+  ASSERT(!dst.is(kScratchRegister));
+  ASSERT(!src1.is(kScratchRegister));
+  ASSERT(!src2.is(kScratchRegister));
+  ASSERT(!dst.is(rcx));
+  Label result_ok;
+
+  // dst and src1 may be the same register.  Note that on the bailout path
+  // dst has already been untagged and shifted, so in that case a caller
+  // cannot rely on src1 being preserved if it aliases dst.
+  if (src1.is(rcx) || src2.is(rcx)) {
+    movq(kScratchRegister, rcx);
+  }
+  if (!dst.is(src1)) {
+    movq(dst, src1);
+  }
+  SmiToInteger32(rcx, src2);
+  SmiToInteger32(dst, dst);
+  shrl_cl(dst);
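+  // The result is a valid smi only if the top two bits are clear.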
+  testl(dst, Immediate(0xc0000000));
+  j(zero, &result_ok);
+  if (src1.is(rcx) || src2.is(rcx)) {
+    if (src1.is(rcx)) {
+      movq(src1, kScratchRegister);
+    } else {
+      movq(src2, kScratchRegister);
+    }
+  }
+  jmp(on_not_smi_result);
+  bind(&result_ok);
+  Integer32ToSmi(dst, dst);
+#endif
 }
@@ -2130,9 +2502,14 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst,
     movq(dst, src1);
   }
   SmiToInteger32(rcx, src2);
+#if !V8_USE_31_BITS_SMI_VALUE
   orl(rcx, Immediate(kSmiShift));
   sar_cl(dst);  // Shift 32 + original rcx & 0x1f.
-  shl(dst, Immediate(kSmiShift));
+#else
+  SmiToInteger32(dst, dst);
+  sarl_cl(dst);
+#endif
+  Integer32ToSmi(dst, dst);
   if (src1.is(rcx)) {
     movq(src1, kScratchRegister);
   } else if (src2.is(rcx)) {
@@ -2189,14 +2566,23 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst,
   if (!dst.is(src)) {
     movq(dst, src);
   }
+#if !V8_USE_31_BITS_SMI_VALUE
   if (shift < kSmiShift) {
     sar(dst, Immediate(kSmiShift - shift));
   } else {
     shl(dst, Immediate(shift - kSmiShift));
   }
   return SmiIndex(dst, times_1);
+#else
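+  // A 31-bit smi is already the value shifted left by one, so scale factors
+  // above times_1 can absorb the tag bit.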
+  if (shift == times_1) {
+    sar(dst, Immediate(kSmiShift));
+    return SmiIndex(dst, times_1);
+  }
+  return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
+#endif
 }
+
 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
                                             Register src,
                                             int shift) {
@@ -2206,18 +2592,31 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
     movq(dst, src);
   }
   neg(dst);
+#if !V8_USE_31_BITS_SMI_VALUE
   if (shift < kSmiShift) {
     sar(dst, Immediate(kSmiShift - shift));
   } else {
     shl(dst, Immediate(shift - kSmiShift));
   }
   return SmiIndex(dst, times_1);
+#else
+  if (shift == times_1) {
+    sar(dst, Immediate(kSmiShift));
+    return SmiIndex(dst, times_1);
+  }
+  return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
+#endif
 }

 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
+#if !V8_USE_31_BITS_SMI_VALUE
   ASSERT_EQ(0, kSmiShift % kBitsPerByte);
   addl(dst, Operand(src, kSmiShift / kBitsPerByte));
+#else
+  SmiToInteger32(kScratchRegister, src);
+  addl(dst, kScratchRegister);
+#endif
 }
@@ -2479,11 +2878,18 @@ void MacroAssembler::Drop(int stack_elements) {
 void MacroAssembler::Test(const Operand& src, Smi* source) {
+#if !V8_USE_31_BITS_SMI_VALUE
   testl(Operand(src, kIntSize), Immediate(source->value()));
+#else
+  testl(src, Immediate(source));
+#endif
 }

 void MacroAssembler::TestBit(const Operand& src, int bits) {
+#if V8_USE_31_BITS_SMI_VALUE
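+  // Skip over the tag bit when indexing into the smi payload.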
+  bits += kSmiTagSize + kSmiShiftSize;
+#endif
   int byte_offset = bits / kBitsPerByte;
   int bit_in_byte = bits & (kBitsPerByte - 1);
   testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));