Index: src/x64/macro-assembler-x64.cc
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 3768244e4d570f9c9fe69cb7c472d3f26c366581..614f67b0aae87b823545b9a71b86f0454e96d6ed 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -428,8 +428,6 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
 // ----------------------------------------------------------------------------
 // Smi tagging, untagging and tag detection.
-#ifdef V8_LONG_SMI
-
 static int kSmiShift = kSmiTagSize + kSmiShiftSize;
 void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
@@ -1210,736 +1208,6 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
   return SmiIndex(dst, times_1);
 }
-#else  // ! V8_LONG_SMI
-// 31 bit smi operations
-
-// Extracts the low 32 bits of a Smi pointer, where the tagged smi value
-// is stored.
-static int32_t SmiValue(Smi* smi) {
-  return static_cast<int32_t>(reinterpret_cast<intptr_t>(smi));
-}
-
-
-void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
-  ASSERT_EQ(1, kSmiTagSize);
-  ASSERT_EQ(0, kSmiTag);
-#ifdef DEBUG
-  if (allow_stub_calls()) {
-    cmpl(src, Immediate(0xC0000000u));
-    Check(positive, "Smi conversion overflow");
-  }
-#endif
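-  // Tagging is a left shift by one (kSmiTagSize == 1, kSmiTag == 0), so
-  // both the addl and the lea below compute src * 2.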
-  if (dst.is(src)) {
-    addl(dst, src);
-  } else {
-    lea(dst, Operand(src, src, times_1, 0));
-  }
-}
-
-
-void MacroAssembler::Integer32ToSmi(Register dst,
-                                    Register src,
-                                    Label* on_overflow) {
-  ASSERT_EQ(1, kSmiTagSize);
-  ASSERT_EQ(0, kSmiTag);
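-  // Adding the value to itself doubles it and sets the overflow flag
-  // exactly when the result falls outside the 31-bit smi range.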
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  addl(dst, src);
-  j(overflow, on_overflow);
-}
-
-
-void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
-                                                Register src,
-                                                int constant) {
-#ifdef DEBUG
-  if (allow_stub_calls()) {
-    movl(kScratchRegister, src);
-    addl(kScratchRegister, Immediate(constant));
-    Check(no_overflow, "Add-and-smi-convert overflow");
-    Condition valid = CheckInteger32ValidSmiValue(kScratchRegister);
-    Check(valid, "Add-and-smi-convert overflow");
-  }
-#endif
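-  // A single lea computes 2 * (src + constant), i.e. the tagged smi,
-  // without affecting the flags.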
-  lea(dst, Operand(src, src, times_1, constant << kSmiTagSize));
-}
-
-
-void MacroAssembler::SmiToInteger32(Register dst, Register src) {
-  ASSERT_EQ(1, kSmiTagSize);
-  ASSERT_EQ(0, kSmiTag);
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  sarl(dst, Immediate(kSmiTagSize));
-}
-
-
-void MacroAssembler::SmiToInteger64(Register dst, Register src) {
-  ASSERT_EQ(1, kSmiTagSize);
-  ASSERT_EQ(0, kSmiTag);
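-  // Sign-extend the tagged 32-bit value to 64 bits, then shift the tag
-  // out arithmetically to preserve the sign of the smi.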
-  movsxlq(dst, src);
-  sar(dst, Immediate(kSmiTagSize));
-}
-
-
-void MacroAssembler::SmiTest(Register src) {
-  testl(src, src);
-}
-
-
-void MacroAssembler::SmiCompare(Register dst, Register src) {
-  cmpl(dst, src);
-}
-
-
-void MacroAssembler::SmiCompare(Register dst, Smi* src) {
-  ASSERT(!dst.is(kScratchRegister));
-  if (src->value() == 0) {
-    testl(dst, dst);
-  } else {
-    cmpl(dst, Immediate(SmiValue(src)));
-  }
-}
-
-
-void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
-  cmpl(dst, src);
-}
-
-
-void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
-  if (src->value() == 0) {
-    movl(kScratchRegister, dst);
-    testl(kScratchRegister, kScratchRegister);
-  } else {
-    cmpl(dst, Immediate(SmiValue(src)));
-  }
-}
-
-
-void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
-                                                           Register src,
-                                                           int power) {
-  ASSERT(power >= 0);
-  ASSERT(power < 64);
-  if (power == 0) {
-    SmiToInteger64(dst, src);
-    return;
-  }
-  movsxlq(dst, src);
-  shl(dst, Immediate(power - 1));
-}
-
-Condition MacroAssembler::CheckSmi(Register src) {
-  testb(src, Immediate(kSmiTagMask));
-  return zero;
-}
-
-Condition MacroAssembler::CheckPositiveSmi(Register src) {
-  ASSERT_EQ(0, kSmiTag);
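-  // A positive smi has both the sign bit and the tag bit clear, so a
-  // single test against 0x80000001 checks both at once.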
-  testl(src, Immediate(static_cast<uint32_t>(0x80000000u | kSmiTagMask)));
-  return zero;
-}
-
-Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
-  if (first.is(second)) {
-    return CheckSmi(first);
-  }
-  movl(kScratchRegister, first);
-  orl(kScratchRegister, second);
-  return CheckSmi(kScratchRegister);
-}
-
-Condition MacroAssembler::CheckIsMinSmi(Register src) {
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  cmpl(src, Immediate(0x80000000u));
-  return equal;
-}
-
-Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
-  // A 32-bit integer value can be converted to a smi if it is in the
-  // range [-2^30 .. 2^30-1]; equivalently, bits 30 and 31 of its 32-bit
-  // representation must be equal.
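-  // Subtracting 0xC0000000 maps the valid range [-2^30 .. 2^30-1] onto
-  // [0 .. 0x7FFFFFFF], so the sign flag is clear exactly for valid values.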
-  cmpl(src, Immediate(0xC0000000u));
-  return positive;
-}
-
-
-void MacroAssembler::SmiNeg(Register dst,
-                            Register src,
-                            Label* on_smi_result) {
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  negl(dst);
-  testl(dst, Immediate(0x7fffffff));
-  // If the result is zero or 0x80000000, negation failed to create a smi.
-  j(not_equal, on_smi_result);
-}
-
-
-void MacroAssembler::SmiAdd(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(src2));
-  if (!dst.is(src1)) {
-    movl(dst, src1);
-  }
-  addl(dst, src2);
-  if (!dst.is(src1)) {
-    j(overflow, on_not_smi_result);
-  } else {
-    Label smi_result;
-    j(no_overflow, &smi_result);
-    // Restore src1.
-    subl(src1, src2);
-    jmp(on_not_smi_result);
-    bind(&smi_result);
-  }
-}
-
-
-void MacroAssembler::SmiSub(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(src2));
-  if (!dst.is(src1)) {
-    movl(dst, src1);
-  }
-  subl(dst, src2);
-  if (!dst.is(src1)) {
-    j(overflow, on_not_smi_result);
-  } else {
-    Label smi_result;
-    j(no_overflow, &smi_result);
-    // Restore src1.
-    addl(src1, src2);
-    jmp(on_not_smi_result);
-    bind(&smi_result);
-  }
-}
-
-
-void MacroAssembler::SmiMul(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(src2));
-
-  if (dst.is(src1)) {
-    // Copy src1 before overwriting.
-    movq(kScratchRegister, src1);
-  }
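-  // kScratchRegister preserves the original src1 for the negative-zero
-  // check below when dst aliases src1.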
-  SmiToInteger32(dst, src1);
-
-  imull(dst, src2);
-  j(overflow, on_not_smi_result);
-
-  // Check for negative zero result. If product is zero, and one
-  // argument is negative, go to slow case. The frame is unchanged
-  // in this block, so local control flow can use a Label rather
-  // than a JumpTarget.
-  Label non_zero_result;
-  testl(dst, dst);
-  j(not_zero, &non_zero_result);
-
-  // Test whether either operand is negative (the other must be zero).
-  if (!dst.is(src1)) {
-    movl(kScratchRegister, src1);
-  }
-  orl(kScratchRegister, src2);
-  j(negative, on_not_smi_result);
-
-  bind(&non_zero_result);
-}
-
-
-void MacroAssembler::SmiTryAddConstant(Register dst,
-                                       Register src,
-                                       Smi* constant,
-                                       Label* on_not_smi_result) {
-  // Does not assume that src is a smi.
-  ASSERT_EQ(static_cast<intptr_t>(1), kSmiTagMask);
-  ASSERT_EQ(0, kSmiTag);
-
-  Register tmp = (src.is(dst) ? kScratchRegister : dst);
-  movl(tmp, src);
-  addl(tmp, Immediate(SmiValue(constant)));
-  if (tmp.is(kScratchRegister)) {
-    j(overflow, on_not_smi_result);
-    testl(tmp, Immediate(kSmiTagMask));
-    j(not_zero, on_not_smi_result);
-    movl(dst, tmp);
-  } else {
-    movl(kScratchRegister, Immediate(kSmiTagMask));
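-    // On overflow the cmov below loads kSmiTagMask into dst, which
-    // guarantees that the tag-bit test jumps to on_not_smi_result, so one
-    // branch covers both overflow and a non-smi src.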
-    cmovl(overflow, dst, kScratchRegister);
-    testl(dst, kScratchRegister);
-    j(not_zero, on_not_smi_result);
-  }
-}
-
-
-void MacroAssembler::SmiAddConstant(Register dst,
-                                    Register src,
-                                    Smi* constant) {
-  ASSERT_EQ(1, kSmiTagMask);
-  ASSERT_EQ(0, kSmiTag);
-  int32_t smi_value = SmiValue(constant);
-  if (dst.is(src)) {
-    addl(dst, Immediate(smi_value));
-  } else {
-    lea(dst, Operand(src, smi_value));
-  }
-}
-
-
-void MacroAssembler::SmiAddConstant(Register dst,
-                                    Register src,
-                                    Smi* constant,
-                                    Label* on_not_smi_result) {
-  ASSERT_EQ(1, kSmiTagMask);
-  ASSERT_EQ(0, kSmiTag);
-  int32_t smi_value = SmiValue(constant);
-  if (!dst.is(src)) {
-    movl(dst, src);
-    addl(dst, Immediate(smi_value));
-    j(overflow, on_not_smi_result);
-  } else {
-    addl(dst, Immediate(smi_value));
-    Label result_ok;
-    j(no_overflow, &result_ok);
-    subl(dst, Immediate(smi_value));
-    jmp(on_not_smi_result);
-    bind(&result_ok);
-  }
-}
-
-
-void MacroAssembler::SmiSubConstant(Register dst,
-                                    Register src,
-                                    Smi* constant) {
-  ASSERT_EQ(1, kSmiTagMask);
-  ASSERT_EQ(0, kSmiTag);
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  subl(dst, Immediate(SmiValue(constant)));
-}
-
-
-void MacroAssembler::SmiSubConstant(Register dst,
-                                    Register src,
-                                    Smi* constant,
-                                    Label* on_not_smi_result) {
-  ASSERT_EQ(1, kSmiTagMask);
-  ASSERT_EQ(0, kSmiTag);
-  int32_t smi_value = SmiValue(constant);
-  if (dst.is(src)) {
-    // Optimistic subtract - may change the value of the dst register
-    // if it has garbage bits in the upper half, but will not change
-    // its value as a tagged smi.
-    subl(dst, Immediate(smi_value));
-    Label add_success;
-    j(no_overflow, &add_success);
-    addl(dst, Immediate(smi_value));
-    jmp(on_not_smi_result);
-    bind(&add_success);
-  } else {
-    movl(dst, src);
-    subl(dst, Immediate(smi_value));
-    j(overflow, on_not_smi_result);
-  }
-}
-
-
-void MacroAssembler::SmiDiv(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!src2.is(rax));
-  ASSERT(!src2.is(rdx));
-  ASSERT(!src1.is(rdx));
-
-  // Check for 0 divisor (result is +/-Infinity).
-  Label positive_divisor;
-  testl(src2, src2);
-  j(zero, on_not_smi_result);
-  j(positive, &positive_divisor);
-  // Check for negative zero result. If the dividend is zero, and the
-  // divisor is negative, return a floating point negative zero.
-  testl(src1, src1);
-  j(zero, on_not_smi_result);
-  bind(&positive_divisor);
-
-  // Sign extend src1 into edx:eax.
-  if (!src1.is(rax)) {
-    movl(rax, src1);
-  }
-  cdq();
-
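-  // Both operands are still tagged; dividing 2a by 2b leaves the untagged
-  // quotient a/b in rax and a tagged remainder in rdx.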
-  idivl(src2);
-  // Check for the corner case of dividing the most negative smi by
-  // -1. We cannot use the overflow flag, since it is not set by the
-  // idiv instruction.
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
-  cmpl(rax, Immediate(0x40000000));
-  j(equal, on_not_smi_result);
-  // Check that the remainder is zero.
-  testl(rdx, rdx);
-  j(not_zero, on_not_smi_result);
-  // Tag the result and store it in the destination register.
-  Integer32ToSmi(dst, rax);
-}
-
-
-void MacroAssembler::SmiMod(Register dst,
-                            Register src1,
-                            Register src2,
-                            Label* on_not_smi_result) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!src2.is(rax));
-  ASSERT(!src2.is(rdx));
-  ASSERT(!src1.is(rdx));
-
-  testl(src2, src2);
-  j(zero, on_not_smi_result);
-
-  if (src1.is(rax)) {
-    // Must remember the value to see whether a zero result should
-    // be a negative zero.
-    movl(kScratchRegister, rax);
-  } else {
-    movl(rax, src1);
-  }
-  // Sign extend eax into edx:eax.
-  cdq();
-  idivl(src2);
-  // Check for a negative zero result. If the result is zero, and the
-  // dividend is negative, return a floating point negative zero.
-  Label non_zero_result;
-  testl(rdx, rdx);
-  j(not_zero, &non_zero_result);
-  if (src1.is(rax)) {
-    testl(kScratchRegister, kScratchRegister);
-  } else {
-    testl(src1, src1);
-  }
-  j(negative, on_not_smi_result);
-  bind(&non_zero_result);
-  if (!dst.is(rdx)) {
-    movl(dst, rdx);
-  }
-}
-
-
-void MacroAssembler::SmiNot(Register dst, Register src) {
-  if (dst.is(src)) {
-    not_(dst);
-    // Remove inverted smi-tag. The mask is sign-extended to 64 bits.
-    xor_(src, Immediate(kSmiTagMask));
-  } else {
-    ASSERT_EQ(0, kSmiTag);
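-    // Setting the tag bit before the not leaves the complement with a
-    // clear tag bit, i.e. a correctly tagged smi.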
-    lea(dst, Operand(src, kSmiTagMask));
-    not_(dst);
-  }
-}
-
-
-void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
-  if (!dst.is(src1)) {
-    movl(dst, src1);
-  }
-  and_(dst, src2);
-}
-
-
-void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) {
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  int32_t smi_value = SmiValue(constant);
-  and_(dst, Immediate(smi_value));
-}
-
-
-void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
-  if (!dst.is(src1)) {
-    movl(dst, src1);
-  }
-  or_(dst, src2);
-}
-
-
-void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  int32_t smi_value = SmiValue(constant);
-  or_(dst, Immediate(smi_value));
-}
-
-
-void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
-  if (!dst.is(src1)) {
-    movl(dst, src1);
-  }
-  xor_(dst, src2);
-}
-
-
-void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
-  if (!dst.is(src)) {
-    movl(dst, src);
-  }
-  int32_t smi_value = SmiValue(constant);
-  xor_(dst, Immediate(smi_value));
-}
-
-
-void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
-                                                     Register src,
-                                                     int shift_value) {
-  if (shift_value > 0) {
-    if (dst.is(src)) {
-      sarl(dst, Immediate(shift_value));
-      and_(dst, Immediate(~kSmiTagMask));
-    } else {
-      UNIMPLEMENTED();  // Not used.
-    }
-  }
-}
-
-
-void MacroAssembler::SmiShiftLogicalRightConstant(Register dst,
-                                                  Register src,
-                                                  int shift_value,
-                                                  Label* on_not_smi_result) {
-  // A logical right shift interprets its result as an *unsigned* number.
-  if (dst.is(src)) {
-    UNIMPLEMENTED();  // Not used.
-  } else {
-    movl(dst, src);
-    // Untag the smi.
-    sarl(dst, Immediate(kSmiTagSize));
-    if (shift_value < 2) {
-      // A negative smi shifted right by two lands in the positive smi
-      // range, but shifted by only zero or one it never does.
-      j(negative, on_not_smi_result);
-    }
-    if (shift_value > 0) {
-      // Do the right shift on the integer value.
-      shrl(dst, Immediate(shift_value));
-    }
-    // Re-tag the result.
-    addl(dst, dst);
-  }
-}
-
-
-void MacroAssembler::SmiShiftLeftConstant(Register dst,
-                                          Register src,
-                                          int shift_value,
-                                          Label* on_not_smi_result) {
-  if (dst.is(src)) {
-    if (shift_value > 0) {
-      movq(kScratchRegister, src);
-      // Treat scratch as an untagged integer value equal to two times the
-      // smi value of src, i.e., already shifted left by one.
-      if (shift_value > 1) {
-        shll(kScratchRegister, Immediate(shift_value - 1));
-      }
-      JumpIfNotValidSmiValue(kScratchRegister, on_not_smi_result);
-      // Convert int result to Smi, checking that it is in smi range.
-      ASSERT(kSmiTagSize == 1);  // adjust code if not the case
-      Integer32ToSmi(dst, kScratchRegister);
-    }
-  } else {
-    movl(dst, src);
-    if (shift_value > 0) {
-      // Treat dst as an untagged integer value equal to two times the
-      // smi value of src, i.e., already shifted left by one.
-      if (shift_value > 1) {
-        shll(dst, Immediate(shift_value - 1));
-      }
-      // Convert int result to Smi, checking that it is in smi range.
-      ASSERT(kSmiTagSize == 1);  // adjust code if not the case
-      Integer32ToSmi(dst, dst, on_not_smi_result);
-    }
-  }
-}
-
-
-void MacroAssembler::SmiShiftLeft(Register dst,
-                                  Register src1,
-                                  Register src2,
-                                  Label* on_not_smi_result) {
-  ASSERT(!dst.is(rcx));
-  Label result_ok;
-  // Untag both operands.
-  if (dst.is(src1) || src1.is(rcx)) {
-    movq(kScratchRegister, src1);
-  }
-  SmiToInteger32(dst, src1);
-  SmiToInteger32(rcx, src2);
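-  // shll with no immediate operand shifts dst by the count in cl.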
-  shll(dst);
-  // Check that the *signed* result fits in a smi.
-  Condition is_valid = CheckInteger32ValidSmiValue(dst);
-  j(is_valid, &result_ok);
-  // Restore the relevant bits of the source registers
-  // and call the slow version.
-  if (dst.is(src1) || src1.is(rcx)) {
-    movq(src1, kScratchRegister);
-  }
-  if (src2.is(rcx)) {
-    Integer32ToSmi(rcx, rcx);
-  }
-  jmp(on_not_smi_result);
-  bind(&result_ok);
-  Integer32ToSmi(dst, dst);
-}
-
-
-void MacroAssembler::SmiShiftLogicalRight(Register dst,
-                                          Register src1,
-                                          Register src2,
-                                          Label* on_not_smi_result) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(rcx));
-  Label result_ok;
-  // Untag both operands.
-  if (src1.is(rcx)) {
-    movq(kScratchRegister, src1);
-  }
-  SmiToInteger32(dst, src1);
-  SmiToInteger32(rcx, src2);
-
-  shrl(dst);
-  // Check that the *unsigned* result fits in a smi.
-  // I.e., that it is a valid positive smi value. The positive smi
-  // values are 0..0x3fffffff, i.e., neither of the top-most two
-  // bits can be set.
-  //
-  // These two cases can only happen with shifts by 0 or 1 when
-  // handed a valid smi. If the answer cannot be represented by a
-  // smi, restore the left and right arguments, and jump to slow
-  // case. The low bit of the left argument may be lost, but only
-  // in a case where it is dropped anyway.
-  testl(dst, Immediate(0xc0000000));
-  j(zero, &result_ok);
-  if (dst.is(src1)) {
-    shll(dst);
-    Integer32ToSmi(dst, dst);
-  } else if (src1.is(rcx)) {
-    movq(rcx, kScratchRegister);
-  } else if (src2.is(rcx)) {
-    Integer32ToSmi(src2, src2);
-  }
-  jmp(on_not_smi_result);
-  bind(&result_ok);
-  // Smi-tag the result.
-  Integer32ToSmi(dst, dst);
-}
-
-
-void MacroAssembler::SmiShiftArithmeticRight(Register dst,
-                                             Register src1,
-                                             Register src2) {
-  ASSERT(!dst.is(rcx));
-  // Untag both operands.
-  SmiToInteger32(dst, src1);
-  SmiToInteger32(rcx, src2);
-  // Shift as integer.
-  sarl(dst);
-  // Retag result.
-  Integer32ToSmi(dst, dst);
-}
-
-
-void MacroAssembler::SelectNonSmi(Register dst,
-                                  Register src1,
-                                  Register src2,
-                                  Label* on_not_smis) {
-  ASSERT(!dst.is(src1));
-  ASSERT(!dst.is(src2));
-  // Both operands must not be smis.
-#ifdef DEBUG
-  if (allow_stub_calls()) {
-    Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
-    Check(not_both_smis, "Both registers were smis.");
-  }
-#endif
-  ASSERT_EQ(0, kSmiTag);
-  ASSERT_EQ(0, Smi::FromInt(0));
-  movq(kScratchRegister, Immediate(kSmiTagMask));
-  and_(kScratchRegister, src1);
-  testl(kScratchRegister, src2);
-  j(not_zero, on_not_smis);
-  // One operand is a smi.
-
-  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
-  // kScratchRegister still holds src1 & kSmiTagMask: either zero or one.
-  subq(kScratchRegister, Immediate(1));
-  // If src1 is a smi, the scratch register is now all 1s, else all 0s.
-  movq(dst, src1);
-  xor_(dst, src2);
-  and_(dst, kScratchRegister);
-  // If src1 is a smi, dst holds src1 ^ src2, else it is zero.
-  xor_(dst, src1);
-  // If src1 is a smi, dst is src2, else it is src1, i.e., a non-smi.
-}
-
-
-SmiIndex MacroAssembler::SmiToIndex(Register dst, Register src, int shift) {
-  ASSERT(is_uint6(shift));
-  if (shift == 0) {  // times_1.
-    SmiToInteger32(dst, src);
-    return SmiIndex(dst, times_1);
-  }
-  if (shift <= 4) {  // 2 - 16 times multiplier is handled using ScaleFactor.
-    // We expect that all smis are actually zero-padded. If this holds after
-    // checking, this line can be omitted.
-    movl(dst, src);  // Ensure that the smi is zero-padded.
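-    // The tag already contributes one factor of two, so scaling the
-    // tagged value by 2^(shift - 1) yields value * 2^shift.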
-    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
-  }
-  // Shift by shift - kSmiTagSize.
-  movl(dst, src);  // Ensure that the smi is zero-padded.
-  shl(dst, Immediate(shift - kSmiTagSize));
-  return SmiIndex(dst, times_1);
-}
-
-
-SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
-                                            Register src,
-                                            int shift) {
-  // Register src holds a positive smi.
-  ASSERT(is_uint6(shift));
-  if (shift == 0) {  // times_1.
-    SmiToInteger32(dst, src);
-    neg(dst);
-    return SmiIndex(dst, times_1);
-  }
-  if (shift <= 4) {  // 2 - 16 times multiplier is handled using ScaleFactor.
-    movl(dst, src);
-    neg(dst);
-    return SmiIndex(dst, static_cast<ScaleFactor>(shift - kSmiTagSize));
-  }
-  // Shift by shift - kSmiTagSize.
-  movl(dst, src);
-  neg(dst);
-  shl(dst, Immediate(shift - kSmiTagSize));
-  return SmiIndex(dst, times_1);
-}
-
-#endif  // V8_LONG_SMI
-
 void MacroAssembler::JumpIfSmi(Register src, Label* on_smi) {
   ASSERT_EQ(0, kSmiTag);