Chromium Code Reviews| Index: src/x64/macro-assembler-x64.cc |
| diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc |
| index 68f0613e584629b211d2863254ad255cb9c95ac8..0263c7ad9d5c30fcf61fdaf4f5275dd762f38863 100644 |
| --- a/src/x64/macro-assembler-x64.cc |
| +++ b/src/x64/macro-assembler-x64.cc |
| @@ -984,6 +984,19 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) { |
| } |
| +// ---------------------------------------------------------------------------- |
| +// Smi tagging, untagging and tag detection. |
| + |
| +static inline Immediate SmiToImmediate(Smi* src) { |
|
danno
2013/08/19 21:47:44
Why not add a constructor to the Immediate constru
haitao.feng
2013/08/20 15:09:30
This is what I have done originally at https://cod
danno
2013/08/20 15:58:14
Ooops! I'm sorry, I apologize, that's embarrassing
haitao.feng
2013/08/21 13:18:03
I will use the constructor with an assertion in th
|
| + if (SmiValuesAre32Bits()) { |
| + UNREACHABLE(); |
| + return Immediate(2); |
| + } else { |
| + return Immediate(static_cast<int32_t>(reinterpret_cast<intptr_t>(src))); |
| + } |
| +} |
| + |
| + |
| bool MacroAssembler::IsUnsafeInt(const int x) { |
| static const int kMaxBits = 17; |
| return !is_intn(x, kMaxBits); |
| @@ -992,32 +1005,51 @@ bool MacroAssembler::IsUnsafeInt(const int x) { |
| void MacroAssembler::SafeMove(Register dst, Smi* src) { |
| ASSERT(!dst.is(kScratchRegister)); |
| - ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi. |
| - if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { |
| - Move(dst, Smi::FromInt(src->value() ^ jit_cookie())); |
| - Move(kScratchRegister, Smi::FromInt(jit_cookie())); |
| - xor_(dst, kScratchRegister); |
| + if (SmiValuesAre32Bits()) { |
| + if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { |
| + Move(dst, Smi::FromInt(src->value() ^ jit_cookie())); |
| + Move(kScratchRegister, Smi::FromInt(jit_cookie())); |
| + xor_(dst, kScratchRegister); |
| + } else { |
| + Move(dst, src); |
| + } |
| } else { |
| - Move(dst, src); |
| + ASSERT(SmiValuesAre31Bits()); |
| + if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { |
| + movq(dst, Immediate(static_cast<int>(reinterpret_cast<intptr_t>(src)) ^ |
| + jit_cookie())); |
| + movq(kScratchRegister, Immediate(jit_cookie())); |
| + xor_(dst, kScratchRegister); |
| + } else { |
| + Move(dst, src); |
| + } |
| } |
| } |
| void MacroAssembler::SafePush(Smi* src) { |
| - ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi. |
| - if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { |
| - Push(Smi::FromInt(src->value() ^ jit_cookie())); |
| - Move(kScratchRegister, Smi::FromInt(jit_cookie())); |
| - xor_(Operand(rsp, 0), kScratchRegister); |
| + if (SmiValuesAre32Bits()) { |
| + if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { |
| + Push(Smi::FromInt(src->value() ^ jit_cookie())); |
| + Move(kScratchRegister, Smi::FromInt(jit_cookie())); |
| + xor_(Operand(rsp, 0), kScratchRegister); |
| + } else { |
| + Push(src); |
| + } |
| } else { |
| - Push(src); |
| + ASSERT(SmiValuesAre31Bits()); |
| + if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { |
| + push(Immediate(static_cast<int>(reinterpret_cast<intptr_t>(src)) ^ |
| + jit_cookie())); |
| + movq(kScratchRegister, Immediate(jit_cookie())); |
| + xor_(Operand(rsp, 0), kScratchRegister); |
| + } else { |
| + Push(src); |
| + } |
| } |
| } |
| -// ---------------------------------------------------------------------------- |
| -// Smi tagging, untagging and tag detection. |
| - |
| Register MacroAssembler::GetSmiConstant(Smi* source) { |
| int value = source->value(); |
| if (value == 0) { |
| @@ -1097,7 +1129,13 @@ void MacroAssembler::Integer32ToSmi(Register dst, Register src) { |
| if (!dst.is(src)) { |
| movl(dst, src); |
| } |
| - shl(dst, Immediate(kSmiShift)); |
| + if (SmiValuesAre32Bits()) { |
| + shl(dst, Immediate(kSmiShift)); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + shll(dst, Immediate(kSmiShift)); |
| + movsxlq(dst, dst); |
| + } |
| } |
| @@ -1113,8 +1151,14 @@ void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) { |
| } |
| bind(&ok); |
| } |
| - ASSERT(kSmiShift % kBitsPerByte == 0); |
| - movl(Operand(dst, kSmiShift / kBitsPerByte), src); |
| + if (SmiValuesAre32Bits()) { |
| + ASSERT(kSmiShift % kBitsPerByte == 0); |
| + movl(Operand(dst, kSmiShift / kBitsPerByte), src); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + Integer32ToSmi(kScratchRegister, src); |
| + movq(dst, kScratchRegister); |
| + } |
| } |
| @@ -1126,7 +1170,7 @@ void MacroAssembler::Integer64PlusConstantToSmi(Register dst, |
| } else { |
| leal(dst, Operand(src, constant)); |
| } |
| - shl(dst, Immediate(kSmiShift)); |
| + Integer32ToSmi(dst, dst); |
| } |
| @@ -1135,12 +1179,23 @@ void MacroAssembler::SmiToInteger32(Register dst, Register src) { |
| if (!dst.is(src)) { |
| movq(dst, src); |
| } |
| - shr(dst, Immediate(kSmiShift)); |
| + if (SmiValuesAre32Bits()) { |
| + shr(dst, Immediate(kSmiShift)); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + sarl(dst, Immediate(kSmiShift)); |
| + } |
| } |
| void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) { |
| - movl(dst, Operand(src, kSmiShift / kBitsPerByte)); |
| + if (SmiValuesAre32Bits()) { |
| + movl(dst, Operand(src, kSmiShift / kBitsPerByte)); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + movl(dst, src); |
| + sarl(dst, Immediate(kSmiShift)); |
| + } |
| } |
| @@ -1154,20 +1209,36 @@ void MacroAssembler::SmiToInteger64(Register dst, Register src) { |
| void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) { |
| - movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte)); |
| + if (SmiValuesAre32Bits()) { |
| + movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte)); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + movq(dst, src); |
| + SmiToInteger64(dst, dst); |
| + } |
| } |
| void MacroAssembler::SmiTest(Register src) { |
| AssertSmi(src); |
| - testq(src, src); |
| + if (SmiValuesAre32Bits()) { |
| + testq(src, src); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + testl(src, src); |
| + } |
| } |
| void MacroAssembler::SmiCompare(Register smi1, Register smi2) { |
| AssertSmi(smi1); |
| AssertSmi(smi2); |
| - cmpq(smi1, smi2); |
| + if (SmiValuesAre32Bits()) { |
| + cmpq(smi1, smi2); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + cmpl(smi1, smi2); |
| + } |
| } |
| @@ -1180,10 +1251,20 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) { |
| void MacroAssembler::Cmp(Register dst, Smi* src) { |
| ASSERT(!dst.is(kScratchRegister)); |
| if (src->value() == 0) { |
| - testq(dst, dst); |
| + if (SmiValuesAre32Bits()) { |
| + testq(dst, dst); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + testl(dst, dst); |
| + } |
| } else { |
| Register constant_reg = GetSmiConstant(src); |
| - cmpq(dst, constant_reg); |
| + if (SmiValuesAre32Bits()) { |
| + cmpq(dst, constant_reg); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + cmpl(dst, constant_reg); |
| + } |
| } |
| } |
| @@ -1191,33 +1272,59 @@ void MacroAssembler::Cmp(Register dst, Smi* src) { |
| void MacroAssembler::SmiCompare(Register dst, const Operand& src) { |
| AssertSmi(dst); |
| AssertSmi(src); |
| - cmpq(dst, src); |
| + if (SmiValuesAre32Bits()) { |
| + cmpq(dst, src); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + cmpl(dst, src); |
| + } |
| } |
| void MacroAssembler::SmiCompare(const Operand& dst, Register src) { |
| AssertSmi(dst); |
| AssertSmi(src); |
| - cmpq(dst, src); |
| + if (SmiValuesAre32Bits()) { |
| + cmpq(dst, src); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + cmpl(dst, src); |
| + } |
| } |
| void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) { |
| AssertSmi(dst); |
| - cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value())); |
| + if (SmiValuesAre32Bits()) { |
| + cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value())); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + cmpl(dst, SmiToImmediate(src)); |
| + } |
| } |
| void MacroAssembler::Cmp(const Operand& dst, Smi* src) { |
| - // The Operand cannot use the smi register. |
| - Register smi_reg = GetSmiConstant(src); |
| - ASSERT(!dst.AddressUsesRegister(smi_reg)); |
| - cmpq(dst, smi_reg); |
| + if (SmiValuesAre32Bits()) { |
| + // The Operand cannot use the smi register. |
| + Register smi_reg = GetSmiConstant(src); |
| + ASSERT(!dst.AddressUsesRegister(smi_reg)); |
| + cmpq(dst, smi_reg); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + cmpl(dst, SmiToImmediate(src)); |
| + } |
| } |
| void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) { |
| - cmpl(Operand(dst, kSmiShift / kBitsPerByte), src); |
| + if (SmiValuesAre32Bits()) { |
| + cmpl(Operand(dst, kSmiShift / kBitsPerByte), src); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + SmiToInteger32(kScratchRegister, dst); |
| + cmpl(kScratchRegister, src); |
| + } |
| } |
| @@ -1246,7 +1353,12 @@ void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst, |
| int power) { |
| ASSERT((0 <= power) && (power < 32)); |
| if (dst.is(src)) { |
| - shr(dst, Immediate(power + kSmiShift)); |
| + if (SmiValuesAre32Bits()) { |
| + shr(dst, Immediate(power + kSmiShift)); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + shrl(dst, Immediate(power + kSmiShift)); |
| + } |
| } else { |
| UNIMPLEMENTED(); // Not used. |
| } |
| @@ -1300,8 +1412,15 @@ Condition MacroAssembler::CheckBothSmi(Register first, Register second) { |
| return CheckSmi(first); |
| } |
| STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3); |
| - leal(kScratchRegister, Operand(first, second, times_1, 0)); |
| - testb(kScratchRegister, Immediate(0x03)); |
| + if (SmiValuesAre32Bits()) { |
| + leal(kScratchRegister, Operand(first, second, times_1, 0)); |
| + testb(kScratchRegister, Immediate(0x03)); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + movl(kScratchRegister, first); |
| + orl(kScratchRegister, second); |
| + testb(kScratchRegister, Immediate(kSmiTagMask)); |
| + } |
| return zero; |
| } |
| @@ -1341,22 +1460,39 @@ Condition MacroAssembler::CheckEitherSmi(Register first, |
| Condition MacroAssembler::CheckIsMinSmi(Register src) { |
| ASSERT(!src.is(kScratchRegister)); |
| // If we overflow by subtracting one, it's the minimal smi value. |
| - cmpq(src, kSmiConstantRegister); |
| + if (SmiValuesAre32Bits()) { |
| + cmpq(src, kSmiConstantRegister); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + cmpl(src, kSmiConstantRegister); |
| + } |
| return overflow; |
| } |
| Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) { |
| - // A 32-bit integer value can always be converted to a smi. |
| - return always; |
| + if (SmiValuesAre32Bits()) { |
| + // A 32-bit integer value can always be converted to a smi. |
| + return always; |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + cmpl(src, Immediate(0xc0000000)); |
| + return positive; |
| + } |
| } |
| Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) { |
| - // An unsigned 32-bit integer value is valid as long as the high bit |
| - // is not set. |
| - testl(src, src); |
| - return positive; |
| + if (SmiValuesAre32Bits()) { |
| + // An unsigned 32-bit integer value is valid as long as the high bit |
| + // is not set. |
| + testl(src, src); |
| + return positive; |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + testl(src, Immediate(0xc0000000)); |
| + return zero; |
| + } |
| } |
| @@ -1389,6 +1525,14 @@ void MacroAssembler::JumpIfNotValidSmiValue(Register src, |
| } |
| +void MacroAssembler::JumpIfValidSmiValue(Register src, |
| + Label* on_valid, |
| + Label::Distance near_jump) { |
| + Condition is_valid = CheckInteger32ValidSmiValue(src); |
| + j(is_valid, on_valid, near_jump); |
| +} |
| + |
| + |
| void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src, |
| Label* on_invalid, |
| Label::Distance near_jump) { |
| @@ -1397,6 +1541,14 @@ void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src, |
| } |
| +void MacroAssembler::JumpIfUIntValidSmiValue(Register src, |
| + Label* on_valid, |
| + Label::Distance near_jump) { |
| + Condition is_valid = CheckUInteger32ValidSmiValue(src); |
| + j(is_valid, on_valid, near_jump); |
| +} |
| + |
| + |
| void MacroAssembler::JumpIfSmi(Register src, |
| Label* on_smi, |
| Label::Distance near_jump) { |
| @@ -1462,11 +1614,19 @@ void MacroAssembler::SmiTryAddConstant(Register dst, |
| JumpIfNotSmi(src, on_not_smi_result, near_jump); |
| Register tmp = (dst.is(src) ? kScratchRegister : dst); |
| LoadSmiConstant(tmp, constant); |
| - addq(tmp, src); |
| + if (SmiValuesAre32Bits()) { |
| + addq(tmp, src); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + addl(tmp, src); |
| + } |
| j(overflow, on_not_smi_result, near_jump); |
| if (dst.is(src)) { |
| movq(dst, tmp); |
| } |
| + if (SmiValuesAre31Bits()) { |
| + movsxlq(dst, dst); |
| + } |
| } |
| @@ -1521,7 +1681,13 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { |
| void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) { |
| if (constant->value() != 0) { |
| - addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value())); |
| + if (SmiValuesAre32Bits()) { |
| + addl(Operand(dst, kSmiShift / kBitsPerByte), |
| + Immediate(constant->value())); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + addq(dst, SmiToImmediate(constant)); |
| + } |
| } |
| } |
| @@ -1529,23 +1695,50 @@ void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) { |
| void MacroAssembler::SmiAddConstant(Register dst, |
| Register src, |
| Smi* constant, |
| - Label* on_not_smi_result, |
| - Label::Distance near_jump) { |
| + const SmiInstructionWrapper& wrapper) { |
| if (constant->value() == 0) { |
| if (!dst.is(src)) { |
| movq(dst, src); |
| } |
| - } else if (dst.is(src)) { |
| - ASSERT(!dst.is(kScratchRegister)); |
| - |
| - LoadSmiConstant(kScratchRegister, constant); |
| - addq(kScratchRegister, src); |
| - j(overflow, on_not_smi_result, near_jump); |
| - movq(dst, kScratchRegister); |
| + } else if (SmiValuesAre32Bits()) { |
| + if (dst.is(src)) { |
| + if (wrapper.NeedsKeepSourceOperandsIntact()) { |
|
danno
2013/08/19 21:47:44
Here and everywhere else for the Add/Sub operation
haitao.feng
2013/08/20 15:09:30
Thanks for the recommendation. I will use that tri
|
| + ASSERT(!dst.is(kScratchRegister)); |
| + ASSERT(wrapper.NeedsCheckOverflow()); |
| + LoadSmiConstant(kScratchRegister, constant); |
| + addq(kScratchRegister, src); |
| + wrapper.BailoutIf(overflow); |
| + movq(dst, kScratchRegister); |
| + } else { |
| + UNIMPLEMENTED(); // Not used. |
| + } |
| + } else { |
| + ASSERT(wrapper.NeedsCheckOverflow()); |
| + LoadSmiConstant(dst, constant); |
| + addq(dst, src); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + } |
| } else { |
| - LoadSmiConstant(dst, constant); |
| - addq(dst, src); |
| - j(overflow, on_not_smi_result, near_jump); |
| + ASSERT(SmiValuesAre31Bits()); |
| + if (dst.is(src)) { |
| + if (wrapper.NeedsKeepSourceOperandsIntact()) { |
| + ASSERT(!dst.is(kScratchRegister)); |
| + ASSERT(wrapper.NeedsCheckOverflow()); |
| + LoadSmiConstant(kScratchRegister, constant); |
| + addl(kScratchRegister, src); |
| + wrapper.BailoutIf(overflow); |
| + movsxlq(dst, kScratchRegister); |
| + } else { |
| + addl(dst, SmiToImmediate(constant)); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + movsxlq(dst, dst); |
| + } |
| + } else { |
| + movl(dst, src); |
| + addl(dst, SmiToImmediate(constant)); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + movsxlq(dst, dst); |
| + } |
| } |
| } |
| @@ -1561,10 +1754,16 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) { |
| subq(dst, constant_reg); |
| } else { |
| if (constant->value() == Smi::kMinValue) { |
| - LoadSmiConstant(dst, constant); |
| - // Adding and subtracting the min-value gives the same result, it only |
| - // differs on the overflow bit, which we don't check here. |
| - addq(dst, src); |
| + if (SmiValuesAre32Bits()) { |
| + LoadSmiConstant(dst, constant); |
| + // Adding and subtracting the min-value gives the same result, it only |
| + // differs on the overflow bit, which we don't check here. |
| + addq(dst, src); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + movq(dst, src); |
| + subq(dst, SmiToImmediate(constant)); |
| + } |
| } else { |
| // Subtract by adding the negation. |
| LoadSmiConstant(dst, Smi::FromInt(-constant->value())); |
| @@ -1577,43 +1776,96 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) { |
| void MacroAssembler::SmiSubConstant(Register dst, |
| Register src, |
| Smi* constant, |
| - Label* on_not_smi_result, |
| - Label::Distance near_jump) { |
| + const SmiInstructionWrapper& wrapper) { |
| if (constant->value() == 0) { |
| if (!dst.is(src)) { |
| movq(dst, src); |
| } |
| - } else if (dst.is(src)) { |
| - ASSERT(!dst.is(kScratchRegister)); |
| - if (constant->value() == Smi::kMinValue) { |
| - // Subtracting min-value from any non-negative value will overflow. |
| - // We test the non-negativeness before doing the subtraction. |
| - testq(src, src); |
| - j(not_sign, on_not_smi_result, near_jump); |
| - LoadSmiConstant(kScratchRegister, constant); |
| - subq(dst, kScratchRegister); |
| + } else if (SmiValuesAre32Bits()) { |
| + if (dst.is(src)) { |
| + ASSERT(!dst.is(kScratchRegister)); |
| + if (constant->value() == Smi::kMinValue) { |
| + // Subtracting min-value from any non-negative value will overflow. |
| + // We test the non-negativeness before doing the subtraction. |
| + if (wrapper.NeedsCheckOverflow()) { |
| + testq(src, src); |
| + wrapper.BailoutIf(not_sign); |
| + } |
| + LoadSmiConstant(kScratchRegister, constant); |
| + subq(dst, kScratchRegister); |
| + } else { |
| + // Subtract by adding the negation. |
| + if (wrapper.NeedsKeepSourceOperandsIntact()) { |
| + ASSERT(wrapper.NeedsCheckOverflow()); |
| + LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value())); |
| + addq(kScratchRegister, dst); |
| + wrapper.BailoutIf(overflow); |
| + movq(dst, kScratchRegister); |
| + } else { |
| + UNIMPLEMENTED(); // Not used. |
| + } |
| + } |
| } else { |
| - // Subtract by adding the negation. |
| - LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value())); |
| - addq(kScratchRegister, dst); |
| - j(overflow, on_not_smi_result, near_jump); |
| - movq(dst, kScratchRegister); |
| + if (constant->value() == Smi::kMinValue) { |
| + // Subtracting min-value from any non-negative value will overflow. |
| + // We test the non-negativeness before doing the subtraction. |
| + if (wrapper.NeedsCheckOverflow()) { |
| + testq(src, src); |
| + wrapper.BailoutIf(not_sign); |
| + } |
| + LoadSmiConstant(dst, constant); |
| + // Adding and subtracting the min-value gives the same result, it only |
| + // differs on the overflow bit, which we don't check here. |
| + addq(dst, src); |
| + } else { |
| + // Subtract by adding the negation. |
| + LoadSmiConstant(dst, Smi::FromInt(-(constant->value()))); |
| + addq(dst, src); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + } |
| } |
| } else { |
| - if (constant->value() == Smi::kMinValue) { |
| - // Subtracting min-value from any non-negative value will overflow. |
| - // We test the non-negativeness before doing the subtraction. |
| - testq(src, src); |
| - j(not_sign, on_not_smi_result, near_jump); |
| - LoadSmiConstant(dst, constant); |
| - // Adding and subtracting the min-value gives the same result, it only |
| - // differs on the overflow bit, which we don't check here. |
| - addq(dst, src); |
| + ASSERT(SmiValuesAre31Bits()); |
| + if (dst.is(src)) { |
| + ASSERT(!dst.is(kScratchRegister)); |
| + if (constant->value() == Smi::kMinValue) { |
| + // Subtracting min-value from any non-negative value will overflow. |
| + // We test the non-negativeness before doing the subtraction. |
| + if (wrapper.NeedsCheckOverflow()) { |
| + testl(src, src); |
| + wrapper.BailoutIf(not_sign); |
| + } |
| + subq(dst, SmiToImmediate(constant)); |
| + } else { |
| + if (wrapper.NeedsKeepSourceOperandsIntact()) { |
| + ASSERT(wrapper.NeedsCheckOverflow()); |
| + // Subtract by adding the negation. |
| + LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value())); |
| + addl(kScratchRegister, dst); |
| + wrapper.BailoutIf(overflow); |
| + movsxlq(dst, kScratchRegister); |
| + } else { |
| + subl(dst, SmiToImmediate(constant)); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + movsxlq(dst, dst); |
| + } |
| + } |
| } else { |
| - // Subtract by adding the negation. |
| - LoadSmiConstant(dst, Smi::FromInt(-(constant->value()))); |
| - addq(dst, src); |
| - j(overflow, on_not_smi_result, near_jump); |
| + if (constant->value() == Smi::kMinValue) { |
| + // Subtracting min-value from any non-negative value will overflow. |
| + // We test the non-negativeness before doing the subtraction. |
| + if (wrapper.NeedsCheckOverflow()) { |
| + testl(src, src); |
| + wrapper.BailoutIf(not_sign); |
| + } |
| + movq(dst, src); |
| + subq(dst, SmiToImmediate(constant)); |
| + } else { |
| + movl(dst, src); |
| + subl(dst, SmiToImmediate(constant)); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + movsxlq(dst, dst); |
| + } |
| } |
| } |
| } |
| @@ -1628,13 +1880,23 @@ void MacroAssembler::SmiNeg(Register dst, |
| movq(kScratchRegister, src); |
| neg(dst); // Low 32 bits are retained as zero by negation. |
| // Test if result is zero or Smi::kMinValue. |
| + if (SmiValuesAre32Bits()) { |
| cmpq(dst, kScratchRegister); |
| - j(not_equal, on_smi_result, near_jump); |
| - movq(src, kScratchRegister); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + cmpl(dst, kScratchRegister); |
| + } |
| + j(not_equal, on_smi_result, near_jump); |
| + movq(src, kScratchRegister); |
| } else { |
| movq(dst, src); |
| neg(dst); |
| - cmpq(dst, src); |
| + if (SmiValuesAre32Bits()) { |
| + cmpq(dst, src); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + cmpl(dst, src); |
| + } |
| // If the result is zero or Smi::kMinValue, negation failed to create a smi. |
| j(not_equal, on_smi_result, near_jump); |
| } |
| @@ -1644,19 +1906,45 @@ void MacroAssembler::SmiNeg(Register dst, |
| void MacroAssembler::SmiAdd(Register dst, |
| Register src1, |
| Register src2, |
| - Label* on_not_smi_result, |
| - Label::Distance near_jump) { |
| - ASSERT_NOT_NULL(on_not_smi_result); |
| + const SmiInstructionWrapper& wrapper) { |
| ASSERT(!dst.is(src2)); |
| - if (dst.is(src1)) { |
| - movq(kScratchRegister, src1); |
| - addq(kScratchRegister, src2); |
| - j(overflow, on_not_smi_result, near_jump); |
| - movq(dst, kScratchRegister); |
| + if (SmiValuesAre32Bits()) { |
| + if (dst.is(src1)) { |
| + if (wrapper.NeedsKeepSourceOperandsIntact()) { |
| + ASSERT(wrapper.NeedsCheckOverflow()); |
| + movq(kScratchRegister, src1); |
| + addq(kScratchRegister, src2); |
| + wrapper.BailoutIf(overflow); |
| + movq(dst, kScratchRegister); |
| + } else { |
| + addq(dst, src2); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + } |
| + } else { |
| + movq(dst, src1); |
| + addq(dst, src2); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + } |
| } else { |
| - movq(dst, src1); |
| - addq(dst, src2); |
| - j(overflow, on_not_smi_result, near_jump); |
| + ASSERT(SmiValuesAre31Bits()); |
| + if (dst.is(src1)) { |
| + if (wrapper.NeedsKeepSourceOperandsIntact()) { |
| + ASSERT(wrapper.NeedsCheckOverflow()); |
| + movl(kScratchRegister, src1); |
| + addl(kScratchRegister, src2); |
| + wrapper.BailoutIf(overflow); |
| + movsxlq(dst, kScratchRegister); |
| + } else { |
| + addl(dst, src2); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + movsxlq(dst, dst); |
| + } |
| + } else { |
| + movl(dst, src1); |
| + addl(dst, src2); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + movsxlq(dst, dst); |
| + } |
| } |
| } |
| @@ -1664,19 +1952,45 @@ void MacroAssembler::SmiAdd(Register dst, |
| void MacroAssembler::SmiAdd(Register dst, |
| Register src1, |
| const Operand& src2, |
| - Label* on_not_smi_result, |
| - Label::Distance near_jump) { |
| - ASSERT_NOT_NULL(on_not_smi_result); |
| - if (dst.is(src1)) { |
| - movq(kScratchRegister, src1); |
| - addq(kScratchRegister, src2); |
| - j(overflow, on_not_smi_result, near_jump); |
| - movq(dst, kScratchRegister); |
| + const SmiInstructionWrapper& wrapper) { |
|
danno
2013/08/19 21:47:44
This code seems identical to the version directly
haitao.feng
2013/08/20 15:09:30
I will do that.
|
| + if (SmiValuesAre32Bits()) { |
| + if (dst.is(src1)) { |
| + if (wrapper.NeedsKeepSourceOperandsIntact()) { |
| + ASSERT(wrapper.NeedsCheckOverflow()); |
| + movq(kScratchRegister, src1); |
| + addq(kScratchRegister, src2); |
| + wrapper.BailoutIf(overflow); |
| + movq(dst, kScratchRegister); |
| + } else { |
| + addq(dst, src2); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
|
danno
2013/08/19 21:47:44
Always call BailoutIf, here and elsewhere.
|
| + } |
| + } else { |
| + ASSERT(!src2.AddressUsesRegister(dst)); |
| + movq(dst, src1); |
| + addq(dst, src2); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + } |
| } else { |
| - ASSERT(!src2.AddressUsesRegister(dst)); |
| - movq(dst, src1); |
| - addq(dst, src2); |
| - j(overflow, on_not_smi_result, near_jump); |
| + if (dst.is(src1)) { |
| + if (wrapper.NeedsKeepSourceOperandsIntact()) { |
| + ASSERT(wrapper.NeedsCheckOverflow()); |
| + movl(kScratchRegister, src1); |
| + addl(kScratchRegister, src2); |
| + wrapper.BailoutIf(overflow); |
| + movsxlq(dst, kScratchRegister); |
| + } else { |
| + addl(dst, src2); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + movsxlq(dst, dst); |
| + } |
| + } else { |
| + ASSERT(!src2.AddressUsesRegister(dst)); |
| + movl(dst, src1); |
| + addl(dst, src2); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + movsxlq(dst, dst); |
| + } |
| } |
| } |
| @@ -1703,18 +2017,34 @@ void MacroAssembler::SmiAdd(Register dst, |
| void MacroAssembler::SmiSub(Register dst, |
| Register src1, |
| Register src2, |
| - Label* on_not_smi_result, |
| - Label::Distance near_jump) { |
| - ASSERT_NOT_NULL(on_not_smi_result); |
| + const SmiInstructionWrapper& wrapper) { |
| ASSERT(!dst.is(src2)); |
| - if (dst.is(src1)) { |
| - cmpq(dst, src2); |
| - j(overflow, on_not_smi_result, near_jump); |
| - subq(dst, src2); |
| + if (SmiValuesAre32Bits()) { |
| + if (dst.is(src1)) { |
| + if (wrapper.NeedsCheckOverflow()) { |
| + cmpq(dst, src2); |
| + wrapper.BailoutIf(overflow); |
| + } |
| + subq(dst, src2); |
| + } else { |
| + movq(dst, src1); |
| + subq(dst, src2); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + } |
| } else { |
| - movq(dst, src1); |
| - subq(dst, src2); |
| - j(overflow, on_not_smi_result, near_jump); |
| + ASSERT(SmiValuesAre31Bits()); |
| + if (dst.is(src1)) { |
| + if (wrapper.NeedsCheckOverflow()) { |
| + cmpl(dst, src2); |
| + wrapper.BailoutIf(overflow); |
| + } |
| + subq(dst, src2); |
| + } else { |
| + movl(dst, src1); |
| + subl(dst, src2); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + movsxlq(dst, dst); |
| + } |
| } |
| } |
| @@ -1734,18 +2064,33 @@ void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) { |
| void MacroAssembler::SmiSub(Register dst, |
| Register src1, |
| const Operand& src2, |
| - Label* on_not_smi_result, |
| - Label::Distance near_jump) { |
| - ASSERT_NOT_NULL(on_not_smi_result); |
| - if (dst.is(src1)) { |
| - movq(kScratchRegister, src2); |
| - cmpq(src1, kScratchRegister); |
| - j(overflow, on_not_smi_result, near_jump); |
| - subq(src1, kScratchRegister); |
| + const SmiInstructionWrapper& wrapper) { |
|
danno
2013/08/19 21:47:44
This code seems identical to the version directly
|
| + if (SmiValuesAre32Bits()) { |
| + if (dst.is(src1)) { |
| + if (wrapper.NeedsCheckOverflow()) { |
| + cmpq(dst, src2); |
| + wrapper.BailoutIf(overflow); |
| + } |
| + subq(dst, src2); |
| + } else { |
| + movq(dst, src1); |
| + subq(dst, src2); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + } |
| } else { |
| - movq(dst, src1); |
| - subq(dst, src2); |
| - j(overflow, on_not_smi_result, near_jump); |
| + ASSERT(SmiValuesAre31Bits()); |
| + if (dst.is(src1)) { |
| + if (wrapper.NeedsCheckOverflow()) { |
| + cmpl(dst, src2); |
| + wrapper.BailoutIf(overflow); |
| + } |
| + subq(dst, src2); |
| + } else { |
| + movl(dst, src1); |
| + subl(dst, src2); |
| + if (wrapper.NeedsCheckOverflow()) wrapper.BailoutIf(overflow); |
| + movsxlq(dst, dst); |
| + } |
| } |
| } |
| @@ -1776,8 +2121,14 @@ void MacroAssembler::SmiMul(Register dst, |
| if (dst.is(src1)) { |
| Label failure, zero_correct_result; |
| movq(kScratchRegister, src1); // Create backup for later testing. |
| - SmiToInteger64(dst, src1); |
| - imul(dst, src2); |
| + if (SmiValuesAre32Bits()) { |
| + SmiToInteger64(dst, src1); |
| + imul(dst, src2); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + SmiToInteger32(dst, src1); |
| + imull(dst, src2); |
| + } |
| j(overflow, &failure, Label::kNear); |
| // Check for negative zero result. If product is zero, and one |
| @@ -1799,9 +2150,18 @@ void MacroAssembler::SmiMul(Register dst, |
| Set(dst, 0); |
| bind(&correct_result); |
| + if (SmiValuesAre31Bits()) { |
| + movsxlq(dst, dst); |
| + } |
| } else { |
| - SmiToInteger64(dst, src1); |
| - imul(dst, src2); |
| + if (SmiValuesAre32Bits()) { |
| + SmiToInteger64(dst, src1); |
| + imul(dst, src2); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + SmiToInteger32(dst, src1); |
| + imull(dst, src2); |
| + } |
| j(overflow, on_not_smi_result, near_jump); |
| // Check for negative zero result. If product is zero, and one |
| // argument is negative, go to slow case. |
| @@ -1814,6 +2174,9 @@ void MacroAssembler::SmiMul(Register dst, |
| xor_(kScratchRegister, src2); |
| j(negative, on_not_smi_result, near_jump); |
| bind(&correct_result); |
| + if (SmiValuesAre31Bits()) { |
| + movsxlq(dst, dst); |
| + } |
| } |
| } |
| @@ -1846,7 +2209,12 @@ void MacroAssembler::SmiDiv(Register dst, |
| // We overshoot a little and go to slow case if we divide min-value |
| // by any negative value, not just -1. |
| Label safe_div; |
| - testl(rax, Immediate(0x7fffffff)); |
| + if (SmiValuesAre32Bits()) { |
| + testl(rax, Immediate(0x7fffffff)); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + testl(rax, Immediate(0x3fffffff)); |
| + } |
| j(not_zero, &safe_div, Label::kNear); |
| testq(src2, src2); |
| if (src1.is(rax)) { |
| @@ -1940,8 +2308,13 @@ void MacroAssembler::SmiMod(Register dst, |
| void MacroAssembler::SmiNot(Register dst, Register src) { |
| ASSERT(!dst.is(kScratchRegister)); |
| ASSERT(!src.is(kScratchRegister)); |
| + if (SmiValuesAre32Bits()) { |
| // Set tag and padding bits before negating, so that they are zero afterwards. |
| - movl(kScratchRegister, Immediate(~0)); |
| + movl(kScratchRegister, Immediate(~0)); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + movl(kScratchRegister, Immediate(1)); |
| + } |
| if (dst.is(src)) { |
| xor_(dst, kScratchRegister); |
| } else { |
| @@ -1963,13 +2336,21 @@ void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) { |
| void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) { |
| if (constant->value() == 0) { |
| Set(dst, 0); |
| - } else if (dst.is(src)) { |
| - ASSERT(!dst.is(kScratchRegister)); |
| - Register constant_reg = GetSmiConstant(constant); |
| - and_(dst, constant_reg); |
| + } else if (SmiValuesAre32Bits()) { |
| + if (dst.is(src)) { |
| + ASSERT(!dst.is(kScratchRegister)); |
| + Register constant_reg = GetSmiConstant(constant); |
| + and_(dst, constant_reg); |
| + } else { |
| + LoadSmiConstant(dst, constant); |
| + and_(dst, src); |
| + } |
| } else { |
| - LoadSmiConstant(dst, constant); |
| - and_(dst, src); |
| + ASSERT(SmiValuesAre31Bits()); |
| + if (!dst.is(src)) { |
| + movq(dst, src); |
| + } |
| + and_(dst, SmiToImmediate(constant)); |
| } |
| } |
| @@ -1984,13 +2365,21 @@ void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) { |
| void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) { |
| - if (dst.is(src)) { |
| - ASSERT(!dst.is(kScratchRegister)); |
| - Register constant_reg = GetSmiConstant(constant); |
| - or_(dst, constant_reg); |
| + if (SmiValuesAre32Bits()) { |
| + if (dst.is(src)) { |
| + ASSERT(!dst.is(kScratchRegister)); |
| + Register constant_reg = GetSmiConstant(constant); |
| + or_(dst, constant_reg); |
| + } else { |
| + LoadSmiConstant(dst, constant); |
| + or_(dst, src); |
| + } |
| } else { |
| - LoadSmiConstant(dst, constant); |
| - or_(dst, src); |
| + ASSERT(SmiValuesAre31Bits()); |
| + if (!dst.is(src)) { |
| + movq(dst, src); |
| + } |
| + or_(dst, SmiToImmediate(constant)); |
| } |
| } |
| @@ -2005,13 +2394,21 @@ void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) { |
| void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) { |
| - if (dst.is(src)) { |
| - ASSERT(!dst.is(kScratchRegister)); |
| - Register constant_reg = GetSmiConstant(constant); |
| - xor_(dst, constant_reg); |
| + if (SmiValuesAre32Bits()) { |
| + if (dst.is(src)) { |
| + ASSERT(!dst.is(kScratchRegister)); |
| + Register constant_reg = GetSmiConstant(constant); |
| + xor_(dst, constant_reg); |
| + } else { |
| + LoadSmiConstant(dst, constant); |
| + xor_(dst, src); |
| + } |
| } else { |
| - LoadSmiConstant(dst, constant); |
| - xor_(dst, src); |
| + ASSERT(SmiValuesAre31Bits()); |
| + if (!dst.is(src)) { |
| + movq(dst, src); |
| + } |
| + xor_(dst, SmiToImmediate(constant)); |
| } |
| } |
| @@ -2031,14 +2428,32 @@ void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst, |
| } |
| -void MacroAssembler::SmiShiftLeftConstant(Register dst, |
| - Register src, |
| - int shift_value) { |
| - if (!dst.is(src)) { |
| - movq(dst, src); |
| - } |
| - if (shift_value > 0) { |
| - shl(dst, Immediate(shift_value)); |
| +void MacroAssembler::SmiShiftLeftConstant( |
| + Register dst, |
| + Register src, |
| + int shift_value, |
| + const SmiInstructionWrapper &wrapper) { |
| + if (SmiValuesAre32Bits()) { |
| + if (!dst.is(src)) { |
| + movq(dst, src); |
| + } |
| + if (shift_value > 0) { |
| + shl(dst, Immediate(shift_value)); |
| + } |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + if (!dst.is(src)) { |
| + movq(dst, src); |
| + } else { |
| + ASSERT(!wrapper.NeedsKeepSourceOperandsIntact()); |
| + } |
| + if (shift_value > 0) { |
| + SmiToInteger32(dst, dst); |
| + shll(dst, Immediate(shift_value)); |
| + Condition is_valid = CheckInteger32ValidSmiValue(dst); |
| + wrapper.BailoutIf(NegateCondition(is_valid)); |
| + Integer32ToSmi(dst, dst); |
| + } |
| } |
| } |
| @@ -2052,27 +2467,76 @@ void MacroAssembler::SmiShiftLogicalRightConstant( |
| } else { |
| movq(dst, src); |
| if (shift_value == 0) { |
| - testq(dst, dst); |
| + if (SmiValuesAre32Bits()) { |
| + testq(dst, dst); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + testl(dst, dst); |
| + } |
| j(negative, on_not_smi_result, near_jump); |
| } |
| - shr(dst, Immediate(shift_value + kSmiShift)); |
| - shl(dst, Immediate(kSmiShift)); |
| + if (SmiValuesAre32Bits()) { |
| + shr(dst, Immediate(shift_value + kSmiShift)); |
| + shl(dst, Immediate(kSmiShift)); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + SmiToInteger32(dst, dst); |
| + shrl(dst, Immediate(shift_value)); |
| + JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump); |
| + shll(dst, Immediate(kSmiShift)); |
| + } |
| } |
| } |
| void MacroAssembler::SmiShiftLeft(Register dst, |
| Register src1, |
| - Register src2) { |
| - ASSERT(!dst.is(rcx)); |
| - // Untag shift amount. |
| - if (!dst.is(src1)) { |
| - movq(dst, src1); |
| + Register src2, |
| + Label* on_not_smi_result) { |
| + if (SmiValuesAre32Bits()) { |
| + ASSERT(!dst.is(rcx)); |
|
danno
2013/08/19 21:47:44
Here and elsewhere, I think it really makes testab
|
| + // Untag shift amount. |
| + if (!dst.is(src1)) { |
| + movq(dst, src1); |
| + } |
| + SmiToInteger32(rcx, src2); |
| + // Shift amount specified by lower 5 bits, not six as the shl opcode. |
| + and_(rcx, Immediate(0x1f)); |
| + shl_cl(dst); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + ASSERT(!dst.is(kScratchRegister)); |
| + ASSERT(!src1.is(kScratchRegister)); |
| + ASSERT(!src2.is(kScratchRegister)); |
| + ASSERT(!dst.is(rcx)); |
| + Label result_ok; |
| + |
| + if (dst.is(src1)) { |
| + UNIMPLEMENTED(); // Not used. |
| + } else { |
| + if (src1.is(rcx) || src2.is(rcx)) { |
| + movq(kScratchRegister, rcx); |
| + } |
| + movq(dst, src1); |
| + SmiToInteger32(dst, dst); |
| + // Untag shift amount. |
| + SmiToInteger32(rcx, src2); |
| + // Shift amount specified by lower 5 bits, not six as the shl opcode. |
| + andl(rcx, Immediate(0x1f)); |
| + shll_cl(dst); |
| + JumpIfValidSmiValue(dst, &result_ok, Label::kNear); |
| + if (src1.is(rcx) || src2.is(rcx)) { |
| + if (src1.is(rcx)) { |
| + movq(src1, kScratchRegister); |
| + } else { |
| + movq(src2, kScratchRegister); |
| + } |
| + } |
| + jmp(on_not_smi_result); |
| + bind(&result_ok); |
| + Integer32ToSmi(dst, dst); |
| + } |
| } |
| - SmiToInteger32(rcx, src2); |
| - // Shift amount specified by lower 5 bits, not six as the shl opcode. |
| - and_(rcx, Immediate(0x1f)); |
| - shl_cl(dst); |
| } |
| @@ -2085,33 +2549,35 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst, |
| ASSERT(!src1.is(kScratchRegister)); |
| ASSERT(!src2.is(kScratchRegister)); |
| ASSERT(!dst.is(rcx)); |
| - // dst and src1 can be the same, because the one case that bails out |
| - // is a shift by 0, which leaves dst, and therefore src1, unchanged. |
| + Label result_ok; |
| + |
| + if (dst.is(src1)) { |
| + ASSERT(SmiValuesAre32Bits()); |
| + } else { |
| + movq(dst, src1); |
| + } |
| + |
| if (src1.is(rcx) || src2.is(rcx)) { |
| movq(kScratchRegister, rcx); |
| } |
| - if (!dst.is(src1)) { |
| - movq(dst, src1); |
| - } |
| + movq(dst, src1); |
| + SmiToInteger32(dst, dst); |
| SmiToInteger32(rcx, src2); |
| - orl(rcx, Immediate(kSmiShift)); |
| - shr_cl(dst); // Shift is rcx modulo 0x1f + 32. |
| - shl(dst, Immediate(kSmiShift)); |
| - testq(dst, dst); |
| + shrl_cl(dst); |
| + JumpIfUIntValidSmiValue(dst, &result_ok, Label::kNear); |
| if (src1.is(rcx) || src2.is(rcx)) { |
| - Label positive_result; |
| - j(positive, &positive_result, Label::kNear); |
| if (src1.is(rcx)) { |
| movq(src1, kScratchRegister); |
| } else { |
| movq(src2, kScratchRegister); |
| } |
| - jmp(on_not_smi_result, near_jump); |
| - bind(&positive_result); |
| - } else { |
| - // src2 was zero and src1 negative. |
| - j(negative, on_not_smi_result, near_jump); |
| } |
| + if (dst.is(src1)) { |
| + Integer32ToSmi(dst, dst); |
| + } |
| + jmp(on_not_smi_result); |
| + bind(&result_ok); |
| + Integer32ToSmi(dst, dst); |
| } |
| @@ -2131,9 +2597,15 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst, |
| movq(dst, src1); |
| } |
| SmiToInteger32(rcx, src2); |
| - orl(rcx, Immediate(kSmiShift)); |
| - sar_cl(dst); // Shift 32 + original rcx & 0x1f. |
| - shl(dst, Immediate(kSmiShift)); |
| + if (SmiValuesAre32Bits()) { |
| + orl(rcx, Immediate(kSmiShift)); |
| + sar_cl(dst); // Shift 32 + original rcx & 0x1f. |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + SmiToInteger32(dst, dst); |
| + sarl_cl(dst); |
| + } |
| + Integer32ToSmi(dst, dst); |
| if (src1.is(rcx)) { |
| movq(src1, kScratchRegister); |
| } else if (src2.is(rcx)) { |
| @@ -2190,14 +2662,24 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst, |
| if (!dst.is(src)) { |
| movq(dst, src); |
| } |
| - if (shift < kSmiShift) { |
| - sar(dst, Immediate(kSmiShift - shift)); |
| + if (SmiValuesAre32Bits()) { |
| + if (shift < kSmiShift) { |
| + sar(dst, Immediate(kSmiShift - shift)); |
| + } else { |
| + shl(dst, Immediate(shift - kSmiShift)); |
| + } |
| + return SmiIndex(dst, times_1); |
| } else { |
| - shl(dst, Immediate(shift - kSmiShift)); |
| + ASSERT(SmiValuesAre31Bits()); |
| + if (shift == times_1) { |
| + sar(dst, Immediate(kSmiShift)); |
| + return SmiIndex(dst, times_1); |
| + } |
| + return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1)); |
| } |
| - return SmiIndex(dst, times_1); |
| } |
| + |
| SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst, |
| Register src, |
| int shift) { |
| @@ -2207,21 +2689,88 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst, |
| movq(dst, src); |
| } |
| neg(dst); |
| - if (shift < kSmiShift) { |
| - sar(dst, Immediate(kSmiShift - shift)); |
| + if (SmiValuesAre32Bits()) { |
| + if (shift < kSmiShift) { |
| + sar(dst, Immediate(kSmiShift - shift)); |
| + } else { |
| + shl(dst, Immediate(shift - kSmiShift)); |
| + } |
| + return SmiIndex(dst, times_1); |
| } else { |
| - shl(dst, Immediate(shift - kSmiShift)); |
| + ASSERT(SmiValuesAre31Bits()); |
| + if (shift == times_1) { |
| + sar(dst, Immediate(kSmiShift)); |
| + return SmiIndex(dst, times_1); |
| + } |
| + return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1)); |
| } |
| - return SmiIndex(dst, times_1); |
| } |
| void MacroAssembler::AddSmiField(Register dst, const Operand& src) { |
| - ASSERT_EQ(0, kSmiShift % kBitsPerByte); |
| - addl(dst, Operand(src, kSmiShift / kBitsPerByte)); |
| + if (SmiValuesAre32Bits()) { |
| + ASSERT_EQ(0, kSmiShift % kBitsPerByte); |
| + addl(dst, Operand(src, kSmiShift / kBitsPerByte)); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + SmiToInteger32(kScratchRegister, src); |
| + addl(dst, kScratchRegister); |
| + } |
| } |
| +void MacroAssembler::Test(const Operand& src, Smi* source) { |
| + if (SmiValuesAre32Bits()) { |
| + testl(Operand(src, kIntSize), Immediate(source->value())); |
| + } else { |
| + ASSERT(SmiValuesAre31Bits()); |
| + testl(src, SmiToImmediate(source)); |
| + } |
| +} |
| + |
| + |
| +void MacroAssembler::TestBit(const Operand& src, int bits) { |
| + int byte_offset = bits / kBitsPerByte; |
| + int bit_in_byte = bits & (kBitsPerByte - 1); |
| + testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte)); |
| +} |
| + |
| + |
| +void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) { |
| + movq(scratch, src); |
| + // High bits. |
| + shr(src, Immediate(64 - kSmiShift)); |
| + shl(src, Immediate(kSmiShift)); |
| + push(src); |
| + // Low bits. |
| + shl(scratch, Immediate(kSmiShift)); |
| + push(scratch); |
| +} |
| + |
| + |
| +void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) { |
| + pop(scratch); |
| + // Low bits. |
| + shr(scratch, Immediate(kSmiShift)); |
| + pop(dst); |
| + shr(dst, Immediate(kSmiShift)); |
| + // High bits. |
| + shl(dst, Immediate(64 - kSmiShift)); |
| + or_(dst, scratch); |
| +} |
| + |
| + |
| +bool MacroAssembler::IsUnsafeSmiOperator(Token::Value op) { |
| + return (op == Token::ADD || op == Token::SUB || op == Token::MUL || |
| + op == Token::DIV || (SmiValuesAre31Bits() && op == Token::SHL) || |
| + op == Token::SHR); |
| +} |
| + |
| + |
| +// End of smi tagging, untagging and tag detection. |
| +// ---------------------------------------------------------------------------- |
| + |
| + |
| void MacroAssembler::JumpIfNotString(Register object, |
| Register object_map, |
| Label* not_string, |
| @@ -2479,18 +3028,6 @@ void MacroAssembler::Drop(int stack_elements) { |
| } |
| -void MacroAssembler::Test(const Operand& src, Smi* source) { |
| - testl(Operand(src, kIntSize), Immediate(source->value())); |
| -} |
| - |
| - |
| -void MacroAssembler::TestBit(const Operand& src, int bits) { |
| - int byte_offset = bits / kBitsPerByte; |
| - int bit_in_byte = bits & (kBitsPerByte - 1); |
| - testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte)); |
| -} |
| - |
| - |
| void MacroAssembler::Jump(ExternalReference ext) { |
| LoadAddress(kScratchRegister, ext); |
| jmp(kScratchRegister); |