| Index: src/x64/macro-assembler-x64.cc
|
| diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
|
| index 68f0613e584629b211d2863254ad255cb9c95ac8..a6629796902ddc0df45738cc4a4b937ed2ac2cea 100644
|
| --- a/src/x64/macro-assembler-x64.cc
|
| +++ b/src/x64/macro-assembler-x64.cc
|
| @@ -984,6 +984,20 @@ void MacroAssembler::Set(const Operand& dst, int64_t x) {
|
| }
|
|
|
|
|
| +// ----------------------------------------------------------------------------
|
| +// Smi tagging, untagging and tag detection.
|
| +
|
| +
|
| +static inline Immediate SmiToImmediate(Smi* src) {
|
| + if (kSmiValueSize == 32) {
|
| + UNREACHABLE();
|
| +    return Immediate(2);  // Dummy value to satisfy the return type; never reached.
|
| + } else {
|
| + return Immediate(static_cast<int32_t>(reinterpret_cast<intptr_t>(src)));
|
| + }
|
| +}
|
| +
|
| +
|
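Note (not part of the patch): SmiToImmediate relies on the 31-bit encoding, where
the tagged value is the payload shifted left by one, kept sign-extended in the low
32 bits, so the whole tagged word fits a 32-bit immediate. With 32-bit smis
(kSmiShift == 32) the payload occupies the upper half, which is why that branch is
unreachable. A minimal sketch of the assumed encoding:

  #include <cstdint>

  // Assumes kSmiTag == 0 and kSmiShift == 1 (the kSmiValueSize == 31 layout).
  int64_t Tag31(int32_t payload) {
    // Low 32 bits hold payload << 1; the register keeps the sign extension.
    return static_cast<int32_t>(static_cast<uint32_t>(payload) << 1);
  }
  int32_t AsImmediate(int64_t tagged) {
    return static_cast<int32_t>(tagged);  // the value SmiToImmediate truncates to
  }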
| bool MacroAssembler::IsUnsafeInt(const int x) {
|
| static const int kMaxBits = 17;
|
| return !is_intn(x, kMaxBits);
|
| @@ -992,32 +1006,51 @@ bool MacroAssembler::IsUnsafeInt(const int x) {
|
|
|
| void MacroAssembler::SafeMove(Register dst, Smi* src) {
|
| ASSERT(!dst.is(kScratchRegister));
|
| - ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
|
| - if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
|
| - Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
|
| - Move(kScratchRegister, Smi::FromInt(jit_cookie()));
|
| - xor_(dst, kScratchRegister);
|
| + if (kSmiValueSize == 32) {
|
| + if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
|
| + Move(dst, Smi::FromInt(src->value() ^ jit_cookie()));
|
| + Move(kScratchRegister, Smi::FromInt(jit_cookie()));
|
| + xor_(dst, kScratchRegister);
|
| + } else {
|
| + Move(dst, src);
|
| + }
|
| } else {
|
| - Move(dst, src);
|
| + ASSERT(kSmiValueSize == 31);
|
| + if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
|
| + movq(dst, Immediate(static_cast<int>(reinterpret_cast<intptr_t>(src)) ^
|
| + jit_cookie()));
|
| + movq(kScratchRegister, Immediate(jit_cookie()));
|
| + xor_(dst, kScratchRegister);
|
| + } else {
|
| + Move(dst, src);
|
| + }
|
| }
|
| }
|
|
|
|
|
| void MacroAssembler::SafePush(Smi* src) {
|
| - ASSERT(kSmiValueSize == 32); // JIT cookie can be converted to Smi.
|
| - if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
|
| - Push(Smi::FromInt(src->value() ^ jit_cookie()));
|
| - Move(kScratchRegister, Smi::FromInt(jit_cookie()));
|
| - xor_(Operand(rsp, 0), kScratchRegister);
|
| + if (kSmiValueSize == 32) {
|
| + if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
|
| + Push(Smi::FromInt(src->value() ^ jit_cookie()));
|
| + Move(kScratchRegister, Smi::FromInt(jit_cookie()));
|
| + xor_(Operand(rsp, 0), kScratchRegister);
|
| + } else {
|
| + Push(src);
|
| + }
|
| } else {
|
| - Push(src);
|
| + ASSERT(kSmiValueSize == 31);
|
| + if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
|
| + push(Immediate(static_cast<int>(reinterpret_cast<intptr_t>(src)) ^
|
| + jit_cookie()));
|
| + movq(kScratchRegister, Immediate(jit_cookie()));
|
| + xor_(Operand(rsp, 0), kScratchRegister);
|
| + } else {
|
| + Push(src);
|
| + }
|
| }
|
| }
|
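Note (not part of the patch): SafeMove/SafePush implement JIT-cookie masking so
that attacker-chosen constants never appear verbatim in generated code (a
JIT-spraying defence). A rough sketch of the idea; the cookie value below is made
up:

  #include <cstdint>

  int32_t Masked(int32_t untrusted, int32_t cookie) {
    return untrusted ^ cookie;   // the only form embedded in the code stream
  }
  int32_t Unmasked(int32_t embedded, int32_t cookie) {
    return embedded ^ cookie;    // xor_(dst, kScratchRegister) at run time
  }
  // Unmasked(Masked(0x41414141, 0x5bd1e995), 0x5bd1e995) == 0x41414141,
  // yet neither emitted word equals the attacker-controlled constant.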
|
|
|
|
| -// ----------------------------------------------------------------------------
|
| -// Smi tagging, untagging and tag detection.
|
| -
|
| Register MacroAssembler::GetSmiConstant(Smi* source) {
|
| int value = source->value();
|
| if (value == 0) {
|
| @@ -1097,7 +1130,13 @@ void MacroAssembler::Integer32ToSmi(Register dst, Register src) {
|
| if (!dst.is(src)) {
|
| movl(dst, src);
|
| }
|
| - shl(dst, Immediate(kSmiShift));
|
| + if (kSmiValueSize == 32) {
|
| + shl(dst, Immediate(kSmiShift));
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + shll(dst, Immediate(kSmiShift));
|
| + movsxlq(dst, dst);
|
| + }
|
| }
|
|
|
|
|
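Note (not part of the patch): the 31-bit path pairs shll with movsxlq because
32-bit operations zero-extend on x64, so a negative payload must be
re-sign-extended to stay in canonical tagged form. A rough C++ sketch of the
round trip, assuming kSmiShift == 1:

  #include <cstdint>

  int64_t Integer32ToSmi31(int32_t v) {
    uint32_t shifted = static_cast<uint32_t>(v) << 1;  // shll(dst, Immediate(1))
    return static_cast<int32_t>(shifted);              // movsxlq(dst, dst)
  }
  int32_t SmiToInteger32_31(int64_t tagged) {
    return static_cast<int32_t>(tagged) >> 1;          // sarl(dst, Immediate(1))
  }
  // Integer32ToSmi31(-3) == -6; SmiToInteger32_31(-6) == -3.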
| @@ -1113,8 +1152,14 @@ void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) {
|
| }
|
| bind(&ok);
|
| }
|
| - ASSERT(kSmiShift % kBitsPerByte == 0);
|
| - movl(Operand(dst, kSmiShift / kBitsPerByte), src);
|
| + if (kSmiValueSize == 32) {
|
| + ASSERT(kSmiShift % kBitsPerByte == 0);
|
| + movl(Operand(dst, kSmiShift / kBitsPerByte), src);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + Integer32ToSmi(kScratchRegister, src);
|
| + movq(dst, kScratchRegister);
|
| + }
|
| }
|
|
|
|
|
| @@ -1126,7 +1171,7 @@ void MacroAssembler::Integer64PlusConstantToSmi(Register dst,
|
| } else {
|
| leal(dst, Operand(src, constant));
|
| }
|
| - shl(dst, Immediate(kSmiShift));
|
| + Integer32ToSmi(dst, dst);
|
| }
|
|
|
|
|
| @@ -1135,12 +1180,23 @@ void MacroAssembler::SmiToInteger32(Register dst, Register src) {
|
| if (!dst.is(src)) {
|
| movq(dst, src);
|
| }
|
| - shr(dst, Immediate(kSmiShift));
|
| + if (kSmiValueSize == 32) {
|
| + shr(dst, Immediate(kSmiShift));
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + sarl(dst, Immediate(kSmiShift));
|
| + }
|
| }
|
|
|
|
|
| void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) {
|
| - movl(dst, Operand(src, kSmiShift / kBitsPerByte));
|
| + if (kSmiValueSize == 32) {
|
| + movl(dst, Operand(src, kSmiShift / kBitsPerByte));
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + movl(dst, src);
|
| + sarl(dst, Immediate(kSmiShift));
|
| + }
|
| }
|
|
|
|
|
| @@ -1154,20 +1210,36 @@ void MacroAssembler::SmiToInteger64(Register dst, Register src) {
|
|
|
|
|
| void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) {
|
| - movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
|
| + if (kSmiValueSize == 32) {
|
| + movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + movq(dst, src);
|
| + SmiToInteger64(dst, dst);
|
| + }
|
| }
|
|
|
|
|
| void MacroAssembler::SmiTest(Register src) {
|
| AssertSmi(src);
|
| - testq(src, src);
|
| + if (kSmiValueSize == 32) {
|
| + testq(src, src);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + testl(src, src);
|
| + }
|
| }
|
|
|
|
|
| void MacroAssembler::SmiCompare(Register smi1, Register smi2) {
|
| AssertSmi(smi1);
|
| AssertSmi(smi2);
|
| - cmpq(smi1, smi2);
|
| + if (kSmiValueSize == 32) {
|
| + cmpq(smi1, smi2);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + cmpl(smi1, smi2);
|
| + }
|
| }
|
|
|
|
|
| @@ -1180,10 +1252,20 @@ void MacroAssembler::SmiCompare(Register dst, Smi* src) {
|
| void MacroAssembler::Cmp(Register dst, Smi* src) {
|
| ASSERT(!dst.is(kScratchRegister));
|
| if (src->value() == 0) {
|
| - testq(dst, dst);
|
| + if (kSmiValueSize == 32) {
|
| + testq(dst, dst);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + testl(dst, dst);
|
| + }
|
| } else {
|
| Register constant_reg = GetSmiConstant(src);
|
| - cmpq(dst, constant_reg);
|
| + if (kSmiValueSize == 32) {
|
| + cmpq(dst, constant_reg);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + cmpl(dst, constant_reg);
|
| + }
|
| }
|
| }
|
|
|
| @@ -1191,33 +1273,59 @@ void MacroAssembler::Cmp(Register dst, Smi* src) {
|
| void MacroAssembler::SmiCompare(Register dst, const Operand& src) {
|
| AssertSmi(dst);
|
| AssertSmi(src);
|
| - cmpq(dst, src);
|
| + if (kSmiValueSize == 32) {
|
| + cmpq(dst, src);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + cmpl(dst, src);
|
| + }
|
| }
|
|
|
|
|
| void MacroAssembler::SmiCompare(const Operand& dst, Register src) {
|
| AssertSmi(dst);
|
| AssertSmi(src);
|
| - cmpq(dst, src);
|
| + if (kSmiValueSize == 32) {
|
| + cmpq(dst, src);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + cmpl(dst, src);
|
| + }
|
| }
|
|
|
|
|
| void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) {
|
| AssertSmi(dst);
|
| - cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
|
| + if (kSmiValueSize == 32) {
|
| + cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + cmpl(dst, SmiToImmediate(src));
|
| + }
|
| }
|
|
|
|
|
| void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
|
| - // The Operand cannot use the smi register.
|
| - Register smi_reg = GetSmiConstant(src);
|
| - ASSERT(!dst.AddressUsesRegister(smi_reg));
|
| - cmpq(dst, smi_reg);
|
| + if (kSmiValueSize == 32) {
|
| + // The Operand cannot use the smi register.
|
| + Register smi_reg = GetSmiConstant(src);
|
| + ASSERT(!dst.AddressUsesRegister(smi_reg));
|
| + cmpq(dst, smi_reg);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + cmpl(dst, SmiToImmediate(src));
|
| + }
|
| }
|
|
|
|
|
| void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) {
|
| - cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
|
| + if (kSmiValueSize == 32) {
|
| + cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + SmiToInteger32(kScratchRegister, dst);
|
| + cmpl(kScratchRegister, src);
|
| + }
|
| }
|
|
|
|
|
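Note (not part of the patch): the cmpq -> cmpl rewrites in this group are sound
because a canonical 31-bit smi is the sign extension of its low 32 bits, so two
such values compare identically at either width. The invariant, as a sketch:

  #include <cassert>
  #include <cstdint>

  void CompareWidthInvariant(int32_t a32, int32_t b32) {
    int64_t a64 = a32, b64 = b32;  // canonical tagged form is sign-extended
    assert((a64 < b64) == (a32 < b32));
    assert((a64 == b64) == (a32 == b32));
  }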
| @@ -1246,7 +1354,12 @@ void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
|
| int power) {
|
| ASSERT((0 <= power) && (power < 32));
|
| if (dst.is(src)) {
|
| - shr(dst, Immediate(power + kSmiShift));
|
| + if (kSmiValueSize == 32) {
|
| + shr(dst, Immediate(power + kSmiShift));
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + shrl(dst, Immediate(power + kSmiShift));
|
| + }
|
| } else {
|
| UNIMPLEMENTED(); // Not used.
|
| }
|
| @@ -1300,8 +1413,15 @@ Condition MacroAssembler::CheckBothSmi(Register first, Register second) {
|
| return CheckSmi(first);
|
| }
|
| STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3);
|
| - leal(kScratchRegister, Operand(first, second, times_1, 0));
|
| - testb(kScratchRegister, Immediate(0x03));
|
| + if (kSmiValueSize == 32) {
|
| + leal(kScratchRegister, Operand(first, second, times_1, 0));
|
| + testb(kScratchRegister, Immediate(0x03));
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + movl(kScratchRegister, first);
|
| + orl(kScratchRegister, second);
|
| + testb(kScratchRegister, Immediate(kSmiTagMask));
|
| + }
|
| return zero;
|
| }
|
|
|
| @@ -1341,22 +1461,39 @@ Condition MacroAssembler::CheckEitherSmi(Register first,
|
| Condition MacroAssembler::CheckIsMinSmi(Register src) {
|
| ASSERT(!src.is(kScratchRegister));
|
| // If we overflow by subtracting one, it's the minimal smi value.
|
| - cmpq(src, kSmiConstantRegister);
|
| + if (kSmiValueSize == 32) {
|
| + cmpq(src, kSmiConstantRegister);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + cmpl(src, kSmiConstantRegister);
|
| + }
|
| return overflow;
|
| }
|
|
|
|
|
| Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) {
|
| - // A 32-bit integer value can always be converted to a smi.
|
| - return always;
|
| + if (kSmiValueSize == 32) {
|
| + // A 32-bit integer value can always be converted to a smi.
|
| + return always;
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + cmpl(src, Immediate(0xc0000000));
|
| + return positive;
|
| + }
|
| }
|
|
|
|
|
| Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) {
|
| - // An unsigned 32-bit integer value is valid as long as the high bit
|
| - // is not set.
|
| - testl(src, src);
|
| - return positive;
|
| + if (kSmiValueSize == 32) {
|
| + // An unsigned 32-bit integer value is valid as long as the high bit
|
| + // is not set.
|
| + testl(src, src);
|
| + return positive;
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + testl(src, Immediate(0xc0000000));
|
| + return zero;
|
| + }
|
| }
|
|
|
|
|
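Note (not part of the patch): a sketch of the two range checks above, assuming
two's complement. cmpl(src, Immediate(0xc0000000)) computes src - (-2^30), whose
sign bit is clear ('positive') exactly when src lies in [-2^30, 2^30); the
unsigned check just requires the top two bits to be clear:

  #include <cstdint>

  bool IsValidSmi31(int32_t v) {     // mirrors cmpl + the 'positive' condition
    return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
  }
  bool IsValidUnsignedSmi31(uint32_t v) {  // mirrors testl + 'zero'
    return (v & 0xc0000000u) == 0;         // true exactly for v < 2^30
  }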
| @@ -1389,6 +1526,14 @@ void MacroAssembler::JumpIfNotValidSmiValue(Register src,
|
| }
|
|
|
|
|
| +void MacroAssembler::JumpIfValidSmiValue(Register src,
|
| + Label* on_valid,
|
| + Label::Distance near_jump) {
|
| + Condition is_valid = CheckInteger32ValidSmiValue(src);
|
| + j(is_valid, on_valid, near_jump);
|
| +}
|
| +
|
| +
|
| void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
|
| Label* on_invalid,
|
| Label::Distance near_jump) {
|
| @@ -1397,6 +1542,14 @@ void MacroAssembler::JumpIfUIntNotValidSmiValue(Register src,
|
| }
|
|
|
|
|
| +void MacroAssembler::JumpIfUIntValidSmiValue(Register src,
|
| + Label* on_valid,
|
| + Label::Distance near_jump) {
|
| + Condition is_valid = CheckUInteger32ValidSmiValue(src);
|
| + j(is_valid, on_valid, near_jump);
|
| +}
|
| +
|
| +
|
| void MacroAssembler::JumpIfSmi(Register src,
|
| Label* on_smi,
|
| Label::Distance near_jump) {
|
| @@ -1462,11 +1615,19 @@ void MacroAssembler::SmiTryAddConstant(Register dst,
|
| JumpIfNotSmi(src, on_not_smi_result, near_jump);
|
| Register tmp = (dst.is(src) ? kScratchRegister : dst);
|
| LoadSmiConstant(tmp, constant);
|
| - addq(tmp, src);
|
| + if (kSmiValueSize == 32) {
|
| + addq(tmp, src);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + addl(tmp, src);
|
| + }
|
| j(overflow, on_not_smi_result, near_jump);
|
| if (dst.is(src)) {
|
| movq(dst, tmp);
|
| }
|
| + if (kSmiValueSize == 31) {
|
| + movsxlq(dst, dst);
|
| + }
|
| }
|
|
|
|
|
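Note (not part of the patch): the addl / j(overflow) / movsxlq pattern that
starts here recurs through the arithmetic helpers below. 32-bit operations
zero-extend into the upper half on x64, so a negative 31-bit result is left
non-canonical until movsxlq re-sign-extends it:

  #include <cstdint>

  uint64_t After32BitOp(int32_t result32) {
    return static_cast<uint32_t>(result32);  // addl leaves the upper half zero
  }
  int64_t AfterMovsxlq(uint64_t reg) {
    return static_cast<int32_t>(reg);        // canonical, sign-extended again
  }
  // After32BitOp(-6) == 0x00000000FFFFFFFA, which is not a canonical smi;
  // AfterMovsxlq of that == 0xFFFFFFFFFFFFFFFA, the canonical tagged -6.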
| @@ -1521,7 +1682,13 @@ void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) {
|
|
|
| void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) {
|
| if (constant->value() != 0) {
|
| - addl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(constant->value()));
|
| + if (kSmiValueSize == 32) {
|
| + addl(Operand(dst, kSmiShift / kBitsPerByte),
|
| + Immediate(constant->value()));
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + addq(dst, SmiToImmediate(constant));
|
| + }
|
| }
|
| }
|
|
|
| @@ -1539,13 +1706,31 @@ void MacroAssembler::SmiAddConstant(Register dst,
|
| ASSERT(!dst.is(kScratchRegister));
|
|
|
| LoadSmiConstant(kScratchRegister, constant);
|
| - addq(kScratchRegister, src);
|
| + if (kSmiValueSize == 32) {
|
| + addq(kScratchRegister, src);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + addl(kScratchRegister, src);
|
| + }
|
| j(overflow, on_not_smi_result, near_jump);
|
| - movq(dst, kScratchRegister);
|
| + if (kSmiValueSize == 32) {
|
| + movq(dst, kScratchRegister);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + movsxlq(dst, kScratchRegister);
|
| + }
|
| } else {
|
| LoadSmiConstant(dst, constant);
|
| - addq(dst, src);
|
| + if (kSmiValueSize == 32) {
|
| + addq(dst, src);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + addl(dst, src);
|
| + }
|
| j(overflow, on_not_smi_result, near_jump);
|
| + if (kSmiValueSize == 31) {
|
| + movsxlq(dst, dst);
|
| + }
|
| }
|
| }
|
|
|
| @@ -1561,10 +1746,16 @@ void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) {
|
| subq(dst, constant_reg);
|
| } else {
|
| if (constant->value() == Smi::kMinValue) {
|
| - LoadSmiConstant(dst, constant);
|
| - // Adding and subtracting the min-value gives the same result, it only
|
| - // differs on the overflow bit, which we don't check here.
|
| - addq(dst, src);
|
| + if (kSmiValueSize == 32) {
|
| + LoadSmiConstant(dst, constant);
|
| + // Adding and subtracting the min-value gives the same result, it only
|
| + // differs on the overflow bit, which we don't check here.
|
| + addq(dst, src);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + movq(dst, src);
|
| + subq(dst, SmiToImmediate(constant));
|
| + }
|
| } else {
|
| // Subtract by adding the negation.
|
| LoadSmiConstant(dst, Smi::FromInt(-constant->value()));
|
| @@ -1588,32 +1779,63 @@ void MacroAssembler::SmiSubConstant(Register dst,
|
| if (constant->value() == Smi::kMinValue) {
|
| // Subtracting min-value from any non-negative value will overflow.
|
| // We test the non-negativeness before doing the subtraction.
|
| - testq(src, src);
|
| + if (kSmiValueSize == 32) {
|
| + testq(src, src);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + testl(src, src);
|
| + }
|
| j(not_sign, on_not_smi_result, near_jump);
|
| LoadSmiConstant(kScratchRegister, constant);
|
| subq(dst, kScratchRegister);
|
| } else {
|
| // Subtract by adding the negation.
|
| LoadSmiConstant(kScratchRegister, Smi::FromInt(-constant->value()));
|
| - addq(kScratchRegister, dst);
|
| + if (kSmiValueSize == 32) {
|
| + addq(kScratchRegister, dst);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + addl(kScratchRegister, dst);
|
| + }
|
| j(overflow, on_not_smi_result, near_jump);
|
| - movq(dst, kScratchRegister);
|
| + if (kSmiValueSize == 32) {
|
| + movq(dst, kScratchRegister);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + movsxlq(dst, kScratchRegister);
|
| + }
|
| }
|
| } else {
|
| if (constant->value() == Smi::kMinValue) {
|
| // Subtracting min-value from any non-negative value will overflow.
|
| // We test the non-negativeness before doing the subtraction.
|
| - testq(src, src);
|
| - j(not_sign, on_not_smi_result, near_jump);
|
| - LoadSmiConstant(dst, constant);
|
| - // Adding and subtracting the min-value gives the same result, it only
|
| - // differs on the overflow bit, which we don't check here.
|
| - addq(dst, src);
|
| + if (kSmiValueSize == 32) {
|
| + testq(src, src);
|
| + j(not_sign, on_not_smi_result, near_jump);
|
| + LoadSmiConstant(dst, constant);
|
| + // Adding and subtracting the min-value gives the same result, it only
|
| + // differs on the overflow bit, which we don't check here.
|
| + addq(dst, src);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + testl(src, src);
|
| + j(not_sign, on_not_smi_result, near_jump);
|
| + movq(dst, src);
|
| + subq(dst, SmiToImmediate(constant));
|
| + }
|
| } else {
|
| // Subtract by adding the negation.
|
| LoadSmiConstant(dst, Smi::FromInt(-(constant->value())));
|
| - addq(dst, src);
|
| + if (kSmiValueSize == 32) {
|
| + addq(dst, src);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + addl(dst, src);
|
| + }
|
| j(overflow, on_not_smi_result, near_jump);
|
| + if (kSmiValueSize == 31) {
|
| + movsxlq(dst, dst);
|
| + }
|
| }
|
| }
|
| }
|
| @@ -1628,13 +1850,23 @@ void MacroAssembler::SmiNeg(Register dst,
|
| movq(kScratchRegister, src);
|
| neg(dst); // Low 32 bits are retained as zero by negation.
|
| // Test if result is zero or Smi::kMinValue.
|
| -  cmpq(dst, kScratchRegister);
|
| -  j(not_equal, on_smi_result, near_jump);
|
| -  movq(src, kScratchRegister);
|
| +  if (kSmiValueSize == 32) {
|
| +    cmpq(dst, kScratchRegister);
|
| +  } else {
|
| +    ASSERT(kSmiValueSize == 31);
|
| +    cmpl(dst, kScratchRegister);
|
| +  }
|
| +  j(not_equal, on_smi_result, near_jump);
|
| +  movq(src, kScratchRegister);
|
| } else {
|
| movq(dst, src);
|
| neg(dst);
|
| - cmpq(dst, src);
|
| + if (kSmiValueSize == 32) {
|
| + cmpq(dst, src);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + cmpl(dst, src);
|
| + }
|
| // If the result is zero or Smi::kMinValue, negation failed to create a smi.
|
| j(not_equal, on_smi_result, near_jump);
|
| }
|
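Note (not part of the patch): negation keeps its not_equal bailout at both smi
widths because exactly two tagged values are their own negation after truncation:
zero (whose negation is JavaScript's -0, which is not a smi) and the minimum smi.
For the 31-bit tagged form:

  #include <cstdint>

  bool NegationYieldsSmi31(int32_t tagged) {
    uint32_t negated = 0u - static_cast<uint32_t>(tagged);  // neg(dst)
    return negated != static_cast<uint32_t>(tagged);        // cmpl; not_equal
    // false only for tagged == 0 and tagged == 0x80000000 (the minimum smi)
  }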
| @@ -1650,13 +1882,31 @@ void MacroAssembler::SmiAdd(Register dst,
|
| ASSERT(!dst.is(src2));
|
| if (dst.is(src1)) {
|
| movq(kScratchRegister, src1);
|
| - addq(kScratchRegister, src2);
|
| + if (kSmiValueSize == 32) {
|
| + addq(kScratchRegister, src2);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + addl(kScratchRegister, src2);
|
| + }
|
| j(overflow, on_not_smi_result, near_jump);
|
| - movq(dst, kScratchRegister);
|
| + if (kSmiValueSize == 32) {
|
| + movq(dst, kScratchRegister);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + movsxlq(dst, kScratchRegister);
|
| + }
|
| } else {
|
| movq(dst, src1);
|
| - addq(dst, src2);
|
| + if (kSmiValueSize == 32) {
|
| + addq(dst, src2);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + addl(dst, src2);
|
| + }
|
| j(overflow, on_not_smi_result, near_jump);
|
| + if (kSmiValueSize == 31) {
|
| + movsxlq(dst, dst);
|
| + }
|
| }
|
| }
|
|
|
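Note (not part of the patch): the 31-bit add sequence (addl, j(overflow),
movsxlq) in rough C terms, assuming the GCC/Clang overflow builtins. Adding two
tagged words adds the payloads, and 32-bit overflow coincides exactly with
leaving the 31-bit smi range:

  #include <cstdint>

  bool SmiAdd31(int32_t a_tagged, int32_t b_tagged, int64_t* dst) {
    int32_t sum;
    if (__builtin_add_overflow(a_tagged, b_tagged, &sum)) {
      return false;  // j(overflow, on_not_smi_result, near_jump)
    }
    *dst = sum;      // movsxlq(dst, dst): canonical sign-extended result
    return true;
  }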
| @@ -1669,16 +1919,34 @@ void MacroAssembler::SmiAdd(Register dst,
|
| ASSERT_NOT_NULL(on_not_smi_result);
|
| if (dst.is(src1)) {
|
| movq(kScratchRegister, src1);
|
| - addq(kScratchRegister, src2);
|
| + if (kSmiValueSize == 32) {
|
| + addq(kScratchRegister, src2);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + addl(kScratchRegister, src2);
|
| + }
|
| j(overflow, on_not_smi_result, near_jump);
|
| - movq(dst, kScratchRegister);
|
| + if (kSmiValueSize == 32) {
|
| + movq(dst, kScratchRegister);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + movsxlq(dst, kScratchRegister);
|
| + }
|
| } else {
|
| ASSERT(!src2.AddressUsesRegister(dst));
|
| movq(dst, src1);
|
| - addq(dst, src2);
|
| + if (kSmiValueSize == 32) {
|
| + addq(dst, src2);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + addl(dst, src2);
|
| + }
|
| j(overflow, on_not_smi_result, near_jump);
|
| + if (kSmiValueSize == 31) {
|
| + movsxlq(dst, dst);
|
| }
|
| }
|
| +}
|
|
|
|
|
| void MacroAssembler::SmiAdd(Register dst,
|
| @@ -1708,13 +1976,27 @@ void MacroAssembler::SmiSub(Register dst,
|
| ASSERT_NOT_NULL(on_not_smi_result);
|
| ASSERT(!dst.is(src2));
|
| if (dst.is(src1)) {
|
| - cmpq(dst, src2);
|
| + if (kSmiValueSize == 32) {
|
| + cmpq(dst, src2);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + cmpl(dst, src2);
|
| + }
|
| j(overflow, on_not_smi_result, near_jump);
|
| subq(dst, src2);
|
| } else {
|
| - movq(dst, src1);
|
| - subq(dst, src2);
|
| + if (kSmiValueSize == 32) {
|
| + movq(dst, src1);
|
| + subq(dst, src2);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + movl(dst, src1);
|
| + subl(dst, src2);
|
| + }
|
| j(overflow, on_not_smi_result, near_jump);
|
| + if (kSmiValueSize == 31) {
|
| + movsxlq(dst, dst);
|
| + }
|
| }
|
| }
|
|
|
| @@ -1738,14 +2020,34 @@ void MacroAssembler::SmiSub(Register dst,
|
| Label::Distance near_jump) {
|
| ASSERT_NOT_NULL(on_not_smi_result);
|
| if (dst.is(src1)) {
|
| - movq(kScratchRegister, src2);
|
| - cmpq(src1, kScratchRegister);
|
| + if (kSmiValueSize == 32) {
|
| + movq(kScratchRegister, src2);
|
| + cmpq(src1, kScratchRegister);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| +      movl(kScratchRegister, src1);
|
| +      subl(kScratchRegister, src2);
|
| + }
|
| j(overflow, on_not_smi_result, near_jump);
|
| - subq(src1, kScratchRegister);
|
| + if (kSmiValueSize == 32) {
|
| + subq(src1, kScratchRegister);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + movsxlq(src1, kScratchRegister);
|
| + }
|
| } else {
|
| - movq(dst, src1);
|
| - subq(dst, src2);
|
| + if (kSmiValueSize == 32) {
|
| + movq(dst, src1);
|
| + subq(dst, src2);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + movl(dst, src1);
|
| + subl(dst, src2);
|
| + }
|
| j(overflow, on_not_smi_result, near_jump);
|
| + if (kSmiValueSize == 31) {
|
| + movsxlq(dst, dst);
|
| + }
|
| }
|
| }
|
|
|
| @@ -1776,8 +2078,14 @@ void MacroAssembler::SmiMul(Register dst,
|
| if (dst.is(src1)) {
|
| Label failure, zero_correct_result;
|
| movq(kScratchRegister, src1); // Create backup for later testing.
|
| - SmiToInteger64(dst, src1);
|
| - imul(dst, src2);
|
| + if (kSmiValueSize == 32) {
|
| + SmiToInteger64(dst, src1);
|
| + imul(dst, src2);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + SmiToInteger32(dst, src1);
|
| + imull(dst, src2);
|
| + }
|
| j(overflow, &failure, Label::kNear);
|
|
|
| // Check for negative zero result. If product is zero, and one
|
| @@ -1799,9 +2107,18 @@ void MacroAssembler::SmiMul(Register dst,
|
| Set(dst, 0);
|
|
|
| bind(&correct_result);
|
| + if (kSmiValueSize == 31) {
|
| + movsxlq(dst, dst);
|
| + }
|
| } else {
|
| - SmiToInteger64(dst, src1);
|
| - imul(dst, src2);
|
| + if (kSmiValueSize == 32) {
|
| + SmiToInteger64(dst, src1);
|
| + imul(dst, src2);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + SmiToInteger32(dst, src1);
|
| + imull(dst, src2);
|
| + }
|
| j(overflow, on_not_smi_result, near_jump);
|
| // Check for negative zero result. If product is zero, and one
|
| // argument is negative, go to slow case.
|
| @@ -1814,6 +2131,9 @@ void MacroAssembler::SmiMul(Register dst,
|
| xor_(kScratchRegister, src2);
|
| j(negative, on_not_smi_result, near_jump);
|
| bind(&correct_result);
|
| + if (kSmiValueSize == 31) {
|
| + movsxlq(dst, dst);
|
| + }
|
| }
|
| }
|
|
|
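Note (not part of the patch): the xor/j(negative) checks above exist because a
zero product with exactly one negative operand is JavaScript's -0, which has no
smi representation. A rough predicate for the slow-path condition:

  #include <cstdint>

  bool ProductNeedsSlowPath(int32_t a, int32_t b) {
    // Zero result with operands of opposite sign => the exact result is -0.
    return (a == 0 || b == 0) && ((a ^ b) < 0);
  }
  // ProductNeedsSlowPath(0, -5) is true: 0 * -5 must yield the heap number -0.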
| @@ -1846,7 +2166,12 @@ void MacroAssembler::SmiDiv(Register dst,
|
| // We overshoot a little and go to slow case if we divide min-value
|
| // by any negative value, not just -1.
|
| Label safe_div;
|
| - testl(rax, Immediate(0x7fffffff));
|
| + if (kSmiValueSize == 32) {
|
| + testl(rax, Immediate(0x7fffffff));
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + testl(rax, Immediate(0x3fffffff));
|
| + }
|
| j(not_zero, &safe_div, Label::kNear);
|
| testq(src2, src2);
|
| if (src1.is(rax)) {
|
| @@ -1940,8 +2265,13 @@ void MacroAssembler::SmiMod(Register dst,
|
| void MacroAssembler::SmiNot(Register dst, Register src) {
|
| ASSERT(!dst.is(kScratchRegister));
|
| ASSERT(!src.is(kScratchRegister));
|
| + if (kSmiValueSize == 32) {
|
| -  // Set tag and padding bits before negating, so that they are zero afterwards.
|
| -  movl(kScratchRegister, Immediate(~0));
|
| +    // Set tag and padding bits before negating, so they are zero afterwards.
|
| +    movl(kScratchRegister, Immediate(~0));
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + movl(kScratchRegister, Immediate(1));
|
| + }
|
| if (dst.is(src)) {
|
| xor_(dst, kScratchRegister);
|
| } else {
|
| @@ -2033,12 +2363,35 @@ void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
|
|
|
| void MacroAssembler::SmiShiftLeftConstant(Register dst,
|
| Register src,
|
| - int shift_value) {
|
| - if (!dst.is(src)) {
|
| - movq(dst, src);
|
| - }
|
| - if (shift_value > 0) {
|
| - shl(dst, Immediate(shift_value));
|
| + int shift_value,
|
| + SmiFunctionInvoker& invoker) {
|
| + if (kSmiValueSize == 32) {
|
| + if (!dst.is(src)) {
|
| + movq(dst, src);
|
| + }
|
| + if (shift_value > 0) {
|
| + shl(dst, Immediate(shift_value));
|
| + }
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + if (dst.is(src)) {
|
| + ASSERT(!invoker.reserve_source_operands());
|
| + } else {
|
| + movq(dst, src);
|
| + }
|
| + if (shift_value > 0) {
|
| + SmiToInteger32(dst, dst);
|
| + shll(dst, Immediate(shift_value));
|
| + if (invoker.on_not_smi_result() != NULL) {
|
| + JumpIfNotValidSmiValue(dst, invoker.on_not_smi_result());
|
| + } else {
|
| + Label done;
|
| + JumpIfValidSmiValue(dst, &done, Label::kNear);
|
| + invoker.Bailout();
|
| + bind(&done);
|
| + }
|
| + Integer32ToSmi(dst, dst);
|
| + }
|
| }
|
| }
|
|
|
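Note (not part of the patch): with 32-bit smis every int32 shift result is
representable, so SmiShiftLeftConstant could never fail; with 31 value bits a
result can be a perfectly good int32 yet overflow the smi range, hence the new
bailout plumbing. For instance:

  #include <cstdint>

  bool ShiftResultFitsSmi31(int32_t value, int shift) {
    int32_t r = static_cast<int32_t>(
        static_cast<uint32_t>(value) << (shift & 0x1f));
    return r >= -(1 << 30) && r < (1 << 30);  // JumpIfValidSmiValue
  }
  // ShiftResultFitsSmi31(1 << 29, 0) is true, but
  // ShiftResultFitsSmi31(1 << 29, 1) is false: 1 << 30 is not a valid smi.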
| @@ -2052,27 +2405,76 @@ void MacroAssembler::SmiShiftLogicalRightConstant(
|
| } else {
|
| movq(dst, src);
|
| if (shift_value == 0) {
|
| - testq(dst, dst);
|
| + if (kSmiValueSize == 32) {
|
| + testq(dst, dst);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + testl(dst, dst);
|
| + }
|
| j(negative, on_not_smi_result, near_jump);
|
| }
|
| - shr(dst, Immediate(shift_value + kSmiShift));
|
| - shl(dst, Immediate(kSmiShift));
|
| + if (kSmiValueSize == 32) {
|
| + shr(dst, Immediate(shift_value + kSmiShift));
|
| + shl(dst, Immediate(kSmiShift));
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + SmiToInteger32(dst, dst);
|
| + shrl(dst, Immediate(shift_value));
|
| + JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
|
| + shll(dst, Immediate(kSmiShift));
|
| + }
|
| }
|
| }
|
|
|
|
|
| void MacroAssembler::SmiShiftLeft(Register dst,
|
| Register src1,
|
| - Register src2) {
|
| - ASSERT(!dst.is(rcx));
|
| - // Untag shift amount.
|
| - if (!dst.is(src1)) {
|
| - movq(dst, src1);
|
| + Register src2,
|
| + Label* on_not_smi_result) {
|
| + if (kSmiValueSize == 32) {
|
| + ASSERT(!dst.is(rcx));
|
| + // Untag shift amount.
|
| + if (!dst.is(src1)) {
|
| + movq(dst, src1);
|
| + }
|
| + SmiToInteger32(rcx, src2);
|
| + // Shift amount specified by lower 5 bits, not six as the shl opcode.
|
| + and_(rcx, Immediate(0x1f));
|
| + shl_cl(dst);
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + ASSERT(!dst.is(kScratchRegister));
|
| + ASSERT(!src1.is(kScratchRegister));
|
| + ASSERT(!src2.is(kScratchRegister));
|
| + ASSERT(!dst.is(rcx));
|
| + Label result_ok;
|
| +
|
| + if (dst.is(src1)) {
|
| + UNIMPLEMENTED(); // Not used.
|
| + } else {
|
| + if (src1.is(rcx) || src2.is(rcx)) {
|
| + movq(kScratchRegister, rcx);
|
| + }
|
| + movq(dst, src1);
|
| + SmiToInteger32(dst, dst);
|
| + // Untag shift amount.
|
| + SmiToInteger32(rcx, src2);
|
| + // Shift amount specified by lower 5 bits, not six as the shl opcode.
|
| + andl(rcx, Immediate(0x1f));
|
| + shll_cl(dst);
|
| + JumpIfValidSmiValue(dst, &result_ok, Label::kNear);
|
| + if (src1.is(rcx) || src2.is(rcx)) {
|
| + if (src1.is(rcx)) {
|
| + movq(src1, kScratchRegister);
|
| + } else {
|
| + movq(src2, kScratchRegister);
|
| + }
|
| + }
|
| + jmp(on_not_smi_result);
|
| + bind(&result_ok);
|
| + Integer32ToSmi(dst, dst);
|
| + }
|
| }
|
| - SmiToInteger32(rcx, src2);
|
| - // Shift amount specified by lower 5 bits, not six as the shl opcode.
|
| - and_(rcx, Immediate(0x1f));
|
| - shl_cl(dst);
|
| }
|
|
|
|
|
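Note (not part of the patch): the and_/andl with 0x1f matches both the hardware
(a cl-count shift of a 32-bit operand uses cl mod 32) and ECMA-262, which reduces
the shift count modulo 32:

  #include <cstdint>

  int32_t JsShiftLeft(int32_t lhs, uint32_t count) {
    return static_cast<int32_t>(static_cast<uint32_t>(lhs) << (count & 0x1f));
  }
  // JsShiftLeft(1, 33) == 2, exactly like (1 << 33) in JavaScript.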
| @@ -2085,32 +2487,29 @@ void MacroAssembler::SmiShiftLogicalRight(Register dst,
|
| ASSERT(!src1.is(kScratchRegister));
|
| ASSERT(!src2.is(kScratchRegister));
|
| ASSERT(!dst.is(rcx));
|
| - // dst and src1 can be the same, because the one case that bails out
|
| - // is a shift by 0, which leaves dst, and therefore src1, unchanged.
|
| - if (src1.is(rcx) || src2.is(rcx)) {
|
| - movq(kScratchRegister, rcx);
|
| - }
|
| - if (!dst.is(src1)) {
|
| + Label result_ok;
|
| +
|
| + if (dst.is(src1)) {
|
| + UNIMPLEMENTED(); // Not used.
|
| + } else {
|
| + if (src1.is(rcx) || src2.is(rcx)) {
|
| + movq(kScratchRegister, rcx);
|
| + }
|
| movq(dst, src1);
|
| - }
|
| - SmiToInteger32(rcx, src2);
|
| - orl(rcx, Immediate(kSmiShift));
|
| - shr_cl(dst); // Shift is rcx modulo 0x1f + 32.
|
| - shl(dst, Immediate(kSmiShift));
|
| - testq(dst, dst);
|
| - if (src1.is(rcx) || src2.is(rcx)) {
|
| - Label positive_result;
|
| - j(positive, &positive_result, Label::kNear);
|
| - if (src1.is(rcx)) {
|
| - movq(src1, kScratchRegister);
|
| - } else {
|
| - movq(src2, kScratchRegister);
|
| + SmiToInteger32(dst, dst);
|
| + SmiToInteger32(rcx, src2);
|
| + shrl_cl(dst);
|
| + JumpIfUIntValidSmiValue(dst, &result_ok, Label::kNear);
|
| + if (src1.is(rcx) || src2.is(rcx)) {
|
| + if (src1.is(rcx)) {
|
| + movq(src1, kScratchRegister);
|
| + } else {
|
| + movq(src2, kScratchRegister);
|
| + }
|
| }
|
| - jmp(on_not_smi_result, near_jump);
|
| - bind(&positive_result);
|
| - } else {
|
| - // src2 was zero and src1 negative.
|
| - j(negative, on_not_smi_result, near_jump);
|
| + jmp(on_not_smi_result);
|
| + bind(&result_ok);
|
| + Integer32ToSmi(dst, dst);
|
| }
|
| }
|
|
|
| @@ -2131,9 +2530,15 @@ void MacroAssembler::SmiShiftArithmeticRight(Register dst,
|
| movq(dst, src1);
|
| }
|
| SmiToInteger32(rcx, src2);
|
| - orl(rcx, Immediate(kSmiShift));
|
| - sar_cl(dst); // Shift 32 + original rcx & 0x1f.
|
| - shl(dst, Immediate(kSmiShift));
|
| + if (kSmiValueSize == 32) {
|
| + orl(rcx, Immediate(kSmiShift));
|
| + sar_cl(dst); // Shift 32 + original rcx & 0x1f.
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + SmiToInteger32(dst, dst);
|
| + sarl_cl(dst);
|
| + }
|
| + Integer32ToSmi(dst, dst);
|
| if (src1.is(rcx)) {
|
| movq(src1, kScratchRegister);
|
| } else if (src2.is(rcx)) {
|
| @@ -2190,14 +2595,24 @@ SmiIndex MacroAssembler::SmiToIndex(Register dst,
|
| if (!dst.is(src)) {
|
| movq(dst, src);
|
| }
|
| - if (shift < kSmiShift) {
|
| - sar(dst, Immediate(kSmiShift - shift));
|
| + if (kSmiValueSize == 32) {
|
| + if (shift < kSmiShift) {
|
| + sar(dst, Immediate(kSmiShift - shift));
|
| + } else {
|
| + shl(dst, Immediate(shift - kSmiShift));
|
| + }
|
| + return SmiIndex(dst, times_1);
|
| } else {
|
| - shl(dst, Immediate(shift - kSmiShift));
|
| + ASSERT(kSmiValueSize == 31);
|
| + if (shift == times_1) {
|
| + sar(dst, Immediate(kSmiShift));
|
| + return SmiIndex(dst, times_1);
|
| + }
|
| + return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
|
| }
|
| - return SmiIndex(dst, times_1);
|
| }
|
|
|
| +
|
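Note (not part of the patch): this rewrite is one payoff of the 31-bit encoding.
A tagged smi already equals payload * 2, so for any scale above times_1 the
untagging shift disappears into the addressing mode, e.g. for shift == times_8:

  #include <cstdint>

  uintptr_t ElementAddress(uintptr_t base, int64_t tagged_smi) {
    // tagged_smi == payload * 2, so scaling by 4 (times_4) yields payload * 8.
    return base + static_cast<uintptr_t>(tagged_smi) * 4;
  }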
| SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
|
| Register src,
|
| int shift) {
|
| @@ -2207,21 +2622,88 @@ SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst,
|
| movq(dst, src);
|
| }
|
| neg(dst);
|
| - if (shift < kSmiShift) {
|
| - sar(dst, Immediate(kSmiShift - shift));
|
| + if (kSmiValueSize == 32) {
|
| + if (shift < kSmiShift) {
|
| + sar(dst, Immediate(kSmiShift - shift));
|
| + } else {
|
| + shl(dst, Immediate(shift - kSmiShift));
|
| + }
|
| + return SmiIndex(dst, times_1);
|
| } else {
|
| - shl(dst, Immediate(shift - kSmiShift));
|
| + ASSERT(kSmiValueSize == 31);
|
| + if (shift == times_1) {
|
| + sar(dst, Immediate(kSmiShift));
|
| + return SmiIndex(dst, times_1);
|
| + }
|
| + return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1));
|
| }
|
| - return SmiIndex(dst, times_1);
|
| }
|
|
|
|
|
| void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
|
| - ASSERT_EQ(0, kSmiShift % kBitsPerByte);
|
| - addl(dst, Operand(src, kSmiShift / kBitsPerByte));
|
| + if (kSmiValueSize == 32) {
|
| + ASSERT_EQ(0, kSmiShift % kBitsPerByte);
|
| + addl(dst, Operand(src, kSmiShift / kBitsPerByte));
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + SmiToInteger32(kScratchRegister, src);
|
| + addl(dst, kScratchRegister);
|
| + }
|
| +}
|
| +
|
| +
|
| +void MacroAssembler::Test(const Operand& src, Smi* source) {
|
| + if (kSmiValueSize == 32) {
|
| + testl(Operand(src, kIntSize), Immediate(source->value()));
|
| + } else {
|
| + ASSERT(kSmiValueSize == 31);
|
| + testl(src, SmiToImmediate(source));
|
| + }
|
| +}
|
| +
|
| +
|
| +void MacroAssembler::TestBit(const Operand& src, int bits) {
|
| + int byte_offset = bits / kBitsPerByte;
|
| + int bit_in_byte = bits & (kBitsPerByte - 1);
|
| + testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
|
| +}
|
| +
|
| +
|
| +void MacroAssembler::PushInt64AsTwoSmis(Register src, Register scratch) {
|
| + movq(scratch, src);
|
| + // High bits.
|
| + shr(src, Immediate(64 - kSmiShift));
|
| + shl(src, Immediate(kSmiShift));
|
| + push(src);
|
| + // Low bits.
|
| + shl(scratch, Immediate(kSmiShift));
|
| + push(scratch);
|
| +}
|
| +
|
| +
|
| +void MacroAssembler::PopInt64AsTwoSmis(Register dst, Register scratch) {
|
| + pop(scratch);
|
| + // Low bits.
|
| + shr(scratch, Immediate(kSmiShift));
|
| + pop(dst);
|
| + shr(dst, Immediate(kSmiShift));
|
| + // High bits.
|
| + shl(dst, Immediate(64 - kSmiShift));
|
| + or_(dst, scratch);
|
| }
|
|
|
|
|
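Note (not part of the patch): a round-trip sketch of the new
Push/PopInt64AsTwoSmis pair for kSmiShift == 1. The low 63 bits travel in one
tagged word and the remaining top bit in the other, so all 64 bits survive while
both words on the stack keep a clear smi tag:

  #include <cassert>
  #include <cstdint>

  void RoundTrip(uint64_t x) {
    uint64_t high_smi = (x >> 63) << 1;  // shr(src, 63); shl(src, 1); push
    uint64_t low_smi = x << 1;           // shl(scratch, 1); push
    uint64_t restored =
        (low_smi >> 1) | ((high_smi >> 1) << 63);  // pop; shr; shl; or_
    assert(restored == x);
  }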
| +bool MacroAssembler::IsUnsafeSmiOperator(Token::Value op) {
|
| + return (op == Token::ADD || op == Token::SUB || op == Token::MUL ||
|
| + op == Token::DIV || (kSmiValueSize == 31 && op == Token::SHL) ||
|
| + op == Token::SHR);
|
| +}
|
| +
|
| +
|
| +// End of smi tagging, untagging and tag detection.
|
| +// ----------------------------------------------------------------------------
|
| +
|
| +
|
| void MacroAssembler::JumpIfNotString(Register object,
|
| Register object_map,
|
| Label* not_string,
|
| @@ -2479,18 +2961,6 @@ void MacroAssembler::Drop(int stack_elements) {
|
| }
|
|
|
|
|
| -void MacroAssembler::Test(const Operand& src, Smi* source) {
|
| - testl(Operand(src, kIntSize), Immediate(source->value()));
|
| -}
|
| -
|
| -
|
| -void MacroAssembler::TestBit(const Operand& src, int bits) {
|
| - int byte_offset = bits / kBitsPerByte;
|
| - int bit_in_byte = bits & (kBitsPerByte - 1);
|
| - testb(Operand(src, byte_offset), Immediate(1 << bit_in_byte));
|
| -}
|
| -
|
| -
|
| void MacroAssembler::Jump(ExternalReference ext) {
|
| LoadAddress(kScratchRegister, ext);
|
| jmp(kScratchRegister);
|
|
|