| Index: src/x64/code-stubs-x64.cc
|
| diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
|
| index 76479d3e2ebec5dc63fa69a001f818fae339b7fc..70e8868ceeb8f5061d4b1d78d7f0d75b49c072ba 100644
|
| --- a/src/x64/code-stubs-x64.cc
|
| +++ b/src/x64/code-stubs-x64.cc
|
| @@ -603,6 +603,15 @@ class FloatingPointHelper : public AllStatic {
|
| Label* on_success,
|
| Label* on_not_smis,
|
| ConvertUndefined convert_undefined);
|
| +
|
| + // Checks that |operand| has an int32 value. If |int32_result| is not the
|
| + // same register as |scratch|, it will contain that int32 value on return.
|
| + static void CheckSSE2OperandIsInt32(MacroAssembler* masm,
|
| + Label* non_int32,
|
| + XMMRegister operand,
|
| + Register int32_result,
|
| + Register scratch,
|
| + XMMRegister xmm_scratch);
|
| };
|
|
|
|
|
| @@ -721,13 +730,13 @@ static void BinaryOpStub_GenerateSmiCode(
|
| // Arguments to BinaryOpStub are in rdx and rax.
|
| const Register left = rdx;
|
| const Register right = rax;
|
| + const Register shift_op_result = (SmiValuesAre32Bits() || op == Token::SAR) ?
|
| + left : r9;
|
|
|
| - // We only generate heapnumber answers for overflowing calculations
|
| - // for the four basic arithmetic operations and logical right shift by 0.
|
| + // We only generate heapnumber answers for overflowing calculations.
|
| bool generate_inline_heapnumber_results =
|
| (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) &&
|
| - (op == Token::ADD || op == Token::SUB ||
|
| - op == Token::MUL || op == Token::DIV || op == Token::SHR);
|
| + MacroAssembler::IsUnsafeSmiOperator(op);
|
|
|
| // Smi check of both operands. If op is BIT_OR, the check is delayed
|
| // until after the OR operation.
|
| @@ -744,14 +753,15 @@ static void BinaryOpStub_GenerateSmiCode(
|
| __ bind(&smi_values);
|
| // Perform the operation.
|
| Comment perform_smi(masm, "-- Perform smi operation");
|
| + MacroAssembler::StrictSmiInstructionWrapper wrapper(masm, &use_fp_on_smis);
|
| switch (op) {
|
| case Token::ADD:
|
| ASSERT(right.is(rax));
|
| - __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative.
|
| + __ SmiAdd(right, right, left, wrapper); // ADD is commutative.
|
| break;
|
|
|
| case Token::SUB:
|
| - __ SmiSub(left, left, right, &use_fp_on_smis);
|
| + __ SmiSub(left, left, right, wrapper);
|
| __ movq(rax, left);
|
| break;
|
|
|
| @@ -790,18 +800,18 @@ static void BinaryOpStub_GenerateSmiCode(
|
| break;
|
|
|
| case Token::SHL:
|
| - __ SmiShiftLeft(left, left, right);
|
| - __ movq(rax, left);
|
| + __ SmiShiftLeft(shift_op_result, left, right, &use_fp_on_smis);
|
| + __ movq(rax, shift_op_result);
|
| break;
|
|
|
| case Token::SAR:
|
| - __ SmiShiftArithmeticRight(left, left, right);
|
| - __ movq(rax, left);
|
| + __ SmiShiftArithmeticRight(shift_op_result, left, right);
|
| + __ movq(rax, shift_op_result);
|
| break;
|
|
|
| case Token::SHR:
|
| - __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis);
|
| - __ movq(rax, left);
|
| + __ SmiShiftLogicalRight(shift_op_result, left, right, &use_fp_on_smis);
|
| + __ movq(rax, shift_op_result);
|
| break;
|
|
|
| default:
|
| @@ -826,16 +836,21 @@ static void BinaryOpStub_GenerateSmiCode(
|
| __ AllocateHeapNumber(rcx, rbx, slow);
|
| Comment perform_float(masm, "-- Perform float operation on smis");
|
| if (op == Token::SHR) {
|
| - __ SmiToInteger32(left, left);
|
| - __ cvtqsi2sd(xmm0, left);
|
| + if (SmiValuesAre32Bits()) {
|
| + __ SmiToInteger32(shift_op_result, shift_op_result);
|
| + }
|
| + __ cvtqsi2sd(xmm0, shift_op_result);
|
| + } else if (op == Token::SHL) {
|
| + ASSERT(SmiValuesAre31Bits());
|
| + __ cvtlsi2sd(xmm0, shift_op_result);
|
| } else {
|
| FloatingPointHelper::LoadSSE2SmiOperands(masm);
|
| switch (op) {
|
| - case Token::ADD: __ addsd(xmm0, xmm1); break;
|
| - case Token::SUB: __ subsd(xmm0, xmm1); break;
|
| - case Token::MUL: __ mulsd(xmm0, xmm1); break;
|
| - case Token::DIV: __ divsd(xmm0, xmm1); break;
|
| - default: UNREACHABLE();
|
| + case Token::ADD: __ addsd(xmm0, xmm1); break;
|
| + case Token::SUB: __ subsd(xmm0, xmm1); break;
|
| + case Token::MUL: __ mulsd(xmm0, xmm1); break;
|
| + case Token::DIV: __ divsd(xmm0, xmm1); break;
|
| + default: UNREACHABLE();
|
| }
|
| }
|
| __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0);
|
| @@ -876,11 +891,14 @@ static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
|
| OverwriteMode mode);
|
|
|
|
|
| -static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
|
| - Label* allocation_failure,
|
| - Label* non_numeric_failure,
|
| - Token::Value op,
|
| - OverwriteMode mode) {
|
| +static void BinaryOpStub_GenerateFloatingPointCode(
|
| + MacroAssembler* masm,
|
| + Label* allocation_failure,
|
| + Label* non_numeric_failure,
|
| + Token::Value op,
|
| + BinaryOpIC::TypeInfo result_type,
|
| + Label* non_int32_failure,
|
| + OverwriteMode mode) {
|
| switch (op) {
|
| case Token::ADD:
|
| case Token::SUB:
|
| @@ -895,6 +913,14 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
|
| case Token::DIV: __ divsd(xmm0, xmm1); break;
|
| default: UNREACHABLE();
|
| }
|
| +
|
| + if (SmiValuesAre31Bits() && non_int32_failure != NULL) {
|
| + if (result_type <= BinaryOpIC::INT32) {
|
| + FloatingPointHelper::CheckSSE2OperandIsInt32(
|
| + masm, non_int32_failure, xmm0, rcx, rcx, xmm2);
|
| + }
|
| + }
|
| +
|
| BinaryOpStub_GenerateHeapResultAllocation(
|
| masm, allocation_failure, mode);
|
| __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
|
| @@ -912,8 +938,12 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
|
| case Token::SAR:
|
| case Token::SHL:
|
| case Token::SHR: {
|
| - Label non_smi_shr_result;
|
| + Label non_smi_result;
|
| Register heap_number_map = r9;
|
| + Register saved_right = r11;
|
| + if (SmiValuesAre31Bits() || (SmiValuesAre32Bits() && op == Token::SHR)) {
|
| + __ movq(saved_right, rax);
|
| + }
|
| __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
|
| FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure,
|
| heap_number_map);
|
| @@ -923,48 +953,79 @@ static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm,
|
| case Token::BIT_XOR: __ xorl(rax, rcx); break;
|
| case Token::SAR: __ sarl_cl(rax); break;
|
| case Token::SHL: __ shll_cl(rax); break;
|
| - case Token::SHR: {
|
| - __ shrl_cl(rax);
|
| - // Check if result is negative. This can only happen for a shift
|
| - // by zero.
|
| - __ testl(rax, rax);
|
| - __ j(negative, &non_smi_shr_result);
|
| - break;
|
| - }
|
| + case Token::SHR: __ shrl_cl(rax); break;
|
| default: UNREACHABLE();
|
| }
|
| - STATIC_ASSERT(kSmiValueSize == 32);
|
| +
|
| + if (op == Token::SHR) {
|
| + __ JumpIfUIntNotValidSmiValue(rax, &non_smi_result, Label::kNear);
|
| + } else {
|
| + if (SmiValuesAre31Bits()) {
|
| + __ JumpIfNotValidSmiValue(rax, &non_smi_result, Label::kNear);
|
| + }
|
| + }
|
| +
|
| // Tag smi result and return.
|
| __ Integer32ToSmi(rax, rax);
|
| __ Ret();
|
|
|
| - // Logical shift right can produce an unsigned int32 that is not
|
| - // an int32, and so is not in the smi range. Allocate a heap number
|
| - // in that case.
|
| - if (op == Token::SHR) {
|
| - __ bind(&non_smi_shr_result);
|
| + if (SmiValuesAre31Bits() || (SmiValuesAre32Bits() && op == Token::SHR)) {
|
| + __ bind(&non_smi_result);
|
| + __ movl(rbx, rax); // rbx holds result value.
|
| Label allocation_failed;
|
| - __ movl(rbx, rax); // rbx holds result value (uint32 value as int64).
|
| - // Allocate heap number in new space.
|
| - // Not using AllocateHeapNumber macro in order to reuse
|
| - // already loaded heap_number_map.
|
| - __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed,
|
| - TAG_OBJECT);
|
| - // Set the map.
|
| - __ AssertRootValue(heap_number_map,
|
| - Heap::kHeapNumberMapRootIndex,
|
| - kHeapNumberMapRegisterClobbered);
|
| - __ movq(FieldOperand(rax, HeapObject::kMapOffset),
|
| - heap_number_map);
|
| - __ cvtqsi2sd(xmm0, rbx);
|
| + Label skip_allocation;
|
| + // Allocate heap number in new space if we could not overwrite
|
| + // the left or right operand. Not using AllocateHeapNumber macro
|
| + // in order to reuse already loaded heap_number_map.
|
| + switch (mode) {
|
| + case OVERWRITE_LEFT:
|
| + __ movq(rax, rdx);
|
| + __ JumpIfNotSmi(rax, &skip_allocation);
|
| + __ Allocate(HeapNumber::kSize, rax, no_reg, no_reg,
|
| + &allocation_failed, TAG_OBJECT);
|
| + // Set the map.
|
| + __ AssertRootValue(heap_number_map,
|
| + Heap::kHeapNumberMapRootIndex,
|
| + kHeapNumberMapRegisterClobbered);
|
| + __ movq(FieldOperand(rax, HeapObject::kMapOffset),
|
| + heap_number_map);
|
| + __ bind(&skip_allocation);
|
| + break;
|
| + case OVERWRITE_RIGHT:
|
| + __ movq(rax, saved_right);
|
| + __ JumpIfNotSmi(rax, &skip_allocation);
|
| + // Fall through!
|
| + case NO_OVERWRITE:
|
| + __ Allocate(HeapNumber::kSize, rax, no_reg, no_reg,
|
| + &allocation_failed, TAG_OBJECT);
|
| + // Set the map.
|
| + __ AssertRootValue(heap_number_map,
|
| + Heap::kHeapNumberMapRootIndex,
|
| + kHeapNumberMapRegisterClobbered);
|
| + __ movq(FieldOperand(rax, HeapObject::kMapOffset),
|
| + heap_number_map);
|
| + __ bind(&skip_allocation);
|
| + break;
|
| + default: UNREACHABLE();
|
| + }
|
| +
|
| + if (op == Token::SHR) {
|
| + // Logical shift right can produce an unsigned int32 that is not
|
| + // an int32, or an int32 not in the smi range for 31-bit smi values.
|
| + __ cvtqsi2sd(xmm0, rbx);
|
| + } else {
|
| + // All other operations return a signed int32, so we
|
| + // use cvtlsi2sd here to retain the sign bit.
|
| + ASSERT(SmiValuesAre31Bits());
|
| + __ cvtlsi2sd(xmm0, rbx);
|
| + }
|
| __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0);
|
| __ Ret();
|
|
|
| __ bind(&allocation_failed);
|
| // We need tagged values in rdx and rax for the following code,
|
| - // not int32 in rax and rcx.
|
| - __ Integer32ToSmi(rax, rcx);
|
| - __ Integer32ToSmi(rdx, rbx);
|
| + // rdx is unchanged and rax was saved at the beginning.
|
| + __ movq(rax, saved_right);
|
| __ jmp(allocation_failure);
|
| }
|
| break;
|
| @@ -1061,9 +1122,24 @@ void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
|
|
|
|
|
| void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
|
| - // The int32 case is identical to the Smi case. We avoid creating this
|
| - // ic state on x64.
|
| - UNREACHABLE();
|
| + ASSERT(SmiValuesAre31Bits());
|
| + ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
|
| +
|
| + Label gc_required, not_number, not_int32;
|
| + BinaryOpStub_GenerateFloatingPointCode(masm, &gc_required, ¬_number, op_,
|
| + result_type_, ¬_int32, mode_);
|
| +
|
| + __ bind(¬_number);
|
| + __ bind(¬_int32);
|
| + GenerateTypeTransition(masm);
|
| +
|
| + __ bind(&gc_required);
|
| + {
|
| + FrameScope scope(masm, StackFrame::INTERNAL);
|
| + GenerateRegisterArgsPush(masm);
|
| + GenerateCallRuntime(masm);
|
| + }
|
| + __ Ret();
|
| }
|
|
|
|
|
| @@ -1169,7 +1245,7 @@ void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
|
| }
|
|
|
| BinaryOpStub_GenerateFloatingPointCode(
|
| - masm, &gc_required, ¬_number, op_, mode_);
|
| + masm, &gc_required, ¬_number, op_, result_type_, NULL, mode_);
|
|
|
| __ bind(¬_number);
|
| GenerateTypeTransition(masm);
|
| @@ -1191,7 +1267,8 @@ void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
|
| masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_);
|
|
|
| BinaryOpStub_GenerateFloatingPointCode(
|
| - masm, &call_runtime, &call_string_add_or_runtime, op_, mode_);
|
| + masm, &call_runtime, &call_string_add_or_runtime, op_,
|
| + result_type_, NULL, mode_);
|
|
|
| __ bind(&call_string_add_or_runtime);
|
| if (op_ == Token::ADD) {
|
| @@ -1742,6 +1819,7 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
|
| __ movq(kScratchRegister, xmm1);
|
| __ cmpq(scratch2, kScratchRegister);
|
| __ j(not_equal, on_not_smis);
|
| + __ JumpIfNotValidSmiValue(smi_result, on_not_smis);
|
| __ Integer32ToSmi(first, smi_result);
|
|
|
| __ bind(&first_done);
|
| @@ -1761,6 +1839,7 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
|
| __ movq(kScratchRegister, xmm1);
|
| __ cmpq(scratch2, kScratchRegister);
|
| __ j(not_equal, on_not_smis);
|
| + __ JumpIfNotValidSmiValue(smi_result, on_not_smis);
|
| __ Integer32ToSmi(second, smi_result);
|
| if (on_success != NULL) {
|
| __ jmp(on_success);
|
| @@ -1787,6 +1866,23 @@ void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm,
|
| }
|
|
|
|
|
| +void FloatingPointHelper::CheckSSE2OperandIsInt32(MacroAssembler* masm,
|
| + Label* non_int32,
|
| + XMMRegister operand,
|
| + Register int32_result,
|
| + Register scratch,
|
| + XMMRegister xmm_scratch) {
|
| + __ cvttsd2si(int32_result, operand);
|
| + __ cvtlsi2sd(xmm_scratch, int32_result);
|
| + __ pcmpeqd(xmm_scratch, operand);
|
| + __ movmskps(scratch, xmm_scratch);
|
| + // The two least significant bits should both be set.
|
| + __ notl(scratch);
|
| + __ testl(scratch, Immediate(3));
|
| + __ j(not_zero, non_int32);
|
| +}
|
| +
|
| +
|
| void MathPowStub::Generate(MacroAssembler* masm) {
|
| const Register exponent = rdx;
|
| const Register base = rax;
|
| @@ -4569,8 +4665,9 @@ void StringAddStub::Generate(MacroAssembler* masm) {
|
| __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset));
|
|
|
| // Look at the length of the result of adding the two strings.
|
| - STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2);
|
| - __ SmiAdd(rbx, rbx, rcx);
|
| + MacroAssembler::StrictSmiInstructionWrapper wrapper(masm, &call_runtime);
|
| + __ SmiAdd(rbx, rbx, rcx, wrapper);
|
| +
|
| // Use the string table when adding two one character strings, as it
|
| // helps later optimizations to return an internalized string here.
|
| __ SmiCompare(rbx, Smi::FromInt(2));
|
|
|