| Index: src/ia32/codegen-ia32.cc
|
| ===================================================================
|
| --- src/ia32/codegen-ia32.cc (revision 3405)
|
| +++ src/ia32/codegen-ia32.cc (working copy)
|
| @@ -742,7 +742,9 @@
|
| // be either a smi or a heap number object (fp value). Requirements:
|
| // operand in register number. Returns operand as floating point number
|
| // on FPU stack.
|
| - static void LoadFloatOperand(MacroAssembler* masm, Register number);
|
| + static void LoadFloatOperand(MacroAssembler* masm,
|
| + Register number,
|
| + Register scratch);
|
| // Code pattern for loading floating point values. Input values must
|
| // be either smi or heap number objects (fp values). Requirements:
|
| // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as
|
| @@ -758,23 +760,41 @@
|
| // them into xmm0 and xmm1 if they are. Jump to label not_numbers if
|
| // either operand is not a number. Operands are in edx and eax.
|
| // Leaves operands unchanged.
|
| - static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers);
|
| + static void LoadSse2Operands(MacroAssembler* masm,
|
| + Register scratch,
|
| + Label* not_numbers);
|
| };
|
|
|
|
|
| const char* GenericBinaryOpStub::GetName() {
|
| + bool smi = (flags_ & NO_SMI_CODE_IN_STUB) == 0;
|
| switch (op_) {
|
| - case Token::ADD: return "GenericBinaryOpStub_ADD";
|
| - case Token::SUB: return "GenericBinaryOpStub_SUB";
|
| - case Token::MUL: return "GenericBinaryOpStub_MUL";
|
| - case Token::DIV: return "GenericBinaryOpStub_DIV";
|
| - case Token::BIT_OR: return "GenericBinaryOpStub_BIT_OR";
|
| - case Token::BIT_AND: return "GenericBinaryOpStub_BIT_AND";
|
| - case Token::BIT_XOR: return "GenericBinaryOpStub_BIT_XOR";
|
| - case Token::SAR: return "GenericBinaryOpStub_SAR";
|
| - case Token::SHL: return "GenericBinaryOpStub_SHL";
|
| - case Token::SHR: return "GenericBinaryOpStub_SHR";
|
| - default: return "GenericBinaryOpStub";
|
| + case Token::ADD:
|
| + return smi ? "GenericBinaryOpStub_ADD_Smi" : "GenericBinaryOpStub_ADD";
|
| + case Token::SUB:
|
| + return smi ? "GenericBinaryOpStub_SUB_Smi" : "GenericBinaryOpStub_SUB";
|
| + case Token::MUL:
|
| + return smi ? "GenericBinaryOpStub_MUL_Smi" : "GenericBinaryOpStub_MUL";
|
| + case Token::DIV:
|
| + return smi ? "GenericBinaryOpStub_DIV_Smi" : "GenericBinaryOpStub_DIV";
|
| + case Token::BIT_OR:
|
| + return smi ?
|
| + "GenericBinaryOpStub_BIT_OR_Smi" :
|
| + "GenericBinaryOpStub_BIT_OR";
|
| + case Token::BIT_AND:
|
| + return smi ?
|
| + "GenericBinaryOpStub_BIT_AND_Smi" :
|
| + "GenericBinaryOpStub_BIT_AND";
|
| + case Token::BIT_XOR:
|
| + return smi ?
|
| + "GenericBinaryOpStub_BIT_XOR_Smi" :
|
| + "GenericBinaryOpStub_BIT_XOR";
|
| + case Token::SAR:
|
| + return smi ? "GenericBinaryOpStub_SAR_Smi" : "GenericBinaryOpStub_SAR";
|
| + case Token::SHL:
|
| + return smi ? "GenericBinaryOpStub_SHL_Smi" : "GenericBinaryOpStub_SHL";
|
| + case Token::SHR:
|
| + return smi ? "GenericBinaryOpStub_SHR_Smi" : "GenericBinaryOpStub_SHR";
|
| + default:
|
| + return "GenericBinaryOpStub";
|
| }
|
| }
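
Not part of the diff, just an illustration of the new naming: assuming the usual GenericBinaryOpStub constructor and the GenericBinaryFlags values from the surrounding codegen code, two stubs that differ only in the smi flag now report distinct names (handy when reading profiler or --print-code output):

    // Hypothetical illustration -- constructor and enum names are assumed
    // from the surrounding codegen code, not part of this patch.
    GenericBinaryOpStub plain(Token::ADD, NO_OVERWRITE, NO_SMI_CODE_IN_STUB);
    GenericBinaryOpStub with_smi(Token::ADD, NO_OVERWRITE, NO_GENERIC_BINARY_FLAGS);
    // plain.GetName()     => "GenericBinaryOpStub_ADD"
    // with_smi.GetName()  => "GenericBinaryOpStub_ADD_Smi"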
|
|
|
| @@ -5154,11 +5174,14 @@
|
|
|
| // Get the number into an unaliased register and load it onto the
|
| // floating point stack still leaving one copy on the frame.
|
| + Result scratch = allocator_->Allocate();
|
| + ASSERT(scratch.is_valid());
|
| Result number = frame_->Pop();
|
| number.ToRegister();
|
| frame_->Spill(number.reg());
|
| - FloatingPointHelper::LoadFloatOperand(masm_, number.reg());
|
| + FloatingPointHelper::LoadFloatOperand(masm_, number.reg(), scratch.reg());
|
| number.Unuse();
|
| + scratch.Unuse();
|
|
|
| // Perform the operation on the number.
|
| switch (op) {
|
| @@ -5187,10 +5210,12 @@
|
| scratch2.reg(),
|
| call_runtime.entry_label());
|
| scratch1.Unuse();
|
| +
|
| + // Store the result in the allocated heap number.
|
| + __ GenerateHeapNumberValueAddress(scratch2.reg(), heap_number.reg());
|
| + __ fstp_d(Operand(scratch2.reg(), 0));
|
| scratch2.Unuse();
|
|
|
| - // Store the result in the allocated heap number.
|
| - __ fstp_d(FieldOperand(heap_number.reg(), HeapNumber::kValueOffset));
|
| // Replace the extra copy of the argument with the result.
|
| frame_->SetElementAt(0, &heap_number);
|
| done.Jump();
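
GenerateHeapNumberValueAddress is a new MacroAssembler helper that this file only calls; its definition is not part of this hunk. A minimal sketch of what it presumably does, assuming it simply materializes the untagged address of the HeapNumber's value field into the scratch register:

    // Hypothetical sketch -- the real helper lives in the macro assembler,
    // outside this file. It loads the address of the value field
    // (kValueOffset minus the heap-object tag) into |scratch|, so the double
    // can then be addressed as Operand(scratch, 0).
    void MacroAssembler::GenerateHeapNumberValueAddress(Register scratch,
                                                        Register heap_number) {
      lea(scratch, FieldOperand(heap_number, HeapNumber::kValueOffset));
    }

With the address materialized in a register, the loads and stores below switch from FieldOperand(reg, HeapNumber::kValueOffset) to Operand(scratch, 0), which is why each call site now has to supply a spare register.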
|
| @@ -6514,7 +6539,8 @@
|
| __ cmp(edx, Factory::heap_number_map());
|
| __ j(not_equal, &true_result);
|
| __ fldz();
|
| - __ fld_d(FieldOperand(eax, HeapNumber::kValueOffset));
|
| + __ GenerateHeapNumberValueAddress(edx, eax);
|
| + __ fld_d(Operand(edx, 0));
|
| __ FCmp();
|
| __ j(zero, &false_result);
|
| // Fall through to |true_result|.
|
| @@ -6826,7 +6852,7 @@
|
|
|
| if (CpuFeatures::IsSupported(SSE2)) {
|
| CpuFeatures::Scope use_sse2(SSE2);
|
| - FloatingPointHelper::LoadSse2Operands(masm, &call_runtime);
|
| + FloatingPointHelper::LoadSse2Operands(masm, ecx, &call_runtime);
|
|
|
| switch (op_) {
|
| case Token::ADD: __ addsd(xmm0, xmm1); break;
|
| @@ -6859,7 +6885,8 @@
|
| }
|
| default: UNREACHABLE();
|
| }
|
| - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
|
| + __ GenerateHeapNumberValueAddress(ecx, eax);
|
| + __ movdbl(Operand(ecx, 0), xmm0);
|
| GenerateReturn(masm);
|
| } else { // SSE2 not available, use FPU.
|
| FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
|
| @@ -6895,7 +6922,8 @@
|
| case Token::DIV: __ fdivp(1); break;
|
| default: UNREACHABLE();
|
| }
|
| - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
|
| + __ GenerateHeapNumberValueAddress(ecx, eax);
|
| + __ fstp_d(Operand(ecx, 0));
|
| GenerateReturn(masm);
|
| }
|
| }
|
| @@ -6992,7 +7020,8 @@
|
| // Store the result in the HeapNumber and return.
|
| __ mov(Operand(esp, 1 * kPointerSize), ebx);
|
| __ fild_s(Operand(esp, 1 * kPointerSize));
|
| - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
|
| + __ GenerateHeapNumberValueAddress(ecx, eax);
|
| + __ fstp_d(Operand(ecx, 0));
|
| GenerateReturn(masm);
|
| }
|
|
|
| @@ -7136,12 +7165,14 @@
|
|
|
|
|
| void FloatingPointHelper::LoadFloatOperand(MacroAssembler* masm,
|
| - Register number) {
|
| + Register number,
|
| + Register scratch) {
|
| Label load_smi, done;
|
|
|
| __ test(number, Immediate(kSmiTagMask));
|
| __ j(zero, &load_smi, not_taken);
|
| - __ fld_d(FieldOperand(number, HeapNumber::kValueOffset));
|
| + __ GenerateHeapNumberValueAddress(scratch, number);
|
| + __ fld_d(Operand(scratch, 0));
|
| __ jmp(&done);
|
|
|
| __ bind(&load_smi);
|
| @@ -7155,6 +7186,7 @@
|
|
|
|
|
| void FloatingPointHelper::LoadSse2Operands(MacroAssembler* masm,
|
| + Register scratch,
|
| Label* not_numbers) {
|
| Label load_smi_edx, load_eax, load_smi_eax, load_float_eax, done;
|
| // Load operand in edx into xmm0, or branch to not_numbers.
|
| @@ -7162,7 +7194,8 @@
|
| __ j(zero, &load_smi_edx, not_taken); // Argument in edx is a smi.
|
| __ cmp(FieldOperand(edx, HeapObject::kMapOffset), Factory::heap_number_map());
|
| __ j(not_equal, not_numbers); // Argument in edx is not a number.
|
| - __ movdbl(xmm0, FieldOperand(edx, HeapNumber::kValueOffset));
|
| + __ GenerateHeapNumberValueAddress(scratch, edx);
|
| + __ movdbl(xmm0, Operand(scratch, 0));
|
| __ bind(&load_eax);
|
| // Load operand in eax into xmm1, or branch to not_numbers.
|
| __ test(eax, Immediate(kSmiTagMask));
|
| @@ -7181,7 +7214,8 @@
|
| __ shl(eax, 1); // Retag smi for heap number overwriting test.
|
| __ jmp(&done);
|
| __ bind(&load_float_eax);
|
| - __ movdbl(xmm1, FieldOperand(eax, HeapNumber::kValueOffset));
|
| + __ GenerateHeapNumberValueAddress(scratch, eax);
|
| + __ movdbl(xmm1, Operand(scratch, 0));
|
| __ bind(&done);
|
| }
|
|
|
| @@ -7192,13 +7226,15 @@
|
| __ mov(scratch, Operand(esp, 2 * kPointerSize));
|
| __ test(scratch, Immediate(kSmiTagMask));
|
| __ j(zero, &load_smi_1, not_taken);
|
| - __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
|
| + __ GenerateHeapNumberValueAddress(scratch, scratch);
|
| + __ fld_d(Operand(scratch, 0));
|
| __ bind(&done_load_1);
|
|
|
| __ mov(scratch, Operand(esp, 1 * kPointerSize));
|
| __ test(scratch, Immediate(kSmiTagMask));
|
| __ j(zero, &load_smi_2, not_taken);
|
| - __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset));
|
| + __ GenerateHeapNumberValueAddress(scratch, scratch);
|
| + __ fld_d(Operand(scratch, 0));
|
| __ jmp(&done);
|
|
|
| __ bind(&load_smi_1);
|
| @@ -7285,19 +7321,22 @@
|
| __ cmp(edx, Factory::heap_number_map());
|
| __ j(not_equal, &slow);
|
| if (overwrite_) {
|
| - __ mov(edx, FieldOperand(eax, HeapNumber::kExponentOffset));
|
| + __ GenerateHeapNumberValueAddress(ebx, eax);
|
| + __ mov(edx, Operand(ebx, HeapNumber::kExponentRelativeOffset));
|
| __ xor_(edx, HeapNumber::kSignMask); // Flip sign.
|
| - __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), edx);
|
| + __ mov(Operand(ebx, HeapNumber::kExponentRelativeOffset), edx);
|
| } else {
|
| __ mov(edx, Operand(eax));
|
| // edx: operand
|
| __ AllocateHeapNumber(eax, ebx, ecx, &undo);
|
| // eax: allocated 'empty' number
|
| - __ mov(ecx, FieldOperand(edx, HeapNumber::kExponentOffset));
|
| + __ GenerateHeapNumberValueAddress(edx, edx);
|
| + __ GenerateHeapNumberValueAddress(ebx, eax);
|
| + __ mov(ecx, Operand(edx, HeapNumber::kExponentRelativeOffset));
|
| __ xor_(ecx, HeapNumber::kSignMask); // Flip sign.
|
| - __ mov(FieldOperand(eax, HeapNumber::kExponentOffset), ecx);
|
| - __ mov(ecx, FieldOperand(edx, HeapNumber::kMantissaOffset));
|
| - __ mov(FieldOperand(eax, HeapNumber::kMantissaOffset), ecx);
|
| + __ mov(Operand(ebx, HeapNumber::kExponentRelativeOffset), ecx);
|
| + __ mov(ecx, Operand(edx, HeapNumber::kMantissaRelativeOffset));
|
| + __ mov(Operand(ebx, HeapNumber::kMantissaRelativeOffset), ecx);
|
| }
|
|
|
| __ bind(&done);
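
kExponentRelativeOffset and kMantissaRelativeOffset are likewise new constants that are not defined in this file. Assuming they are offsets relative to the value address produced by GenerateHeapNumberValueAddress (rather than to the tagged object pointer), plausible definitions are:

    // Hypothetical sketch -- assumed values, expressed via the existing
    // absolute offsets. On ia32 the mantissa word precedes the
    // exponent/sign word within the 8-byte IEEE 754 value.
    static const int kMantissaRelativeOffset =
        HeapNumber::kMantissaOffset - HeapNumber::kValueOffset;  // == 0
    static const int kExponentRelativeOffset =
        HeapNumber::kExponentOffset - HeapNumber::kValueOffset;  // == 4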
|
| @@ -7448,7 +7487,8 @@
|
| // all bits in the mask are set. We only need to check the word
|
| // that contains the exponent and high bit of the mantissa.
|
| ASSERT_NE(0, (kQuietNaNHighBitsMask << 1) & 0x80000000u);
|
| - __ mov(edx, FieldOperand(edx, HeapNumber::kExponentOffset));
|
| + __ GenerateHeapNumberValueAddress(edx, edx);
|
| + __ mov(edx, Operand(edx, HeapNumber::kExponentRelativeOffset));
|
| __ xor_(eax, Operand(eax));
|
| // Shift value and mask so kQuietNaNHighBitsMask applies to topmost bits.
|
| __ add(edx, Operand(edx));
|
| @@ -7552,7 +7592,7 @@
|
| CpuFeatures::Scope use_sse2(SSE2);
|
| CpuFeatures::Scope use_cmov(CMOV);
|
|
|
| - FloatingPointHelper::LoadSse2Operands(masm, &check_for_symbols);
|
| + FloatingPointHelper::LoadSse2Operands(masm, ecx, &check_for_symbols);
|
| __ comisd(xmm0, xmm1);
|
|
|
| // Jump to builtin for NaN.
|
|
|