Chromium Code Reviews| Index: src/ia32/codegen-ia32.cc |
| =================================================================== |
| --- src/ia32/codegen-ia32.cc (revision 3626) |
| +++ src/ia32/codegen-ia32.cc (working copy) |
| @@ -741,6 +741,12 @@ |
| } |
| +enum ArgsLocation { |
| + ARGS_ON_STACK, |
| + ARGS_IN_REGISTERS |
| +}; |
| + |
| + |
| class FloatingPointHelper : public AllStatic { |
| public: |
| // Code pattern for loading a floating point value. Input value must |
| @@ -750,9 +756,18 @@ |
| static void LoadFloatOperand(MacroAssembler* masm, Register number); |
| // Code pattern for loading floating point values. Input values must |
| // be either smi or heap number objects (fp values). Requirements: |
| - // operand_1 on TOS+1 , operand_2 on TOS+2; Returns operands as |
| - // floating point numbers on FPU stack. |
| - static void LoadFloatOperands(MacroAssembler* masm, Register scratch); |
| + // operand_1 on TOS+1 or in edx, operand_2 on TOS+2 or in eax, |
| + // Returns operands as floating point numbers on FPU stack. |
| + static void LoadFloatOperands(MacroAssembler* masm, |
| + Register scratch, |
| + ArgsLocation args_location = ARGS_ON_STACK); |
| + |
| + // Similar to LoadFloatOperand but assumes that both operands are smis. |
| + // Accepts operands on stack or in eax,ebx. |
|
Mads Ager (chromium)
2010/01/22 12:14:26
Space after comma.
vladislav.kaznacheev
2010/01/22 14:09:42
Done.
|
| + static void LoadFloatSmis(MacroAssembler* masm, |
| + Register scratch, |
| + ArgsLocation args_location); |
| + |
| // Test if operands are smi or number objects (fp). Requirements: |
| // operand_1 in eax, operand_2 in edx; falls through on float |
| // operands, jumps to the non_float label otherwise. |
| @@ -769,14 +784,43 @@ |
| // either operand is not a number. Operands are in edx and eax. |
| // Leaves operands unchanged. |
| static void LoadSse2Operands(MacroAssembler* masm, Label* not_numbers); |
| + |
| + // Similar to LoadSse2Operands but assumes that both operands are smis. |
| + // Accepts operands on stack or in eax,ebx. |
|
Mads Ager (chromium)
2010/01/22 12:14:26
Space after comma.
vladislav.kaznacheev
2010/01/22 14:09:42
Done.
|
| + static void LoadSse2Smis(MacroAssembler* masm, |
| + Register scratch, |
| + ArgsLocation args_location); |
| }; |
| +const char* GenericBinaryOpStub::GetFastCaseName() { |
| + switch (fast_case_) { |
| + case BINARY_OP_STUB_UNINITIALIZED: return "Uninit"; |
| + case BINARY_OP_STUB_GENERIC: return "Universal"; break; |
|
Mads Ager (chromium)
2010/01/22 12:14:26
"Universal" -> "Generic"
vladislav.kaznacheev
2010/01/22 14:09:42
Done.
|
| + case BINARY_OP_STUB_FAST_FP: return "FastFP"; break; |
| + case BINARY_OP_STUB_FAST_STRING_ADD: return "FastStringAdd"; break; |
| + case BINARY_OP_STUB_FAST_RUNTIME: return "FastRuntime"; break; |
| + default: return "UnknownCase"; |
| + } |
| +} |
| + |
| + |
| const char* GenericBinaryOpStub::GetName() { |
| if (name_ != NULL) return name_; |
| const int kMaxNameLength = 100; |
| name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); |
| if (name_ == NULL) return "OOM"; |
| + char static_kind[kMaxNameLength]; |
| + FormatStaticParameters(static_kind, kMaxNameLength); |
| + OS::SNPrintF(Vector<char>(static_kind, kMaxNameLength), |
| + "GenericBinaryOpStub_%s_%s", |
| + static_kind, |
| + GetFastCaseName()); |
| + return name_; |
| +} |
| + |
| + |
| +void GenericBinaryOpStub::FormatStaticParameters(char* name, int max_length) { |
| const char* op_name = Token::Name(op_); |
| const char* overwrite_name; |
| switch (mode_) { |
| @@ -785,15 +829,13 @@ |
| case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; |
| default: overwrite_name = "UnknownOverwrite"; break; |
| } |
| - |
| - OS::SNPrintF(Vector<char>(name_, kMaxNameLength), |
| - "GenericBinaryOpStub_%s_%s%s_%s%s", |
| + OS::SNPrintF(Vector<char>(name, max_length), |
| + "%s_%s%s_%s%s", |
| op_name, |
| overwrite_name, |
| (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "", |
| args_in_registers_ ? "RegArgs" : "StackArgs", |
| args_reversed_ ? "_R" : ""); |
| - return name_; |
| } |
| @@ -1353,12 +1395,12 @@ |
| __ mov(answer.reg(), left->reg()); |
| switch (op) { |
| case Token::ADD: |
| - __ add(answer.reg(), Operand(right->reg())); // Add optimistically. |
| + __ add(answer.reg(), Operand(right->reg())); |
| deferred->Branch(overflow); |
| break; |
| case Token::SUB: |
| - __ sub(answer.reg(), Operand(right->reg())); // Subtract optimistically. |
| + __ sub(answer.reg(), Operand(right->reg())); |
| deferred->Branch(overflow); |
| break; |
| @@ -7063,6 +7105,16 @@ |
| void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { |
| + if (HasArgumentsInRegisters()) { |
| + __ mov(ebx, eax); |
| + __ mov(eax, edx); |
| + } else { |
| + __ mov(ebx, Operand(esp, 1 * kPointerSize)); |
| + __ mov(eax, Operand(esp, 2 * kPointerSize)); |
| + } |
| + |
| + Label not_smis, not_smis_or_overflow, use_fp_on_smis; |
| + |
| // Perform fast-case smi code for the operation (eax <op> ebx) and |
| // leave result in register eax. |
| @@ -7074,12 +7126,12 @@ |
| switch (op_) { |
| case Token::ADD: |
| __ add(eax, Operand(ebx)); // add optimistically |
| - __ j(overflow, slow, not_taken); |
| + __ j(overflow, ¬_smis_or_overflow, not_taken); |
| break; |
| case Token::SUB: |
| __ sub(eax, Operand(ebx)); // subtract optimistically |
| - __ j(overflow, slow, not_taken); |
| + __ j(overflow, ¬_smis_or_overflow, not_taken); |
| break; |
| case Token::DIV: |
| @@ -7088,7 +7140,7 @@ |
| __ cdq(); |
| // Check for 0 divisor. |
| __ test(ebx, Operand(ebx)); |
| - __ j(zero, slow, not_taken); |
| + __ j(zero, ¬_smis_or_overflow, not_taken); |
| break; |
| default: |
| @@ -7099,7 +7151,7 @@ |
| // Perform the actual smi check. |
| ASSERT(kSmiTag == 0); // adjust zero check if not the case |
| __ test(ecx, Immediate(kSmiTagMask)); |
| - __ j(not_zero, slow, not_taken); |
| + __ j(not_zero, ¬_smis, not_taken); |
| switch (op_) { |
| case Token::ADD: |
| @@ -7115,9 +7167,9 @@ |
| // Do multiplication. |
| __ imul(eax, Operand(ebx)); // multiplication of smis; result in eax |
| // Go slow on overflows. |
| - __ j(overflow, slow, not_taken); |
| + __ j(overflow, &use_fp_on_smis, not_taken); |
| // Check for negative zero result. |
| - __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y |
| + __ NegativeZeroTest(eax, ecx, &use_fp_on_smis); // use ecx = x | y |
| break; |
| case Token::DIV: |
| @@ -7128,12 +7180,12 @@ |
| // by idiv instruction. |
| ASSERT(kSmiTag == 0 && kSmiTagSize == 1); |
| __ cmp(eax, 0x40000000); |
| - __ j(equal, slow); |
| + __ j(equal, &use_fp_on_smis); |
| // Check for negative zero result. |
| - __ NegativeZeroTest(eax, ecx, slow); // use ecx = x | y |
| + __ NegativeZeroTest(eax, ecx, &use_fp_on_smis); // use ecx = x | y |
| // Check that the remainder is zero. |
| __ test(edx, Operand(edx)); |
| - __ j(not_zero, slow); |
| + __ j(not_zero, &use_fp_on_smis); |
| // Tag the result and store it in register eax. |
| __ SmiTag(eax); |
| break; |
| @@ -7188,7 +7240,7 @@ |
| __ shl_cl(eax); |
| // Check that the *signed* result fits in a smi. |
| __ cmp(eax, 0xc0000000); |
| - __ j(sign, slow, not_taken); |
| + __ j(sign, &use_fp_on_smis, not_taken); |
| break; |
| default: |
| UNREACHABLE(); |
| @@ -7201,43 +7253,35 @@ |
| UNREACHABLE(); |
| break; |
| } |
| -} |
| + GenerateReturn(masm); |
| - |
| -void GenericBinaryOpStub::Generate(MacroAssembler* masm) { |
| - Label call_runtime; |
| - |
| - __ IncrementCounter(&Counters::generic_binary_stub_calls, 1); |
| - |
| - // Generate fast case smi code if requested. This flag is set when the fast |
| - // case smi code is not generated by the caller. Generating it here will speed |
| - // up common operations. |
| - if (HasSmiCodeInStub()) { |
| - Label slow; |
| - __ mov(ebx, Operand(esp, 1 * kPointerSize)); |
| - __ mov(eax, Operand(esp, 2 * kPointerSize)); |
| - GenerateSmiCode(masm, &slow); |
| - GenerateReturn(masm); |
| - // Too bad. The fast case smi code didn't succeed. |
| - __ bind(&slow); |
| + __ bind(¬_smis_or_overflow); |
| + switch (op_) { |
| + case Token::ADD: __ sub(eax, Operand(ebx)); break; |
|
Mads Ager (chromium)
2010/01/22 12:14:26
Add comment that says that this reverts the optimistic operation performed above.
vladislav.kaznacheev
2010/01/22 14:09:42
Done.
|
| + case Token::SUB: __ add(eax, Operand(ebx)); break; |
| + default: break; |
| } |
| - // Make sure the arguments are in edx and eax. |
| - GenerateLoadArguments(masm); |
| + ASSERT(kSmiTag == 0); // adjust zero check if not the case |
| + __ test(ecx, Immediate(kSmiTagMask)); |
| + __ j(not_zero, ¬_smis, not_taken); |
| - // Floating point case. |
| + __ bind(&use_fp_on_smis); |
| + Label after_alloc_failure; |
| + // Both operands are known to be SMIs but the result does not fit into a SMI. |
| switch (op_) { |
| case Token::ADD: |
| case Token::SUB: |
| case Token::MUL: |
| case Token::DIV: { |
| - // eax: y |
| - // edx: x |
| - |
| + __ AllocateHeapNumber(edx, ecx, no_reg, |
|
Mads Ager (chromium)
2010/01/22 12:14:26
Reindent argument list:
__ AllocateHeapNumber(
vladislav.kaznacheev
2010/01/22 14:09:42
Done.
|
| + HasArgumentsInRegisters() ? &after_alloc_failure : slow); |
| if (CpuFeatures::IsSupported(SSE2)) { |
| CpuFeatures::Scope use_sse2(SSE2); |
| - FloatingPointHelper::LoadSse2Operands(masm, &call_runtime); |
| - |
| + FloatingPointHelper::LoadSse2Smis( |
| + masm, |
| + ecx, |
| + HasArgumentsInRegisters() ? ARGS_IN_REGISTERS : ARGS_ON_STACK); |
| switch (op_) { |
| case Token::ADD: __ addsd(xmm0, xmm1); break; |
| case Token::SUB: __ subsd(xmm0, xmm1); break; |
| @@ -7245,59 +7289,12 @@ |
| case Token::DIV: __ divsd(xmm0, xmm1); break; |
| default: UNREACHABLE(); |
| } |
| - // Allocate a heap number, if needed. |
| - Label skip_allocation; |
| - switch (mode_) { |
| - case OVERWRITE_LEFT: |
| - __ mov(eax, Operand(edx)); |
| - // Fall through! |
| - case OVERWRITE_RIGHT: |
| - // If the argument in eax is already an object, we skip the |
| - // allocation of a heap number. |
| - __ test(eax, Immediate(kSmiTagMask)); |
| - __ j(not_zero, &skip_allocation, not_taken); |
| - // Fall through! |
| - case NO_OVERWRITE: { |
| - // Allocate a heap number for the result. Keep eax and edx intact |
| - // for the possible runtime call. |
| - __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime); |
| - // Now eax can be overwritten losing one of the arguments as we are |
| - // now done and will not need it any more. |
| - __ mov(eax, ebx); |
| - __ bind(&skip_allocation); |
| - break; |
| - } |
| - default: UNREACHABLE(); |
| - } |
| - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
| - GenerateReturn(masm); |
| + __ movdbl(FieldOperand(edx, HeapNumber::kValueOffset), xmm0); |
| } else { // SSE2 not available, use FPU. |
| - FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx); |
| - // Allocate a heap number, if needed. |
| - Label skip_allocation; |
| - switch (mode_) { |
| - case OVERWRITE_LEFT: |
| - __ mov(eax, Operand(edx)); |
| - // Fall through! |
| - case OVERWRITE_RIGHT: |
| - // If the argument in eax is already an object, we skip the |
| - // allocation of a heap number. |
| - __ test(eax, Immediate(kSmiTagMask)); |
| - __ j(not_zero, &skip_allocation, not_taken); |
| - // Fall through! |
| - case NO_OVERWRITE: |
| - // Allocate a heap number for the result. Keep eax and edx intact |
| - // for the possible runtime call. |
| - __ AllocateHeapNumber(ebx, ecx, no_reg, &call_runtime); |
| - // Now eax can be overwritten losing one of the arguments as we are |
| - // now done and will not need it any more. |
| - __ mov(eax, ebx); |
| - __ bind(&skip_allocation); |
| - break; |
| - default: UNREACHABLE(); |
| - } |
| - FloatingPointHelper::LoadFloatOperands(masm, ecx); |
| - |
| + FloatingPointHelper::LoadFloatSmis( |
| + masm, |
| + ecx, |
| + HasArgumentsInRegisters() ? ARGS_IN_REGISTERS : ARGS_ON_STACK); |
| switch (op_) { |
| case Token::ADD: __ faddp(1); break; |
| case Token::SUB: __ fsubp(1); break; |
| @@ -7305,187 +7302,526 @@ |
| case Token::DIV: __ fdivp(1); break; |
| default: UNREACHABLE(); |
| } |
| - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
| - GenerateReturn(masm); |
| + __ fstp_d(FieldOperand(edx, HeapNumber::kValueOffset)); |
| } |
| + __ mov(eax, edx); |
| + GenerateReturn(masm); |
| } |
| - case Token::MOD: { |
| - // For MOD we go directly to runtime in the non-smi case. |
| - break; |
| - } |
| case Token::BIT_OR: |
| case Token::BIT_AND: |
| case Token::BIT_XOR: |
| case Token::SAR: |
| - case Token::SHL: |
| - case Token::SHR: { |
| - Label non_smi_result, skip_allocation; |
| - Label operand_conversion_failure; |
| - FloatingPointHelper::LoadAsIntegers( |
| - masm, |
| - use_sse3_, |
| - &operand_conversion_failure); |
| - switch (op_) { |
| - case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; |
| - case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; |
| - case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; |
| - case Token::SAR: __ sar_cl(eax); break; |
| - case Token::SHL: __ shl_cl(eax); break; |
| - case Token::SHR: __ shr_cl(eax); break; |
| - default: UNREACHABLE(); |
| - } |
| - if (op_ == Token::SHR) { |
| - // Check if result is non-negative and fits in a smi. |
| - __ test(eax, Immediate(0xc0000000)); |
| - __ j(not_zero, &non_smi_result); |
| - } else { |
| - // Check if result fits in a smi. |
| - __ cmp(eax, 0xc0000000); |
| - __ j(negative, &non_smi_result); |
| - } |
| - // Tag smi result and return. |
| - __ SmiTag(eax); |
| - GenerateReturn(masm); |
| + // These operations always succeed on a pair of smis. |
|
Mads Ager (chromium)
2010/01/22 12:14:26
That means that we should never get to this place, so UNREACHABLE()?
vladislav.kaznacheev
2010/01/22 14:09:42
We never get to this place when executing, but we still reach it when generating the code for the switch.
|
| + break; |
| - // All ops except SHR return a signed int32 that we load in a HeapNumber. |
| - if (op_ != Token::SHR) { |
| - __ bind(&non_smi_result); |
| - // Allocate a heap number if needed. |
| - __ mov(ebx, Operand(eax)); // ebx: result |
| - switch (mode_) { |
| - case OVERWRITE_LEFT: |
| - case OVERWRITE_RIGHT: |
| - // If the operand was an object, we skip the |
| - // allocation of a heap number. |
| - __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? |
| - 1 * kPointerSize : 2 * kPointerSize)); |
| - __ test(eax, Immediate(kSmiTagMask)); |
| - __ j(not_zero, &skip_allocation, not_taken); |
| - // Fall through! |
| - case NO_OVERWRITE: |
| - __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); |
| - __ bind(&skip_allocation); |
| - break; |
| - default: UNREACHABLE(); |
| - } |
| + case Token::MOD: |
| + case Token::SHR: |
| + // These go directly to runtime |
|
Mads Ager (chromium)
2010/01/22 12:14:26
Similarly here: UNREACHABLE()?
Three-space indent
vladislav.kaznacheev
2010/01/22 14:09:42
See previous reply.
On 2010/01/22 12:14:26, Mads Ager (chromium) wrote:
|
| + break; |
| + |
| + case Token::SHL: { |
| + __ AllocateHeapNumber(ebx, ecx, edx, slow); |
| // Store the result in the HeapNumber and return. |
| if (CpuFeatures::IsSupported(SSE2)) { |
| CpuFeatures::Scope use_sse2(SSE2); |
| - __ cvtsi2sd(xmm0, Operand(ebx)); |
| - __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
| + __ cvtsi2sd(xmm0, Operand(eax)); |
| + __ movdbl(FieldOperand(ebx, HeapNumber::kValueOffset), xmm0); |
| } else { |
| - __ mov(Operand(esp, 1 * kPointerSize), ebx); |
| + __ mov(Operand(esp, 1 * kPointerSize), eax); |
| __ fild_s(Operand(esp, 1 * kPointerSize)); |
| - __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
| + __ fstp_d(FieldOperand(ebx, HeapNumber::kValueOffset)); |
| } |
| + __ mov(eax, ebx); |
| GenerateReturn(masm); |
| + break; |
| } |
| - // Go to runtime for non-number inputs. |
| - __ bind(&operand_conversion_failure); |
| - // SHR should return uint32 - go to runtime for non-smi/negative result. |
| - if (op_ == Token::SHR) { |
| - __ bind(&non_smi_result); |
| - } |
| - __ mov(eax, Operand(esp, 1 * kPointerSize)); |
| - __ mov(edx, Operand(esp, 2 * kPointerSize)); |
| - break; |
| - } |
| default: UNREACHABLE(); break; |
|
Mads Ager (chromium)
2010/01/22 12:14:26
Indentation is off for the cases in this switch.
vladislav.kaznacheev
2010/01/22 14:09:42
Done.
|
| } |
| - // If all else fails, use the runtime system to get the correct |
| - // result. If arguments was passed in registers now place them on the |
| - // stack in the correct order below the return address. |
| - __ bind(&call_runtime); |
| if (HasArgumentsInRegisters()) { |
| - __ pop(ecx); |
| - if (HasArgumentsReversed()) { |
| - __ push(eax); |
| - __ push(edx); |
| - } else { |
| - __ push(edx); |
| - __ push(eax); |
| + __ bind(&after_alloc_failure); |
| + __ mov(edx, eax); |
| + __ mov(eax, ebx); |
| + __ jmp(slow); |
| + } |
| + |
| + __ bind(¬_smis); |
| + |
| + if (HasArgumentsInRegisters()) { |
| + __ mov(edx, eax); |
| + __ mov(eax, ebx); |
| + } |
| +} |
| + |
| + |
| + |
| +void GenericBinaryOpStub::Generate(MacroAssembler* masm) { |
| + Label call_runtime; |
| + |
| + __ IncrementCounter(&Counters::generic_binary_stub_calls, 1); |
| + |
| + // Generate fast case smi code if requested. This flag is set when the fast |
| + // case smi code is not generated by the caller. Generating it here will speed |
| + // up common operations. |
| + |
| + if (ShouldGenerateSmiCode()) { |
| + GenerateSmiCode(masm, &call_runtime); |
| + } |
| + |
| + // Floating point case. |
| + if (ShouldGenerateFPCode()) { |
| + Label not_floats; |
| + |
| + switch (op_) { |
| + case Token::ADD: |
| + case Token::SUB: |
| + case Token::MUL: |
| + case Token::DIV: { |
| + // Make sure the arguments are in edx and eax. |
| + GenerateLoadArguments(masm); |
| + // eax: y |
| + // edx: x |
| + if (CpuFeatures::IsSupported(SSE2)) { |
| + CpuFeatures::Scope use_sse2(SSE2); |
| + FloatingPointHelper::LoadSse2Operands(masm, ¬_floats); |
| + |
| + switch (op_) { |
| + case Token::ADD: __ addsd(xmm0, xmm1); break; |
| + case Token::SUB: __ subsd(xmm0, xmm1); break; |
| + case Token::MUL: __ mulsd(xmm0, xmm1); break; |
| + case Token::DIV: __ divsd(xmm0, xmm1); break; |
| + default: UNREACHABLE(); |
| + } |
| + GenerateHeapResultAllocation(masm, &call_runtime); |
| + __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
| + GenerateSwitchAndReturn(masm, BINARY_OP_STUB_FAST_FP); |
| + } else { // SSE2 not available, use FPU. |
| + FloatingPointHelper::CheckFloatOperands(masm, ¬_floats, ebx); |
| + FloatingPointHelper::LoadFloatOperands(masm, ecx, ARGS_ON_STACK); |
| + switch (op_) { |
| + case Token::ADD: __ faddp(1); break; |
| + case Token::SUB: __ fsubp(1); break; |
| + case Token::MUL: __ fmulp(1); break; |
| + case Token::DIV: __ fdivp(1); break; |
| + default: UNREACHABLE(); |
| + } |
| + Label after_alloc_failure; |
| + GenerateHeapResultAllocation(masm, &after_alloc_failure); |
| + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
| + GenerateSwitchAndReturn(masm, BINARY_OP_STUB_FAST_FP); |
| + __ bind(&after_alloc_failure); |
| + __ fstp(0); |
| + __ jmp(&call_runtime); |
| + } |
| + } |
| + case Token::MOD: { |
| + // For MOD we go directly to runtime in the non-smi case. |
|
Mads Ager (chromium)
2010/01/22 12:14:26
UNREACHABLE()?
vladislav.kaznacheev
2010/01/22 14:09:42
Similarly, this line is executed at code generation time only.
|
| + break; |
| + } |
| + case Token::BIT_OR: |
| + case Token::BIT_AND: |
| + case Token::BIT_XOR: |
| + case Token::SAR: |
| + case Token::SHL: |
| + case Token::SHR: { |
| + GenerateLoadArguments(masm); |
| + Label non_smi_result, skip_allocation; |
| + FloatingPointHelper::LoadAsIntegers( |
| + masm, |
| + use_sse3_, |
| + &call_runtime); |
| + switch (op_) { |
| + case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; |
| + case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; |
| + case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; |
| + case Token::SAR: __ sar_cl(eax); break; |
| + case Token::SHL: __ shl_cl(eax); break; |
| + case Token::SHR: __ shr_cl(eax); break; |
| + default: UNREACHABLE(); |
| + } |
| + if (op_ == Token::SHR) { |
| + // Check if result is non-negative and fits in a smi. |
| + __ test(eax, Immediate(0xc0000000)); |
| + __ j(not_zero, &call_runtime); |
| + } else { |
| + // Check if result fits in a smi. |
| + __ cmp(eax, 0xc0000000); |
| + __ j(negative, &non_smi_result); |
| + } |
| + // Tag smi result and return. |
| + __ SmiTag(eax); |
| + GenerateReturn(masm); |
| + |
| + // All ops except SHR return a signed int32 that we load in |
| + // a HeapNumber. |
| + if (op_ != Token::SHR) { |
| + __ bind(&non_smi_result); |
| + // Allocate a heap number if needed. |
| + __ mov(ebx, Operand(eax)); // ebx: result |
| + switch (mode_) { |
| + case OVERWRITE_LEFT: |
| + case OVERWRITE_RIGHT: |
| + // If the operand was an object, we skip the |
| + // allocation of a heap number. |
| + __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? |
| + 1 * kPointerSize : 2 * kPointerSize)); |
| + __ test(eax, Immediate(kSmiTagMask)); |
| + __ j(not_zero, &skip_allocation, not_taken); |
| + // Fall through! |
| + case NO_OVERWRITE: |
| + __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); |
| + __ bind(&skip_allocation); |
| + break; |
| + default: UNREACHABLE(); |
| + } |
| + // Store the result in the HeapNumber and return. |
| + if (CpuFeatures::IsSupported(SSE2)) { |
| + CpuFeatures::Scope use_sse2(SSE2); |
| + __ cvtsi2sd(xmm0, Operand(ebx)); |
| + __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
| + } else { |
| + __ mov(Operand(esp, 1 * kPointerSize), ebx); |
| + __ fild_s(Operand(esp, 1 * kPointerSize)); |
| + __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
| + } |
| + GenerateReturn(masm); |
| + } |
| + |
| + break; |
| + } |
| + default: UNREACHABLE(); break; |
| } |
| - __ push(ecx); |
| + __ bind(¬_floats); |
| } |
| + |
| switch (op_) { |
| case Token::ADD: { |
| - // Test for string arguments before calling runtime. |
| - Label not_strings, not_string1, string1; |
| - Result answer; |
| - __ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument. |
| - __ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument. |
| - __ test(eax, Immediate(kSmiTagMask)); |
| - __ j(zero, ¬_string1); |
| - __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, eax); |
| - __ j(above_equal, ¬_string1); |
| + if (ShouldGenerateStringAddCode()) { |
| + // Test for string arguments before calling runtime. |
| + Label not_strings, not_string1, string1; |
| + Result answer; |
| - // First argument is a a string, test second. |
| - __ test(edx, Immediate(kSmiTagMask)); |
| - __ j(zero, &string1); |
| - __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx); |
| - __ j(above_equal, &string1); |
| + // If this stub generated FP-specific code then the arguments |
| + // are already in edx,eax |
|
Mads Ager (chromium)
2010/01/22 12:14:26
Space after comma.
vladislav.kaznacheev
2010/01/22 14:09:42
Done.
|
| + if (!ShouldGenerateFPCode()) { |
| + GenerateLoadArguments(masm); |
| + } |
| - // First and second argument are strings. Jump to the string add stub. |
| - StringAddStub stub(NO_STRING_CHECK_IN_STUB); |
| - __ TailCallStub(&stub); |
| + __ test(edx, Immediate(kSmiTagMask)); |
| + __ j(zero, ¬_string1); |
| + __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ecx); |
| + __ j(above_equal, ¬_string1); |
| - // Only first argument is a string. |
| - __ bind(&string1); |
| - __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION); |
| + // First argument is a a string, test second. |
|
Mads Ager (chromium)
2010/01/22 12:14:26
a a -> a
vladislav.kaznacheev
2010/01/22 14:09:42
Done.
|
| + __ test(eax, Immediate(kSmiTagMask)); |
| + __ j(zero, &string1); |
| + __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx); |
| + __ j(above_equal, &string1); |
| - // First argument was not a string, test second. |
| - __ bind(¬_string1); |
| - __ test(edx, Immediate(kSmiTagMask)); |
| - __ j(zero, ¬_strings); |
| - __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx); |
| - __ j(above_equal, ¬_strings); |
| + // First and second argument are strings. Use the string add stub. |
| + StringAddStub stub(NO_STRING_CHECK_IN_STUB); |
| + GenerateSwitchViaStub(masm, stub, BINARY_OP_STUB_FAST_STRING_ADD); |
| - // Only second argument is a string. |
| - __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION); |
| + // Only first argument is a string. |
| + __ bind(&string1); |
| + GenerateSwitchViaBuiltin( |
| + masm, |
| + HasArgumentsReversed() ? |
| + Builtins::STRING_ADD_RIGHT : |
| + Builtins::STRING_ADD_LEFT, |
| + NULL, |
| + BINARY_OP_STUB_FAST_STRING_ADD); |
| - __ bind(¬_strings); |
| - // Neither argument is a string. |
| - __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); |
| + // First argument was not a string, test second. |
| + __ bind(¬_string1); |
| + __ test(eax, Immediate(kSmiTagMask)); |
| + __ j(zero, ¬_strings); |
| + __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, ecx); |
| + __ j(above_equal, ¬_strings); |
| + |
| + // Only second argument is a string. |
| + GenerateSwitchViaBuiltin( |
| + masm, |
| + HasArgumentsReversed() ? |
| + Builtins::STRING_ADD_LEFT : |
| + Builtins::STRING_ADD_RIGHT, |
| + NULL, |
| + BINARY_OP_STUB_FAST_STRING_ADD); |
| + |
| + __ bind(¬_strings); |
| + } |
| + GenerateSwitchViaBuiltin( |
| + masm, |
| + Builtins::ADD, |
| + &call_runtime, |
| + BINARY_OP_STUB_FAST_RUNTIME); |
| break; |
| } |
| case Token::SUB: |
| - __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); |
| + GenerateSwitchViaBuiltin( |
| + masm, |
| + Builtins::SUB, |
| + &call_runtime, |
| + BINARY_OP_STUB_FAST_RUNTIME); |
| break; |
| case Token::MUL: |
| - __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); |
| + GenerateBuiltinTailCall(masm, Builtins::MUL, &call_runtime); |
| break; |
|
Mads Ager (chromium)
2010/01/22 12:14:26
indentation.
vladislav.kaznacheev
2010/01/22 14:09:42
Done.
|
| case Token::DIV: |
| - __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); |
| + GenerateBuiltinTailCall(masm, Builtins::DIV, &call_runtime); |
| break; |
| case Token::MOD: |
| - __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); |
| + GenerateBuiltinTailCall(masm, Builtins::MOD, &call_runtime); |
| break; |
| case Token::BIT_OR: |
| - __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); |
| + GenerateBuiltinTailCall(masm, Builtins::BIT_OR, &call_runtime); |
| break; |
| case Token::BIT_AND: |
| - __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); |
| + GenerateBuiltinTailCall(masm, Builtins::BIT_AND, &call_runtime); |
| break; |
| case Token::BIT_XOR: |
| - __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); |
| + GenerateBuiltinTailCall(masm, Builtins::BIT_XOR, &call_runtime); |
| break; |
| case Token::SAR: |
| - __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); |
| + GenerateBuiltinTailCall(masm, Builtins::SAR, &call_runtime); |
| break; |
| case Token::SHL: |
| - __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); |
| + GenerateBuiltinTailCall(masm, Builtins::SHL, &call_runtime); |
| break; |
| case Token::SHR: |
| - __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); |
| + GenerateBuiltinTailCall(masm, Builtins::SHR, &call_runtime); |
| break; |
| default: |
| UNREACHABLE(); |
| } |
| + |
| + // Generate the unreachable reference to the original stub so that it can be |
|
Mads Ager (chromium)
2010/01/22 12:14:26
the -> an
vladislav.kaznacheev
2010/01/22 14:09:42
Done.
|
| + // found at the end of this stub when clearing ICs at GC. |
| + if (fast_case_ != BINARY_OP_STUB_UNINITIALIZED) { |
| + int key = TransitionMinorKey(MinorKey(), BINARY_OP_STUB_UNINITIALIZED); |
| + GenericBinaryOpStub uninit(key); |
| + __ TailCallStub(&uninit); |
| + } |
| } |
| +int GenericBinaryOpStub::GetICState() { |
| + switch (fast_case_) { |
| + case BINARY_OP_STUB_UNINITIALIZED: return UNINITIALIZED; |
| + case BINARY_OP_STUB_GENERIC: return MEGAMORPHIC; |
| + default: return MONOMORPHIC; |
| + } |
| +} |
| + |
| + |
| +BinaryFastCase GenericBinaryOpStub::GetFastCase(BinaryFastCase target_case) { |
| + if (fast_case_ == target_case || fast_case_ == BINARY_OP_STUB_GENERIC) |
| + return fast_case_; |
| + |
| + switch (target_case) { |
| + case BINARY_OP_STUB_FAST_FP: |
| + if (ShouldGenerateSmiCode() && |
| + (fast_case_ == BINARY_OP_STUB_UNINITIALIZED)) { |
|
Mads Ager (chromium)
2010/01/22 12:14:26
Indent one more space.
Could you add a comment explaining this condition?
vladislav.kaznacheev
2010/01/22 14:09:42
Done.
|
| + return BINARY_OP_STUB_FAST_FP; |
| + } |
| + break; |
| + |
| + case BINARY_OP_STUB_FAST_STRING_ADD: |
| + if (fast_case_ == BINARY_OP_STUB_UNINITIALIZED) |
| + return BINARY_OP_STUB_FAST_STRING_ADD; |
| + break; |
| + |
| + case BINARY_OP_STUB_FAST_RUNTIME: |
| + if (fast_case_ == BINARY_OP_STUB_UNINITIALIZED) |
| + return BINARY_OP_STUB_FAST_RUNTIME; |
| + break; |
| + |
| + case BINARY_OP_STUB_GENERIC: |
| + break; |
| + |
| + default: UNREACHABLE(); |
| + } |
| + |
| + return BINARY_OP_STUB_GENERIC; |
| +} |
| + |
| + |
| +void GenericBinaryOpStub::GenerateFastCaseSwitch(MacroAssembler* masm, |
| + BinaryFastCase fast_case) { |
| + int argc = 2; |
| + __ pop(ecx); |
| + __ push(eax); |
|
Mads Ager (chromium)
2010/01/22 12:14:26
Add comment explaining what eax is here to make this clearer.
vladislav.kaznacheev
2010/01/22 14:09:42
Done.
|
| + __ push(Immediate(Smi::FromInt(TransitionMinorKey(MinorKey(), fast_case)))); |
| +// __ push(eax); |
|
Mads Ager (chromium)
2010/01/22 12:14:26
Code in comment.
vladislav.kaznacheev
2010/01/22 14:09:42
Done.
|
| +#ifdef DEBUG |
| + __ push(Immediate(Smi::FromInt(TransitionMinorKey(MinorKey(), fast_case_)))); |
| + argc++; |
| +#endif |
| + __ push(ecx); |
| + __ TailCallRuntime(ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), |
| + argc, |
| + 1); |
| +} |
| + |
| + |
| +void GenericBinaryOpStub::GenerateBuiltinCallPrologue(MacroAssembler* masm) { |
| + if (HasArgumentsInRegisters()) { |
| + __ EnterInternalFrame(); |
| + if (HasArgumentsReversed()) { |
| + __ push(eax); |
| + __ push(edx); |
| + } else { |
| + __ push(edx); |
| + __ push(eax); |
| + } |
| + } else { |
| + __ pop(ecx); |
| + __ pop(eax); |
|
Mads Ager (chromium)
2010/01/22 12:14:26
Popping the arguments instead of loading them seems unnecessary.
vladislav.kaznacheev
2010/01/22 14:09:42
There is always a tail call following this, so we need to remove the arguments from the stack anyway.
|
| + __ pop(edx); |
| + __ push(ecx); |
| + __ EnterInternalFrame(); |
| + __ push(edx); |
| + __ push(eax); |
| + } |
| +} |
| + |
| +void GenericBinaryOpStub::GenerateBuiltinCallEpilogue(MacroAssembler* masm) { |
| + __ LeaveInternalFrame(); |
| +} |
| + |
| + |
| +void GenericBinaryOpStub::GenerateBuiltinTailCall( |
| + MacroAssembler* masm, |
| + Builtins::JavaScript id, |
| + Label* exit_via_builtin) { |
| + if (exit_via_builtin) { |
| + __ bind(exit_via_builtin); |
| + } |
| + GenerateRegisterArgumentsPush(masm); |
| + __ InvokeBuiltin(id, JUMP_FUNCTION); |
| +} |
| + |
| + |
| +void GenericBinaryOpStub::GenerateSwitchAndReturn( |
| + MacroAssembler* masm, |
| + BinaryFastCase target_fast_case) { |
| + BinaryFastCase new_fast_case = GetFastCase(target_fast_case); |
| + if (new_fast_case != fast_case_) { |
| + if (!HasArgumentsInRegisters()) { |
| + __ pop(ecx); // return address |
| + __ pop(ebx); // drop second arg |
| + __ pop(edx); // drop first arg |
| + __ push(ecx); |
| + } |
| + GenerateFastCaseSwitch(masm, new_fast_case); |
| + } else { |
| + GenerateReturn(masm); |
| + } |
| +} |
| + |
| + |
| +void GenericBinaryOpStub::GenerateSwitchViaBuiltin( |
| + MacroAssembler* masm, |
| + Builtins::JavaScript id, |
| + Label* exit_via_builtin, |
| + BinaryFastCase target_fast_case) { |
| + BinaryFastCase new_fast_case = GetFastCase(target_fast_case); |
| + if (new_fast_case != fast_case_) { |
| + GenerateBuiltinCallPrologue(masm); |
| + __ InvokeBuiltin(id, CALL_FUNCTION); |
| + GenerateBuiltinCallEpilogue(masm); |
| + GenerateFastCaseSwitch(masm, new_fast_case); |
| + if (exit_via_builtin) { |
| + GenerateBuiltinTailCall(masm, id, exit_via_builtin); |
| + } |
| + } else { |
| + GenerateBuiltinTailCall(masm, id, exit_via_builtin); |
| + } |
| +} |
| + |
| + |
| +void GenericBinaryOpStub::GenerateSwitchViaStub( |
| + MacroAssembler* masm, |
| + CodeStub& stub, |
| + BinaryFastCase target_fast_case) { |
| + BinaryFastCase new_fast_case = GetFastCase(target_fast_case); |
| + if (new_fast_case != fast_case_) { |
| + GenerateBuiltinCallPrologue(masm); |
| + __ CallStub(&stub); |
| + GenerateBuiltinCallEpilogue(masm); |
| + GenerateFastCaseSwitch(masm, new_fast_case); |
| + } else { |
| + GenerateRegisterArgumentsPush(masm); |
| + __ TailCallStub(&stub); |
| + } |
| +} |
| + |
| + |
| +void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, |
| + Label* alloc_failure) { |
|
Mads Ager (chromium)
2010/01/22 12:14:26
Indentation.
vladislav.kaznacheev
2010/01/22 14:09:42
Done.
|
| + Label skip_allocation; |
| + OverwriteMode mode = mode_; |
| + if (HasArgumentsReversed()) { |
| + if (mode == OVERWRITE_RIGHT) |
| + mode = OVERWRITE_LEFT; |
| + else if (mode == OVERWRITE_LEFT) |
| + mode = OVERWRITE_RIGHT; |
| + } |
| + switch (mode) { |
| + case OVERWRITE_LEFT: { |
| + // If the argument in edx is already an object, we skip the |
| + // allocation of a heap number. |
| + __ test(edx, Immediate(kSmiTagMask)); |
| + __ j(not_zero, &skip_allocation, not_taken); |
| + // Allocate a heap number for the result. Keep eax and edx intact |
| + // for the possible runtime call. |
| + __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); |
| + // Now edx can be overwritten losing one of the arguments as we are |
| + // now done and will not need it any more. |
| + __ mov(edx, Operand(ebx)); |
| + __ bind(&skip_allocation); |
| + // Use object in edx as a result holder |
| + __ mov(eax, Operand(edx)); |
| + break; |
| + } |
| + case OVERWRITE_RIGHT: |
| + // If the argument in eax is already an object, we skip the |
| + // allocation of a heap number. |
| + __ test(eax, Immediate(kSmiTagMask)); |
| + __ j(not_zero, &skip_allocation, not_taken); |
| + // Fall through! |
| + case NO_OVERWRITE: |
| + // Allocate a heap number for the result. Keep eax and edx intact |
| + // for the possible runtime call. |
| + __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); |
| + // Now eax can be overwritten losing one of the arguments as we are |
| + // now done and will not need it any more. |
| + __ mov(eax, ebx); |
| + __ bind(&skip_allocation); |
| + break; |
| + default: UNREACHABLE(); |
| + } |
| +} |
| + |
| + |
| +void GenericBinaryOpStub::GenerateRegisterArgumentsPush(MacroAssembler* masm) { |
| +// Builtin functions expect arguments on stack so we need to push them |
| +// there in the correct order. |
| + if (HasArgumentsInRegisters()) { |
| + __ pop(ecx); |
| + if (HasArgumentsReversed()) { |
| + __ push(eax); |
| + __ push(edx); |
| + } else { |
| + __ push(edx); |
| + __ push(eax); |
| + } |
| + __ push(ecx); |
| + } |
| +} |
| + |
| + |
| void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { |
| // If arguments are not passed in registers read them from the stack. |
| if (!HasArgumentsInRegisters()) { |
| @@ -7506,6 +7842,28 @@ |
| } |
| +Code* GetBinaryOpStub(int minor_key) { |
| + HandleScope scope; |
| + GenericBinaryOpStub stub(minor_key); |
| + return *stub.GetCode(); |
| +} |
| + |
| + |
#ifdef DEBUG
void TraceBinaryOp(int old_key, int new_key) {
  // Debug-only trace of a binary op stub transition: prints the old and new
  // fast case names together with the stub's static parameters.
  GenericBinaryOpStub old_stub(old_key);
  GenericBinaryOpStub new_stub(new_key);
  const int kMaxNameLength = 100;
  char old_name[kMaxNameLength];
  old_stub.FormatStaticParameters(old_name, kMaxNameLength);
  PrintF("[BinaryOpIC (%s->%s)#%s]\n",
         old_stub.GetFastCaseName(),
         new_stub.GetFastCaseName(),
         old_name);
}
#endif  // DEBUG
| + |
| + |
| // Get the integer part of a heap number. Surprisingly, all this bit twiddling |
| // is faster than using the built-in instructions on floating point registers. |
| // Trashes edi and ebx. Dest is ecx. Source cannot be ecx or one of the |
| @@ -7748,16 +8106,47 @@ |
| } |
| +void FloatingPointHelper::LoadSse2Smis(MacroAssembler* masm, |
| + Register scratch, |
| + ArgsLocation args_location) { |
| + if (args_location == ARGS_IN_REGISTERS) { |
| + __ mov(scratch, eax); |
| + } else { |
| + __ mov(scratch, Operand(esp, 2 * kPointerSize)); |
| + } |
| + __ SmiUntag(scratch); // Untag smi before converting to float. |
| + __ cvtsi2sd(xmm0, Operand(scratch)); |
| + |
| + |
| + if (args_location == ARGS_IN_REGISTERS) { |
| + __ mov(scratch, ebx); |
| + } else { |
| + __ mov(scratch, Operand(esp, 1 * kPointerSize)); |
| + } |
| + __ SmiUntag(scratch); // Untag smi before converting to float. |
| + __ cvtsi2sd(xmm1, Operand(scratch)); |
| +} |
| + |
| + |
| void FloatingPointHelper::LoadFloatOperands(MacroAssembler* masm, |
| - Register scratch) { |
| + Register scratch, |
| + ArgsLocation args_location) { |
| Label load_smi_1, load_smi_2, done_load_1, done; |
| - __ mov(scratch, Operand(esp, 2 * kPointerSize)); |
| + if (args_location == ARGS_IN_REGISTERS) { |
| + __ mov(scratch, edx); |
| + } else { |
| + __ mov(scratch, Operand(esp, 2 * kPointerSize)); |
| + } |
| __ test(scratch, Immediate(kSmiTagMask)); |
| __ j(zero, &load_smi_1, not_taken); |
| __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); |
| __ bind(&done_load_1); |
| - __ mov(scratch, Operand(esp, 1 * kPointerSize)); |
| + if (args_location == ARGS_IN_REGISTERS) { |
| + __ mov(scratch, eax); |
| + } else { |
| + __ mov(scratch, Operand(esp, 1 * kPointerSize)); |
| + } |
| __ test(scratch, Immediate(kSmiTagMask)); |
| __ j(zero, &load_smi_2, not_taken); |
| __ fld_d(FieldOperand(scratch, HeapNumber::kValueOffset)); |
| @@ -7780,6 +8169,31 @@ |
| } |
| +void FloatingPointHelper::LoadFloatSmis(MacroAssembler* masm, |
| + Register scratch, |
| + ArgsLocation args_location) { |
| + if (args_location == ARGS_IN_REGISTERS) { |
| + __ mov(scratch, eax); |
| + } else { |
| + __ mov(scratch, Operand(esp, 2 * kPointerSize)); |
| + } |
| + __ SmiUntag(scratch); |
| + __ push(scratch); |
| + __ fild_s(Operand(esp, 0)); |
| + __ pop(scratch); |
| + |
| + if (args_location == ARGS_IN_REGISTERS) { |
| + __ mov(scratch, ebx); |
| + } else { |
| + __ mov(scratch, Operand(esp, 1 * kPointerSize)); |
| + } |
| + __ SmiUntag(scratch); |
| + __ push(scratch); |
| + __ fild_s(Operand(esp, 0)); |
| + __ pop(scratch); |
| +} |
| + |
| + |
| void FloatingPointHelper::CheckFloatOperands(MacroAssembler* masm, |
| Label* non_float, |
| Register scratch) { |