Index: src/ia32/codegen-ia32.cc
===================================================================
--- src/ia32/codegen-ia32.cc (revision 3980)
+++ src/ia32/codegen-ia32.cc (working copy)
@@ -850,13 +850,14 @@
   }
   OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
-               "GenericBinaryOpStub_%s_%s%s_%s%s_%s",
+               "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
                op_name,
                overwrite_name,
                (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
                args_in_registers_ ? "RegArgs" : "StackArgs",
                args_reversed_ ? "_R" : "",
-               NumberInfo::ToString(operands_type_));
+               NumberInfo::ToString(static_operands_type_),
+               BinaryOpIC::GetName(runtime_operands_type_));
   return name_;
 }
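
Note (illustrative, not part of the patch): the hunk above extends the stub name with a second type field, so a stub's name now records both the statically known operand type (NumberInfo) and the operand type observed by the binary-op IC at runtime (BinaryOpIC). A minimal standalone C++ sketch of the same name composition, using placeholder strings where the real NumberInfo::ToString and BinaryOpIC::GetName values would go:

#include <cstdio>

int main() {
  char name[100];
  // Hypothetical values standing in for NumberInfo::ToString(static_operands_type_)
  // and BinaryOpIC::GetName(runtime_operands_type_).
  const char* static_type = "Number";
  const char* runtime_type = "HEAP_NUMBERS";
  std::snprintf(name, sizeof(name), "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
                "ADD", "Alloc", "", "RegArgs", "", static_type, runtime_type);
  std::puts(name);  // e.g. GenericBinaryOpStub_ADD_Alloc_RegArgs_Number_HEAP_NUMBERS
  return 0;
}
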
@@ -8083,146 +8084,174 @@
   // Generate fast case smi code if requested. This flag is set when the fast
   // case smi code is not generated by the caller. Generating it here will speed
   // up common operations.
-  if (HasSmiCodeInStub()) {
+  if (ShouldGenerateSmiCode()) {
     GenerateSmiCode(masm, &call_runtime);
   } else if (op_ != Token::MOD) {  // MOD goes straight to runtime.
-    GenerateLoadArguments(masm);
+    if (!HasArgsInRegisters()) {
+      GenerateLoadArguments(masm);
+    }
   }
   // Floating point case.
-  switch (op_) {
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV: {
-      if (CpuFeatures::IsSupported(SSE2)) {
-        CpuFeatures::Scope use_sse2(SSE2);
-        if (NumberInfo::IsNumber(operands_type_)) {
-          if (FLAG_debug_code) {
-            // Assert at runtime that inputs are only numbers.
-            __ AbortIfNotNumber(edx,
-                                "GenericBinaryOpStub operand not a number.");
-            __ AbortIfNotNumber(eax,
-                                "GenericBinaryOpStub operand not a number.");
-          }
-          FloatingPointHelper::LoadSSE2Operands(masm);
-        } else {
-          FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+  if (ShouldGenerateFPCode()) {
+    switch (op_) {
+      case Token::ADD:
+      case Token::SUB:
+      case Token::MUL:
+      case Token::DIV: {
+        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+            HasSmiCodeInStub()) {
+          // Execution reaches this point when the first non-smi argument occurs
+          // (and only if smi code is generated). This is the right moment to
+          // patch to HEAP_NUMBERS state. The transition is attempted only for
+          // the four basic operations. The stub stays in the DEFAULT state
+          // forever for all other operations (also if smi code is skipped).
+          GenerateTypeTransition(masm);
         }
-        switch (op_) {
-          case Token::ADD: __ addsd(xmm0, xmm1); break;
-          case Token::SUB: __ subsd(xmm0, xmm1); break;
-          case Token::MUL: __ mulsd(xmm0, xmm1); break;
-          case Token::DIV: __ divsd(xmm0, xmm1); break;
-          default: UNREACHABLE();
+        Label not_floats;
+        if (CpuFeatures::IsSupported(SSE2)) {
+          CpuFeatures::Scope use_sse2(SSE2);
+          if (NumberInfo::IsNumber(static_operands_type_)) {
+            if (FLAG_debug_code) {
+              // Assert at runtime that inputs are only numbers.
+              __ AbortIfNotNumber(edx,
+                                  "GenericBinaryOpStub operand not a number.");
+              __ AbortIfNotNumber(eax,
+                                  "GenericBinaryOpStub operand not a number.");
+            }
+            FloatingPointHelper::LoadSSE2Operands(masm);
+          } else {
+            FloatingPointHelper::LoadSSE2Operands(masm, &call_runtime);
+          }
+
+          switch (op_) {
+            case Token::ADD: __ addsd(xmm0, xmm1); break;
+            case Token::SUB: __ subsd(xmm0, xmm1); break;
+            case Token::MUL: __ mulsd(xmm0, xmm1); break;
+            case Token::DIV: __ divsd(xmm0, xmm1); break;
+            default: UNREACHABLE();
+          }
+          GenerateHeapResultAllocation(masm, &call_runtime);
+          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+          GenerateReturn(masm);
+        } else {  // SSE2 not available, use FPU.
+          if (NumberInfo::IsNumber(static_operands_type_)) {
+            if (FLAG_debug_code) {
+              // Assert at runtime that inputs are only numbers.
+              __ AbortIfNotNumber(edx,
+                                  "GenericBinaryOpStub operand not a number.");
+              __ AbortIfNotNumber(eax,
+                                  "GenericBinaryOpStub operand not a number.");
            }
+          } else {
+            FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
+          }
+          FloatingPointHelper::LoadFloatOperands(
+              masm,
+              ecx,
+              FloatingPointHelper::ARGS_IN_REGISTERS);
+          switch (op_) {
+            case Token::ADD: __ faddp(1); break;
+            case Token::SUB: __ fsubp(1); break;
+            case Token::MUL: __ fmulp(1); break;
+            case Token::DIV: __ fdivp(1); break;
+            default: UNREACHABLE();
+          }
+          Label after_alloc_failure;
+          GenerateHeapResultAllocation(masm, &after_alloc_failure);
+          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+          GenerateReturn(masm);
+          __ bind(&after_alloc_failure);
+          __ ffree();
+          __ jmp(&call_runtime);
         }
-        GenerateHeapResultAllocation(masm, &call_runtime);
-        __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
-        GenerateReturn(masm);
-      } else {  // SSE2 not available, use FPU.
-        if (NumberInfo::IsNumber(operands_type_)) {
-          if (FLAG_debug_code) {
-            // Assert at runtime that inputs are only numbers.
-            __ AbortIfNotNumber(edx,
-                                "GenericBinaryOpStub operand not a number.");
-            __ AbortIfNotNumber(eax,
-                                "GenericBinaryOpStub operand not a number.");
+        __ bind(&not_floats);
+        if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
+            !HasSmiCodeInStub()) {
+          // Execution reaches this point when the first non-number argument
+          // occurs (and only if smi code is skipped from the stub, otherwise
+          // the patching has already been done earlier in this case branch).
+          // Try patching to STRINGS for ADD operation.
+          if (op_ == Token::ADD) {
+            GenerateTypeTransition(masm);
          }
-        } else {
-          FloatingPointHelper::CheckFloatOperands(masm, &call_runtime, ebx);
        }
-        FloatingPointHelper::LoadFloatOperands(
-            masm,
-            ecx,
-            FloatingPointHelper::ARGS_IN_REGISTERS);
-        switch (op_) {
-          case Token::ADD: __ faddp(1); break;
-          case Token::SUB: __ fsubp(1); break;
-          case Token::MUL: __ fmulp(1); break;
-          case Token::DIV: __ fdivp(1); break;
-          default: UNREACHABLE();
-        }
-        Label after_alloc_failure;
-        GenerateHeapResultAllocation(masm, &after_alloc_failure);
-        __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
-        GenerateReturn(masm);
-        __ bind(&after_alloc_failure);
-        __ ffree();
-        __ jmp(&call_runtime);
+        break;
      }
-    }
-    case Token::MOD: {
-      // For MOD we go directly to runtime in the non-smi case.
-      break;
-    }
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SAR:
-    case Token::SHL:
-    case Token::SHR: {
-      Label non_smi_result;
-      FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
-      switch (op_) {
-        case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
-        case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
-        case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
-        case Token::SAR: __ sar_cl(eax); break;
-        case Token::SHL: __ shl_cl(eax); break;
-        case Token::SHR: __ shr_cl(eax); break;
-        default: UNREACHABLE();
+      case Token::MOD: {
+        // For MOD we go directly to runtime in the non-smi case.
+        break;
      }
-      if (op_ == Token::SHR) {
-        // Check if result is non-negative and fits in a smi.
-        __ test(eax, Immediate(0xc0000000));
-        __ j(not_zero, &call_runtime);
-      } else {
-        // Check if result fits in a smi.
-        __ cmp(eax, 0xc0000000);
-        __ j(negative, &non_smi_result);
-      }
-      // Tag smi result and return.
-      __ SmiTag(eax);
-      GenerateReturn(masm);
-
-      // All ops except SHR return a signed int32 that we load in a HeapNumber.
-      if (op_ != Token::SHR) {
-        __ bind(&non_smi_result);
-        // Allocate a heap number if needed.
-        __ mov(ebx, Operand(eax));  // ebx: result
-        Label skip_allocation;
-        switch (mode_) {
-          case OVERWRITE_LEFT:
-          case OVERWRITE_RIGHT:
-            // If the operand was an object, we skip the
-            // allocation of a heap number.
-            __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
-                                     1 * kPointerSize : 2 * kPointerSize));
-            __ test(eax, Immediate(kSmiTagMask));
-            __ j(not_zero, &skip_allocation, not_taken);
-            // Fall through!
-          case NO_OVERWRITE:
-            __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
-            __ bind(&skip_allocation);
-            break;
+      case Token::BIT_OR:
+      case Token::BIT_AND:
+      case Token::BIT_XOR:
+      case Token::SAR:
+      case Token::SHL:
+      case Token::SHR: {
+        Label non_smi_result;
+        FloatingPointHelper::LoadAsIntegers(masm, use_sse3_, &call_runtime);
+        switch (op_) {
+          case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
+          case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
+          case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
+          case Token::SAR: __ sar_cl(eax); break;
+          case Token::SHL: __ shl_cl(eax); break;
+          case Token::SHR: __ shr_cl(eax); break;
           default: UNREACHABLE();
        }
-        // Store the result in the HeapNumber and return.
-        if (CpuFeatures::IsSupported(SSE2)) {
-          CpuFeatures::Scope use_sse2(SSE2);
-          __ cvtsi2sd(xmm0, Operand(ebx));
-          __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+        if (op_ == Token::SHR) {
+          // Check if result is non-negative and fits in a smi.
+          __ test(eax, Immediate(0xc0000000));
+          __ j(not_zero, &call_runtime);
        } else {
-          __ mov(Operand(esp, 1 * kPointerSize), ebx);
-          __ fild_s(Operand(esp, 1 * kPointerSize));
-          __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+          // Check if result fits in a smi.
+          __ cmp(eax, 0xc0000000);
+          __ j(negative, &non_smi_result);
        }
+        // Tag smi result and return.
+        __ SmiTag(eax);
        GenerateReturn(masm);
+
+        // All ops except SHR return a signed int32 that we load in
+        // a HeapNumber.
+        if (op_ != Token::SHR) {
+          __ bind(&non_smi_result);
+          // Allocate a heap number if needed.
+          __ mov(ebx, Operand(eax));  // ebx: result
+          Label skip_allocation;
+          switch (mode_) {
+            case OVERWRITE_LEFT:
+            case OVERWRITE_RIGHT:
+              // If the operand was an object, we skip the
+              // allocation of a heap number.
+              __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
+                                       1 * kPointerSize : 2 * kPointerSize));
+              __ test(eax, Immediate(kSmiTagMask));
+              __ j(not_zero, &skip_allocation, not_taken);
+              // Fall through!
+            case NO_OVERWRITE:
+              __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
+              __ bind(&skip_allocation);
+              break;
+            default: UNREACHABLE();
+          }
+          // Store the result in the HeapNumber and return.
+          if (CpuFeatures::IsSupported(SSE2)) {
+            CpuFeatures::Scope use_sse2(SSE2);
+            __ cvtsi2sd(xmm0, Operand(ebx));
+            __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
+          } else {
+            __ mov(Operand(esp, 1 * kPointerSize), ebx);
+            __ fild_s(Operand(esp, 1 * kPointerSize));
+            __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
+          }
+          GenerateReturn(masm);
+        }
+        break;
      }
-      break;
+      default: UNREACHABLE(); break;
    }
-    default: UNREACHABLE(); break;
-  }
   // If all else fails, use the runtime system to get the correct
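
Note (illustrative, not part of the patch): the 0xc0000000 checks in the hunk above test whether a 32-bit integer result can be returned as an ia32 smi, whose payload is a 31-bit signed integer in the range [-2^30, 2^30 - 1]. A small self-contained C++ model of the two checks:

#include <cstdint>

// Mirrors "__ cmp(eax, 0xc0000000); __ j(negative, &non_smi_result);":
// subtracting 0xc0000000 leaves the sign bit of the difference set exactly
// when the top two bits of the value differ, i.e. when the value falls
// outside [-2^30, 2^30 - 1].
bool FitsInSmi(int32_t value) {
  return (static_cast<uint32_t>(value) - 0xc0000000u) < 0x80000000u;
}

// Mirrors "__ test(eax, Immediate(0xc0000000)); __ j(not_zero, &call_runtime);"
// used for SHR: the unsigned result must be non-negative and below 2^30.
bool UnsignedFitsInSmi(uint32_t value) {
  return (value & 0xc0000000u) == 0;
}
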
@@ -8230,21 +8259,20 @@
   // stack in the correct order below the return address.
   __ bind(&call_runtime);
   if (HasArgsInRegisters()) {
-    __ pop(ecx);
-    if (HasArgsReversed()) {
-      __ push(eax);
-      __ push(edx);
-    } else {
-      __ push(edx);
-      __ push(eax);
-    }
-    __ push(ecx);
+    GenerateRegisterArgsPush(masm);
   }
+
   switch (op_) {
     case Token::ADD: {
       // Test for string arguments before calling runtime.
       Label not_strings, not_string1, string1, string1_smi2;
-      Result answer;
+
+      // If this stub has already generated FP-specific code then the arguments
+      // are already in edx, eax
+      if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
+        GenerateLoadArguments(masm);
+      }
+
       __ test(edx, Immediate(kSmiTagMask));
       __ j(zero, &not_string1);
       __ CmpObjectType(edx, FIRST_NONSTRING_TYPE, ecx);
@@ -8333,6 +8361,13 @@
     default:
       UNREACHABLE();
   }
+
+  // Generate an unreachable reference to the DEFAULT stub so that it can be
+  // found at the end of this stub when clearing ICs at GC.
+  if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
+    GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
+    __ TailCallStub(&uninit);
+  }
 }
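
Note (illustrative, not part of the patch): GenerateRegisterArgsPush, called in the hunk above and defined in a later hunk, slips the two register arguments underneath the return address so the runtime call sees them as ordinary stack arguments. A plain C++ model of that stack shuffle, with the stack represented as a vector whose back() is the top of stack:

#include <cstdint>
#include <vector>

void PushRegisterArgs(std::vector<uint32_t>& stack, uint32_t edx, uint32_t eax,
                      bool args_reversed) {
  uint32_t return_address = stack.back();  // __ pop(ecx)
  stack.pop_back();
  if (args_reversed) {                     // HasArgsReversed()
    stack.push_back(eax);
    stack.push_back(edx);
  } else {
    stack.push_back(edx);
    stack.push_back(eax);
  }
  stack.push_back(return_address);         // __ push(ecx)
}
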
@@ -8386,10 +8421,9 @@
 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
   // If arguments are not passed in registers read them from the stack.
-  if (!HasArgsInRegisters()) {
-    __ mov(eax, Operand(esp, 1 * kPointerSize));
-    __ mov(edx, Operand(esp, 2 * kPointerSize));
-  }
+  ASSERT(!HasArgsInRegisters());
+  __ mov(eax, Operand(esp, 1 * kPointerSize));
+  __ mov(edx, Operand(esp, 2 * kPointerSize));
 }
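
Note (illustrative, not part of the patch): with the stack calling convention that the hunk above now asserts, the stub expects the return address at esp[0] and the two operands in the next two slots; the slot nearest the return address is loaded into eax and the one after it into edx. A minimal C++ model of that layout, treating the stack as an array of 32-bit slots:

#include <cstdint>

struct Operands {
  uint32_t eax;  // loaded from esp + 1 * kPointerSize
  uint32_t edx;  // loaded from esp + 2 * kPointerSize
};

inline Operands LoadArguments(const uint32_t* esp) {
  // esp[0] is the return address; the operands occupy the two following slots.
  return Operands{esp[1], esp[2]};
}
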
@@ -8404,6 +8438,75 @@
 }
+void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+  ASSERT(HasArgsInRegisters());
+  __ pop(ecx);
+  if (HasArgsReversed()) {
+    __ push(eax);
+    __ push(edx);
+  } else {
+    __ push(edx);
+    __ push(eax);
+  }
+  __ push(ecx);
+}
+
+
+void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  Label get_result;
+
+  // Keep a copy of operands on the stack and make sure they are also in
+  // edx, eax.
+  if (HasArgsInRegisters()) {
+    GenerateRegisterArgsPush(masm);
+  } else {
+    GenerateLoadArguments(masm);
+  }
+
+  // Internal frame is necessary to handle exceptions properly.
+  __ EnterInternalFrame();
+
+  // Push arguments on stack if the stub expects them there.
+  if (!HasArgsInRegisters()) {
+    __ push(edx);
+    __ push(eax);
+  }
+  // Call the stub proper to get the result in eax.
+  __ call(&get_result);
+  __ LeaveInternalFrame();
+
+  __ pop(ecx);  // Return address.
+  // Left and right arguments are now on top.
+  // Push the operation result. The tail call to BinaryOp_Patch will
+  // return it to the original caller.
+  __ push(eax);
+  // Push this stub's key. Although the operation and the type info are
+  // encoded into the key, the encoding is opaque, so push them too.
+  __ push(Immediate(Smi::FromInt(MinorKey())));
+  __ push(Immediate(Smi::FromInt(op_)));
+  __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
+
+  __ push(ecx);  // Return address.
+
+  // Patch the caller to an appropriate specialized stub
+  // and return the operation result.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch)),
+      6,
+      1);
+
+  // The entry point for the result calculation is assumed to be immediately
+  // after this sequence.
+  __ bind(&get_result);
+}
+
+Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
+  GenericBinaryOpStub stub(key, type_info);
+  HandleScope scope;
+  return stub.GetCode();
+}
+
+
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   // Input on stack:
   //   esp[4]: argument (should be number).