Index: src/ia32/code-stubs-ia32.cc
===================================================================
--- src/ia32/code-stubs-ia32.cc (revision 7552)
+++ src/ia32/code-stubs-ia32.cc (working copy)
@@ -291,166 +291,6 @@
} |
-const char* GenericBinaryOpStub::GetName() { |
- if (name_ != NULL) return name_; |
- const int kMaxNameLength = 100; |
- name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( |
- kMaxNameLength); |
- if (name_ == NULL) return "OOM"; |
- const char* op_name = Token::Name(op_); |
- const char* overwrite_name; |
- switch (mode_) { |
- case NO_OVERWRITE: overwrite_name = "Alloc"; break; |
- case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; |
- case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; |
- default: overwrite_name = "UnknownOverwrite"; break; |
- } |
- |
- OS::SNPrintF(Vector<char>(name_, kMaxNameLength), |
- "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s", |
- op_name, |
- overwrite_name, |
- (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "", |
- args_in_registers_ ? "RegArgs" : "StackArgs", |
- args_reversed_ ? "_R" : "", |
- static_operands_type_.ToString(), |
- BinaryOpIC::GetName(runtime_operands_type_)); |
- return name_; |
-} |
- |
- |
-void GenericBinaryOpStub::GenerateCall( |
- MacroAssembler* masm, |
- Register left, |
- Register right) { |
- if (!ArgsInRegistersSupported()) { |
- // Pass arguments on the stack. |
- __ push(left); |
- __ push(right); |
- } else { |
- // The calling convention with registers is left in edx and right in eax. |
- Register left_arg = edx; |
- Register right_arg = eax; |
- if (!(left.is(left_arg) && right.is(right_arg))) { |
- if (left.is(right_arg) && right.is(left_arg)) { |
- if (IsOperationCommutative()) { |
- SetArgsReversed(); |
- } else { |
- __ xchg(left, right); |
- } |
- } else if (left.is(left_arg)) { |
- __ mov(right_arg, right); |
- } else if (right.is(right_arg)) { |
- __ mov(left_arg, left); |
- } else if (left.is(right_arg)) { |
- if (IsOperationCommutative()) { |
- __ mov(left_arg, right); |
- SetArgsReversed(); |
- } else { |
- // Order of moves important to avoid destroying left argument. |
- __ mov(left_arg, left); |
- __ mov(right_arg, right); |
- } |
- } else if (right.is(left_arg)) { |
- if (IsOperationCommutative()) { |
- __ mov(right_arg, left); |
- SetArgsReversed(); |
- } else { |
- // Order of moves important to avoid destroying right argument. |
- __ mov(right_arg, right); |
- __ mov(left_arg, left); |
- } |
- } else { |
- // Order of moves is not important. |
- __ mov(left_arg, left); |
- __ mov(right_arg, right); |
- } |
- } |
- |
- // Update flags to indicate that arguments are in registers. |
- SetArgsInRegisters(); |
- __ IncrementCounter( |
- masm->isolate()->counters()->generic_binary_stub_calls_regs(), 1); |
- } |
- |
- // Call the stub. |
- __ CallStub(this); |
-} |
- |
- |
-void GenericBinaryOpStub::GenerateCall( |
- MacroAssembler* masm, |
- Register left, |
- Smi* right) { |
- if (!ArgsInRegistersSupported()) { |
- // Pass arguments on the stack. |
- __ push(left); |
- __ push(Immediate(right)); |
- } else { |
- // The calling convention with registers is left in edx and right in eax. |
- Register left_arg = edx; |
- Register right_arg = eax; |
- if (left.is(left_arg)) { |
- __ mov(right_arg, Immediate(right)); |
- } else if (left.is(right_arg) && IsOperationCommutative()) { |
- __ mov(left_arg, Immediate(right)); |
- SetArgsReversed(); |
- } else { |
- // For non-commutative operations, left and right_arg might be |
- // the same register. Therefore, the order of the moves is |
- // important here in order to not overwrite left before moving |
- // it to left_arg. |
- __ mov(left_arg, left); |
- __ mov(right_arg, Immediate(right)); |
- } |
- |
- // Update flags to indicate that arguments are in registers. |
- SetArgsInRegisters(); |
- __ IncrementCounter( |
- masm->isolate()->counters()->generic_binary_stub_calls_regs(), 1); |
- } |
- |
- // Call the stub. |
- __ CallStub(this); |
-} |
- |
- |
-void GenericBinaryOpStub::GenerateCall( |
- MacroAssembler* masm, |
- Smi* left, |
- Register right) { |
- if (!ArgsInRegistersSupported()) { |
- // Pass arguments on the stack. |
- __ push(Immediate(left)); |
- __ push(right); |
- } else { |
- // The calling convention with registers is left in edx and right in eax. |
- Register left_arg = edx; |
- Register right_arg = eax; |
- if (right.is(right_arg)) { |
- __ mov(left_arg, Immediate(left)); |
- } else if (right.is(left_arg) && IsOperationCommutative()) { |
- __ mov(right_arg, Immediate(left)); |
- SetArgsReversed(); |
- } else { |
- // For non-commutative operations, right and left_arg might be |
- // the same register. Therefore, the order of the moves is |
- // important here in order to not overwrite right before moving |
- // it to right_arg. |
- __ mov(right_arg, right); |
- __ mov(left_arg, Immediate(left)); |
- } |
- // Update flags to indicate that arguments are in registers. |
- SetArgsInRegisters(); |
- Counters* counters = masm->isolate()->counters(); |
- __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1); |
- } |
- |
- // Call the stub. |
- __ CallStub(this); |
-} |
- |
- |
class FloatingPointHelper : public AllStatic { |
public: |
@@ -534,762 +374,6 @@
}; |
-void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { |
- // 1. Move arguments into edx, eax except for DIV and MOD, which need the |
- // dividend in eax and edx free for the division. Use eax, ebx for those. |
- Comment load_comment(masm, "-- Load arguments"); |
- Register left = edx; |
- Register right = eax; |
- if (op_ == Token::DIV || op_ == Token::MOD) { |
- left = eax; |
- right = ebx; |
- if (HasArgsInRegisters()) { |
- __ mov(ebx, eax); |
- __ mov(eax, edx); |
- } |
- } |
- if (!HasArgsInRegisters()) { |
- __ mov(right, Operand(esp, 1 * kPointerSize)); |
- __ mov(left, Operand(esp, 2 * kPointerSize)); |
- } |
- |
- if (static_operands_type_.IsSmi()) { |
- if (FLAG_debug_code) { |
- __ AbortIfNotSmi(left); |
- __ AbortIfNotSmi(right); |
- } |
- if (op_ == Token::BIT_OR) { |
- __ or_(right, Operand(left)); |
- GenerateReturn(masm); |
- return; |
- } else if (op_ == Token::BIT_AND) { |
- __ and_(right, Operand(left)); |
- GenerateReturn(masm); |
- return; |
- } else if (op_ == Token::BIT_XOR) { |
- __ xor_(right, Operand(left)); |
- GenerateReturn(masm); |
- return; |
- } |
- } |
- |
- // 2. Prepare the smi check of both operands by oring them together. |
- Comment smi_check_comment(masm, "-- Smi check arguments"); |
- Label not_smis; |
- Register combined = ecx; |
- ASSERT(!left.is(combined) && !right.is(combined)); |
- switch (op_) { |
- case Token::BIT_OR: |
- // Perform the operation into eax and smi check the result. Preserve |
- // eax in case the result is not a smi. |
- ASSERT(!left.is(ecx) && !right.is(ecx)); |
- __ mov(ecx, right); |
- __ or_(right, Operand(left)); // Bitwise or is commutative. |
- combined = right; |
- break; |
- |
- case Token::BIT_XOR: |
- case Token::BIT_AND: |
- case Token::ADD: |
- case Token::SUB: |
- case Token::MUL: |
- case Token::DIV: |
- case Token::MOD: |
- __ mov(combined, right); |
- __ or_(combined, Operand(left)); |
- break; |
- |
- case Token::SHL: |
- case Token::SAR: |
- case Token::SHR: |
- // Move the right operand into ecx for the shift operation, use eax |
- // for the smi check register. |
- ASSERT(!left.is(ecx) && !right.is(ecx)); |
- __ mov(ecx, right); |
- __ or_(right, Operand(left)); |
- combined = right; |
- break; |
- |
- default: |
- break; |
- } |
- |
- // 3. Perform the smi check of the operands. |
- STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case. |
- __ test(combined, Immediate(kSmiTagMask)); |
- __ j(not_zero, ¬_smis, not_taken); |
- |
- // 4. Operands are both smis, perform the operation leaving the result in |
- // eax and check the result if necessary. |
- Comment perform_smi(masm, "-- Perform smi operation"); |
- Label use_fp_on_smis; |
- switch (op_) { |
- case Token::BIT_OR: |
- // Nothing to do. |
- break; |
- |
- case Token::BIT_XOR: |
- ASSERT(right.is(eax)); |
- __ xor_(right, Operand(left)); // Bitwise xor is commutative. |
- break; |
- |
- case Token::BIT_AND: |
- ASSERT(right.is(eax)); |
- __ and_(right, Operand(left)); // Bitwise and is commutative. |
- break; |
- |
- case Token::SHL: |
- // Remove tags from operands (but keep sign). |
- __ SmiUntag(left); |
- __ SmiUntag(ecx); |
- // Perform the operation. |
- __ shl_cl(left); |
- // Check that the *signed* result fits in a smi. |
- __ cmp(left, 0xc0000000); |
- __ j(sign, &use_fp_on_smis, not_taken); |
- // Tag the result and store it in register eax. |
- __ SmiTag(left); |
- __ mov(eax, left); |
- break; |
- |
- case Token::SAR: |
- // Remove tags from operands (but keep sign). |
- __ SmiUntag(left); |
- __ SmiUntag(ecx); |
- // Perform the operation. |
- __ sar_cl(left); |
- // Tag the result and store it in register eax. |
- __ SmiTag(left); |
- __ mov(eax, left); |
- break; |
- |
- case Token::SHR: |
- // Remove tags from operands (but keep sign). |
- __ SmiUntag(left); |
- __ SmiUntag(ecx); |
- // Perform the operation. |
- __ shr_cl(left); |
- // Check that the *unsigned* result fits in a smi. |
- // Neither of the two high-order bits can be set: |
- // - 0x80000000: high bit would be lost when smi tagging. |
- // - 0x40000000: this number would convert to negative when |
- // Smi tagging these two cases can only happen with shifts |
- // by 0 or 1 when handed a valid smi. |
- __ test(left, Immediate(0xc0000000)); |
- __ j(not_zero, slow, not_taken); |
- // Tag the result and store it in register eax. |
- __ SmiTag(left); |
- __ mov(eax, left); |
- break; |
- |
- case Token::ADD: |
- ASSERT(right.is(eax)); |
- __ add(right, Operand(left)); // Addition is commutative. |
- __ j(overflow, &use_fp_on_smis, not_taken); |
- break; |
- |
- case Token::SUB: |
- __ sub(left, Operand(right)); |
- __ j(overflow, &use_fp_on_smis, not_taken); |
- __ mov(eax, left); |
- break; |
- |
- case Token::MUL: |
- // If the smi tag is 0 we can just leave the tag on one operand. |
- STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case. |
- // We can't revert the multiplication if the result is not a smi |
- // so save the right operand. |
- __ mov(ebx, right); |
- // Remove tag from one of the operands (but keep sign). |
- __ SmiUntag(right); |
- // Do multiplication. |
- __ imul(right, Operand(left)); // Multiplication is commutative. |
- __ j(overflow, &use_fp_on_smis, not_taken); |
- // Check for negative zero result. Use combined = left | right. |
- __ NegativeZeroTest(right, combined, &use_fp_on_smis); |
- break; |
- |
- case Token::DIV: |
- // We can't revert the division if the result is not a smi so |
- // save the left operand. |
- __ mov(edi, left); |
- // Check for 0 divisor. |
- __ test(right, Operand(right)); |
- __ j(zero, &use_fp_on_smis, not_taken); |
- // Sign extend left into edx:eax. |
- ASSERT(left.is(eax)); |
- __ cdq(); |
- // Divide edx:eax by right. |
- __ idiv(right); |
- // Check for the corner case of dividing the most negative smi by |
- // -1. We cannot use the overflow flag, since it is not set by idiv |
- // instruction. |
- STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); |
- __ cmp(eax, 0x40000000); |
- __ j(equal, &use_fp_on_smis); |
- // Check for negative zero result. Use combined = left | right. |
- __ NegativeZeroTest(eax, combined, &use_fp_on_smis); |
- // Check that the remainder is zero. |
- __ test(edx, Operand(edx)); |
- __ j(not_zero, &use_fp_on_smis); |
- // Tag the result and store it in register eax. |
- __ SmiTag(eax); |
- break; |
- |
- case Token::MOD: |
- // Check for 0 divisor. |
- __ test(right, Operand(right)); |
- __ j(zero, ¬_smis, not_taken); |
- |
- // Sign extend left into edx:eax. |
- ASSERT(left.is(eax)); |
- __ cdq(); |
- // Divide edx:eax by right. |
- __ idiv(right); |
- // Check for negative zero result. Use combined = left | right. |
- __ NegativeZeroTest(edx, combined, slow); |
- // Move remainder to register eax. |
- __ mov(eax, edx); |
- break; |
- |
- default: |
- UNREACHABLE(); |
- } |
- |
- // 5. Emit return of result in eax. |
- GenerateReturn(masm); |
- |
- // 6. For some operations emit inline code to perform floating point |
- // operations on known smis (e.g., if the result of the operation |
- // overflowed the smi range). |
- switch (op_) { |
- case Token::SHL: { |
- Comment perform_float(masm, "-- Perform float operation on smis"); |
- __ bind(&use_fp_on_smis); |
- if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) { |
- // Result we want is in left == edx, so we can put the allocated heap |
- // number in eax. |
- __ AllocateHeapNumber(eax, ecx, ebx, slow); |
- // Store the result in the HeapNumber and return. |
- if (CpuFeatures::IsSupported(SSE2)) { |
- CpuFeatures::Scope use_sse2(SSE2); |
- __ cvtsi2sd(xmm0, Operand(left)); |
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
- } else { |
- // It's OK to overwrite the right argument on the stack because we |
- // are about to return. |
- __ mov(Operand(esp, 1 * kPointerSize), left); |
- __ fild_s(Operand(esp, 1 * kPointerSize)); |
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
- } |
- GenerateReturn(masm); |
- } else { |
- ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI); |
- __ jmp(slow); |
- } |
- break; |
- } |
- |
- case Token::ADD: |
- case Token::SUB: |
- case Token::MUL: |
- case Token::DIV: { |
- Comment perform_float(masm, "-- Perform float operation on smis"); |
- __ bind(&use_fp_on_smis); |
- // Restore arguments to edx, eax. |
- switch (op_) { |
- case Token::ADD: |
- // Revert right = right + left. |
- __ sub(right, Operand(left)); |
- break; |
- case Token::SUB: |
- // Revert left = left - right. |
- __ add(left, Operand(right)); |
- break; |
- case Token::MUL: |
- // Right was clobbered but a copy is in ebx. |
- __ mov(right, ebx); |
- break; |
- case Token::DIV: |
- // Left was clobbered but a copy is in edi. Right is in ebx for |
- // division. |
- __ mov(edx, edi); |
- __ mov(eax, right); |
- break; |
- default: UNREACHABLE(); |
- break; |
- } |
- if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) { |
- __ AllocateHeapNumber(ecx, ebx, no_reg, slow); |
- if (CpuFeatures::IsSupported(SSE2)) { |
- CpuFeatures::Scope use_sse2(SSE2); |
- FloatingPointHelper::LoadSSE2Smis(masm, ebx); |
- switch (op_) { |
- case Token::ADD: __ addsd(xmm0, xmm1); break; |
- case Token::SUB: __ subsd(xmm0, xmm1); break; |
- case Token::MUL: __ mulsd(xmm0, xmm1); break; |
- case Token::DIV: __ divsd(xmm0, xmm1); break; |
- default: UNREACHABLE(); |
- } |
- __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0); |
- } else { // SSE2 not available, use FPU. |
- FloatingPointHelper::LoadFloatSmis(masm, ebx); |
- switch (op_) { |
- case Token::ADD: __ faddp(1); break; |
- case Token::SUB: __ fsubp(1); break; |
- case Token::MUL: __ fmulp(1); break; |
- case Token::DIV: __ fdivp(1); break; |
- default: UNREACHABLE(); |
- } |
- __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset)); |
- } |
- __ mov(eax, ecx); |
- GenerateReturn(masm); |
- } else { |
- ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI); |
- __ jmp(slow); |
- } |
- break; |
- } |
- |
- default: |
- break; |
- } |
- |
- // 7. Non-smi operands, fall out to the non-smi code with the operands in |
- // edx and eax. |
- Comment done_comment(masm, "-- Enter non-smi code"); |
- __ bind(¬_smis); |
- switch (op_) { |
- case Token::BIT_OR: |
- case Token::SHL: |
- case Token::SAR: |
- case Token::SHR: |
- // Right operand is saved in ecx and eax was destroyed by the smi |
- // check. |
- __ mov(eax, ecx); |
- break; |
- |
- case Token::DIV: |
- case Token::MOD: |
- // Operands are in eax, ebx at this point. |
- __ mov(edx, eax); |
- __ mov(eax, ebx); |
- break; |
- |
- default: |
- break; |
- } |
-} |
- |
- |
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) { |
- Label call_runtime; |
- |
- Counters* counters = masm->isolate()->counters(); |
- __ IncrementCounter(counters->generic_binary_stub_calls(), 1); |
- |
- if (runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI) { |
- Label slow; |
- if (ShouldGenerateSmiCode()) GenerateSmiCode(masm, &slow); |
- __ bind(&slow); |
- GenerateTypeTransition(masm); |
- } |
- |
- // Generate fast case smi code if requested. This flag is set when the fast |
- // case smi code is not generated by the caller. Generating it here will speed |
- // up common operations. |
- if (ShouldGenerateSmiCode()) { |
- GenerateSmiCode(masm, &call_runtime); |
- } else if (op_ != Token::MOD) { // MOD goes straight to runtime. |
- if (!HasArgsInRegisters()) { |
- GenerateLoadArguments(masm); |
- } |
- } |
- |
- // Floating point case. |
- if (ShouldGenerateFPCode()) { |
- switch (op_) { |
- case Token::ADD: |
- case Token::SUB: |
- case Token::MUL: |
- case Token::DIV: { |
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT && |
- HasSmiCodeInStub()) { |
- // Execution reaches this point when the first non-smi argument occurs |
- // (and only if smi code is generated). This is the right moment to |
- // patch to HEAP_NUMBERS state. The transition is attempted only for |
- // the four basic operations. The stub stays in the DEFAULT state |
- // forever for all other operations (also if smi code is skipped). |
- GenerateTypeTransition(masm); |
- break; |
- } |
- |
- Label not_floats; |
- if (CpuFeatures::IsSupported(SSE2)) { |
- CpuFeatures::Scope use_sse2(SSE2); |
- if (static_operands_type_.IsNumber()) { |
- if (FLAG_debug_code) { |
- // Assert at runtime that inputs are only numbers. |
- __ AbortIfNotNumber(edx); |
- __ AbortIfNotNumber(eax); |
- } |
- if (static_operands_type_.IsSmi()) { |
- if (FLAG_debug_code) { |
- __ AbortIfNotSmi(edx); |
- __ AbortIfNotSmi(eax); |
- } |
- FloatingPointHelper::LoadSSE2Smis(masm, ecx); |
- } else { |
- FloatingPointHelper::LoadSSE2Operands(masm); |
- } |
- } else { |
- FloatingPointHelper::LoadSSE2Operands(masm, ¬_floats); |
- } |
- |
- switch (op_) { |
- case Token::ADD: __ addsd(xmm0, xmm1); break; |
- case Token::SUB: __ subsd(xmm0, xmm1); break; |
- case Token::MUL: __ mulsd(xmm0, xmm1); break; |
- case Token::DIV: __ divsd(xmm0, xmm1); break; |
- default: UNREACHABLE(); |
- } |
- GenerateHeapResultAllocation(masm, &call_runtime); |
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
- GenerateReturn(masm); |
- } else { // SSE2 not available, use FPU. |
- if (static_operands_type_.IsNumber()) { |
- if (FLAG_debug_code) { |
- // Assert at runtime that inputs are only numbers. |
- __ AbortIfNotNumber(edx); |
- __ AbortIfNotNumber(eax); |
- } |
- } else { |
- FloatingPointHelper::CheckFloatOperands(masm, ¬_floats, ebx); |
- } |
- FloatingPointHelper::LoadFloatOperands( |
- masm, |
- ecx, |
- FloatingPointHelper::ARGS_IN_REGISTERS); |
- switch (op_) { |
- case Token::ADD: __ faddp(1); break; |
- case Token::SUB: __ fsubp(1); break; |
- case Token::MUL: __ fmulp(1); break; |
- case Token::DIV: __ fdivp(1); break; |
- default: UNREACHABLE(); |
- } |
- Label after_alloc_failure; |
- GenerateHeapResultAllocation(masm, &after_alloc_failure); |
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
- GenerateReturn(masm); |
- __ bind(&after_alloc_failure); |
- __ ffree(); |
- __ jmp(&call_runtime); |
- } |
- __ bind(¬_floats); |
- if (runtime_operands_type_ == BinaryOpIC::DEFAULT && |
- !HasSmiCodeInStub()) { |
- // Execution reaches this point when the first non-number argument |
- // occurs (and only if smi code is skipped from the stub, otherwise |
- // the patching has already been done earlier in this case branch). |
- // Try patching to STRINGS for ADD operation. |
- if (op_ == Token::ADD) { |
- GenerateTypeTransition(masm); |
- } |
- } |
- break; |
- } |
- case Token::MOD: { |
- // For MOD we go directly to runtime in the non-smi case. |
- break; |
- } |
- case Token::BIT_OR: |
- case Token::BIT_AND: |
- case Token::BIT_XOR: |
- case Token::SAR: |
- case Token::SHL: |
- case Token::SHR: { |
- Label non_smi_result; |
- FloatingPointHelper::LoadAsIntegers(masm, |
- static_operands_type_, |
- use_sse3_, |
- &call_runtime); |
- switch (op_) { |
- case Token::BIT_OR: __ or_(eax, Operand(ecx)); break; |
- case Token::BIT_AND: __ and_(eax, Operand(ecx)); break; |
- case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break; |
- case Token::SAR: __ sar_cl(eax); break; |
- case Token::SHL: __ shl_cl(eax); break; |
- case Token::SHR: __ shr_cl(eax); break; |
- default: UNREACHABLE(); |
- } |
- if (op_ == Token::SHR) { |
- // Check if result is non-negative and fits in a smi. |
- __ test(eax, Immediate(0xc0000000)); |
- __ j(not_zero, &call_runtime); |
- } else { |
- // Check if result fits in a smi. |
- __ cmp(eax, 0xc0000000); |
- __ j(negative, &non_smi_result); |
- } |
- // Tag smi result and return. |
- __ SmiTag(eax); |
- GenerateReturn(masm); |
- |
- // All ops except SHR return a signed int32 that we load in |
- // a HeapNumber. |
- if (op_ != Token::SHR) { |
- __ bind(&non_smi_result); |
- // Allocate a heap number if needed. |
- __ mov(ebx, Operand(eax)); // ebx: result |
- NearLabel skip_allocation; |
- switch (mode_) { |
- case OVERWRITE_LEFT: |
- case OVERWRITE_RIGHT: |
- // If the operand was an object, we skip the |
- // allocation of a heap number. |
- __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ? |
- 1 * kPointerSize : 2 * kPointerSize)); |
- __ test(eax, Immediate(kSmiTagMask)); |
- __ j(not_zero, &skip_allocation, not_taken); |
- // Fall through! |
- case NO_OVERWRITE: |
- __ AllocateHeapNumber(eax, ecx, edx, &call_runtime); |
- __ bind(&skip_allocation); |
- break; |
- default: UNREACHABLE(); |
- } |
- // Store the result in the HeapNumber and return. |
- if (CpuFeatures::IsSupported(SSE2)) { |
- CpuFeatures::Scope use_sse2(SSE2); |
- __ cvtsi2sd(xmm0, Operand(ebx)); |
- __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0); |
- } else { |
- __ mov(Operand(esp, 1 * kPointerSize), ebx); |
- __ fild_s(Operand(esp, 1 * kPointerSize)); |
- __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset)); |
- } |
- GenerateReturn(masm); |
- } |
- break; |
- } |
- default: UNREACHABLE(); break; |
- } |
- } |
- |
- // If all else fails, use the runtime system to get the correct |
- // result. If arguments was passed in registers now place them on the |
- // stack in the correct order below the return address. |
- |
- // Avoid hitting the string ADD code below when allocation fails in |
- // the floating point code above. |
- if (op_ != Token::ADD) { |
- __ bind(&call_runtime); |
- } |
- |
- if (HasArgsInRegisters()) { |
- GenerateRegisterArgsPush(masm); |
- } |
- |
- switch (op_) { |
- case Token::ADD: { |
- // Test for string arguments before calling runtime. |
- |
- // If this stub has already generated FP-specific code then the arguments |
- // are already in edx, eax |
- if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) { |
- GenerateLoadArguments(masm); |
- } |
- |
- // Registers containing left and right operands respectively. |
- Register lhs, rhs; |
- if (HasArgsReversed()) { |
- lhs = eax; |
- rhs = edx; |
- } else { |
- lhs = edx; |
- rhs = eax; |
- } |
- |
- // Test if left operand is a string. |
- NearLabel lhs_not_string; |
- __ test(lhs, Immediate(kSmiTagMask)); |
- __ j(zero, &lhs_not_string); |
- __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx); |
- __ j(above_equal, &lhs_not_string); |
- |
- StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB); |
- __ TailCallStub(&string_add_left_stub); |
- |
- NearLabel call_runtime_with_args; |
- // Left operand is not a string, test right. |
- __ bind(&lhs_not_string); |
- __ test(rhs, Immediate(kSmiTagMask)); |
- __ j(zero, &call_runtime_with_args); |
- __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx); |
- __ j(above_equal, &call_runtime_with_args); |
- |
- StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB); |
- __ TailCallStub(&string_add_right_stub); |
- |
- // Neither argument is a string. |
- __ bind(&call_runtime); |
- if (HasArgsInRegisters()) { |
- GenerateRegisterArgsPush(masm); |
- } |
- __ bind(&call_runtime_with_args); |
- __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); |
- break; |
- } |
- case Token::SUB: |
- __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); |
- break; |
- case Token::MUL: |
- __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); |
- break; |
- case Token::DIV: |
- __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); |
- break; |
- case Token::MOD: |
- __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); |
- break; |
- case Token::BIT_OR: |
- __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); |
- break; |
- case Token::BIT_AND: |
- __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); |
- break; |
- case Token::BIT_XOR: |
- __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); |
- break; |
- case Token::SAR: |
- __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); |
- break; |
- case Token::SHL: |
- __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); |
- break; |
- case Token::SHR: |
- __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); |
- break; |
- default: |
- UNREACHABLE(); |
- } |
-} |
- |
- |
-void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm, |
- Label* alloc_failure) { |
- Label skip_allocation; |
- OverwriteMode mode = mode_; |
- if (HasArgsReversed()) { |
- if (mode == OVERWRITE_RIGHT) { |
- mode = OVERWRITE_LEFT; |
- } else if (mode == OVERWRITE_LEFT) { |
- mode = OVERWRITE_RIGHT; |
- } |
- } |
- switch (mode) { |
- case OVERWRITE_LEFT: { |
- // If the argument in edx is already an object, we skip the |
- // allocation of a heap number. |
- __ test(edx, Immediate(kSmiTagMask)); |
- __ j(not_zero, &skip_allocation, not_taken); |
- // Allocate a heap number for the result. Keep eax and edx intact |
- // for the possible runtime call. |
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); |
- // Now edx can be overwritten losing one of the arguments as we are |
- // now done and will not need it any more. |
- __ mov(edx, Operand(ebx)); |
- __ bind(&skip_allocation); |
- // Use object in edx as a result holder |
- __ mov(eax, Operand(edx)); |
- break; |
- } |
- case OVERWRITE_RIGHT: |
- // If the argument in eax is already an object, we skip the |
- // allocation of a heap number. |
- __ test(eax, Immediate(kSmiTagMask)); |
- __ j(not_zero, &skip_allocation, not_taken); |
- // Fall through! |
- case NO_OVERWRITE: |
- // Allocate a heap number for the result. Keep eax and edx intact |
- // for the possible runtime call. |
- __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure); |
- // Now eax can be overwritten losing one of the arguments as we are |
- // now done and will not need it any more. |
- __ mov(eax, ebx); |
- __ bind(&skip_allocation); |
- break; |
- default: UNREACHABLE(); |
- } |
-} |
- |
- |
-void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { |
- // If arguments are not passed in registers read them from the stack. |
- ASSERT(!HasArgsInRegisters()); |
- __ mov(eax, Operand(esp, 1 * kPointerSize)); |
- __ mov(edx, Operand(esp, 2 * kPointerSize)); |
-} |
- |
- |
-void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { |
- // If arguments are not passed in registers remove them from the stack before |
- // returning. |
- if (!HasArgsInRegisters()) { |
- __ ret(2 * kPointerSize); // Remove both operands |
- } else { |
- __ ret(0); |
- } |
-} |
- |
- |
-void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
- ASSERT(HasArgsInRegisters()); |
- __ pop(ecx); |
- if (HasArgsReversed()) { |
- __ push(eax); |
- __ push(edx); |
- } else { |
- __ push(edx); |
- __ push(eax); |
- } |
- __ push(ecx); |
-} |
- |
- |
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
- // Ensure the operands are on the stack. |
- if (HasArgsInRegisters()) { |
- GenerateRegisterArgsPush(masm); |
- } |
- |
- __ pop(ecx); // Save return address. |
- |
- // Left and right arguments are now on top. |
- // Push this stub's key. Although the operation and the type info are |
- // encoded into the key, the encoding is opaque, so push them too. |
- __ push(Immediate(Smi::FromInt(MinorKey()))); |
- __ push(Immediate(Smi::FromInt(op_))); |
- __ push(Immediate(Smi::FromInt(runtime_operands_type_))); |
- |
- __ push(ecx); // Push return address. |
- |
- // Patch the caller to an appropriate specialized stub and return the |
- // operation result to the caller of the stub. |
- __ TailCallExternalReference( |
- ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()), |
- 5, |
- 1); |
-} |
- |
- |
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { |
- GenericBinaryOpStub stub(key, type_info); |
- return stub.GetCode(); |
-} |
- |
- |
Handle<Code> GetTypeRecordingBinaryOpStub(int key, |
TRBinaryOpIC::TypeInfo type_info, |
TRBinaryOpIC::TypeInfo result_type_info) { |