Index: src/arm/code-stubs-arm.cc
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 5c5231bb8861f2c5be13e52e4c8b54cac5ebefa8..2c7fb7804c43ca0665eb3149f75bccdc99f43fa0 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -168,18 +168,6 @@ void CompareNilICStub::InitializeInterfaceDescriptor(
 }
-void BinaryOpStub::InitializeInterfaceDescriptor(
-    Isolate* isolate,
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r1, r0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate));
-}
-
-
 static void InitializeArrayConstructorDescriptor(
     Isolate* isolate,
     CodeStubInterfaceDescriptor* descriptor,
@@ -1197,6 +1185,993 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
 }
+// Generates code to call a C function to do a double operation.
+// This code never falls through, but returns with a heap number containing
+// the result in r0.
+// Register heap_number_result must hold a heap number in which the
+// result of the operation will be stored.
+// Requires the following layout on entry:
+// d0: Left value.
+// d1: Right value.
+// With the soft-float ABI, r0, r1, r2 and r3 are used as well.
+static void CallCCodeForDoubleOperation(MacroAssembler* masm,
+                                        Token::Value op,
+                                        Register heap_number_result,
+                                        Register scratch) {
+  // Assert that heap_number_result is callee-saved.
+  // We currently always use r5 to pass it.
+  ASSERT(heap_number_result.is(r5));
+
+  // Push the current return address before the C call. Return will be
+  // through pop(pc) below.
+  __ push(lr);
+  __ PrepareCallCFunction(0, 2, scratch);
+  if (!masm->use_eabi_hardfloat()) {
+    __ vmov(r0, r1, d0);
+    __ vmov(r2, r3, d1);
+  }
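+  // With the soft-float ABI the doubles travel in core registers: vmov moves
+  // the low word of d0 into r0 and the high word into r1. For example, 1.5
+  // (IEEE-754 bits 0x3FF8000000000000) is passed as r0 = 0x00000000 and
+  // r1 = 0x3FF80000 on little-endian ARM.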
+  {
+    AllowExternalCallThatCantCauseGC scope(masm);
+    __ CallCFunction(
+        ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2);
+  }
+  // Store the answer in the overwritable heap number. The double is returned
+  // in registers r0 and r1 or in d0.
+  if (masm->use_eabi_hardfloat()) {
+    __ vstr(d0, FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+  } else {
+    __ Strd(r0, r1,
+            FieldMemOperand(heap_number_result, HeapNumber::kValueOffset));
+  }
+  // Place heap_number_result in r0 and return to the pushed return address.
+  __ mov(r0, Operand(heap_number_result));
+  __ pop(pc);
+}
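+// Note: the external reference above resolves to a C helper selected per
+// token by ExternalReference::double_fp_operation; conceptually (a sketch,
+// not the literal runtime function) it behaves like:
+//   double binary_op(double left, double right) {
+//     return fmod(left, right);  // for Token::MOD; +, -, * and / analogously
+//   }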
+
+
+void BinaryOpStub::Initialize() {
+  platform_specific_bit_ = true;  // VFP2 is a base requirement for V8
+}
+
+
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
+  Label get_result;
+
+  __ Push(r1, r0);
+
+  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
+  __ push(r2);
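+  // Stack layout for IC::kBinaryOp_Patch (three arguments, matching the tail
+  // call below): [sp + 8] left (r1), [sp + 4] right (r0), [sp] MinorKey smi.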
+
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kBinaryOp_Patch),
+                        masm->isolate()),
+      3,
+      1);
+}
+
+
+void BinaryOpStub::GenerateTypeTransitionWithSavedArgs(
+    MacroAssembler* masm) {
+  UNIMPLEMENTED();
+}
+
+
+void BinaryOpStub_GenerateSmiSmiOperation(MacroAssembler* masm,
+                                          Token::Value op,
+                                          Register scratch1,
+                                          Register scratch2) {
+  Register left = r1;
+  Register right = r0;
+
+  ASSERT(right.is(r0));
+  ASSERT(!AreAliased(left, right, scratch1, scratch2, ip));
+  STATIC_ASSERT(kSmiTag == 0);
+
+  Label not_smi_result;
+  switch (op) {
+    case Token::ADD:
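+      // Tagged smis can be added directly: (a << 1) + (b << 1) == (a + b) << 1,
+      // and the add sets the V flag exactly when the untagged sum overflows
+      // the 31-bit smi range.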
+      __ add(right, left, Operand(right), SetCC);  // Add optimistically.
+      __ Ret(vc);
+      __ sub(right, right, Operand(left));  // Revert optimistic add.
+      break;
+    case Token::SUB:
+      __ sub(right, left, Operand(right), SetCC);  // Subtract optimistically.
+      __ Ret(vc);
+      __ sub(right, left, Operand(right));  // Revert optimistic subtract.
+      break;
+    case Token::MUL:
+      // Remove tag from one of the operands. This way the multiplication result
+      // will be a smi if it fits the smi range.
+      __ SmiUntag(ip, right);
+      // Do multiplication:
+      // scratch1 = lower 32 bits of ip * left.
+      // scratch2 = higher 32 bits of ip * left.
+      __ smull(scratch1, scratch2, left, ip);
+      // Check for overflowing the smi range - no overflow if the upper 33 bits
+      // of the result are identical.
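+      // e.g. smi 0x10000 * smi 0x10000: scratch2:scratch1 is
+      // 0x00000002:00000000, and scratch1 >> 31 (0) != scratch2 (2), so the
+      // product does not fit in a smi.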
+      __ mov(ip, Operand(scratch1, ASR, 31));
+      __ cmp(ip, Operand(scratch2));
+      __ b(ne, &not_smi_result);
+      // Go slow on zero result to handle -0.
+      __ cmp(scratch1, Operand::Zero());
+      __ mov(right, Operand(scratch1), LeaveCC, ne);
+      __ Ret(ne);
+      // We need -0 if we were multiplying a negative number with 0 to get 0.
+      // We know one of them was zero.
+      __ add(scratch2, right, Operand(left), SetCC);
+      __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
+      __ Ret(pl);  // Return smi 0 if the non-zero one was positive.
+      // We fall through here if we multiplied a negative number with 0, because
+      // that would mean we should produce -0.
+      break;
+    case Token::DIV: {
+      Label div_with_sdiv;
+
+      // Check for 0 divisor.
+      __ cmp(right, Operand::Zero());
+      __ b(eq, &not_smi_result);
+
+      // Check for power of two on the right hand side.
+      __ sub(scratch1, right, Operand(1));
+      __ tst(scratch1, right);
+      if (CpuFeatures::IsSupported(SUDIV)) {
+        __ b(ne, &div_with_sdiv);
+        // Check for no remainder.
+        __ tst(left, scratch1);
+        __ b(ne, &not_smi_result);
+        // Check for positive left hand side.
+        __ cmp(left, Operand::Zero());
+        __ b(mi, &div_with_sdiv);
+      } else {
+        __ b(ne, &not_smi_result);
+        // Check for positive and no remainder.
+        __ orr(scratch2, scratch1, Operand(0x80000000u));
+        __ tst(left, scratch2);
+        __ b(ne, &not_smi_result);
+      }
+
+      // Perform division by shifting.
+      __ clz(scratch1, scratch1);
+      __ rsb(scratch1, scratch1, Operand(31));
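+      // e.g. untagged right 8 is tagged 16: scratch1 = 31 - clz(15) = 3, and
+      // shifting the tagged left value right by 3 yields the tagged quotient,
+      // because the smi tag is itself a one-bit shift and the remainder bits
+      // are known to be zero.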
+      __ mov(right, Operand(left, LSR, scratch1));
+      __ Ret();
+
+      if (CpuFeatures::IsSupported(SUDIV)) {
+        CpuFeatureScope scope(masm, SUDIV);
+        Label result_not_zero;
+
+        __ bind(&div_with_sdiv);
+        // Do division.
+        __ sdiv(scratch1, left, right);
+        // Check that the remainder is zero.
+        __ mls(scratch2, scratch1, right, left);
+        __ cmp(scratch2, Operand::Zero());
+        __ b(ne, &not_smi_result);
+        // Check for negative zero result.
+        __ cmp(scratch1, Operand::Zero());
+        __ b(ne, &result_not_zero);
+        __ cmp(right, Operand::Zero());
+        __ b(lt, &not_smi_result);
+        __ bind(&result_not_zero);
+        // Check for the corner case of dividing the most negative smi by -1.
+        __ cmp(scratch1, Operand(0x40000000));
+        __ b(eq, &not_smi_result);
+        // Tag and return the result.
+        __ SmiTag(right, scratch1);
+        __ Ret();
+      }
+      break;
+    }
+    case Token::MOD: {
+      Label modulo_with_sdiv;
+
+      if (CpuFeatures::IsSupported(SUDIV)) {
+        // Check for x % 0.
+        __ cmp(right, Operand::Zero());
+        __ b(eq, &not_smi_result);
+
+        // Check for two positive smis.
+        __ orr(scratch1, left, Operand(right));
+        __ tst(scratch1, Operand(0x80000000u));
+        __ b(ne, &modulo_with_sdiv);
+
+        // Check for power of two on the right hand side.
+        __ sub(scratch1, right, Operand(1));
+        __ tst(scratch1, right);
+        __ b(ne, &modulo_with_sdiv);
+      } else {
+        // Check for two positive smis.
+        __ orr(scratch1, left, Operand(right));
+        __ tst(scratch1, Operand(0x80000000u));
+        __ b(ne, &not_smi_result);
+
+        // Check for power of two on the right hand side.
+        __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result);
+      }
+
+      // Perform modulus by masking (scratch1 contains right - 1).
+      __ and_(right, left, Operand(scratch1));
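+      // e.g. right = smi 8 (tagged 16): the mask 15 keeps the low three
+      // untagged bits plus the zero tag bit, i.e. the tagged value of
+      // left % 8 for non-negative left.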
+      __ Ret();
+
+      if (CpuFeatures::IsSupported(SUDIV)) {
+        CpuFeatureScope scope(masm, SUDIV);
+        __ bind(&modulo_with_sdiv);
+        __ mov(scratch2, right);
+        // Perform modulus with sdiv and mls.
+        __ sdiv(scratch1, left, right);
+        __ mls(right, scratch1, right, left);
+        // Return if the result is not 0.
+        __ cmp(right, Operand::Zero());
+        __ Ret(ne);
+        // The result is 0, check for -0 case.
+        __ cmp(left, Operand::Zero());
+        __ Ret(pl);
+        // This is a -0 case, restore the value of right.
+        __ mov(right, scratch2);
+        // We fall through here to not_smi_result to produce -0.
+      }
+      break;
+    }
+    case Token::BIT_OR:
+      __ orr(right, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_AND:
+      __ and_(right, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::BIT_XOR:
+      __ eor(right, left, Operand(right));
+      __ Ret();
+      break;
+    case Token::SAR:
+      // Remove tags from right operand.
+      __ GetLeastBitsFromSmi(scratch1, right, 5);
+      __ mov(right, Operand(left, ASR, scratch1));
+      // Smi tag result.
+      __ bic(right, right, Operand(kSmiTagMask));
+      __ Ret();
+      break;
+    case Token::SHR:
+      // Remove tags from operands. We can't do this on a 31-bit number
+      // because then the 0s get shifted into bit 30 instead of bit 31.
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ mov(scratch1, Operand(scratch1, LSR, scratch2));
+      // Unsigned shift is not allowed to produce a negative number, so
+      // check the sign bit and the sign bit after Smi tagging.
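+      // A smi holds 31 signed bits and SHR must yield a non-negative value,
+      // so the untagged result has to fit in 30 bits: 0xc0000000 tests
+      // bits 31 and 30 at once.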
+      __ tst(scratch1, Operand(0xc0000000));
+      __ b(ne, &not_smi_result);
+      // Smi tag result.
+      __ SmiTag(right, scratch1);
+      __ Ret();
+      break;
+    case Token::SHL:
+      // Remove tags from operands.
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ mov(scratch1, Operand(scratch1, LSL, scratch2));
+      // Check that the signed result fits in a Smi.
+      __ TrySmiTag(right, scratch1, &not_smi_result);
+      __ Ret();
+      break;
+    default:
+      UNREACHABLE();
+  }
+  __ bind(&not_smi_result);
+}
+
+
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+                                               Register result,
+                                               Register heap_number_map,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required,
+                                               OverwriteMode mode);
+
+
+void BinaryOpStub_GenerateFPOperation(MacroAssembler* masm,
+                                      BinaryOpIC::TypeInfo left_type,
+                                      BinaryOpIC::TypeInfo right_type,
+                                      bool smi_operands,
+                                      Label* not_numbers,
+                                      Label* gc_required,
+                                      Label* miss,
+                                      Token::Value op,
+                                      OverwriteMode mode,
+                                      Register scratch1,
+                                      Register scratch2,
+                                      Register scratch3,
+                                      Register scratch4) {
+  Register left = r1;
+  Register right = r0;
+  Register result = scratch3;
+  ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
+
+  ASSERT(smi_operands || (not_numbers != NULL));
+  if (smi_operands) {
+    __ AssertSmi(left);
+    __ AssertSmi(right);
+  }
+  if (left_type == BinaryOpIC::SMI) {
+    __ JumpIfNotSmi(left, miss);
+  }
+  if (right_type == BinaryOpIC::SMI) {
+    __ JumpIfNotSmi(right, miss);
+  }
+
+  Register heap_number_map = scratch4;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+  switch (op) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD: {
+      // Allocate new heap number for result.
+      BinaryOpStub_GenerateHeapResultAllocation(
+          masm, result, heap_number_map, scratch1, scratch2, gc_required, mode);
+
+      // Load left and right operands into d0 and d1.
+      if (smi_operands) {
+        __ SmiToDouble(d1, right);
+        __ SmiToDouble(d0, left);
+      } else {
+        // Load right operand into d1.
+        if (right_type == BinaryOpIC::INT32) {
+          __ LoadNumberAsInt32Double(
+              right, d1, heap_number_map, scratch1, d8, miss);
+        } else {
+          Label* fail = (right_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
+          __ LoadNumber(right, d1, heap_number_map, scratch1, fail);
+        }
+        // Load left operand into d0.
+        if (left_type == BinaryOpIC::INT32) {
+          __ LoadNumberAsInt32Double(
+              left, d0, heap_number_map, scratch1, d8, miss);
+        } else {
+          Label* fail = (left_type == BinaryOpIC::NUMBER) ? miss : not_numbers;
+          __ LoadNumber(
+              left, d0, heap_number_map, scratch1, fail);
+        }
+      }
+
+      // Calculate the result.
+      if (op != Token::MOD) {
+        // Using VFP registers:
+        // d0: Left value
+        // d1: Right value
+        switch (op) {
+          case Token::ADD:
+            __ vadd(d5, d0, d1);
+            break;
+          case Token::SUB:
+            __ vsub(d5, d0, d1);
+            break;
+          case Token::MUL:
+            __ vmul(d5, d0, d1);
+            break;
+          case Token::DIV:
+            __ vdiv(d5, d0, d1);
+            break;
+          default:
+            UNREACHABLE();
+        }
+
+        __ sub(r0, result, Operand(kHeapObjectTag));
+        __ vstr(d5, r0, HeapNumber::kValueOffset);
+        __ add(r0, r0, Operand(kHeapObjectTag));
+        __ Ret();
+      } else {
+        // Call the C function to handle the double operation.
+        CallCCodeForDoubleOperation(masm, op, result, scratch1);
+        if (FLAG_debug_code) {
+          __ stop("Unreachable code.");
+        }
+      }
+      break;
+    }
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::SAR:
+    case Token::SHR:
+    case Token::SHL: {
+      if (smi_operands) {
+        __ SmiUntag(r3, left);
+        __ SmiUntag(r2, right);
+      } else {
+        // Convert operands to 32-bit integers. Right in r2 and left in r3.
+        __ TruncateNumberToI(left, r3, heap_number_map, scratch1, not_numbers);
+        __ TruncateNumberToI(right, r2, heap_number_map, scratch1, not_numbers);
+      }
+
+      Label result_not_a_smi;
+      switch (op) {
+        case Token::BIT_OR:
+          __ orr(r2, r3, Operand(r2));
+          break;
+        case Token::BIT_XOR:
+          __ eor(r2, r3, Operand(r2));
+          break;
+        case Token::BIT_AND:
+          __ and_(r2, r3, Operand(r2));
+          break;
+        case Token::SAR:
+          // Use only the 5 least significant bits of the shift count.
+          __ GetLeastBitsFromInt32(r2, r2, 5);
+          __ mov(r2, Operand(r3, ASR, r2));
+          break;
+        case Token::SHR:
+          // Use only the 5 least significant bits of the shift count.
+          __ GetLeastBitsFromInt32(r2, r2, 5);
+          __ mov(r2, Operand(r3, LSR, r2), SetCC);
+          // SHR is special because it is required to produce a positive
+          // answer. The code below for writing into heap numbers isn't capable
+          // of writing the register as an unsigned int, so we go to the slow
+          // case if we hit this case.
+          __ b(mi, &result_not_a_smi);
+          break;
+        case Token::SHL:
+          // Use only the 5 least significant bits of the shift count.
+          __ GetLeastBitsFromInt32(r2, r2, 5);
+          __ mov(r2, Operand(r3, LSL, r2));
+          break;
+        default:
+          UNREACHABLE();
+      }
+
+      // Check that the *signed* result fits in a smi.
+      __ TrySmiTag(r0, r2, &result_not_a_smi);
+      __ Ret();
+
+      // Allocate new heap number for result.
+      __ bind(&result_not_a_smi);
+      if (smi_operands) {
+        __ AllocateHeapNumber(
+            result, scratch1, scratch2, heap_number_map, gc_required);
+      } else {
+        BinaryOpStub_GenerateHeapResultAllocation(
+            masm, result, heap_number_map, scratch1, scratch2, gc_required,
+            mode);
+      }
+
+      // r2: Answer as signed int32.
+      // result: Heap number to write answer into.
+
+      // Nothing can go wrong now, so move the heap number to r0, which is the
+      // result.
+      __ mov(r0, Operand(result));
+
+      // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As
+      // mentioned above SHR needs to always produce a positive result.
+      __ vmov(s0, r2);
+      if (op == Token::SHR) {
+        __ vcvt_f64_u32(d0, s0);
+      } else {
+        __ vcvt_f64_s32(d0, s0);
+      }
+      __ sub(r3, r0, Operand(kHeapObjectTag));
+      __ vstr(d0, r3, HeapNumber::kValueOffset);
+      __ Ret();
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+// Generate the smi code. If the operation on smis is successful, a return
+// instruction is generated. If the result is not a smi and heap number
+// allocation is not requested, the code falls through. If number allocation
+// is requested but a heap number cannot be allocated, the code jumps to the
+// label gc_required.
+void BinaryOpStub_GenerateSmiCode(
+    MacroAssembler* masm,
+    Label* use_runtime,
+    Label* gc_required,
+    Token::Value op,
+    BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results,
+    OverwriteMode mode,
+    Register scratch1,
+    Register scratch2,
+    Register scratch3,
+    Register scratch4) {
+  Label not_smis;
+
+  Register left = r1;
+  Register right = r0;
+  ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
+
+  // Perform combined smi check on both operands.
+  __ orr(scratch1, left, Operand(right));
+  __ JumpIfNotSmi(scratch1, &not_smis);
+
+  // If the smi-smi operation results in a smi, a return is generated.
+  BinaryOpStub_GenerateSmiSmiOperation(masm, op, scratch1, scratch2);
+
+  // If heap number results are possible, generate the result in an allocated
+  // heap number.
+  if (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) {
+    BinaryOpStub_GenerateFPOperation(
+        masm, BinaryOpIC::UNINITIALIZED, BinaryOpIC::UNINITIALIZED, true,
+        use_runtime, gc_required, &not_smis, op, mode, scratch2, scratch3,
+        scratch1, scratch4);
+  }
+  __ bind(&not_smis);
+}
+
+
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
+  Label right_arg_changed, call_runtime;
+
+  if (op_ == Token::MOD && encoded_right_arg_.has_value) {
+    // It is guaranteed that the value will fit into a Smi, because if it
+    // didn't, we wouldn't be here, see BinaryOp_Patch.
+    __ cmp(r0, Operand(Smi::FromInt(fixed_right_arg_value())));
+    __ b(ne, &right_arg_changed);
+  }
+
+  if (result_type_ == BinaryOpIC::UNINITIALIZED ||
+      result_type_ == BinaryOpIC::SMI) {
+    // Only allow smi results.
+    BinaryOpStub_GenerateSmiCode(masm, &call_runtime, NULL, op_,
+                                 NO_HEAPNUMBER_RESULTS, mode_, r5, r6, r4, r9);
+  } else {
+    // Allow heap number result and don't make a transition if a heap number
+    // cannot be allocated.
+    BinaryOpStub_GenerateSmiCode(masm, &call_runtime, &call_runtime, op_,
+                                 ALLOW_HEAPNUMBER_RESULTS, mode_, r5, r6, r4,
+                                 r9);
+  }
+
+  // Code falls through if the result is not returned as either a smi or heap
+  // number.
+  __ bind(&right_arg_changed);
+  GenerateTypeTransition(masm);
+
+  __ bind(&call_runtime);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    GenerateRegisterArgsPush(masm);
+    GenerateCallRuntime(masm);
+  }
+  __ Ret();
+}
+
+
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) {
+  Label call_runtime;
+  ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING);
+  ASSERT(op_ == Token::ADD);
+  // If both arguments are strings, call the string add stub.
+  // Otherwise, do a transition.
+
+  // Registers containing left and right operands respectively.
+  Register left = r1;
+  Register right = r0;
+
+  // Test if left operand is a string.
+  __ JumpIfSmi(left, &call_runtime);
+  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
+  __ b(ge, &call_runtime);
+
+  // Test if right operand is a string.
+  __ JumpIfSmi(right, &call_runtime);
+  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
+  __ b(ge, &call_runtime);
+
+  StringAddStub string_add_stub(
+      (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME));
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_stub);
+
+  __ bind(&call_runtime);
+  GenerateTypeTransition(masm);
+}
+
+
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
+  ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32);
+
+  Register left = r1;
+  Register right = r0;
+  Register scratch1 = r4;
+  Register scratch2 = r9;
+  Register scratch3 = r5;
+  LowDwVfpRegister double_scratch = d0;
+
+  Register heap_number_result = no_reg;
+  Register heap_number_map = r6;
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+
+  Label call_runtime;
+  // Label for type transition, used for wrong input or output types.
+  Label transition;
+
+  // Smi-smi fast case.
+  Label skip;
+  __ orr(scratch1, left, right);
+  __ JumpIfNotSmi(scratch1, &skip);
+  BinaryOpStub_GenerateSmiSmiOperation(masm, op_, scratch2, scratch3);
+  // Fall through if the result is not a smi.
+  __ bind(&skip);
+
+  switch (op_) {
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD: {
+      // It could be that only SMIs have been seen at either the left
+      // or the right operand. For precise type feedback, patch the IC
+      // again if this changes.
+      if (left_type_ == BinaryOpIC::SMI) {
+        __ JumpIfNotSmi(left, &transition);
+      }
+      if (right_type_ == BinaryOpIC::SMI) {
+        __ JumpIfNotSmi(right, &transition);
+      }
+      // Load both operands and check that they are 32-bit integers.
+      // Jump to type transition if they are not. The registers r0 and r1
+      // (right and left) are preserved for the runtime call.
+      __ LoadNumberAsInt32Double(
+          right, d1, heap_number_map, scratch1, d8, &transition);
+      __ LoadNumberAsInt32Double(
+          left, d0, heap_number_map, scratch1, d8, &transition);
+
+      if (op_ != Token::MOD) {
+        Label return_heap_number;
+        switch (op_) {
+          case Token::ADD:
+            __ vadd(d5, d0, d1);
+            break;
+          case Token::SUB:
+            __ vsub(d5, d0, d1);
+            break;
+          case Token::MUL:
+            __ vmul(d5, d0, d1);
+            break;
+          case Token::DIV:
+            __ vdiv(d5, d0, d1);
+            break;
+          default:
+            UNREACHABLE();
+        }
+
+        if (result_type_ <= BinaryOpIC::INT32) {
+          __ TryDoubleToInt32Exact(scratch1, d5, d8);
+          // If the ne condition is set, the result does not fit in a
+          // 32-bit integer.
+          __ b(ne, &transition);
+          // Try to tag the result as a Smi, return heap number on overflow.
+          __ SmiTag(scratch1, SetCC);
+          __ b(vs, &return_heap_number);
+          // Check for minus zero, transition in that case (because we need
+          // to return a heap number).
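+          // -0.0 has the bit pattern 0x80000000:00000000: once the integer
+          // result compares equal to zero, only the sign bit of the high word
+          // distinguishes -0.0 from +0.0.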
+          Label not_zero;
+          ASSERT(kSmiTag == 0);
+          __ b(ne, &not_zero);
+          __ VmovHigh(scratch2, d5);
+          __ tst(scratch2, Operand(HeapNumber::kSignMask));
+          __ b(ne, &transition);
+          __ bind(&not_zero);
+          __ mov(r0, scratch1);
+          __ Ret();
+        }
+
+        __ bind(&return_heap_number);
+        // Return a heap number, or fall through to type transition or runtime
+        // call if we can't.
+        // We are using VFP registers so r5 is available.
+        heap_number_result = r5;
+        BinaryOpStub_GenerateHeapResultAllocation(masm,
+                                                  heap_number_result,
+                                                  heap_number_map,
+                                                  scratch1,
+                                                  scratch2,
+                                                  &call_runtime,
+                                                  mode_);
+        __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
+        __ vstr(d5, r0, HeapNumber::kValueOffset);
+        __ mov(r0, heap_number_result);
+        __ Ret();
+
+        // A DIV operation expecting an integer result falls through
+        // to type transition.
+
+      } else {
+        if (encoded_right_arg_.has_value) {
+          __ Vmov(d8, fixed_right_arg_value(), scratch1);
+          __ VFPCompareAndSetFlags(d1, d8);
+          __ b(ne, &transition);
+        }
+
+        // Allocate a heap number to store the result.
+        heap_number_result = r5;
+        BinaryOpStub_GenerateHeapResultAllocation(masm,
+                                                  heap_number_result,
+                                                  heap_number_map,
+                                                  scratch1,
+                                                  scratch2,
+                                                  &call_runtime,
+                                                  mode_);
+
+        // Call the C function to handle the double operation.
+        CallCCodeForDoubleOperation(masm, op_, heap_number_result, scratch1);
+        if (FLAG_debug_code) {
+          __ stop("Unreachable code.");
+        }
+
+        __ b(&call_runtime);
+      }
+
+      break;
+    }
+
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::SAR:
+    case Token::SHR:
+    case Token::SHL: {
+      Label return_heap_number;
+      // Convert operands to 32-bit integers. Right in r2 and left in r3. The
+      // registers r0 and r1 (right and left) are preserved for the runtime
+      // call.
+      __ LoadNumberAsInt32(left, r3, heap_number_map,
+                           scratch1, d0, d1, &transition);
+      __ LoadNumberAsInt32(right, r2, heap_number_map,
+                           scratch1, d0, d1, &transition);
+
+      // The ECMA-262 standard specifies that, for shift operations, only the
+      // 5 least significant bits of the shift value should be used.
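+      // e.g. in JS, (1 << 37) evaluates as 1 << (37 & 0x1f) == 1 << 5 == 32.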
+      switch (op_) {
+        case Token::BIT_OR:
+          __ orr(r2, r3, Operand(r2));
+          break;
+        case Token::BIT_XOR:
+          __ eor(r2, r3, Operand(r2));
+          break;
+        case Token::BIT_AND:
+          __ and_(r2, r3, Operand(r2));
+          break;
+        case Token::SAR:
+          __ and_(r2, r2, Operand(0x1f));
+          __ mov(r2, Operand(r3, ASR, r2));
+          break;
+        case Token::SHR:
+          __ and_(r2, r2, Operand(0x1f));
+          __ mov(r2, Operand(r3, LSR, r2), SetCC);
+          // SHR is special because it is required to produce a positive
+          // answer. We only get a negative result if the shift value (r2) is 0.
+          // This result cannot be represented as a signed 32-bit integer, so
+          // try to return a heap number if we can.
+          __ b(mi, (result_type_ <= BinaryOpIC::INT32)
+                       ? &transition
+                       : &return_heap_number);
+          break;
+        case Token::SHL:
+          __ and_(r2, r2, Operand(0x1f));
+          __ mov(r2, Operand(r3, LSL, r2));
+          break;
+        default:
+          UNREACHABLE();
+      }
+
+      // Check if the result fits in a smi. If not, try to return a heap
+      // number. (We know the result is an int32.)
+      __ TrySmiTag(r0, r2, &return_heap_number);
+      __ Ret();
+
+      __ bind(&return_heap_number);
+      heap_number_result = r5;
+      BinaryOpStub_GenerateHeapResultAllocation(masm,
+                                                heap_number_result,
+                                                heap_number_map,
+                                                scratch1,
+                                                scratch2,
+                                                &call_runtime,
+                                                mode_);
+
+      if (op_ != Token::SHR) {
+        // Convert the result to a floating point value.
+        __ vmov(double_scratch.low(), r2);
+        __ vcvt_f64_s32(double_scratch, double_scratch.low());
+      } else {
+        // The result must be interpreted as an unsigned 32-bit integer.
+        __ vmov(double_scratch.low(), r2);
+        __ vcvt_f64_u32(double_scratch, double_scratch.low());
+      }
+
+      // Store the result.
+      __ sub(r0, heap_number_result, Operand(kHeapObjectTag));
+      __ vstr(double_scratch, r0, HeapNumber::kValueOffset);
+      __ mov(r0, heap_number_result);
+      __ Ret();
+
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+
+  // We never expect DIV to yield an integer result, so we always generate
+  // type transition code for DIV operations expecting an integer result: the
+  // code will fall through to this type transition.
+  if (transition.is_linked() ||
+      ((op_ == Token::DIV) && (result_type_ <= BinaryOpIC::INT32))) {
+    __ bind(&transition);
+    GenerateTypeTransition(masm);
+  }
+
+  __ bind(&call_runtime);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    GenerateRegisterArgsPush(masm);
+    GenerateCallRuntime(masm);
+  }
+  __ Ret();
+}
+
+
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) {
+  Label call_runtime;
+
+  if (op_ == Token::ADD) {
+    // Handle string addition here, because it is the only operation
+    // that does not do a ToNumber conversion on the operands.
+    GenerateAddStrings(masm);
+  }
+
+  // Convert oddball arguments to numbers.
+  Label check, done;
+  __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
+  __ b(ne, &check);
+  if (Token::IsBitOp(op_)) {
+    __ mov(r1, Operand(Smi::FromInt(0)));
+  } else {
+    __ LoadRoot(r1, Heap::kNanValueRootIndex);
+  }
+  __ jmp(&done);
+  __ bind(&check);
+  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
+  __ b(ne, &done);
+  if (Token::IsBitOp(op_)) {
+    __ mov(r0, Operand(Smi::FromInt(0)));
+  } else {
+    __ LoadRoot(r0, Heap::kNanValueRootIndex);
+  }
+  __ bind(&done);
+
+  GenerateNumberStub(masm);
+}
+
+
+void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) {
+  Label call_runtime, transition;
+  BinaryOpStub_GenerateFPOperation(
+      masm, left_type_, right_type_, false,
+      &transition, &call_runtime, &transition, op_, mode_, r6, r4, r5, r9);
+
+  __ bind(&transition);
+  GenerateTypeTransition(masm);
+
+  __ bind(&call_runtime);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    GenerateRegisterArgsPush(masm);
+    GenerateCallRuntime(masm);
+  }
+  __ Ret();
+}
+
+
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
+  Label call_runtime, call_string_add_or_runtime, transition;
+
+  BinaryOpStub_GenerateSmiCode(
+      masm, &call_runtime, &call_runtime, op_, ALLOW_HEAPNUMBER_RESULTS, mode_,
+      r5, r6, r4, r9);
+
+  BinaryOpStub_GenerateFPOperation(
+      masm, left_type_, right_type_, false,
+      &call_string_add_or_runtime, &call_runtime, &transition, op_, mode_, r6,
+      r4, r5, r9);
+
+  __ bind(&transition);
+  GenerateTypeTransition(masm);
+
+  __ bind(&call_string_add_or_runtime);
+  if (op_ == Token::ADD) {
+    GenerateAddStrings(masm);
+  }
+
+  __ bind(&call_runtime);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    GenerateRegisterArgsPush(masm);
+    GenerateCallRuntime(masm);
+  }
+  __ Ret();
+}
+
+
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
+  ASSERT(op_ == Token::ADD);
+  Label left_not_string, call_runtime;
+
+  Register left = r1;
+  Register right = r0;
+
+  // Check if left argument is a string.
+  __ JumpIfSmi(left, &left_not_string);
+  __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
+  __ b(ge, &left_not_string);
+
+  StringAddStub string_add_left_stub(
+      (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME));
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_left_stub);
+
+  // Left operand is not a string, test right.
+  __ bind(&left_not_string);
+  __ JumpIfSmi(right, &call_runtime);
+  __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
+  __ b(ge, &call_runtime);
+
+  StringAddStub string_add_right_stub(
+      (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME));
+  GenerateRegisterArgsPush(masm);
+  __ TailCallStub(&string_add_right_stub);
+
+  // At least one argument is not a string.
+  __ bind(&call_runtime);
+}
+
+
+void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm,
+                                               Register result,
+                                               Register heap_number_map,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required,
+                                               OverwriteMode mode) {
+  // The code below will clobber result if allocation fails. To keep both
+  // arguments intact for the runtime call, result cannot be one of these.
+  ASSERT(!result.is(r0) && !result.is(r1));
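+  // In the overwrite modes the stub may reuse the overwritable operand's
+  // heap number for the result instead of allocating a new one; e.g. with
+  // OVERWRITE_LEFT, a heap-number left operand in r1 is written over in place.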
+
+  if (mode == OVERWRITE_LEFT || mode == OVERWRITE_RIGHT) {
+    Label skip_allocation, allocated;
+    Register overwritable_operand = mode == OVERWRITE_LEFT ? r1 : r0;
+    // If the overwritable operand is already a heap object, we skip the
+    // allocation of a new heap number.
+    __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
+    // Allocate a heap number for the result.
+    __ AllocateHeapNumber(
+        result, scratch1, scratch2, heap_number_map, gc_required);
+    __ b(&allocated);
+    __ bind(&skip_allocation);
+    // Use object holding the overwritable operand for result.
+    __ mov(result, Operand(overwritable_operand));
+    __ bind(&allocated);
+  } else {
+    ASSERT(mode == NO_OVERWRITE);
+    __ AllocateHeapNumber(
+        result, scratch1, scratch2, heap_number_map, gc_required);
+  }
+}
+
+
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
+  __ Push(r1, r0);
+}
+
+
 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
   // Untagged case: double input in d2, double result goes
   //   into d2.
@@ -1639,7 +2614,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
   CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
-  BinaryOpStub::GenerateAheadOfTime(isolate);
 }