Index: src/x64/code-stubs-x64.cc |
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc |
index 62b376f657724cafab7cd3a92f39b5368bec9484..1896c00f92a4eefd29202473204f5ca86ca5ec4a 100644 |
--- a/src/x64/code-stubs-x64.cc |
+++ b/src/x64/code-stubs-x64.cc |
@@ -155,18 +155,6 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor( |
} |
-void BinaryOpStub::InitializeInterfaceDescriptor( |
- Isolate* isolate, |
- CodeStubInterfaceDescriptor* descriptor) { |
- static Register registers[] = { rdx, rax }; |
- descriptor->register_param_count_ = 2; |
- descriptor->register_params_ = registers; |
- descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss); |
- descriptor->SetMissHandler( |
- ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate)); |
-} |
- |
- |
static void InitializeArrayConstructorDescriptor( |
Isolate* isolate, |
CodeStubInterfaceDescriptor* descriptor, |
@@ -459,8 +447,35 @@ class FloatingPointHelper : public AllStatic { |
// If the operands are not both numbers, jump to not_numbers. |
// Leaves rdx and rax unchanged. SmiOperands assumes both are smis. |
// NumberOperands assumes both are smis or heap numbers. |
+ static void LoadSSE2SmiOperands(MacroAssembler* masm); |
static void LoadSSE2UnknownOperands(MacroAssembler* masm, |
Label* not_numbers); |
+ |
+ // Takes the operands in rdx and rax and loads them as integers in rax |
+ // and rcx. |
+ static void LoadAsIntegers(MacroAssembler* masm, |
+ Label* operand_conversion_failure, |
+ Register heap_number_map); |
+ |
+ // Tries to convert two values to smis losslessly. |
+ // This fails if either argument is neither a Smi nor a HeapNumber, |
+ // or if it is a HeapNumber whose value cannot be converted |
+ // losslessly to a Smi. In that case, control transfers to the |
+ // on_not_smis label. |
+ // On success, control either goes to the on_success label (if one is |
+ // provided) or falls through at the end of the code (if on_success |
+ // is NULL), and both first and second hold Smi-tagged values. |
+ // At least one of first or second must be a non-Smi when entering. |
+ static void NumbersToSmis(MacroAssembler* masm, |
+ Register first, |
+ Register second, |
+ Register scratch1, |
+ Register scratch2, |
+ Register scratch3, |
+ Label* on_success, |
+ Label* on_not_smis, |
+ ConvertUndefined convert_undefined); |
}; |
@@ -548,6 +563,569 @@ void DoubleToIStub::Generate(MacroAssembler* masm) { |
} |
+void BinaryOpStub::Initialize() {} |
+ |
+ |
+void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
+ __ PopReturnAddressTo(rcx); |
+ __ push(rdx); |
+ __ push(rax); |
+ // Left and right arguments are now on top. |
+ __ Push(Smi::FromInt(MinorKey())); |
+ |
+ __ PushReturnAddressFrom(rcx); |
+ |
+ // Patch the caller to an appropriate specialized stub and return the |
+ // operation result to the caller of the stub. |
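+ // The helper receives three arguments under the return address: the |
+ // left and right operands and Smi::FromInt(MinorKey()), which encodes |
+ // the stub's recorded state so the IC can pick the next specialization. |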
+ __ TailCallExternalReference( |
+ ExternalReference(IC_Utility(IC::kBinaryOp_Patch), |
+ masm->isolate()), |
+ 3, |
+ 1); |
+} |
+ |
+ |
+static void BinaryOpStub_GenerateSmiCode( |
+ MacroAssembler* masm, |
+ Label* slow, |
+ BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, |
+ Token::Value op) { |
+ |
+ // Arguments to BinaryOpStub are in rdx and rax. |
+ const Register left = rdx; |
+ const Register right = rax; |
+ |
+ // We only generate heap number results for overflowing calculations |
+ // for the four basic arithmetic operations and logical right shift by zero. |
+ bool generate_inline_heapnumber_results = |
+ (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) && |
+ (op == Token::ADD || op == Token::SUB || |
+ op == Token::MUL || op == Token::DIV || op == Token::SHR); |
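+ // On x64 smis hold full 32-bit values, so, for example, |
+ // INT32_MAX + 1 overflows the smi range, and (-1 >>> 0) == 0xffffffff |
+ // is likewise unrepresentable; both cases need a HeapNumber result. |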
+ |
+ // Smi check of both operands. If op is BIT_OR, the check is delayed |
+ // until after the OR operation. |
+ Label not_smis; |
+ Label use_fp_on_smis; |
+ Label fail; |
+ |
+ if (op != Token::BIT_OR) { |
+ Comment smi_check_comment(masm, "-- Smi check arguments"); |
+ __ JumpIfNotBothSmi(left, right, ¬_smis); |
+ } |
+ |
+ Label smi_values; |
+ __ bind(&smi_values); |
+ // Perform the operation. |
+ Comment perform_smi(masm, "-- Perform smi operation"); |
+ switch (op) { |
+ case Token::ADD: |
+ ASSERT(right.is(rax)); |
+ __ SmiAdd(right, right, left, &use_fp_on_smis); // ADD is commutative. |
+ break; |
+ |
+ case Token::SUB: |
+ __ SmiSub(left, left, right, &use_fp_on_smis); |
+ __ movq(rax, left); |
+ break; |
+ |
+ case Token::MUL: |
+ ASSERT(right.is(rax)); |
+ __ SmiMul(right, right, left, &use_fp_on_smis); // MUL is commutative. |
+ break; |
+ |
+ case Token::DIV: |
+ // SmiDiv will not accept left in rdx or right in rax. |
+ __ movq(rbx, rax); |
+ __ movq(rcx, rdx); |
+ __ SmiDiv(rax, rcx, rbx, &use_fp_on_smis); |
+ break; |
+ |
+ case Token::MOD: |
+ // SmiMod will not accept left in rdx or right in rax. |
+ __ movq(rbx, rax); |
+ __ movq(rcx, rdx); |
+ __ SmiMod(rax, rcx, rbx, &use_fp_on_smis); |
+ break; |
+ |
+ case Token::BIT_OR: { |
+ ASSERT(right.is(rax)); |
+ __ SmiOrIfSmis(right, right, left, ¬_smis); // BIT_OR is commutative. |
+ break; |
+ } |
+ case Token::BIT_XOR: |
+ ASSERT(right.is(rax)); |
+ __ SmiXor(right, right, left); // BIT_XOR is commutative. |
+ break; |
+ |
+ case Token::BIT_AND: |
+ ASSERT(right.is(rax)); |
+ __ SmiAnd(right, right, left); // BIT_AND is commutative. |
+ break; |
+ |
+ case Token::SHL: |
+ __ SmiShiftLeft(left, left, right); |
+ __ movq(rax, left); |
+ break; |
+ |
+ case Token::SAR: |
+ __ SmiShiftArithmeticRight(left, left, right); |
+ __ movq(rax, left); |
+ break; |
+ |
+ case Token::SHR: |
+ __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis); |
+ __ movq(rax, left); |
+ break; |
+ |
+ default: |
+ UNREACHABLE(); |
+ } |
+ |
+ // Emit return of the result in rax. |
+ __ ret(0); |
+ |
+ if (use_fp_on_smis.is_linked()) { |
+ // For some operations, emit inline code to perform floating-point |
+ // operations on known smis (e.g., if the result of the operation |
+ // overflowed the smi range). |
+ __ bind(&use_fp_on_smis); |
+ if (op == Token::DIV || op == Token::MOD) { |
+ // Restore left and right to rdx and rax. |
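+ // (The operands were copied into rbx and rcx above, so the |
+ // originals are still available there after the bailout.) |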
+ __ movq(rdx, rcx); |
+ __ movq(rax, rbx); |
+ } |
+ |
+ if (generate_inline_heapnumber_results) { |
+ __ AllocateHeapNumber(rcx, rbx, slow); |
+ Comment perform_float(masm, "-- Perform float operation on smis"); |
+ if (op == Token::SHR) { |
+ __ SmiToInteger32(left, left); |
+ __ cvtqsi2sd(xmm0, left); |
+ } else { |
+ FloatingPointHelper::LoadSSE2SmiOperands(masm); |
+ switch (op) { |
+ case Token::ADD: __ addsd(xmm0, xmm1); break; |
+ case Token::SUB: __ subsd(xmm0, xmm1); break; |
+ case Token::MUL: __ mulsd(xmm0, xmm1); break; |
+ case Token::DIV: __ divsd(xmm0, xmm1); break; |
+ default: UNREACHABLE(); |
+ } |
+ } |
+ __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); |
+ __ movq(rax, rcx); |
+ __ ret(0); |
+ } else { |
+ __ jmp(&fail); |
+ } |
+ } |
+ |
+ // Non-smi operands reach the end of the code generated by |
+ // GenerateSmiCode and fall through to subsequent code, |
+ // with the operands in rdx and rax. |
+ // But first we check whether the non-smi values are HeapNumbers |
+ // holding values that could be smis. |
+ __ bind(¬_smis); |
+ Comment done_comment(masm, "-- Enter non-smi code"); |
+ FloatingPointHelper::ConvertUndefined convert_undefined = |
+ FloatingPointHelper::BAILOUT_ON_UNDEFINED; |
+ // This list must be in sync with BinaryOpPatch() behavior in ic.cc. |
+ if (op == Token::BIT_AND || |
+ op == Token::BIT_OR || |
+ op == Token::BIT_XOR || |
+ op == Token::SAR || |
+ op == Token::SHL || |
+ op == Token::SHR) { |
+ convert_undefined = FloatingPointHelper::CONVERT_UNDEFINED_TO_ZERO; |
+ } |
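+ // Per ECMA-262, ToInt32(undefined) is 0, so the bitwise and shift |
+ // operators can treat undefined as zero; the arithmetic operators see |
+ // ToNumber(undefined) == NaN, which has no smi form, hence the bailout. |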
+ FloatingPointHelper::NumbersToSmis(masm, left, right, rbx, rdi, rcx, |
+ &smi_values, &fail, convert_undefined); |
+ __ jmp(&smi_values); |
+ __ bind(&fail); |
+} |
+ |
+ |
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
+ Label* alloc_failure, |
+ OverwriteMode mode); |
+ |
+ |
+static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, |
+ Label* allocation_failure, |
+ Label* non_numeric_failure, |
+ Token::Value op, |
+ OverwriteMode mode) { |
+ switch (op) { |
+ case Token::ADD: |
+ case Token::SUB: |
+ case Token::MUL: |
+ case Token::DIV: { |
+ FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure); |
+ |
+ switch (op) { |
+ case Token::ADD: __ addsd(xmm0, xmm1); break; |
+ case Token::SUB: __ subsd(xmm0, xmm1); break; |
+ case Token::MUL: __ mulsd(xmm0, xmm1); break; |
+ case Token::DIV: __ divsd(xmm0, xmm1); break; |
+ default: UNREACHABLE(); |
+ } |
+ BinaryOpStub_GenerateHeapResultAllocation( |
+ masm, allocation_failure, mode); |
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
+ __ ret(0); |
+ break; |
+ } |
+ case Token::MOD: { |
+ // For MOD we jump to the allocation_failure label to call the runtime. |
+ __ jmp(allocation_failure); |
+ break; |
+ } |
+ case Token::BIT_OR: |
+ case Token::BIT_AND: |
+ case Token::BIT_XOR: |
+ case Token::SAR: |
+ case Token::SHL: |
+ case Token::SHR: { |
+ Label non_smi_shr_result; |
+ Register heap_number_map = r9; |
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
+ FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, |
+ heap_number_map); |
+ switch (op) { |
+ case Token::BIT_OR: __ orl(rax, rcx); break; |
+ case Token::BIT_AND: __ andl(rax, rcx); break; |
+ case Token::BIT_XOR: __ xorl(rax, rcx); break; |
+ case Token::SAR: __ sarl_cl(rax); break; |
+ case Token::SHL: __ shll_cl(rax); break; |
+ case Token::SHR: { |
+ __ shrl_cl(rax); |
+ // Check if result is negative. This can only happen for a shift |
+ // by zero. |
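+ // (For example, -1 >>> 0 yields 0xffffffff.) A shift amount of one |
+ // or more always clears the sign bit. |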
+ __ testl(rax, rax); |
+ __ j(negative, &non_smi_shr_result); |
+ break; |
+ } |
+ default: UNREACHABLE(); |
+ } |
+ STATIC_ASSERT(kSmiValueSize == 32); |
+ // Tag smi result and return. |
+ __ Integer32ToSmi(rax, rax); |
+ __ Ret(); |
+ |
+ // Logical shift right can produce an unsigned int32 value that does |
+ // not fit in an int32, and so is not in the smi range. Allocate a |
+ // heap number in that case. |
+ if (op == Token::SHR) { |
+ __ bind(&non_smi_shr_result); |
+ Label allocation_failed; |
+ __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). |
+ // Allocate heap number in new space. |
+ // Not using AllocateHeapNumber macro in order to reuse |
+ // already loaded heap_number_map. |
+ __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed, |
+ TAG_OBJECT); |
+ // Set the map. |
+ __ AssertRootValue(heap_number_map, |
+ Heap::kHeapNumberMapRootIndex, |
+ kHeapNumberMapRegisterClobbered); |
+ __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
+ heap_number_map); |
+ __ cvtqsi2sd(xmm0, rbx); |
+ __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
+ __ Ret(); |
+ |
+ __ bind(&allocation_failed); |
+ // We need tagged values in rdx and rax for the following code, |
+ // not int32 in rax and rcx. |
+ __ Integer32ToSmi(rax, rcx); |
+ __ Integer32ToSmi(rdx, rbx); |
+ __ jmp(allocation_failure); |
+ } |
+ break; |
+ } |
+ default: UNREACHABLE(); break; |
+ } |
+ // No fall-through from this generated code. |
+ if (FLAG_debug_code) { |
+ __ Abort(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode); |
+ } |
+} |
+ |
+ |
+static void BinaryOpStub_GenerateRegisterArgsPushUnderReturn( |
+ MacroAssembler* masm) { |
+ // Push arguments, but ensure they are under the return address |
+ // for a tail call. |
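+ // Afterwards the stack, from the top, is: |
+ //   [rsp]      : return address |
+ //   [rsp + 8]  : rax (right operand) |
+ //   [rsp + 16] : rdx (left operand) |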
+ __ PopReturnAddressTo(rcx); |
+ __ push(rdx); |
+ __ push(rax); |
+ __ PushReturnAddressFrom(rcx); |
+} |
+ |
+ |
+void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { |
+ ASSERT(op_ == Token::ADD); |
+ Label left_not_string, call_runtime; |
+ |
+ // Registers containing left and right operands respectively. |
+ Register left = rdx; |
+ Register right = rax; |
+ |
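+ // Each StringAddStub below re-checks only the operand that has not |
+ // already been verified inline: CHECK_RIGHT once the left operand is |
+ // known to be a string, CHECK_LEFT once the right operand is. |
+ |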
+ // Test if left operand is a string. |
+ __ JumpIfSmi(left, &left_not_string, Label::kNear); |
+ __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx); |
+ __ j(above_equal, &left_not_string, Label::kNear); |
+ StringAddStub string_add_left_stub( |
+ (StringAddFlags)(STRING_ADD_CHECK_RIGHT | STRING_ADD_ERECT_FRAME)); |
+ BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm); |
+ __ TailCallStub(&string_add_left_stub); |
+ |
+ // Left operand is not a string, test right. |
+ __ bind(&left_not_string); |
+ __ JumpIfSmi(right, &call_runtime, Label::kNear); |
+ __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx); |
+ __ j(above_equal, &call_runtime, Label::kNear); |
+ |
+ StringAddStub string_add_right_stub( |
+ (StringAddFlags)(STRING_ADD_CHECK_LEFT | STRING_ADD_ERECT_FRAME)); |
+ BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm); |
+ __ TailCallStub(&string_add_right_stub); |
+ |
+ // Neither argument is a string. |
+ __ bind(&call_runtime); |
+} |
+ |
+ |
+void BinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
+ Label right_arg_changed, call_runtime; |
+ |
+ if (op_ == Token::MOD && encoded_right_arg_.has_value) { |
+ // It is guaranteed that the value fits into a Smi, because if it |
+ // didn't, we wouldn't be here; see BinaryOp_Patch. |
+ __ Cmp(rax, Smi::FromInt(fixed_right_arg_value())); |
+ __ j(not_equal, &right_arg_changed); |
+ } |
+ |
+ if (result_type_ == BinaryOpIC::UNINITIALIZED || |
+ result_type_ == BinaryOpIC::SMI) { |
+ // Only allow smi results. |
+ BinaryOpStub_GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS, op_); |
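+ // (Passing NULL as the slow label is safe: the label is only used |
+ // when allocating a heap number, which NO_HEAPNUMBER_RESULTS forbids.) |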
+ } else { |
+ // Allow heap number result and don't make a transition if a heap number |
+ // cannot be allocated. |
+ BinaryOpStub_GenerateSmiCode( |
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); |
+ } |
+ |
+ // Code falls through here if the result was not returned as either a |
+ // smi or a heap number. |
+ __ bind(&right_arg_changed); |
+ GenerateTypeTransition(masm); |
+ |
+ if (call_runtime.is_linked()) { |
+ __ bind(&call_runtime); |
+ { |
+ FrameScope scope(masm, StackFrame::INTERNAL); |
+ GenerateRegisterArgsPush(masm); |
+ GenerateCallRuntime(masm); |
+ } |
+ __ Ret(); |
+ } |
+} |
+ |
+ |
+void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
+ // The int32 case is identical to the Smi case, so we avoid creating |
+ // this IC state on x64. |
+ UNREACHABLE(); |
+} |
+ |
+ |
+void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { |
+ Label call_runtime; |
+ ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); |
+ ASSERT(op_ == Token::ADD); |
+ // If both arguments are strings, call the string add stub. |
+ // Otherwise, do a transition. |
+ |
+ // Registers containing left and right operands respectively. |
+ Register left = rdx; |
+ Register right = rax; |
+ |
+ // Test if left operand is a string. |
+ __ JumpIfSmi(left, &call_runtime); |
+ __ CmpObjectType(left, FIRST_NONSTRING_TYPE, rcx); |
+ __ j(above_equal, &call_runtime); |
+ |
+ // Test if right operand is a string. |
+ __ JumpIfSmi(right, &call_runtime); |
+ __ CmpObjectType(right, FIRST_NONSTRING_TYPE, rcx); |
+ __ j(above_equal, &call_runtime); |
+ |
+ StringAddStub string_add_stub( |
+ (StringAddFlags)(STRING_ADD_CHECK_NONE | STRING_ADD_ERECT_FRAME)); |
+ BinaryOpStub_GenerateRegisterArgsPushUnderReturn(masm); |
+ __ TailCallStub(&string_add_stub); |
+ |
+ __ bind(&call_runtime); |
+ GenerateTypeTransition(masm); |
+} |
+ |
+ |
+void BinaryOpStub::GenerateOddballStub(MacroAssembler* masm) { |
+ Label call_runtime; |
+ |
+ if (op_ == Token::ADD) { |
+ // Handle string addition here, because it is the only operation |
+ // that does not do a ToNumber conversion on the operands. |
+ GenerateAddStrings(masm); |
+ } |
+ |
+ // Convert oddball arguments to numbers. |
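+ // undefined becomes 0 for the bitwise operators (ToInt32) and NaN for |
+ // the others (ToNumber); any other value is left untouched and is |
+ // handled by GenerateNumberStub below. |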
+ Label check, done; |
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); |
+ __ j(not_equal, &check, Label::kNear); |
+ if (Token::IsBitOp(op_)) { |
+ __ xor_(rdx, rdx); |
+ } else { |
+ __ LoadRoot(rdx, Heap::kNanValueRootIndex); |
+ } |
+ __ jmp(&done, Label::kNear); |
+ __ bind(&check); |
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); |
+ __ j(not_equal, &done, Label::kNear); |
+ if (Token::IsBitOp(op_)) { |
+ __ xor_(rax, rax); |
+ } else { |
+ __ LoadRoot(rax, Heap::kNanValueRootIndex); |
+ } |
+ __ bind(&done); |
+ |
+ GenerateNumberStub(masm); |
+} |
+ |
+ |
+static void BinaryOpStub_CheckSmiInput(MacroAssembler* masm, |
+ Register input, |
+ Label* fail) { |
+ Label ok; |
+ __ JumpIfSmi(input, &ok, Label::kNear); |
+ Register heap_number_map = r8; |
+ Register scratch1 = r9; |
+ Register scratch2 = r10; |
+ // HeapNumbers containing 32-bit integer values are also allowed. |
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
+ __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map); |
+ __ j(not_equal, fail); |
+ __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset)); |
+ // Convert, convert back, and compare the two doubles' bits. |
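+ // For example, 7.0 truncates to 7 and converts back bit-identically, |
+ // so it passes; 2.5 comes back as 2.0 and fails. The bit comparison |
+ // also rejects -0.0 (it returns as +0.0) and doubles outside the |
+ // int32 range. |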
+ __ cvttsd2siq(scratch2, xmm0); |
+ __ Cvtlsi2sd(xmm1, scratch2); |
+ __ movq(scratch1, xmm0); |
+ __ movq(scratch2, xmm1); |
+ __ cmpq(scratch1, scratch2); |
+ __ j(not_equal, fail); |
+ __ bind(&ok); |
+} |
+ |
+ |
+void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { |
+ Label gc_required, not_number; |
+ |
+ // It could be that only smis have been seen so far for either the |
+ // left or the right operand. For precise type feedback, patch the IC |
+ // again if this changes. |
+ if (left_type_ == BinaryOpIC::SMI) { |
+ BinaryOpStub_CheckSmiInput(masm, rdx, ¬_number); |
+ } |
+ if (right_type_ == BinaryOpIC::SMI) { |
+ BinaryOpStub_CheckSmiInput(masm, rax, ¬_number); |
+ } |
+ |
+ BinaryOpStub_GenerateFloatingPointCode( |
+ masm, &gc_required, ¬_number, op_, mode_); |
+ |
+ __ bind(¬_number); |
+ GenerateTypeTransition(masm); |
+ |
+ __ bind(&gc_required); |
+ { |
+ FrameScope scope(masm, StackFrame::INTERNAL); |
+ GenerateRegisterArgsPush(masm); |
+ GenerateCallRuntime(masm); |
+ } |
+ __ Ret(); |
+} |
+ |
+ |
+void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
+ Label call_runtime, call_string_add_or_runtime; |
+ |
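+ // Try the smi fast path first, then the floating-point path; string |
+ // addition and all remaining cases fall back to the runtime. |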
+ BinaryOpStub_GenerateSmiCode( |
+ masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); |
+ |
+ BinaryOpStub_GenerateFloatingPointCode( |
+ masm, &call_runtime, &call_string_add_or_runtime, op_, mode_); |
+ |
+ __ bind(&call_string_add_or_runtime); |
+ if (op_ == Token::ADD) { |
+ GenerateAddStrings(masm); |
+ } |
+ |
+ __ bind(&call_runtime); |
+ { |
+ FrameScope scope(masm, StackFrame::INTERNAL); |
+ GenerateRegisterArgsPush(masm); |
+ GenerateCallRuntime(masm); |
+ } |
+ __ Ret(); |
+} |
+ |
+ |
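+ // Allocates, or reuses, the HeapNumber that will hold the result. |
+ // mode says whether an operand may be clobbered: when the reusable |
+ // operand is already a heap object it becomes the result holder and |
+ // no allocation is needed; a smi operand still forces an allocation. |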
+static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
+ Label* alloc_failure, |
+ OverwriteMode mode) { |
+ Label skip_allocation; |
+ switch (mode) { |
+ case OVERWRITE_LEFT: { |
+ // If the argument in rdx is already an object, we skip the |
+ // allocation of a heap number. |
+ __ JumpIfNotSmi(rdx, &skip_allocation); |
+ // Allocate a heap number for the result. Keep rax and rdx intact |
+ // for the possible runtime call. |
+ __ AllocateHeapNumber(rbx, rcx, alloc_failure); |
+ // Now rdx can be overwritten, losing one of the arguments, as we |
+ // are done with it and will not need it any more. |
+ __ movq(rdx, rbx); |
+ __ bind(&skip_allocation); |
+ // Use the object in rdx as the result holder. |
+ __ movq(rax, rdx); |
+ break; |
+ } |
+ case OVERWRITE_RIGHT: |
+ // If the argument in rax is already an object, we skip the |
+ // allocation of a heap number. |
+ __ JumpIfNotSmi(rax, &skip_allocation); |
+ // Fall through! |
+ case NO_OVERWRITE: |
+ // Allocate a heap number for the result. Keep rax and rdx intact |
+ // for the possible runtime call. |
+ __ AllocateHeapNumber(rbx, rcx, alloc_failure); |
+ // Now rax can be overwritten, losing one of the arguments, as we |
+ // are done with it and will not need it any more. |
+ __ movq(rax, rbx); |
+ __ bind(&skip_allocation); |
+ break; |
+ default: UNREACHABLE(); |
+ } |
+} |
+ |
+ |
+void BinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
+ __ push(rdx); |
+ __ push(rax); |
+} |
+ |
+ |
void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
// TAGGED case: |
// Input: |
@@ -854,6 +1432,67 @@ void TranscendentalCacheStub::GenerateOperation( |
} |
+// Input: rdx, rax are the left and right objects of a bit op. |
+// Output: rax, rcx are left and right integers for a bit op. |
+// Jumps to conversion_failure with rdx and rax unchanged. |
+void FloatingPointHelper::LoadAsIntegers(MacroAssembler* masm, |
+ Label* conversion_failure, |
+ Register heap_number_map) { |
+ // Check float operands. |
+ Label arg1_is_object, check_undefined_arg1; |
+ Label arg2_is_object, check_undefined_arg2; |
+ Label load_arg2, done; |
+ |
+ __ JumpIfNotSmi(rdx, &arg1_is_object); |
+ __ SmiToInteger32(r8, rdx); |
+ __ jmp(&load_arg2); |
+ |
+ // If the argument is undefined, it converts to zero (ECMA-262, section 9.5). |
+ __ bind(&check_undefined_arg1); |
+ __ CompareRoot(rdx, Heap::kUndefinedValueRootIndex); |
+ __ j(not_equal, conversion_failure); |
+ __ Set(r8, 0); |
+ __ jmp(&load_arg2); |
+ |
+ __ bind(&arg1_is_object); |
+ __ cmpq(FieldOperand(rdx, HeapObject::kMapOffset), heap_number_map); |
+ __ j(not_equal, &check_undefined_arg1); |
+ // Get the untagged integer version of the rdx heap number in r8. |
+ __ TruncateHeapNumberToI(r8, rdx); |
+ |
+ // Here r8 has the untagged integer, rax has a Smi or a heap number. |
+ __ bind(&load_arg2); |
+ // Test if arg2 is a Smi. |
+ __ JumpIfNotSmi(rax, &arg2_is_object); |
+ __ SmiToInteger32(rcx, rax); |
+ __ jmp(&done); |
+ |
+ // If the argument is undefined, it converts to zero (ECMA-262, section 9.5). |
+ __ bind(&check_undefined_arg2); |
+ __ CompareRoot(rax, Heap::kUndefinedValueRootIndex); |
+ __ j(not_equal, conversion_failure); |
+ __ Set(rcx, 0); |
+ __ jmp(&done); |
+ |
+ __ bind(&arg2_is_object); |
+ __ cmpq(FieldOperand(rax, HeapObject::kMapOffset), heap_number_map); |
+ __ j(not_equal, &check_undefined_arg2); |
+ // Get the untagged integer version of the rax heap number in rcx. |
+ __ TruncateHeapNumberToI(rcx, rax); |
+ |
+ __ bind(&done); |
+ __ movl(rax, r8); |
+} |
+ |
+ |
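+// Loads the smi operands rdx and rax into xmm0 and xmm1 as doubles. |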
+void FloatingPointHelper::LoadSSE2SmiOperands(MacroAssembler* masm) { |
+ __ SmiToInteger32(kScratchRegister, rdx); |
+ __ Cvtlsi2sd(xmm0, kScratchRegister); |
+ __ SmiToInteger32(kScratchRegister, rax); |
+ __ Cvtlsi2sd(xmm1, kScratchRegister); |
+} |
+ |
+ |
void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm, |
Label* not_numbers) { |
Label load_smi_rdx, load_nonsmi_rax, load_smi_rax, load_float_rax, done; |
@@ -884,6 +1523,83 @@ void FloatingPointHelper::LoadSSE2UnknownOperands(MacroAssembler* masm, |
} |
+void FloatingPointHelper::NumbersToSmis(MacroAssembler* masm, |
+ Register first, |
+ Register second, |
+ Register scratch1, |
+ Register scratch2, |
+ Register scratch3, |
+ Label* on_success, |
+ Label* on_not_smis, |
+ ConvertUndefined convert_undefined) { |
+ Register heap_number_map = scratch3; |
+ Register smi_result = scratch1; |
+ Label done, maybe_undefined_first, maybe_undefined_second, first_done; |
+ |
+ __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
+ |
+ Label first_smi; |
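+ // At least one operand is a non-smi on entry, so if first turns out |
+ // to be a smi, second cannot be one; the first_smi path below relies |
+ // on this precondition. |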
+ __ JumpIfSmi(first, &first_smi, Label::kNear); |
+ __ cmpq(FieldOperand(first, HeapObject::kMapOffset), heap_number_map); |
+ __ j(not_equal, |
+ (convert_undefined == CONVERT_UNDEFINED_TO_ZERO) |
+ ? &maybe_undefined_first |
+ : on_not_smis); |
+ // Convert HeapNumber to smi if possible. |
+ __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset)); |
+ __ movq(scratch2, xmm0); |
+ __ cvttsd2siq(smi_result, xmm0); |
+ // Check if conversion was successful by converting back and |
+ // comparing to the original double's bits. |
+ __ Cvtlsi2sd(xmm1, smi_result); |
+ __ movq(kScratchRegister, xmm1); |
+ __ cmpq(scratch2, kScratchRegister); |
+ __ j(not_equal, on_not_smis); |
+ __ Integer32ToSmi(first, smi_result); |
+ |
+ __ bind(&first_done); |
+ __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done); |
+ __ bind(&first_smi); |
+ __ AssertNotSmi(second); |
+ __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map); |
+ __ j(not_equal, |
+ (convert_undefined == CONVERT_UNDEFINED_TO_ZERO) |
+ ? &maybe_undefined_second |
+ : on_not_smis); |
+ // Convert second to a smi if possible. |
+ __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset)); |
+ __ movq(scratch2, xmm0); |
+ __ cvttsd2siq(smi_result, xmm0); |
+ __ Cvtlsi2sd(xmm1, smi_result); |
+ __ movq(kScratchRegister, xmm1); |
+ __ cmpq(scratch2, kScratchRegister); |
+ __ j(not_equal, on_not_smis); |
+ __ Integer32ToSmi(second, smi_result); |
+ if (on_success != NULL) { |
+ __ jmp(on_success); |
+ } else { |
+ __ jmp(&done); |
+ } |
+ |
+ __ bind(&maybe_undefined_first); |
+ __ CompareRoot(first, Heap::kUndefinedValueRootIndex); |
+ __ j(not_equal, on_not_smis); |
+ __ xor_(first, first); |
+ __ jmp(&first_done); |
+ |
+ __ bind(&maybe_undefined_second); |
+ __ CompareRoot(second, Heap::kUndefinedValueRootIndex); |
+ __ j(not_equal, on_not_smis); |
+ __ xor_(second, second); |
+ if (on_success != NULL) { |
+ __ jmp(on_success); |
+ } |
+ // Else: fall through. |
+ |
+ __ bind(&done); |
+} |
+ |
+ |
void MathPowStub::Generate(MacroAssembler* masm) { |
const Register exponent = rdx; |
const Register base = rax; |
@@ -2750,7 +3466,6 @@ void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { |
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); |
ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); |
CreateAllocationSiteStub::GenerateAheadOfTime(isolate); |
- BinaryOpStub::GenerateAheadOfTime(isolate); |
} |