Index: src/arm/code-stubs-arm.cc
===================================================================
--- src/arm/code-stubs-arm.cc (revision 7552)
+++ src/arm/code-stubs-arm.cc (working copy)
@@ -1780,1088 +1780,6 @@
 }
-const char* GenericBinaryOpStub::GetName() {
-  if (name_ != NULL) return name_;
-  const int len = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(len);
-  if (name_ == NULL) return "OOM";
-  const char* op_name = Token::Name(op_);
-  const char* overwrite_name;
-  switch (mode_) {
-    case NO_OVERWRITE: overwrite_name = "Alloc"; break;
-    case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
-    case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
-    default: overwrite_name = "UnknownOverwrite"; break;
-  }
-
-  OS::SNPrintF(Vector<char>(name_, len),
-               "GenericBinaryOpStub_%s_%s%s_%s",
-               op_name,
-               overwrite_name,
-               specialized_on_rhs_ ? "_ConstantRhs" : "",
-               BinaryOpIC::GetName(runtime_operands_type_));
-  return name_;
-}
-
-
-// We fall into this code if the operands were Smis, but the result was
-// not (e.g. overflow). We branch into this code (to the not_smi label) if
-// the operands were not both Smi. The operands are in r0 and r1. In order
-// to call the C-implemented binary fp operation routines we need to end up
-// with the double precision floating point operands in r0 and r1 (for the
-// value in r1) and r2 and r3 (for the value in r0).
-void GenericBinaryOpStub::HandleBinaryOpSlowCases(
-    MacroAssembler* masm,
-    Label* not_smi,
-    Register lhs,
-    Register rhs,
-    const Builtins::JavaScript& builtin) {
-  Label slow, slow_reverse, do_the_call;
-  bool use_fp_registers =
-      CpuFeatures::IsSupported(VFP3) &&
-      Token::MOD != op_;
-
-  ASSERT((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)));
-  Register heap_number_map = r6;
-
-  if (ShouldGenerateSmiCode()) {
-    __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-    // Smi-smi case (overflow).
-    // Since both are Smis there is no heap number to overwrite, so allocate.
-    // The new heap number is in r5. r3 and r7 are scratch.
-    __ AllocateHeapNumber(
-        r5, r3, r7, heap_number_map, lhs.is(r0) ? &slow_reverse : &slow);
-
-    // If we have floating point hardware, inline ADD, SUB, MUL, and DIV,
-    // using registers d7 and d6 for the double values.
-    if (CpuFeatures::IsSupported(VFP3)) {
-      CpuFeatures::Scope scope(VFP3);
-      __ mov(r7, Operand(rhs, ASR, kSmiTagSize));
-      __ vmov(s15, r7);
-      __ vcvt_f64_s32(d7, s15);
-      __ mov(r7, Operand(lhs, ASR, kSmiTagSize));
-      __ vmov(s13, r7);
-      __ vcvt_f64_s32(d6, s13);
-      if (!use_fp_registers) {
-        __ vmov(r2, r3, d7);
-        __ vmov(r0, r1, d6);
-      }
-    } else {
-      // Write Smi from rhs to r3 and r2 in double format. r9 is scratch.
-      __ mov(r7, Operand(rhs));
-      ConvertToDoubleStub stub1(r3, r2, r7, r9);
-      __ push(lr);
-      __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
-      // Write Smi from lhs to r1 and r0 in double format. r9 is scratch.
-      __ mov(r7, Operand(lhs));
-      ConvertToDoubleStub stub2(r1, r0, r7, r9);
-      __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
-      __ pop(lr);
-    }
-    __ jmp(&do_the_call);  // Tail call. No return.
-  }
-
-  // We branch here if at least one of r0 and r1 is not a Smi.
-  __ bind(not_smi);
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-  // After this point we have the left hand side in r1 and the right hand side
-  // in r0.
-  if (lhs.is(r0)) {
-    __ Swap(r0, r1, ip);
-  }
-
-  // The type transition also calculates the answer.
-  bool generate_code_to_calculate_answer = true;
-
-  if (ShouldGenerateFPCode()) {
-    // DIV has neither SmiSmi fast code nor specialized slow code.
-    // So don't try to patch a DIV Stub.
-    if (runtime_operands_type_ == BinaryOpIC::DEFAULT) {
-      switch (op_) {
-        case Token::ADD:
-        case Token::SUB:
-        case Token::MUL:
-          GenerateTypeTransition(masm);  // Tail call.
-          generate_code_to_calculate_answer = false;
-          break;
-
-        case Token::DIV:
-          // DIV has neither SmiSmi fast code nor specialized slow code.
-          // So don't try to patch a DIV Stub.
-          break;
-
-        default:
-          break;
-      }
-    }
-
-    if (generate_code_to_calculate_answer) {
-      Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1;
-      if (mode_ == NO_OVERWRITE) {
-        // In the case where there is no chance of an overwritable float we may
-        // as well do the allocation immediately while r0 and r1 are untouched.
-        __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow);
-      }
-
-      // Move r0 to a double in r2-r3.
-      __ tst(r0, Operand(kSmiTagMask));
-      __ b(eq, &r0_is_smi);  // It's a Smi so don't check it's a heap number.
-      __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset));
-      __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-      __ cmp(r4, heap_number_map);
-      __ b(ne, &slow);
-      if (mode_ == OVERWRITE_RIGHT) {
-        __ mov(r5, Operand(r0));  // Overwrite this heap number.
-      }
-      if (use_fp_registers) {
-        CpuFeatures::Scope scope(VFP3);
-        // Load the double from tagged HeapNumber r0 to d7.
-        __ sub(r7, r0, Operand(kHeapObjectTag));
-        __ vldr(d7, r7, HeapNumber::kValueOffset);
-      } else {
-        // Calling convention says that second double is in r2 and r3.
-        __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset));
-      }
-      __ jmp(&finished_loading_r0);
-      __ bind(&r0_is_smi);
-      if (mode_ == OVERWRITE_RIGHT) {
-        // We can't overwrite a Smi so get address of new heap number into r5.
-        __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
-      }
-
-      if (CpuFeatures::IsSupported(VFP3)) {
-        CpuFeatures::Scope scope(VFP3);
-        // Convert smi in r0 to double in d7.
-        __ mov(r7, Operand(r0, ASR, kSmiTagSize));
-        __ vmov(s15, r7);
-        __ vcvt_f64_s32(d7, s15);
-        if (!use_fp_registers) {
-          __ vmov(r2, r3, d7);
-        }
-      } else {
-        // Write Smi from r0 to r3 and r2 in double format.
-        __ mov(r7, Operand(r0));
-        ConvertToDoubleStub stub3(r3, r2, r7, r4);
-        __ push(lr);
-        __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET);
-        __ pop(lr);
-      }
-
-      // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis.
-      // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC.
-      Label r1_is_not_smi;
-      if ((runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) &&
-          HasSmiSmiFastPath()) {
-        __ tst(r1, Operand(kSmiTagMask));
-        __ b(ne, &r1_is_not_smi);
-        GenerateTypeTransition(masm);  // Tail call.
-      }
-
-      __ bind(&finished_loading_r0);
-
-      // Move r1 to a double in r0-r1.
-      __ tst(r1, Operand(kSmiTagMask));
-      __ b(eq, &r1_is_smi);  // It's a Smi so don't check it's a heap number.
-      __ bind(&r1_is_not_smi);
-      __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset));
-      __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-      __ cmp(r4, heap_number_map);
-      __ b(ne, &slow);
-      if (mode_ == OVERWRITE_LEFT) {
-        __ mov(r5, Operand(r1));  // Overwrite this heap number.
-      }
-      if (use_fp_registers) {
-        CpuFeatures::Scope scope(VFP3);
-        // Load the double from tagged HeapNumber r1 to d6.
-        __ sub(r7, r1, Operand(kHeapObjectTag));
-        __ vldr(d6, r7, HeapNumber::kValueOffset);
-      } else {
-        // Calling convention says that first double is in r0 and r1.
-        __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset));
-      }
-      __ jmp(&finished_loading_r1);
-      __ bind(&r1_is_smi);
-      if (mode_ == OVERWRITE_LEFT) {
-        // We can't overwrite a Smi so get address of new heap number into r5.
-        __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
-      }
-
-      if (CpuFeatures::IsSupported(VFP3)) {
-        CpuFeatures::Scope scope(VFP3);
-        // Convert smi in r1 to double in d6.
-        __ mov(r7, Operand(r1, ASR, kSmiTagSize));
-        __ vmov(s13, r7);
-        __ vcvt_f64_s32(d6, s13);
-        if (!use_fp_registers) {
-          __ vmov(r0, r1, d6);
-        }
-      } else {
-        // Write Smi from r1 to r1 and r0 in double format.
-        __ mov(r7, Operand(r1));
-        ConvertToDoubleStub stub4(r1, r0, r7, r9);
-        __ push(lr);
-        __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET);
-        __ pop(lr);
-      }
-
-      __ bind(&finished_loading_r1);
-    }
-
-    if (generate_code_to_calculate_answer || do_the_call.is_linked()) {
-      __ bind(&do_the_call);
-      // If we are inlining the operation using VFP3 instructions for
-      // add, subtract, multiply, or divide, the arguments are in d6 and d7.
-      if (use_fp_registers) {
-        CpuFeatures::Scope scope(VFP3);
-        // ARMv7 VFP3 instructions to implement
-        // double precision add, subtract, multiply, divide.
-
-        if (Token::MUL == op_) {
-          __ vmul(d5, d6, d7);
-        } else if (Token::DIV == op_) {
-          __ vdiv(d5, d6, d7);
-        } else if (Token::ADD == op_) {
-          __ vadd(d5, d6, d7);
-        } else if (Token::SUB == op_) {
-          __ vsub(d5, d6, d7);
-        } else {
-          UNREACHABLE();
-        }
-        __ sub(r0, r5, Operand(kHeapObjectTag));
-        __ vstr(d5, r0, HeapNumber::kValueOffset);
-        __ add(r0, r0, Operand(kHeapObjectTag));
-        __ Ret();
-      } else {
-        // If we did not inline the operation, then the arguments are in:
-        // r0: Left value (least significant part of mantissa).
-        // r1: Left value (sign, exponent, top of mantissa).
-        // r2: Right value (least significant part of mantissa).
-        // r3: Right value (sign, exponent, top of mantissa).
-        // r5: Address of heap number for result.
-
-        __ push(lr);  // For later.
-        __ PrepareCallCFunction(4, r4);  // Two doubles count as 4 arguments.
-        // Call C routine that may not cause GC or other trouble. r5 is
-        // callee-saved.
-        __ CallCFunction(
-            ExternalReference::double_fp_operation(op_, masm->isolate()), 4);
-        // Store answer in the overwritable heap number.
-#if !defined(USE_ARM_EABI)
-        // Double returned in fp coprocessor register 0 and 1, encoded as
-        // register cr8. Offsets must be divisible by 4 for the coprocessor,
-        // so we need to subtract the tag from r5.
-        __ sub(r4, r5, Operand(kHeapObjectTag));
-        __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset));
-#else
-        // Double returned in registers 0 and 1.
-        __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset));
-#endif
-        __ mov(r0, Operand(r5));
-        // And we are done.
-        __ pop(pc);
-      }
-    }
-  }
-
-  if (!generate_code_to_calculate_answer &&
-      !slow_reverse.is_linked() &&
-      !slow.is_linked()) {
-    return;
-  }
-
-  if (lhs.is(r0)) {
-    __ b(&slow);
-    __ bind(&slow_reverse);
-    __ Swap(r0, r1, ip);
-  }
-
-  heap_number_map = no_reg;  // Don't use this any more from here on.
-
-  // We jump to here if something goes wrong (one param is not a number of any
-  // sort or new-space allocation fails).
-  __ bind(&slow);
-
-  // Push arguments to the stack.
-  __ Push(r1, r0);
-
-  if (Token::ADD == op_) {
-    // Test for string arguments before calling runtime.
-    // r1 : first argument
-    // r0 : second argument
-    // sp[0] : second argument
-    // sp[4] : first argument
-
-    Label not_strings, not_string1, string1, string1_smi2;
-    __ tst(r1, Operand(kSmiTagMask));
-    __ b(eq, &not_string1);
-    __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE);
-    __ b(ge, &not_string1);
-
-    // First argument is a string, test second.
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &string1_smi2);
-    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
-    __ b(ge, &string1);
-
-    // First and second argument are strings.
-    StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
-    __ TailCallStub(&string_add_stub);
-
-    __ bind(&string1_smi2);
-    // First argument is a string, second is a smi. Try to look up the number
-    // string for the smi in the number string cache.
-    NumberToStringStub::GenerateLookupNumberStringCache(
-        masm, r0, r2, r4, r5, r6, true, &string1);
-
-    // Replace second argument on stack and tail call the string add stub to
-    // make the result.
-    __ str(r2, MemOperand(sp, 0));
-    __ TailCallStub(&string_add_stub);
-
-    // Only first argument is a string.
-    __ bind(&string1);
-    __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS);
-
-    // First argument was not a string, test second.
-    __ bind(&not_string1);
-    __ tst(r0, Operand(kSmiTagMask));
-    __ b(eq, &not_strings);
-    __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE);
-    __ b(ge, &not_strings);
-
-    // Only second argument is a string.
-    __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS);
-
-    __ bind(&not_strings);
-  }
-
-  __ InvokeBuiltin(builtin, JUMP_JS);  // Tail call. No return.
-}
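
The function above constantly tags and untags Smis (ASR kSmiTagSize, LSL
kSmiTagSize, tst against kSmiTagMask). As a reader's aid, a minimal C++
sketch of the 32-bit Smi encoding the stub assumes (kSmiTag == 0,
kSmiTagSize == 1; the helper names here are illustrative, not V8's):

#include <cstdint>

static inline int32_t SmiTag(int32_t value) { return value << 1; }
static inline int32_t SmiUntag(int32_t smi) { return smi >> 1; }    // ASR by kSmiTagSize.
static inline bool IsSmi(int32_t word) { return (word & 1) == 0; }  // kSmiTagMask test.

Because the tag bit is 0, orr-ing two words and testing the low bit (as
Generate() does below) checks that both operands are Smis in one instruction.
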
-
-
-// For bitwise ops where the inputs are not both Smis, we try to determine
-// whether both inputs are either Smis or at least heap numbers that can be
-// represented by a 32 bit signed value. We truncate towards zero as required
-// by the ES spec. If this is the case we do the bitwise op and see if the
-// result is a Smi. If so, great, otherwise we try to find a heap number to
-// write the answer into (either by allocating or by overwriting).
-// On entry the operands are in lhs and rhs. On exit the answer is in r0.
-void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm,
-                                                Register lhs,
-                                                Register rhs) {
-  Label slow, result_not_a_smi;
-  Label rhs_is_smi, lhs_is_smi;
-  Label done_checking_rhs, done_checking_lhs;
-
-  Register heap_number_map = r6;
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-
-  __ tst(lhs, Operand(kSmiTagMask));
-  __ b(eq, &lhs_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset));
-  __ cmp(r4, heap_number_map);
-  __ b(ne, &slow);
-  __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow);
-  __ jmp(&done_checking_lhs);
-  __ bind(&lhs_is_smi);
-  __ mov(r3, Operand(lhs, ASR, 1));
-  __ bind(&done_checking_lhs);
-
-  __ tst(rhs, Operand(kSmiTagMask));
-  __ b(eq, &rhs_is_smi);  // It's a Smi so don't check it's a heap number.
-  __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset));
-  __ cmp(r4, heap_number_map);
-  __ b(ne, &slow);
-  __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow);
-  __ jmp(&done_checking_rhs);
-  __ bind(&rhs_is_smi);
-  __ mov(r2, Operand(rhs, ASR, 1));
-  __ bind(&done_checking_rhs);
-
-  ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0))));
-
-  // r0 and r1: Original operands (Smi or heap numbers).
-  // r2 and r3: Signed int32 operands.
-  switch (op_) {
-    case Token::BIT_OR: __ orr(r2, r2, Operand(r3)); break;
-    case Token::BIT_XOR: __ eor(r2, r2, Operand(r3)); break;
-    case Token::BIT_AND: __ and_(r2, r2, Operand(r3)); break;
-    case Token::SAR:
-      // Use only the 5 least significant bits of the shift count.
-      __ and_(r2, r2, Operand(0x1f));
-      __ mov(r2, Operand(r3, ASR, r2));
-      break;
-    case Token::SHR:
-      // Use only the 5 least significant bits of the shift count.
-      __ and_(r2, r2, Operand(0x1f));
-      __ mov(r2, Operand(r3, LSR, r2), SetCC);
-      // SHR is special because it is required to produce a positive answer.
-      // The code below for writing into heap numbers isn't capable of writing
-      // the register as an unsigned int so we go to slow case if we hit this
-      // case.
-      if (CpuFeatures::IsSupported(VFP3)) {
-        __ b(mi, &result_not_a_smi);
-      } else {
-        __ b(mi, &slow);
-      }
-      break;
-    case Token::SHL:
-      // Use only the 5 least significant bits of the shift count.
-      __ and_(r2, r2, Operand(0x1f));
-      __ mov(r2, Operand(r3, LSL, r2));
-      break;
-    default: UNREACHABLE();
-  }
-  // Check that the *signed* result fits in a Smi.
-  __ add(r3, r2, Operand(0x40000000), SetCC);
-  __ b(mi, &result_not_a_smi);
-  __ mov(r0, Operand(r2, LSL, kSmiTagSize));
-  __ Ret();
-
-  Label have_to_allocate, got_a_heap_number;
-  __ bind(&result_not_a_smi);
-  switch (mode_) {
-    case OVERWRITE_RIGHT: {
-      __ tst(rhs, Operand(kSmiTagMask));
-      __ b(eq, &have_to_allocate);
-      __ mov(r5, Operand(rhs));
-      break;
-    }
-    case OVERWRITE_LEFT: {
-      __ tst(lhs, Operand(kSmiTagMask));
-      __ b(eq, &have_to_allocate);
-      __ mov(r5, Operand(lhs));
-      break;
-    }
-    case NO_OVERWRITE: {
-      // Get a new heap number in r5. r4 and r7 are scratch.
-      __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
-    }
-    default: break;
-  }
-  __ bind(&got_a_heap_number);
-  // r2: Answer as signed int32.
-  // r5: Heap number to write answer into.
-
-  // Nothing can go wrong now, so move the heap number to r0, which is the
-  // result.
-  __ mov(r0, Operand(r5));
-
-  if (CpuFeatures::IsSupported(VFP3)) {
-    // Convert the int32 in r2 to the heap number in r0. r3 is corrupted.
-    CpuFeatures::Scope scope(VFP3);
-    __ vmov(s0, r2);
-    if (op_ == Token::SHR) {
-      __ vcvt_f64_u32(d0, s0);
-    } else {
-      __ vcvt_f64_s32(d0, s0);
-    }
-    __ sub(r3, r0, Operand(kHeapObjectTag));
-    __ vstr(d0, r3, HeapNumber::kValueOffset);
-    __ Ret();
-  } else {
-    // Tail call that writes the int32 in r2 to the heap number in r0, using
-    // r3 as scratch. r0 is preserved and returned.
-    WriteInt32ToHeapNumberStub stub(r2, r0, r3);
-    __ TailCallStub(&stub);
-  }
-
-  if (mode_ != NO_OVERWRITE) {
-    __ bind(&have_to_allocate);
-    // Get a new heap number in r5. r4 and r7 are scratch.
-    __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow);
-    __ jmp(&got_a_heap_number);
-  }
-
-  // If all else failed then we go to the runtime system.
-  __ bind(&slow);
-  __ Push(lhs, rhs);  // Restore stack.
-  switch (op_) {
-    case Token::BIT_OR:
-      __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS);
-      break;
-    case Token::BIT_AND:
-      __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS);
-      break;
-    case Token::BIT_XOR:
-      __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS);
-      break;
-    case Token::SAR:
-      __ InvokeBuiltin(Builtins::SAR, JUMP_JS);
-      break;
-    case Token::SHR:
-      __ InvokeBuiltin(Builtins::SHR, JUMP_JS);
-      break;
-    case Token::SHL:
-      __ InvokeBuiltin(Builtins::SHL, JUMP_JS);
-      break;
-    default:
-      UNREACHABLE();
-  }
-}
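
The "fits in a Smi" test above (add 0x40000000, then branch on mi) relies on
a 32-bit value being Smi-representable exactly when it lies in [-2^30, 2^30).
A hedged C++ equivalent of that check (illustrative, not V8 code):

#include <cstdint>

static inline bool FitsInSmi(int32_t value) {
  // Adding 2^30 leaves the sign bit clear exactly for values in [-2^30, 2^30).
  return (static_cast<uint32_t>(value) + 0x40000000u) < 0x80000000u;
}

For example, 0x3fffffff passes (sum 0x7fffffff), while 0x40000000 fails: the
sum 0x80000000 has the sign bit set, which is the "mi" condition the stub
branches on.
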
-
-
-
-
-// This function takes the known int in a register for the cases
-// where it doesn't have a good shift-and-add trick, and may deliver
-// a result that still needs shifting.
-static void MultiplyByKnownIntInStub(
-    MacroAssembler* masm,
-    Register result,
-    Register source,
-    Register known_int_register,  // Smi tagged.
-    int known_int,
-    int* required_shift) {  // Including Smi tag shift
-  switch (known_int) {
-    case 3:
-      __ add(result, source, Operand(source, LSL, 1));
-      *required_shift = 1;
-      break;
-    case 5:
-      __ add(result, source, Operand(source, LSL, 2));
-      *required_shift = 1;
-      break;
-    case 6:
-      __ add(result, source, Operand(source, LSL, 1));
-      *required_shift = 2;
-      break;
-    case 7:
-      __ rsb(result, source, Operand(source, LSL, 3));
-      *required_shift = 1;
-      break;
-    case 9:
-      __ add(result, source, Operand(source, LSL, 3));
-      *required_shift = 1;
-      break;
-    case 10:
-      __ add(result, source, Operand(source, LSL, 2));
-      *required_shift = 2;
-      break;
-    default:
-      ASSERT(!IsPowerOf2(known_int));  // That would be very inefficient.
-      __ mul(result, source, known_int_register);
-      *required_shift = 0;
-  }
-}
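
Each case above produces a value that, shifted left by *required_shift,
equals source * known_int * 2 (the factor of two is the Smi tag, folded into
the shift). A C++ sketch of the same strength reduction, with that invariant
spelled out (illustrative only; the name is not V8's):

#include <cstdint>

// Returns r such that (r << *required_shift) == source * known_int * 2.
static int32_t MultiplyByKnownIntSketch(int32_t source,
                                        int known_int,
                                        int* required_shift) {
  switch (known_int) {
    case 3:  *required_shift = 1; return source + (source << 1);  // 3x << 1 == 6x.
    case 5:  *required_shift = 1; return source + (source << 2);  // 5x << 1 == 10x.
    case 6:  *required_shift = 2; return source + (source << 1);  // 3x << 2 == 12x.
    case 7:  *required_shift = 1; return (source << 3) - source;  // 7x << 1 == 14x.
    case 9:  *required_shift = 1; return source + (source << 3);  // 9x << 1 == 18x.
    case 10: *required_shift = 2; return source + (source << 2);  // 5x << 2 == 20x.
    default: *required_shift = 0; return source * (known_int << 1);  // Tagged multiply.
  }
}
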
-
-
-// This uses versions of the sum-of-digits-to-see-if-a-number-is-divisible-by-3
-// trick. See http://en.wikipedia.org/wiki/Divisibility_rule
-// Takes the sum of the digits base (mask + 1) repeatedly until we have a
-// number from 0 to mask. On exit the 'eq' condition flags are set if the
-// answer is exactly the mask.
-void IntegerModStub::DigitSum(MacroAssembler* masm,
-                              Register lhs,
-                              int mask,
-                              int shift,
-                              Label* entry) {
-  ASSERT(mask > 0);
-  ASSERT(mask <= 0xff);  // Ensures the mask is an encodable immediate, so we
-                         // never need ip to materialize it.
-  Label loop;
-  __ bind(&loop);
-  __ and_(ip, lhs, Operand(mask));
-  __ add(lhs, ip, Operand(lhs, LSR, shift));
-  __ bind(entry);
-  __ cmp(lhs, Operand(mask));
-  __ b(gt, &loop);
-}
-
-
-void IntegerModStub::DigitSum(MacroAssembler* masm,
-                              Register lhs,
-                              Register scratch,
-                              int mask,
-                              int shift1,
-                              int shift2,
-                              Label* entry) {
-  ASSERT(mask > 0);
-  ASSERT(mask <= 0xff);  // Ensures the mask is an encodable immediate, so we
-                         // never need ip to materialize it.
-  Label loop;
-  __ bind(&loop);
-  __ bic(scratch, lhs, Operand(mask));
-  __ and_(ip, lhs, Operand(mask));
-  __ add(lhs, ip, Operand(lhs, LSR, shift1));
-  __ add(lhs, lhs, Operand(scratch, LSR, shift2));
-  __ bind(entry);
-  __ cmp(lhs, Operand(mask));
-  __ b(gt, &loop);
-}
-
-
-// Splits the number into two halves (bottom half has shift bits). The top
-// half is subtracted from the bottom half. If the result is negative then
-// rhs is added.
-void IntegerModStub::ModGetInRangeBySubtraction(MacroAssembler* masm,
-                                                Register lhs,
-                                                int shift,
-                                                int rhs) {
-  int mask = (1 << shift) - 1;
-  __ and_(ip, lhs, Operand(mask));
-  __ sub(lhs, ip, Operand(lhs, LSR, shift), SetCC);
-  __ add(lhs, lhs, Operand(rhs), LeaveCC, mi);
-}
-
-
-void IntegerModStub::ModReduce(MacroAssembler* masm,
-                               Register lhs,
-                               int max,
-                               int denominator) {
-  int limit = denominator;
-  while (limit * 2 <= max) limit *= 2;
-  while (limit >= denominator) {
-    __ cmp(lhs, Operand(limit));
-    __ sub(lhs, lhs, Operand(limit), LeaveCC, ge);
-    limit >>= 1;
-  }
-}
-
-
-void IntegerModStub::ModAnswer(MacroAssembler* masm,
-                               Register result,
-                               Register shift_distance,
-                               Register mask_bits,
-                               Register sum_of_digits) {
-  __ add(result, mask_bits, Operand(sum_of_digits, LSL, shift_distance));
-  __ Ret();
-}
-
-
-// See comment for class.
-void IntegerModStub::Generate(MacroAssembler* masm) {
-  __ mov(lhs_, Operand(lhs_, LSR, shift_distance_));
-  __ bic(odd_number_, odd_number_, Operand(1));
-  __ mov(odd_number_, Operand(odd_number_, LSL, 1));
-  // We now have (odd_number_ - 1) * 2 in the register.
-  // Build a switch out of branches instead of data because it avoids
-  // having to teach the assembler about intra-code-object pointers
-  // that are not in relative branch instructions.
-  Label mod3, mod5, mod7, mod9, mod11, mod13, mod15, mod17, mod19;
-  Label mod21, mod23, mod25;
-  { Assembler::BlockConstPoolScope block_const_pool(masm);
-    __ add(pc, pc, Operand(odd_number_));
-    // When you read pc it is always 8 ahead, but when you write it you always
-    // write the actual value. So we put in two nops to take up the slack.
-    __ nop();
-    __ nop();
-    __ b(&mod3);
-    __ b(&mod5);
-    __ b(&mod7);
-    __ b(&mod9);
-    __ b(&mod11);
-    __ b(&mod13);
-    __ b(&mod15);
-    __ b(&mod17);
-    __ b(&mod19);
-    __ b(&mod21);
-    __ b(&mod23);
-    __ b(&mod25);
-  }
-
-  // For each denominator we find a multiple that is almost only ones
-  // when expressed in binary. Then we do the sum-of-digits trick for
-  // that number. If the multiplier is not 1 then we have to do a little
-  // more work afterwards to get the answer into the 0 to denominator-1
-  // range.
-  DigitSum(masm, lhs_, 3, 2, &mod3);  // 3 = b11.
-  __ sub(lhs_, lhs_, Operand(3), LeaveCC, eq);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 0xf, 4, &mod5);  // 5 * 3 = b1111.
-  ModGetInRangeBySubtraction(masm, lhs_, 2, 5);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 7, 3, &mod7);  // 7 = b111.
-  __ sub(lhs_, lhs_, Operand(7), LeaveCC, eq);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 0x3f, 6, &mod9);  // 7 * 9 = b111111.
-  ModGetInRangeBySubtraction(masm, lhs_, 3, 9);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, r5, 0x3f, 6, 3, &mod11);  // 5 * 11 = b110111.
-  ModReduce(masm, lhs_, 0x3f, 11);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod13);  // 19 * 13 = b11110111.
-  ModReduce(masm, lhs_, 0xff, 13);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 0xf, 4, &mod15);  // 15 = b1111.
-  __ sub(lhs_, lhs_, Operand(15), LeaveCC, eq);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 0xff, 8, &mod17);  // 15 * 17 = b11111111.
-  ModGetInRangeBySubtraction(masm, lhs_, 4, 17);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, r5, 0xff, 8, 5, &mod19);  // 13 * 19 = b11110111.
-  ModReduce(masm, lhs_, 0xff, 19);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, 0x3f, 6, &mod21);  // 3 * 21 = b111111.
-  ModReduce(masm, lhs_, 0x3f, 21);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, r5, 0xff, 8, 7, &mod23);  // 11 * 23 = b11111101.
-  ModReduce(masm, lhs_, 0xff, 23);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-
-  DigitSum(masm, lhs_, r5, 0x7f, 7, 6, &mod25);  // 5 * 25 = b1111101.
-  ModReduce(masm, lhs_, 0x7f, 25);
-  ModAnswer(masm, result_, shift_distance_, mask_bits_, lhs_);
-}
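
The whole generator rests on one identity: since 2^shift is congruent to 1
modulo mask == 2^shift - 1, summing a number's base-(mask + 1) digits
preserves its value mod mask. Choosing mask as an all-ones (or nearly so)
multiple of the denominator reduces the input to [0, mask], after which
ModGetInRangeBySubtraction or ModReduce maps it into [0, denominator). A
hedged C++ model of the DigitSum loop (illustrative, not the emitted code):

#include <cstdint>

static uint32_t DigitSumSketch(uint32_t n, int shift) {
  const uint32_t mask = (1u << shift) - 1;
  while (n > mask) {
    n = (n & mask) + (n >> shift);  // Preserves n mod mask.
  }
  return n;  // In [0, mask], congruent to the input mod mask.
}

For n % 5, say, the stub uses mask 15 (5 * 3 == b1111, shift 4):
DigitSumSketch(n, 4) yields n mod 15 in [0, 15], and the subtract-the-halves
step in ModGetInRangeBySubtraction (4 == -1 mod 5) finishes the reduction
into [0, 5).
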
-
-
-void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
-  // lhs_ : x
-  // rhs_ : y
-  // r0 : result
-
-  Register result = r0;
-  Register lhs = lhs_;
-  Register rhs = rhs_;
-
-  // This code can't cope with other register allocations yet.
-  ASSERT(result.is(r0) &&
-         ((lhs.is(r0) && rhs.is(r1)) ||
-          (lhs.is(r1) && rhs.is(r0))));
-
-  Register smi_test_reg = r7;
-  Register scratch = r9;
-
-  // All ops need to know whether we are dealing with two Smis. Set up
-  // smi_test_reg to tell us that.
-  if (ShouldGenerateSmiCode()) {
-    __ orr(smi_test_reg, lhs, Operand(rhs));
-  }
-
-  switch (op_) {
-    case Token::ADD: {
-      Label not_smi;
-      // Fast path.
-      if (ShouldGenerateSmiCode()) {
-        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
-        __ tst(smi_test_reg, Operand(kSmiTagMask));
-        __ b(ne, &not_smi);
-        __ add(r0, r1, Operand(r0), SetCC);  // Add y optimistically.
-        // Return if no overflow.
-        __ Ret(vc);
-        __ sub(r0, r0, Operand(r1));  // Revert optimistic add.
-      }
-      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::ADD);
-      break;
-    }
-
-    case Token::SUB: {
-      Label not_smi;
-      // Fast path.
-      if (ShouldGenerateSmiCode()) {
-        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
-        __ tst(smi_test_reg, Operand(kSmiTagMask));
-        __ b(ne, &not_smi);
-        if (lhs.is(r1)) {
-          __ sub(r0, r1, Operand(r0), SetCC);  // Subtract y optimistically.
-          // Return if no overflow.
-          __ Ret(vc);
-          __ sub(r0, r1, Operand(r0));  // Revert optimistic subtract.
-        } else {
-          __ sub(r0, r0, Operand(r1), SetCC);  // Subtract y optimistically.
-          // Return if no overflow.
-          __ Ret(vc);
-          __ add(r0, r0, Operand(r1));  // Revert optimistic subtract.
-        }
-      }
-      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::SUB);
-      break;
-    }
-
-    case Token::MUL: {
-      Label not_smi, slow;
-      if (ShouldGenerateSmiCode()) {
-        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
-        __ tst(smi_test_reg, Operand(kSmiTagMask));
-        Register scratch2 = smi_test_reg;
-        smi_test_reg = no_reg;
-        __ b(ne, &not_smi);
-        // Remove tag from one operand (but keep sign), so that result is Smi.
-        __ mov(ip, Operand(rhs, ASR, kSmiTagSize));
-        // Do multiplication.
-        // scratch = lower 32 bits of ip * lhs.
-        __ smull(scratch, scratch2, lhs, ip);
-        // Go slow on overflows (smull does not set the overflow flag, so
-        // compare the top 33 bits instead).
-        __ mov(ip, Operand(scratch, ASR, 31));
-        // No overflow if higher 33 bits are identical.
-        __ cmp(ip, Operand(scratch2));
-        __ b(ne, &slow);
-        // Go slow on zero result to handle -0.
-        __ tst(scratch, Operand(scratch));
-        __ mov(result, Operand(scratch), LeaveCC, ne);
-        __ Ret(ne);
-        // We need -0 if we were multiplying a negative number by 0 to get 0.
-        // We know one of them was zero.
-        __ add(scratch2, rhs, Operand(lhs), SetCC);
-        __ mov(result, Operand(Smi::FromInt(0)), LeaveCC, pl);
-        __ Ret(pl);  // Return Smi 0 if the non-zero one was positive.
-        // Slow case. We fall through here if we multiplied a negative number
-        // by 0, because that would mean we should produce -0.
-        __ bind(&slow);
-      }
-      HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
-      break;
-    }
-
-    case Token::DIV:
-    case Token::MOD: {
-      Label not_smi;
-      if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
-        Label lhs_is_unsuitable;
-        __ JumpIfNotSmi(lhs, &not_smi);
-        if (IsPowerOf2(constant_rhs_)) {
-          if (op_ == Token::MOD) {
-            __ and_(rhs,
-                    lhs,
-                    Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
-                    SetCC);
-            // We now have the answer, but if the input was negative we also
-            // have the sign bit. Our work is done if the result is
-            // positive or zero:
-            if (!rhs.is(r0)) {
-              __ mov(r0, rhs, LeaveCC, pl);
-            }
-            __ Ret(pl);
-            // A mod of a negative left hand side must return a negative number.
-            // Unfortunately if the answer is 0 then we must return -0. And we
-            // already optimistically trashed rhs so we may need to restore it.
-            __ eor(rhs, rhs, Operand(0x80000000u), SetCC);
-            // Next two instructions are conditional on the answer being -0.
-            __ mov(rhs, Operand(Smi::FromInt(constant_rhs_)), LeaveCC, eq);
-            __ b(eq, &lhs_is_unsuitable);
-            // We need to subtract the divisor. E.g. -3 % 4 == -3.
-            __ sub(result, rhs, Operand(Smi::FromInt(constant_rhs_)));
-          } else {
-            ASSERT(op_ == Token::DIV);
-            __ tst(lhs,
-                   Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)));
-            __ b(ne, &lhs_is_unsuitable);  // Go slow on negative or remainder.
-            int shift = 0;
-            int d = constant_rhs_;
-            while ((d & 1) == 0) {
-              d >>= 1;
-              shift++;
-            }
-            __ mov(r0, Operand(lhs, LSR, shift));
-            __ bic(r0, r0, Operand(kSmiTagMask));
-          }
-        } else {
-          // Not a power of 2.
-          __ tst(lhs, Operand(0x80000000u));
-          __ b(ne, &lhs_is_unsuitable);
-          // Find a fixed point reciprocal of the divisor so we can divide by
-          // multiplying.
-          double divisor = 1.0 / constant_rhs_;
-          int shift = 32;
-          double scale = 4294967296.0;  // 1 << 32.
-          uint32_t mul;
-          // Maximise the precision of the fixed point reciprocal.
-          while (true) {
-            mul = static_cast<uint32_t>(scale * divisor);
-            if (mul >= 0x7fffffff) break;
-            scale *= 2.0;
-            shift++;
-          }
-          mul++;
-          Register scratch2 = smi_test_reg;
-          smi_test_reg = no_reg;
-          __ mov(scratch2, Operand(mul));
-          __ umull(scratch, scratch2, scratch2, lhs);
-          __ mov(scratch2, Operand(scratch2, LSR, shift - 31));
-          // scratch2 is lhs / rhs. scratch2 is not Smi tagged.
-          // rhs is still the known rhs. rhs is Smi tagged.
-          // lhs is still the unknown lhs. lhs is Smi tagged.
-          int required_scratch_shift = 0;  // Including the Smi tag shift of 1.
-          // scratch = scratch2 * rhs.
-          MultiplyByKnownIntInStub(masm,
-                                   scratch,
-                                   scratch2,
-                                   rhs,
-                                   constant_rhs_,
-                                   &required_scratch_shift);
-          // scratch << required_scratch_shift is now the Smi tagged rhs *
-          // (lhs / rhs) where / indicates integer division.
-          if (op_ == Token::DIV) {
-            __ cmp(lhs, Operand(scratch, LSL, required_scratch_shift));
-            __ b(ne, &lhs_is_unsuitable);  // There was a remainder.
-            __ mov(result, Operand(scratch2, LSL, kSmiTagSize));
-          } else {
-            ASSERT(op_ == Token::MOD);
-            __ sub(result, lhs, Operand(scratch, LSL, required_scratch_shift));
-          }
-        }
-        __ Ret();
-        __ bind(&lhs_is_unsuitable);
-      } else if (op_ == Token::MOD &&
-                 runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
-                 runtime_operands_type_ != BinaryOpIC::STRINGS) {
-        // Do generate a bit of smi code for modulus even though the default for
-        // modulus is not to do it, but as the ARM processor has no coprocessor
-        // support for modulus, checking for smis makes sense. We can handle
-        // 1 to 25 times any power of 2. This covers over half the numbers from
-        // 1 to 100 including all of the first 25. (Actually the constants < 10
-        // are handled above by reciprocal multiplication. We only get here for
-        // those cases if the right hand side is not a constant or for cases
-        // like 192 which is 3*2^6 and ends up in the 3 case in the integer mod
-        // stub.)
-        Label slow;
-        Label not_power_of_2;
-        ASSERT(!ShouldGenerateSmiCode());
-        STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
-        // Check for two positive smis.
-        __ orr(smi_test_reg, lhs, Operand(rhs));
-        __ tst(smi_test_reg, Operand(0x80000000u | kSmiTagMask));
-        __ b(ne, &slow);
-        // Check that rhs is a power of two and not zero.
-        Register mask_bits = r3;
-        __ sub(scratch, rhs, Operand(1), SetCC);
-        __ b(mi, &slow);
-        __ and_(mask_bits, rhs, Operand(scratch), SetCC);
-        __ b(ne, &not_power_of_2);
-        // Calculate power of two modulus.
-        __ and_(result, lhs, Operand(scratch));
-        __ Ret();
-
-        __ bind(&not_power_of_2);
-        __ eor(scratch, scratch, Operand(mask_bits));
-        // At least two bits are set in the modulus. The high one(s) are in
-        // mask_bits and the low one is scratch + 1.
-        __ and_(mask_bits, scratch, Operand(lhs));
-        Register shift_distance = scratch;
-        scratch = no_reg;
-
-        // The rhs consists of a power of 2 multiplied by some odd number.
-        // The power-of-2 part we handle by putting the corresponding bits
-        // from the lhs in the mask_bits register, and the power in the
-        // shift_distance register. Shift distance is never 0 due to Smi
-        // tagging.
-        __ CountLeadingZeros(r4, shift_distance, shift_distance);
-        __ rsb(shift_distance, r4, Operand(32));
-
-        // Now we need to find out what the odd number is. The last bit is
-        // always 1.
-        Register odd_number = r4;
-        __ mov(odd_number, Operand(rhs, LSR, shift_distance));
-        __ cmp(odd_number, Operand(25));
-        __ b(gt, &slow);
-
-        IntegerModStub stub(
-            result, shift_distance, odd_number, mask_bits, lhs, r5);
-        __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);  // Tail call.
-
-        __ bind(&slow);
-      }
-      HandleBinaryOpSlowCases(
-          masm,
-          &not_smi,
-          lhs,
-          rhs,
-          op_ == Token::MOD ? Builtins::MOD : Builtins::DIV);
-      break;
-    }
-
-    case Token::BIT_OR:
-    case Token::BIT_AND:
-    case Token::BIT_XOR:
-    case Token::SAR:
-    case Token::SHR:
-    case Token::SHL: {
-      Label slow;
-      STATIC_ASSERT(kSmiTag == 0);  // Adjust code below.
-      __ tst(smi_test_reg, Operand(kSmiTagMask));
-      __ b(ne, &slow);
-      Register scratch2 = smi_test_reg;
-      smi_test_reg = no_reg;
-      switch (op_) {
-        case Token::BIT_OR: __ orr(result, rhs, Operand(lhs)); break;
-        case Token::BIT_AND: __ and_(result, rhs, Operand(lhs)); break;
-        case Token::BIT_XOR: __ eor(result, rhs, Operand(lhs)); break;
-        case Token::SAR:
-          // Remove tags from right operand.
-          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
-          __ mov(result, Operand(lhs, ASR, scratch2));
-          // Smi tag result.
-          __ bic(result, result, Operand(kSmiTagMask));
-          break;
-        case Token::SHR:
-          // Remove tags from operands. We can't do this on a 31 bit number
-          // because then the 0s get shifted into bit 30 instead of bit 31.
-          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
-          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
-          __ mov(scratch, Operand(scratch, LSR, scratch2));
-          // Unsigned shift is not allowed to produce a negative number, so
-          // check the sign bit and the sign bit after Smi tagging.
-          __ tst(scratch, Operand(0xc0000000));
-          __ b(ne, &slow);
-          // Smi tag result.
-          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
-          break;
-        case Token::SHL:
-          // Remove tags from operands.
-          __ mov(scratch, Operand(lhs, ASR, kSmiTagSize));  // x
-          __ GetLeastBitsFromSmi(scratch2, rhs, 5);
-          __ mov(scratch, Operand(scratch, LSL, scratch2));
-          // Check that the signed result fits in a Smi.
-          __ add(scratch2, scratch, Operand(0x40000000), SetCC);
-          __ b(mi, &slow);
-          __ mov(result, Operand(scratch, LSL, kSmiTagSize));
-          break;
-        default: UNREACHABLE();
-      }
-      __ Ret();
-      __ bind(&slow);
-      HandleNonSmiBitwiseOp(masm, lhs, rhs);
-      break;
-    }
-
-    default: UNREACHABLE();
-  }
-  // This code should be unreachable.
-  __ stop("Unreachable");
-
-  // Generate an unreachable reference to the DEFAULT stub so that it can be
-  // found at the end of this stub when clearing ICs at GC.
-  // TODO(kaznacheev): Check performance impact and get rid of this.
-  if (runtime_operands_type_ != BinaryOpIC::DEFAULT) {
-    GenericBinaryOpStub uninit(MinorKey(), BinaryOpIC::DEFAULT);
-    __ CallStub(&uninit);
-  }
-}
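
The "not a power of 2" DIV/MOD path above divides by a known constant by
multiplying with a fixed-point reciprocal: pick mul just above 2^31 with
mul approximately 2^shift / divisor, round up, and (x * mul) >> shift equals
x / divisor across the positive Smi range the stub guards for. A standalone
C++ sketch of the same search and use (illustrative, not the stub itself):

#include <cstdint>

static uint32_t DivideByKnownInt(uint32_t x, uint32_t divisor) {
  // Maximise the precision of the fixed point reciprocal, as the stub does.
  double reciprocal = 1.0 / divisor;
  double scale = 4294967296.0;  // 1 << 32.
  int shift = 32;
  uint32_t mul;
  while (true) {
    mul = static_cast<uint32_t>(scale * reciprocal);
    if (mul >= 0x7fffffffu) break;
    scale *= 2.0;
    shift++;
  }
  mul++;  // Round up so the quotient estimate is never too small.
  return static_cast<uint32_t>((static_cast<uint64_t>(x) * mul) >> shift);
}

For divisor 3 this finds mul == 0xAAAAAAAB and shift == 33, the classic
divide-by-three constant. The stub's umull/LSR pair computes the same 64-bit
product and shift, with the Smi tag bit absorbed by shifting the high word
right by shift - 31 instead of shift - 32.
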
-
-
-void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
-  Label get_result;
-
-  __ Push(r1, r0);
-
-  __ mov(r2, Operand(Smi::FromInt(MinorKey())));
-  __ mov(r1, Operand(Smi::FromInt(op_)));
-  __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));
-  __ Push(r2, r1, r0);
-
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
-      5,
-      1);
-}
-
-
-Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
-  GenericBinaryOpStub stub(key, type_info);
-  return stub.GetCode();
-}
-
-
 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
                                           TRBinaryOpIC::TypeInfo type_info,
                                           TRBinaryOpIC::TypeInfo result_type_info) {