Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 7630 matching lines...) | |
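A note for readers of the hunk below: the stub's fast paths lean on V8's 32-bit pointer tagging, in which a small integer (Smi) keeps its low bit clear while heap object pointers keep it set. That is why `tst rX, Operand(kSmiTagMask)` followed by `b(eq, ...)` is the recurring "is it a Smi?" test, and why `ASR, kSmiTagSize` untags. A minimal sketch of the scheme (the constants match the names used in the diff; the helper functions are illustrative, not V8 API):

```cpp
#include <cstdint>

// 32-bit V8 Smi tagging as assumed by this stub: a small integer n is
// stored as (n << 1) with tag bit 0 == 0; heap pointers have bit 0 == 1.
const intptr_t kSmiTagMask = 1;  // low bit distinguishes Smi from heap object
const int kSmiTagSize = 1;       // shift distance used when (un)tagging

// Illustrative helpers (not V8 API): the ARM code performs the same work
// with "tst rX, Operand(kSmiTagMask)" and "mov r7, Operand(rX, ASR, kSmiTagSize)".
bool IsSmi(intptr_t value) { return (value & kSmiTagMask) == 0; }
intptr_t UntagSmi(intptr_t smi) { return smi >> kSmiTagSize; }
intptr_t TagSmi(intptr_t n) { return n << kSmiTagSize; }
```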
| 7641 // We branch here if at least one of r0 and r1 is not a Smi. | 7641 // We branch here if at least one of r0 and r1 is not a Smi. |
| 7642 __ bind(not_smi); | 7642 __ bind(not_smi); |
| 7643 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 7643 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 7644 | 7644 |
| 7645 // After this point we have the left hand side in r1 and the right hand side | 7645 // After this point we have the left hand side in r1 and the right hand side |
| 7646 // in r0. | 7646 // in r0. |
| 7647 if (lhs.is(r0)) { | 7647 if (lhs.is(r0)) { |
| 7648 __ Swap(r0, r1, ip); | 7648 __ Swap(r0, r1, ip); |
| 7649 } | 7649 } |
| 7650 | 7650 |
| 7651 // The type transition also calculates the answer. | |
| 7652 bool generate_code_to_calculate_answer = true; | |
| 7653 | |
| 7651 if (ShouldGenerateFPCode()) { | 7654 if (ShouldGenerateFPCode()) { |
| 7652 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; | |
| 7653 | 7655 |
| 7654 if (runtime_operands_type_ == BinaryOpIC::DEFAULT) { | 7656 if (runtime_operands_type_ == BinaryOpIC::DEFAULT) { |
| 7655 switch (op_) { | 7657 switch (op_) { |
| 7656 case Token::ADD: | 7658 case Token::ADD: |
| 7657 case Token::SUB: | 7659 case Token::SUB: |
| 7658 case Token::MUL: | 7660 case Token::MUL: |
| 7659 case Token::DIV: | 7661 case Token::DIV: |
| 7660 GenerateTypeTransition(masm); | 7662 GenerateTypeTransition(masm); // Tail call. |
| 7663 generate_code_to_calculate_answer = false; | |
| 7661 break; | 7664 break; |
| 7662 | 7665 |
| 7663 default: | 7666 default: |
| 7664 break; | 7667 break; |
| 7665 } | 7668 } |
| 7666 // Restore heap number map register. | 7669 } |
| 7667 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 7670 |
| 7668 } | 7671 if (generate_code_to_calculate_answer) { |
| 7669 | 7672 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; |
| 7670 if (mode_ == NO_OVERWRITE) { | 7673 if (mode_ == NO_OVERWRITE) { |
| 7671 // In the case where there is no chance of an overwritable float we may as | 7674 // In the case where there is no chance of an overwritable float we may as |
| 7672 // well do the allocation immediately while r0 and r1 are untouched. | 7675 // well do the allocation immediately while r0 and r1 are untouched. |
| 7673 __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow); | 7676 __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow); |
| 7674 } | 7677 } |
| 7675 | 7678 |
| 7676 // Move r0 to a double in r2-r3. | 7679 // Move r0 to a double in r2-r3. |
| 7677 __ tst(r0, Operand(kSmiTagMask)); | 7680 __ tst(r0, Operand(kSmiTagMask)); |
| 7678 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. | 7681 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. |
| 7679 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | 7682 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| 7680 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 7683 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 7681 __ cmp(r4, heap_number_map); | 7684 __ cmp(r4, heap_number_map); |
| 7682 __ b(ne, &slow); | 7685 __ b(ne, &slow); |
| 7683 if (mode_ == OVERWRITE_RIGHT) { | 7686 if (mode_ == OVERWRITE_RIGHT) { |
| 7684 __ mov(r5, Operand(r0)); // Overwrite this heap number. | 7687 __ mov(r5, Operand(r0)); // Overwrite this heap number. |
| 7685 } | 7688 } |
| 7686 if (use_fp_registers) { | 7689 if (use_fp_registers) { |
| 7687 CpuFeatures::Scope scope(VFP3); | 7690 CpuFeatures::Scope scope(VFP3); |
| 7688 // Load the double from tagged HeapNumber r0 to d7. | 7691 // Load the double from tagged HeapNumber r0 to d7. |
| 7689 __ sub(r7, r0, Operand(kHeapObjectTag)); | 7692 __ sub(r7, r0, Operand(kHeapObjectTag)); |
| 7690 __ vldr(d7, r7, HeapNumber::kValueOffset); | 7693 __ vldr(d7, r7, HeapNumber::kValueOffset); |
| 7691 } else { | 7694 } else { |
| 7692 // Calling convention says that second double is in r2 and r3. | 7695 // Calling convention says that second double is in r2 and r3. |
| 7693 __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 7696 __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
| 7694 } | 7697 } |
| 7695 __ jmp(&finished_loading_r0); | 7698 __ jmp(&finished_loading_r0); |
| 7696 __ bind(&r0_is_smi); | 7699 __ bind(&r0_is_smi); |
| 7697 if (mode_ == OVERWRITE_RIGHT) { | 7700 if (mode_ == OVERWRITE_RIGHT) { |
| 7698 // We can't overwrite a Smi so get address of new heap number into r5. | 7701 // We can't overwrite a Smi so get address of new heap number into r5. |
| 7699 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | 7702 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); |
| 7700 } | 7703 } |
| 7701 | 7704 |
| 7702 if (CpuFeatures::IsSupported(VFP3)) { | 7705 if (CpuFeatures::IsSupported(VFP3)) { |
| 7703 CpuFeatures::Scope scope(VFP3); | 7706 CpuFeatures::Scope scope(VFP3); |
| 7704 // Convert smi in r0 to double in d7. | 7707 // Convert smi in r0 to double in d7. |
| 7705 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); | 7708 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); |
| 7706 __ vmov(s15, r7); | 7709 __ vmov(s15, r7); |
| 7707 __ vcvt_f64_s32(d7, s15); | 7710 __ vcvt_f64_s32(d7, s15); |
| 7708 if (!use_fp_registers) { | 7711 if (!use_fp_registers) { |
| 7709 __ vmov(r2, r3, d7); | 7712 __ vmov(r2, r3, d7); |
| 7710 } | 7713 } |
| 7711 } else { | 7714 } else { |
| 7712 // Write Smi from r0 to r3 and r2 in double format. | 7715 // Write Smi from r0 to r3 and r2 in double format. |
| 7713 __ mov(r7, Operand(r0)); | 7716 __ mov(r7, Operand(r0)); |
| 7714 ConvertToDoubleStub stub3(r3, r2, r7, r4); | 7717 ConvertToDoubleStub stub3(r3, r2, r7, r4); |
| 7715 __ push(lr); | 7718 __ push(lr); |
| 7716 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); | 7719 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); |
| 7717 __ pop(lr); | 7720 __ pop(lr); |
| 7718 } | 7721 } |
| 7719 | 7722 |
| 7720 // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis. | 7723 // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis. |
| 7721 // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC. | 7724 // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC. |
| 7722 Label r1_is_not_smi; | 7725 Label r1_is_not_smi; |
| 7723 if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) { | 7726 if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) { |
| 7727 __ tst(r1, Operand(kSmiTagMask)); | |
| 7728 __ b(ne, &r1_is_not_smi); | |
| 7729 GenerateTypeTransition(masm); // Tail call. | |
| 7730 } | |
| 7731 | |
| 7732 __ bind(&finished_loading_r0); | |
| 7733 | |
| 7734 // Move r1 to a double in r0-r1. | |
| 7724 __ tst(r1, Operand(kSmiTagMask)); | 7735 __ tst(r1, Operand(kSmiTagMask)); |
| 7725 __ b(ne, &r1_is_not_smi); | 7736 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. |
| 7726 GenerateTypeTransition(masm); | 7737 __ bind(&r1_is_not_smi); |
| 7727 // Restore heap number map register. | 7738 __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset)); |
| 7728 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 7739 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 7729 __ jmp(&r1_is_smi); | 7740 __ cmp(r4, heap_number_map); |
| 7730 } | 7741 __ b(ne, &slow); |
| 7731 | 7742 if (mode_ == OVERWRITE_LEFT) { |
| 7732 __ bind(&finished_loading_r0); | 7743 __ mov(r5, Operand(r1)); // Overwrite this heap number. |
| 7733 | 7744 } |
| 7734 // Move r1 to a double in r0-r1. | 7745 if (use_fp_registers) { |
| 7735 __ tst(r1, Operand(kSmiTagMask)); | 7746 CpuFeatures::Scope scope(VFP3); |
| 7736 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. | 7747 // Load the double from tagged HeapNumber r1 to d6. |
| 7737 __ bind(&r1_is_not_smi); | 7748 __ sub(r7, r1, Operand(kHeapObjectTag)); |
| 7738 __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset)); | 7749 __ vldr(d6, r7, HeapNumber::kValueOffset); |
| 7739 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 7750 } else { |
| 7740 __ cmp(r4, heap_number_map); | 7751 // Calling convention says that first double is in r0 and r1. |
| 7741 __ b(ne, &slow); | 7752 __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
| 7742 if (mode_ == OVERWRITE_LEFT) { | 7753 } |
| 7743 __ mov(r5, Operand(r1)); // Overwrite this heap number. | 7754 __ jmp(&finished_loading_r1); |
| 7744 } | 7755 __ bind(&r1_is_smi); |
| 7745 if (use_fp_registers) { | 7756 if (mode_ == OVERWRITE_LEFT) { |
| 7746 CpuFeatures::Scope scope(VFP3); | 7757 // We can't overwrite a Smi so get address of new heap number into r5. |
| 7747 // Load the double from tagged HeapNumber r1 to d6. | 7758 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); |
| 7748 __ sub(r7, r1, Operand(kHeapObjectTag)); | 7759 } |
| 7749 __ vldr(d6, r7, HeapNumber::kValueOffset); | 7760 |
| 7750 } else { | 7761 if (CpuFeatures::IsSupported(VFP3)) { |
| 7751 // Calling convention says that first double is in r0 and r1. | 7762 CpuFeatures::Scope scope(VFP3); |
| 7752 __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); | 7763 // Convert smi in r1 to double in d6. |
| 7753 } | 7764 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); |
| 7754 __ jmp(&finished_loading_r1); | 7765 __ vmov(s13, r7); |
| 7755 __ bind(&r1_is_smi); | 7766 __ vcvt_f64_s32(d6, s13); |
| 7756 if (mode_ == OVERWRITE_LEFT) { | 7767 if (!use_fp_registers) { |
| 7757 // We can't overwrite a Smi so get address of new heap number into r5. | 7768 __ vmov(r0, r1, d6); |
| 7758 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | 7769 } |
| 7759 } | 7770 } else { |
| 7760 | 7771 // Write Smi from r1 to r1 and r0 in double format. |
| 7761 if (CpuFeatures::IsSupported(VFP3)) { | 7772 __ mov(r7, Operand(r1)); |
| 7762 CpuFeatures::Scope scope(VFP3); | 7773 ConvertToDoubleStub stub4(r1, r0, r7, r9); |
| 7763 // Convert smi in r1 to double in d6. | 7774 __ push(lr); |
| 7764 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); | 7775 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); |
| 7765 __ vmov(s13, r7); | 7776 __ pop(lr); |
| 7766 __ vcvt_f64_s32(d6, s13); | 7777 } |
| 7767 if (!use_fp_registers) { | 7778 |
| 7768 __ vmov(r0, r1, d6); | 7779 __ bind(&finished_loading_r1); |
| 7769 } | 7780 } |
| 7770 } else { | 7781 |
| 7771 // Write Smi from r1 to r1 and r0 in double format. | 7782 if (generate_code_to_calculate_answer || do_the_call.is_linked()) { |
| 7772 __ mov(r7, Operand(r1)); | 7783 __ bind(&do_the_call); |
| 7773 ConvertToDoubleStub stub4(r1, r0, r7, r9); | 7784 // If we are inlining the operation using VFP3 instructions for |
| 7774 __ push(lr); | 7785 // add, subtract, multiply, or divide, the arguments are in d6 and d7. |
| 7775 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); | 7786 if (use_fp_registers) { |
| 7776 __ pop(lr); | 7787 CpuFeatures::Scope scope(VFP3); |
| 7777 } | 7788 // ARMv7 VFP3 instructions to implement |
| 7778 | 7789 // double precision, add, subtract, multiply, divide. |
| 7779 __ bind(&finished_loading_r1); | 7790 |
| 7780 | 7791 if (Token::MUL == op_) { |
| 7781 __ bind(&do_the_call); | 7792 __ vmul(d5, d6, d7); |
| 7782 // If we are inlining the operation using VFP3 instructions for | 7793 } else if (Token::DIV == op_) { |
| 7783 // add, subtract, multiply, or divide, the arguments are in d6 and d7. | 7794 __ vdiv(d5, d6, d7); |
| 7784 if (use_fp_registers) { | 7795 } else if (Token::ADD == op_) { |
| 7785 CpuFeatures::Scope scope(VFP3); | 7796 __ vadd(d5, d6, d7); |
| 7786 // ARMv7 VFP3 instructions to implement | 7797 } else if (Token::SUB == op_) { |
| 7787 // double precision, add, subtract, multiply, divide. | 7798 __ vsub(d5, d6, d7); |
| 7788 | 7799 } else { |
| 7789 if (Token::MUL == op_) { | 7800 UNREACHABLE(); |
| 7790 __ vmul(d5, d6, d7); | 7801 } |
| 7791 } else if (Token::DIV == op_) { | 7802 __ sub(r0, r5, Operand(kHeapObjectTag)); |
| 7792 __ vdiv(d5, d6, d7); | 7803 __ vstr(d5, r0, HeapNumber::kValueOffset); |
| 7793 } else if (Token::ADD == op_) { | 7804 __ add(r0, r0, Operand(kHeapObjectTag)); |
| 7794 __ vadd(d5, d6, d7); | 7805 __ mov(pc, lr); |
| 7795 } else if (Token::SUB == op_) { | 7806 } else { |
| 7796 __ vsub(d5, d6, d7); | 7807 // If we did not inline the operation, then the arguments are in: |
| 7797 } else { | 7808 // r0: Left value (least significant part of mantissa). |
| 7798 UNREACHABLE(); | 7809 // r1: Left value (sign, exponent, top of mantissa). |
| 7799 } | 7810 // r2: Right value (least significant part of mantissa). |
| 7800 __ sub(r0, r5, Operand(kHeapObjectTag)); | 7811 // r3: Right value (sign, exponent, top of mantissa). |
| 7801 __ vstr(d5, r0, HeapNumber::kValueOffset); | 7812 // r5: Address of heap number for result. |
| 7802 __ add(r0, r0, Operand(kHeapObjectTag)); | 7813 |
| 7803 __ mov(pc, lr); | 7814 __ push(lr); // For later. |
| 7804 } else { | 7815 __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments. |
| 7805 // If we did not inline the operation, then the arguments are in: | 7816 // Call C routine that may not cause GC or other trouble. r5 is callee |
| 7806 // r0: Left value (least significant part of mantissa). | 7817 // save. |
| 7807 // r1: Left value (sign, exponent, top of mantissa). | 7818 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); |
| 7808 // r2: Right value (least significant part of mantissa). | 7819 // Store answer in the overwritable heap number. |
| 7809 // r3: Right value (sign, exponent, top of mantissa). | 7820 #if !defined(USE_ARM_EABI) |
| 7810 // r5: Address of heap number for result. | 7821 // Double returned in fp coprocessor register 0 and 1, encoded as register |
| 7811 | 7822 // cr8. Offsets must be divisible by 4 for coprocessor so we need to |
| 7812 __ push(lr); // For later. | 7823 // subtract the tag from r5. |
| 7813 __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments. | 7824 __ sub(r4, r5, Operand(kHeapObjectTag)); |
| 7814 // Call C routine that may not cause GC or other trouble. r5 is callee | 7825 __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); |
| 7815 // save. | 7826 #else |
| 7816 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); | 7827 // Double returned in registers 0 and 1. |
| 7817 // Store answer in the overwritable heap number. | 7828 __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset)); |
| 7818 #if !defined(USE_ARM_EABI) | 7829 #endif |
| 7819 // Double returned in fp coprocessor register 0 and 1, encoded as register | 7830 __ mov(r0, Operand(r5)); |
| 7820 // cr8. Offsets must be divisible by 4 for coprocessor so we need to | 7831 // And we are done. |
| 7821 // subtract the tag from r5. | 7833 } |
| 7822 __ sub(r4, r5, Operand(kHeapObjectTag)); | 7833 } |
| 7823 __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); | |
| 7824 #else | |
| 7825 // Double returned in registers 0 and 1. | |
| 7826 __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset)); | |
| 7827 #endif | |
| 7828 __ mov(r0, Operand(r5)); | |
| 7829 // And we are done. | |
| 7830 __ pop(pc); | |
| 7831 } | 7834 } |
| 7832 } | 7835 } |
| 7833 | 7836 |
| 7834 if (lhs.is(r0)) { | 7837 if (lhs.is(r0)) { |
| 7835 __ b(&slow); | 7838 if (generate_code_to_calculate_answer || slow_reverse.is_linked()) { |

> Kasper Lund (2010/07/06 11:56:35): Maybe just move the if (lhs.is(r0)) code into the
> Erik Corry (2010/07/06 12:53:21): Done.
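The code under discussion, restated outside the diff for readability: the patch guards label binds with `Label::is_linked()` so that when `GenerateTypeTransition` tail-calls out of the stub (and `generate_code_to_calculate_answer` is false), no unreachable slow-path code is emitted. A sketch using the names from the patch:

```cpp
// Only emit the reversed-operands slow path if something can reach it:
// either the answer is computed inline (fall-through joins &slow), or an
// earlier branch actually targeted &slow_reverse.
if (lhs.is(r0)) {
  if (generate_code_to_calculate_answer || slow_reverse.is_linked()) {
    __ b(&slow);             // fall-through joins the common slow path
    __ bind(&slow_reverse);  // operands were swapped on this path...
    __ Swap(r0, r1, ip);     // ...so restore the original order first
  }
}
```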
| 7836 __ bind(&slow_reverse); | 7839 __ b(&slow); |
| 7837 __ Swap(r0, r1, ip); | 7840 __ bind(&slow_reverse); |
| 7841 __ Swap(r0, r1, ip); | |
| 7842 } | |
| 7838 } | 7843 } |
| 7839 | 7844 |
| 7840 heap_number_map = no_reg; // Don't use this any more from here on. | 7845 heap_number_map = no_reg; // Don't use this any more from here on. |
| 7841 | 7846 |
| 7842 // We jump to here if something goes wrong (one param is not a number of any | 7847 // We jump to here if something goes wrong (one param is not a number of any |
| 7843 // sort or new-space allocation fails). | 7848 // sort or new-space allocation fails). |
| 7844 __ bind(&slow); | 7849 if (generate_code_to_calculate_answer || slow.is_linked()) { |

> Kasper Lund (2010/07/06 11:56:35): I would consider negating the condition and return
> Erik Corry (2010/07/06 12:53:21): Done.
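Kasper's (truncated) suggestion reads as the usual early-return refactor of this guard: negate the condition and bail out, so the long slow-path body loses one nesting level. Schematically (a sketch of the suggestion, not code from this patch):

```cpp
// As written in this patch: the whole slow path nests under the guard.
if (generate_code_to_calculate_answer || slow.is_linked()) {
  __ bind(&slow);
  // ... push arguments, string fast paths for Token::ADD, InvokeBuiltin ...
}

// Suggested shape: negate and return early, un-indenting the slow path.
if (!generate_code_to_calculate_answer && !slow.is_linked()) return;
__ bind(&slow);
// ... push arguments, string fast paths for Token::ADD, InvokeBuiltin ...
```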
| 7845 | 7850 __ bind(&slow); |
| 7846 // Push arguments to the stack | 7851 |
| 7847 __ Push(r1, r0); | 7852 // Push arguments to the stack |
| 7848 | 7853 __ Push(r1, r0); |
| 7849 if (Token::ADD == op_) { | 7854 |
| 7850 // Test for string arguments before calling runtime. | 7855 if (Token::ADD == op_) { |
| 7851 // r1 : first argument | 7856 // Test for string arguments before calling runtime. |
| 7852 // r0 : second argument | 7857 // r1 : first argument |
| 7853 // sp[0] : second argument | 7858 // r0 : second argument |
| 7854 // sp[4] : first argument | 7859 // sp[0] : second argument |
| 7855 | 7860 // sp[4] : first argument |
| 7856 Label not_strings, not_string1, string1, string1_smi2; | 7861 |
| 7857 __ tst(r1, Operand(kSmiTagMask)); | 7862 Label not_strings, not_string1, string1, string1_smi2; |
| 7858 __ b(eq, ¬_string1); | 7863 __ tst(r1, Operand(kSmiTagMask)); |
| 7859 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE); | 7864 __ b(eq, ¬_string1); |
| 7860 __ b(ge, ¬_string1); | 7865 __ CompareObjectType(r1, r2, r2, FIRST_NONSTRING_TYPE); |
| 7861 | 7866 __ b(ge, ¬_string1); |
| 7862 // First argument is a string, test second. | 7867 |
| 7863 __ tst(r0, Operand(kSmiTagMask)); | 7868 // First argument is a string, test second. |
| 7864 __ b(eq, &string1_smi2); | 7869 __ tst(r0, Operand(kSmiTagMask)); |
| 7865 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); | 7870 __ b(eq, &string1_smi2); |
| 7866 __ b(ge, &string1); | 7871 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); |
| 7867 | 7872 __ b(ge, &string1); |
| 7868 // First and second argument are strings. | 7873 |
| 7869 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); | 7874 // First and second argument are strings. |
| 7870 __ TailCallStub(&string_add_stub); | 7875 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); |
| 7871 | 7876 __ TailCallStub(&string_add_stub); |
| 7872 __ bind(&string1_smi2); | 7877 |
| 7873 // First argument is a string, second is a smi. Try to lookup the number | 7878 __ bind(&string1_smi2); |
| 7874 // string for the smi in the number string cache. | 7879 // First argument is a string, second is a smi. Try to lookup the number |
| 7875 NumberToStringStub::GenerateLookupNumberStringCache( | 7880 // string for the smi in the number string cache. |
| 7876 masm, r0, r2, r4, r5, r6, true, &string1); | 7881 NumberToStringStub::GenerateLookupNumberStringCache( |
| 7877 | 7882 masm, r0, r2, r4, r5, r6, true, &string1); |
| 7878 // Replace second argument on stack and tailcall string add stub to make | 7883 |
| 7879 // the result. | 7884 // Replace second argument on stack and tailcall string add stub to make |
| 7880 __ str(r2, MemOperand(sp, 0)); | 7885 // the result. |
| 7881 __ TailCallStub(&string_add_stub); | 7886 __ str(r2, MemOperand(sp, 0)); |
| 7882 | 7887 __ TailCallStub(&string_add_stub); |
| 7883 // Only first argument is a string. | 7888 |
| 7884 __ bind(&string1); | 7889 // Only first argument is a string. |
| 7885 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS); | 7890 __ bind(&string1); |
| 7886 | 7891 __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_JS); |
| 7887 // First argument was not a string, test second. | 7892 |
| 7888 __ bind(¬_string1); | 7893 // First argument was not a string, test second. |
| 7889 __ tst(r0, Operand(kSmiTagMask)); | 7894 __ bind(¬_string1); |
| 7890 __ b(eq, ¬_strings); | 7895 __ tst(r0, Operand(kSmiTagMask)); |
| 7891 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); | 7896 __ b(eq, ¬_strings); |
| 7892 __ b(ge, ¬_strings); | 7897 __ CompareObjectType(r0, r2, r2, FIRST_NONSTRING_TYPE); |
| 7893 | 7898 __ b(ge, ¬_strings); |
| 7894 // Only second argument is a string. | 7899 |
| 7895 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS); | 7900 // Only second argument is a string. |
| 7896 | 7901 __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_JS); |
| 7897 __ bind(¬_strings); | 7902 |
| 7903 __ bind(¬_strings); | |
| 7904 } | |
| 7905 | |
| 7906 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. | |
| 7898 } | 7907 } |
| 7899 | |
| 7900 __ InvokeBuiltin(builtin, JUMP_JS); // Tail call. No return. | |
| 7901 } | 7908 } |
| 7902 | 7909 |
| 7903 | 7910 |
| 7904 // Tries to get a signed int32 out of a double precision floating point heap | 7911 // Tries to get a signed int32 out of a double precision floating point heap |
| 7905 // number. Rounds towards 0. Fastest for doubles that are in the ranges | 7912 // number. Rounds towards 0. Fastest for doubles that are in the ranges |
| 7906 // -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds | 7913 // -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds |
| 7907 // almost to the range of signed int32 values that are not Smis. Jumps to the | 7914 // almost to the range of signed int32 values that are not Smis. Jumps to the |
| 7908 // label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0 | 7915 // label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0 |
| 7909 // (excluding the endpoints). | 7916 // (excluding the endpoints). |
| 7910 static void GetInt32(MacroAssembler* masm, | 7917 static void GetInt32(MacroAssembler* masm, |
| (...skipping 834 matching lines...) | |
| 8745 __ CallStub(&uninit); | 8752 __ CallStub(&uninit); |
| 8746 } | 8753 } |
| 8747 } | 8754 } |
| 8748 | 8755 |
| 8749 | 8756 |
| 8750 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 8757 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 8751 Label get_result; | 8758 Label get_result; |
| 8752 | 8759 |
| 8753 __ Push(r1, r0); | 8760 __ Push(r1, r0); |
| 8754 | 8761 |
| 8755 // Internal frame is necessary to handle exceptions properly. | 8762 __ mov(r2, Operand(Smi::FromInt(MinorKey()))); |
| 8756 __ EnterInternalFrame(); | 8763 __ mov(r1, Operand(Smi::FromInt(op_))); |
| 8757 // Call the stub proper to get the result in r0. | |
| 8758 __ Call(&get_result); | |
| 8759 __ LeaveInternalFrame(); | |
| 8760 | |
| 8761 __ push(r0); | |
| 8762 | |
| 8763 __ mov(r0, Operand(Smi::FromInt(MinorKey()))); | |
| 8764 __ push(r0); | |
| 8765 __ mov(r0, Operand(Smi::FromInt(op_))); | |
| 8766 __ push(r0); | |
| 8767 __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_))); | 8764 __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_))); |
| 8768 __ push(r0); | 8765 __ Push(r2, r1, r0); |
| 8769 | 8766 |
| 8770 __ TailCallExternalReference( | 8767 __ TailCallExternalReference( |
| 8771 ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), | 8768 ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), |
| 8772 6, | 8769 5, |
| 8773 1); | 8770 1); |
| 8774 | |
| 8775 // The entry point for the result calculation is assumed to be immediately | |
| 8776 // after this sequence. | |
| 8777 __ bind(&get_result); | |
| 8778 } | 8771 } |
| 8779 | 8772 |
| 8780 | 8773 |
| 8781 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { | 8774 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { |
| 8782 GenericBinaryOpStub stub(key, type_info); | 8775 GenericBinaryOpStub stub(key, type_info); |
| 8783 return stub.GetCode(); | 8776 return stub.GetCode(); |
| 8784 } | 8777 } |
| 8785 | 8778 |
| 8786 | 8779 |
| 8787 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { | 8780 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
| (...skipping 2425 matching lines...) | |
| 11213 __ bind(&string_add_runtime); | 11206 __ bind(&string_add_runtime); |
| 11214 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); | 11207 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); |
| 11215 } | 11208 } |
| 11216 | 11209 |
| 11217 | 11210 |
| 11218 #undef __ | 11211 #undef __ |
| 11219 | 11212 |
| 11220 } } // namespace v8::internal | 11213 } } // namespace v8::internal |
| 11221 | 11214 |
| 11222 #endif // V8_TARGET_ARCH_ARM | 11215 #endif // V8_TARGET_ARCH_ARM |
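Finally, the `GenerateTypeTransition` hunk near the end is what the new comment "The type transition also calculates the answer." refers to: the old version re-entered the stub under an internal frame to compute a result and passed six arguments to `IC::kBinaryOp_Patch`, while the new version simply pushes the two operands plus three Smi-encoded parameters and tail-calls the IC with five arguments, leaving the patched code to compute the answer. The new sequence, restated outside the diff:

```cpp
// New GenerateTypeTransition body (per the right-hand side of the diff):
__ Push(r1, r0);                                            // lhs, rhs
__ mov(r2, Operand(Smi::FromInt(MinorKey())));              // stub minor key
__ mov(r1, Operand(Smi::FromInt(op_)));                     // Token::Value
__ mov(r0, Operand(Smi::FromInt(runtime_operands_type_)));  // BinaryOpIC type
__ Push(r2, r1, r0);                                        // five args total
__ TailCallExternalReference(
    ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), 5, 1);
```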