OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 5405 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5416 } | 5416 } |
5417 | 5417 |
5418 } else if (op == Token::TYPEOF) { | 5418 } else if (op == Token::TYPEOF) { |
5419 // Special case for loading the typeof expression; see comment on | 5419 // Special case for loading the typeof expression; see comment on |
5420 // LoadTypeofExpression(). | 5420 // LoadTypeofExpression(). |
5421 LoadTypeofExpression(node->expression()); | 5421 LoadTypeofExpression(node->expression()); |
5422 frame_->CallRuntime(Runtime::kTypeof, 1); | 5422 frame_->CallRuntime(Runtime::kTypeof, 1); |
5423 frame_->EmitPush(r0); // r0 has result | 5423 frame_->EmitPush(r0); // r0 has result |
5424 | 5424 |
5425 } else { | 5425 } else { |
5426 bool overwrite = | 5426 bool can_overwrite = |
5427 (node->expression()->AsBinaryOperation() != NULL && | 5427 (node->expression()->AsBinaryOperation() != NULL && |
5428 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); | 5428 node->expression()->AsBinaryOperation()->ResultOverwriteAllowed()); |
| 5429 UnaryOverwriteMode overwrite = |
| 5430 can_overwrite ? UNARY_OVERWRITE : UNARY_NO_OVERWRITE; |
| 5431 |
| 5432 bool no_negative_zero = node->expression()->no_negative_zero(); |
5429 Load(node->expression()); | 5433 Load(node->expression()); |
5430 switch (op) { | 5434 switch (op) { |
5431 case Token::NOT: | 5435 case Token::NOT: |
5432 case Token::DELETE: | 5436 case Token::DELETE: |
5433 case Token::TYPEOF: | 5437 case Token::TYPEOF: |
5434 UNREACHABLE(); // handled above | 5438 UNREACHABLE(); // handled above |
5435 break; | 5439 break; |
5436 | 5440 |
5437 case Token::SUB: { | 5441 case Token::SUB: { |
5438 frame_->PopToR0(); | 5442 frame_->PopToR0(); |
5439 GenericUnaryOpStub stub(Token::SUB, overwrite); | 5443 GenericUnaryOpStub stub( |
| 5444 Token::SUB, |
| 5445 overwrite, |
| 5446 no_negative_zero ? kIgnoreNegativeZero : kStrictNegativeZero); |
5440 frame_->CallStub(&stub, 0); | 5447 frame_->CallStub(&stub, 0); |
5441 frame_->EmitPush(r0); // r0 has result | 5448 frame_->EmitPush(r0); // r0 has result |
5442 break; | 5449 break; |
5443 } | 5450 } |
5444 | 5451 |
5445 case Token::BIT_NOT: { | 5452 case Token::BIT_NOT: { |
5446 Register tos = frame_->PopToRegister(); | 5453 Register tos = frame_->PopToRegister(); |
5447 JumpTarget not_smi_label; | 5454 JumpTarget not_smi_label; |
5448 JumpTarget continue_label; | 5455 JumpTarget continue_label; |
5449 // Smi check. | 5456 // Smi check. |
(...skipping 2184 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
7634 // We branch here if at least one of r0 and r1 is not a Smi. | 7641 // We branch here if at least one of r0 and r1 is not a Smi. |
7635 __ bind(not_smi); | 7642 __ bind(not_smi); |
7636 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 7643 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
7637 | 7644 |
7638 // After this point we have the left hand side in r1 and the right hand side | 7645 // After this point we have the left hand side in r1 and the right hand side |
7639 // in r0. | 7646 // in r0. |
7640 if (lhs.is(r0)) { | 7647 if (lhs.is(r0)) { |
7641 __ Swap(r0, r1, ip); | 7648 __ Swap(r0, r1, ip); |
7642 } | 7649 } |
7643 | 7650 |
| 7651 // The type transition also calculates the answer. |
| 7652 bool generate_code_to_calculate_answer = true; |
| 7653 |
7644 if (ShouldGenerateFPCode()) { | 7654 if (ShouldGenerateFPCode()) { |
7645 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; | |
7646 | |
7647 if (runtime_operands_type_ == BinaryOpIC::DEFAULT) { | 7655 if (runtime_operands_type_ == BinaryOpIC::DEFAULT) { |
7648 switch (op_) { | 7656 switch (op_) { |
7649 case Token::ADD: | 7657 case Token::ADD: |
7650 case Token::SUB: | 7658 case Token::SUB: |
7651 case Token::MUL: | 7659 case Token::MUL: |
7652 case Token::DIV: | 7660 case Token::DIV: |
7653 GenerateTypeTransition(masm); | 7661 GenerateTypeTransition(masm); // Tail call. |
| 7662 generate_code_to_calculate_answer = false; |
7654 break; | 7663 break; |
7655 | 7664 |
7656 default: | 7665 default: |
7657 break; | 7666 break; |
7658 } | 7667 } |
7659 // Restore heap number map register. | |
7660 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
7661 } | 7668 } |
7662 | 7669 |
7663 if (mode_ == NO_OVERWRITE) { | 7670 if (generate_code_to_calculate_answer) { |
7664 // In the case where there is no chance of an overwritable float we may as | 7671 Label r0_is_smi, r1_is_smi, finished_loading_r0, finished_loading_r1; |
7665 // well do the allocation immediately while r0 and r1 are untouched. | 7672 if (mode_ == NO_OVERWRITE) { |
7666 __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow); | 7673 // In the case where there is no chance of an overwritable float we may |
| 7674 // as well do the allocation immediately while r0 and r1 are untouched. |
| 7675 __ AllocateHeapNumber(r5, r3, r7, heap_number_map, &slow); |
| 7676 } |
| 7677 |
| 7678 // Move r0 to a double in r2-r3. |
| 7679 __ tst(r0, Operand(kSmiTagMask)); |
| 7680 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. |
| 7681 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| 7682 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 7683 __ cmp(r4, heap_number_map); |
| 7684 __ b(ne, &slow); |
| 7685 if (mode_ == OVERWRITE_RIGHT) { |
| 7686 __ mov(r5, Operand(r0)); // Overwrite this heap number. |
| 7687 } |
| 7688 if (use_fp_registers) { |
| 7689 CpuFeatures::Scope scope(VFP3); |
| 7690 // Load the double from tagged HeapNumber r0 to d7. |
| 7691 __ sub(r7, r0, Operand(kHeapObjectTag)); |
| 7692 __ vldr(d7, r7, HeapNumber::kValueOffset); |
| 7693 } else { |
| 7694 // Calling convention says that second double is in r2 and r3. |
| 7695 __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
| 7696 } |
| 7697 __ jmp(&finished_loading_r0); |
| 7698 __ bind(&r0_is_smi); |
| 7699 if (mode_ == OVERWRITE_RIGHT) { |
| 7700 // We can't overwrite a Smi so get address of new heap number into r5. |
| 7701 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); |
| 7702 } |
| 7703 |
| 7704 if (CpuFeatures::IsSupported(VFP3)) { |
| 7705 CpuFeatures::Scope scope(VFP3); |
| 7706 // Convert smi in r0 to double in d7. |
| 7707 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); |
| 7708 __ vmov(s15, r7); |
| 7709 __ vcvt_f64_s32(d7, s15); |
| 7710 if (!use_fp_registers) { |
| 7711 __ vmov(r2, r3, d7); |
| 7712 } |
| 7713 } else { |
| 7714 // Write Smi from r0 to r3 and r2 in double format. |
| 7715 __ mov(r7, Operand(r0)); |
| 7716 ConvertToDoubleStub stub3(r3, r2, r7, r4); |
| 7717 __ push(lr); |
| 7718 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); |
| 7719 __ pop(lr); |
| 7720 } |
| 7721 |
| 7722 // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis. |
| 7723 // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC. |
| 7724 Label r1_is_not_smi; |
| 7725 if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) { |
| 7726 __ tst(r1, Operand(kSmiTagMask)); |
| 7727 __ b(ne, &r1_is_not_smi); |
| 7728 GenerateTypeTransition(masm); // Tail call. |
| 7729 } |
| 7730 |
| 7731 __ bind(&finished_loading_r0); |
| 7732 |
| 7733 // Move r1 to a double in r0-r1. |
| 7734 __ tst(r1, Operand(kSmiTagMask)); |
| 7735 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. |
| 7736 __ bind(&r1_is_not_smi); |
| 7737 __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset)); |
| 7738 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 7739 __ cmp(r4, heap_number_map); |
| 7740 __ b(ne, &slow); |
| 7741 if (mode_ == OVERWRITE_LEFT) { |
| 7742 __ mov(r5, Operand(r1)); // Overwrite this heap number. |
| 7743 } |
| 7744 if (use_fp_registers) { |
| 7745 CpuFeatures::Scope scope(VFP3); |
| 7746 // Load the double from tagged HeapNumber r1 to d6. |
| 7747 __ sub(r7, r1, Operand(kHeapObjectTag)); |
| 7748 __ vldr(d6, r7, HeapNumber::kValueOffset); |
| 7749 } else { |
| 7750 // Calling convention says that first double is in r0 and r1. |
| 7751 __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); |
| 7752 } |
| 7753 __ jmp(&finished_loading_r1); |
| 7754 __ bind(&r1_is_smi); |
| 7755 if (mode_ == OVERWRITE_LEFT) { |
| 7756 // We can't overwrite a Smi so get address of new heap number into r5. |
| 7757 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); |
| 7758 } |
| 7759 |
| 7760 if (CpuFeatures::IsSupported(VFP3)) { |
| 7761 CpuFeatures::Scope scope(VFP3); |
| 7762 // Convert smi in r1 to double in d6. |
| 7763 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); |
| 7764 __ vmov(s13, r7); |
| 7765 __ vcvt_f64_s32(d6, s13); |
| 7766 if (!use_fp_registers) { |
| 7767 __ vmov(r0, r1, d6); |
| 7768 } |
| 7769 } else { |
| 7770 // Write Smi from r1 to r1 and r0 in double format. |
| 7771 __ mov(r7, Operand(r1)); |
| 7772 ConvertToDoubleStub stub4(r1, r0, r7, r9); |
| 7773 __ push(lr); |
| 7774 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); |
| 7775 __ pop(lr); |
| 7776 } |
| 7777 |
| 7778 __ bind(&finished_loading_r1); |
7667 } | 7779 } |
7668 | 7780 |
7669 // Move r0 to a double in r2-r3. | 7781 if (generate_code_to_calculate_answer || do_the_call.is_linked()) { |
7670 __ tst(r0, Operand(kSmiTagMask)); | 7782 __ bind(&do_the_call); |
7671 __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. | 7783 // If we are inlining the operation using VFP3 instructions for |
7672 __ ldr(r4, FieldMemOperand(r0, HeapObject::kMapOffset)); | 7784 // add, subtract, multiply, or divide, the arguments are in d6 and d7. |
7673 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 7785 if (use_fp_registers) { |
7674 __ cmp(r4, heap_number_map); | 7786 CpuFeatures::Scope scope(VFP3); |
7675 __ b(ne, &slow); | 7787 // ARMv7 VFP3 instructions to implement |
7676 if (mode_ == OVERWRITE_RIGHT) { | 7788 // double precision, add, subtract, multiply, divide. |
7677 __ mov(r5, Operand(r0)); // Overwrite this heap number. | 7789 |
| 7790 if (Token::MUL == op_) { |
| 7791 __ vmul(d5, d6, d7); |
| 7792 } else if (Token::DIV == op_) { |
| 7793 __ vdiv(d5, d6, d7); |
| 7794 } else if (Token::ADD == op_) { |
| 7795 __ vadd(d5, d6, d7); |
| 7796 } else if (Token::SUB == op_) { |
| 7797 __ vsub(d5, d6, d7); |
| 7798 } else { |
| 7799 UNREACHABLE(); |
| 7800 } |
| 7801 __ sub(r0, r5, Operand(kHeapObjectTag)); |
| 7802 __ vstr(d5, r0, HeapNumber::kValueOffset); |
| 7803 __ add(r0, r0, Operand(kHeapObjectTag)); |
| 7804 __ mov(pc, lr); |
| 7805 } else { |
| 7806 // If we did not inline the operation, then the arguments are in: |
| 7807 // r0: Left value (least significant part of mantissa). |
| 7808 // r1: Left value (sign, exponent, top of mantissa). |
| 7809 // r2: Right value (least significant part of mantissa). |
| 7810 // r3: Right value (sign, exponent, top of mantissa). |
| 7811 // r5: Address of heap number for result. |
| 7812 |
| 7813 __ push(lr); // For later. |
| 7814 __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments. |
| 7815 // Call C routine that may not cause GC or other trouble. r5 is callee |
| 7816 // save. |
| 7817 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); |
| 7818 // Store answer in the overwritable heap number. |
| 7819 #if !defined(USE_ARM_EABI) |
| 7820 // Double returned in fp coprocessor register 0 and 1, encoded as |
| 7821 // register cr8. Offsets must be divisible by 4 for coprocessor so we |
| 7822 // need to subtract the tag from r5. |
| 7823 __ sub(r4, r5, Operand(kHeapObjectTag)); |
| 7824 __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); |
| 7825 #else |
| 7826 // Double returned in registers 0 and 1. |
| 7827 __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset)); |
| 7828 #endif |
| 7829 __ mov(r0, Operand(r5)); |
| 7830 // And we are done. |
| 7831 __ pop(pc); |
| 7832 } |
7678 } | 7833 } |
7679 if (use_fp_registers) { | 7834 } |
7680 CpuFeatures::Scope scope(VFP3); | |
7681 // Load the double from tagged HeapNumber r0 to d7. | |
7682 __ sub(r7, r0, Operand(kHeapObjectTag)); | |
7683 __ vldr(d7, r7, HeapNumber::kValueOffset); | |
7684 } else { | |
7685 // Calling convention says that second double is in r2 and r3. | |
7686 __ Ldrd(r2, r3, FieldMemOperand(r0, HeapNumber::kValueOffset)); | |
7687 } | |
7688 __ jmp(&finished_loading_r0); | |
7689 __ bind(&r0_is_smi); | |
7690 if (mode_ == OVERWRITE_RIGHT) { | |
7691 // We can't overwrite a Smi so get address of new heap number into r5. | |
7692 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | |
7693 } | |
7694 | 7835 |
7695 if (CpuFeatures::IsSupported(VFP3)) { | 7836 if (!generate_code_to_calculate_answer && |
7696 CpuFeatures::Scope scope(VFP3); | 7837 !slow_reverse.is_linked() && |
7697 // Convert smi in r0 to double in d7. | 7838 !slow.is_linked()) { |
7698 __ mov(r7, Operand(r0, ASR, kSmiTagSize)); | 7839 return; |
7699 __ vmov(s15, r7); | |
7700 __ vcvt_f64_s32(d7, s15); | |
7701 if (!use_fp_registers) { | |
7702 __ vmov(r2, r3, d7); | |
7703 } | |
7704 } else { | |
7705 // Write Smi from r0 to r3 and r2 in double format. | |
7706 __ mov(r7, Operand(r0)); | |
7707 ConvertToDoubleStub stub3(r3, r2, r7, r4); | |
7708 __ push(lr); | |
7709 __ Call(stub3.GetCode(), RelocInfo::CODE_TARGET); | |
7710 __ pop(lr); | |
7711 } | |
7712 | |
7713 // HEAP_NUMBERS stub is slower than GENERIC on a pair of smis. | |
7714 // r0 is known to be a smi. If r1 is also a smi then switch to GENERIC. | |
7715 Label r1_is_not_smi; | |
7716 if (runtime_operands_type_ == BinaryOpIC::HEAP_NUMBERS) { | |
7717 __ tst(r1, Operand(kSmiTagMask)); | |
7718 __ b(ne, &r1_is_not_smi); | |
7719 GenerateTypeTransition(masm); | |
7720 // Restore heap number map register. | |
7721 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
7722 __ jmp(&r1_is_smi); | |
7723 } | |
7724 | |
7725 __ bind(&finished_loading_r0); | |
7726 | |
7727 // Move r1 to a double in r0-r1. | |
7728 __ tst(r1, Operand(kSmiTagMask)); | |
7729 __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. | |
7730 __ bind(&r1_is_not_smi); | |
7731 __ ldr(r4, FieldMemOperand(r1, HeapNumber::kMapOffset)); | |
7732 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
7733 __ cmp(r4, heap_number_map); | |
7734 __ b(ne, &slow); | |
7735 if (mode_ == OVERWRITE_LEFT) { | |
7736 __ mov(r5, Operand(r1)); // Overwrite this heap number. | |
7737 } | |
7738 if (use_fp_registers) { | |
7739 CpuFeatures::Scope scope(VFP3); | |
7740 // Load the double from tagged HeapNumber r1 to d6. | |
7741 __ sub(r7, r1, Operand(kHeapObjectTag)); | |
7742 __ vldr(d6, r7, HeapNumber::kValueOffset); | |
7743 } else { | |
7744 // Calling convention says that first double is in r0 and r1. | |
7745 __ Ldrd(r0, r1, FieldMemOperand(r1, HeapNumber::kValueOffset)); | |
7746 } | |
7747 __ jmp(&finished_loading_r1); | |
7748 __ bind(&r1_is_smi); | |
7749 if (mode_ == OVERWRITE_LEFT) { | |
7750 // We can't overwrite a Smi so get address of new heap number into r5. | |
7751 __ AllocateHeapNumber(r5, r4, r7, heap_number_map, &slow); | |
7752 } | |
7753 | |
7754 if (CpuFeatures::IsSupported(VFP3)) { | |
7755 CpuFeatures::Scope scope(VFP3); | |
7756 // Convert smi in r1 to double in d6. | |
7757 __ mov(r7, Operand(r1, ASR, kSmiTagSize)); | |
7758 __ vmov(s13, r7); | |
7759 __ vcvt_f64_s32(d6, s13); | |
7760 if (!use_fp_registers) { | |
7761 __ vmov(r0, r1, d6); | |
7762 } | |
7763 } else { | |
7764 // Write Smi from r1 to r1 and r0 in double format. | |
7765 __ mov(r7, Operand(r1)); | |
7766 ConvertToDoubleStub stub4(r1, r0, r7, r9); | |
7767 __ push(lr); | |
7768 __ Call(stub4.GetCode(), RelocInfo::CODE_TARGET); | |
7769 __ pop(lr); | |
7770 } | |
7771 | |
7772 __ bind(&finished_loading_r1); | |
7773 | |
7774 __ bind(&do_the_call); | |
7775 // If we are inlining the operation using VFP3 instructions for | |
7776 // add, subtract, multiply, or divide, the arguments are in d6 and d7. | |
7777 if (use_fp_registers) { | |
7778 CpuFeatures::Scope scope(VFP3); | |
7779 // ARMv7 VFP3 instructions to implement | |
7780 // double precision, add, subtract, multiply, divide. | |
7781 | |
7782 if (Token::MUL == op_) { | |
7783 __ vmul(d5, d6, d7); | |
7784 } else if (Token::DIV == op_) { | |
7785 __ vdiv(d5, d6, d7); | |
7786 } else if (Token::ADD == op_) { | |
7787 __ vadd(d5, d6, d7); | |
7788 } else if (Token::SUB == op_) { | |
7789 __ vsub(d5, d6, d7); | |
7790 } else { | |
7791 UNREACHABLE(); | |
7792 } | |
7793 __ sub(r0, r5, Operand(kHeapObjectTag)); | |
7794 __ vstr(d5, r0, HeapNumber::kValueOffset); | |
7795 __ add(r0, r0, Operand(kHeapObjectTag)); | |
7796 __ mov(pc, lr); | |
7797 } else { | |
7798 // If we did not inline the operation, then the arguments are in: | |
7799 // r0: Left value (least significant part of mantissa). | |
7800 // r1: Left value (sign, exponent, top of mantissa). | |
7801 // r2: Right value (least significant part of mantissa). | |
7802 // r3: Right value (sign, exponent, top of mantissa). | |
7803 // r5: Address of heap number for result. | |
7804 | |
7805 __ push(lr); // For later. | |
7806 __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments. | |
7807 // Call C routine that may not cause GC or other trouble. r5 is callee | |
7808 // save. | |
7809 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); | |
7810 // Store answer in the overwritable heap number. | |
7811 #if !defined(USE_ARM_EABI) | |
7812 // Double returned in fp coprocessor register 0 and 1, encoded as register | |
7813 // cr8. Offsets must be divisible by 4 for coprocessor so we need to | |
7814 // substract the tag from r5. | |
7815 __ sub(r4, r5, Operand(kHeapObjectTag)); | |
7816 __ stc(p1, cr8, MemOperand(r4, HeapNumber::kValueOffset)); | |
7817 #else | |
7818 // Double returned in registers 0 and 1. | |
7819 __ Strd(r0, r1, FieldMemOperand(r5, HeapNumber::kValueOffset)); | |
7820 #endif | |
7821 __ mov(r0, Operand(r5)); | |
7822 // And we are done. | |
7823 __ pop(pc); | |
7824 } | |
7825 } | 7840 } |
7826 | 7841 |
7827 if (lhs.is(r0)) { | 7842 if (lhs.is(r0)) { |
7828 __ b(&slow); | 7843 __ b(&slow); |
7829 __ bind(&slow_reverse); | 7844 __ bind(&slow_reverse); |
7830 __ Swap(r0, r1, ip); | 7845 __ Swap(r0, r1, ip); |
7831 } | 7846 } |
7832 | 7847 |
7833 heap_number_map = no_reg; // Don't use this any more from here on. | 7848 heap_number_map = no_reg; // Don't use this any more from here on. |
7834 | 7849 |
(...skipping 903 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
8738 __ CallStub(&uninit); | 8753 __ CallStub(&uninit); |
8739 } | 8754 } |
8740 } | 8755 } |
8741 | 8756 |
8742 | 8757 |
8743 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 8758 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
8744 Label get_result; | 8759 Label get_result; |
8745 | 8760 |
8746 __ Push(r1, r0); | 8761 __ Push(r1, r0); |
8747 | 8762 |
8748 // Internal frame is necessary to handle exceptions properly. | 8763 __ mov(r2, Operand(Smi::FromInt(MinorKey()))); |
8749 __ EnterInternalFrame(); | 8764 __ mov(r1, Operand(Smi::FromInt(op_))); |
8750 // Call the stub proper to get the result in r0. | |
8751 __ Call(&get_result); | |
8752 __ LeaveInternalFrame(); | |
8753 | |
8754 __ push(r0); | |
8755 | |
8756 __ mov(r0, Operand(Smi::FromInt(MinorKey()))); | |
8757 __ push(r0); | |
8758 __ mov(r0, Operand(Smi::FromInt(op_))); | |
8759 __ push(r0); | |
8760 __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_))); | 8765 __ mov(r0, Operand(Smi::FromInt(runtime_operands_type_))); |
8761 __ push(r0); | 8766 __ Push(r2, r1, r0); |
8762 | 8767 |
8763 __ TailCallExternalReference( | 8768 __ TailCallExternalReference( |
8764 ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), | 8769 ExternalReference(IC_Utility(IC::kBinaryOp_Patch)), |
8765 6, | 8770 5, |
8766 1); | 8771 1); |
8767 | |
8768 // The entry point for the result calculation is assumed to be immediately | |
8769 // after this sequence. | |
8770 __ bind(&get_result); | |
8771 } | 8772 } |
8772 | 8773 |
8773 | 8774 |
8774 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { | 8775 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { |
8775 GenericBinaryOpStub stub(key, type_info); | 8776 GenericBinaryOpStub stub(key, type_info); |
8776 return stub.GetCode(); | 8777 return stub.GetCode(); |
8777 } | 8778 } |
8778 | 8779 |
8779 | 8780 |
8780 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { | 8781 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
(...skipping 111 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
8892 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 8893 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
8893 | 8894 |
8894 if (op_ == Token::SUB) { | 8895 if (op_ == Token::SUB) { |
8895 // Check whether the value is a smi. | 8896 // Check whether the value is a smi. |
8896 Label try_float; | 8897 Label try_float; |
8897 __ tst(r0, Operand(kSmiTagMask)); | 8898 __ tst(r0, Operand(kSmiTagMask)); |
8898 __ b(ne, &try_float); | 8899 __ b(ne, &try_float); |
8899 | 8900 |
8900 // Go slow case if the value of the expression is zero | 8901 // Go slow case if the value of the expression is zero |
8901 // to make sure that we switch between 0 and -0. | 8902 // to make sure that we switch between 0 and -0. |
8902 __ cmp(r0, Operand(0)); | 8903 if (negative_zero_ == kStrictNegativeZero) { |
8903 __ b(eq, &slow); | 8904 // If we have to check for zero, then we can check for the max negative |
8904 | 8905 // smi while we are at it. |
8905 // The value of the expression is a smi that is not zero. Try | 8906 __ bic(ip, r0, Operand(0x80000000), SetCC); |
8906 // optimistic subtraction '0 - value'. | 8907 __ b(eq, &slow); |
8907 __ rsb(r1, r0, Operand(0), SetCC); | 8908 __ rsb(r0, r0, Operand(0)); |
8908 __ b(vs, &slow); | 8909 __ StubReturn(1); |
8909 | 8910 } else { |
8910 __ mov(r0, Operand(r1)); // Set r0 to result. | 8911 // The value of the expression is a smi and 0 is OK for -0. Try |
8911 __ b(&done); | 8912 // optimistic subtraction '0 - value'. |
| 8913 __ rsb(r0, r0, Operand(0), SetCC); |
| 8914 __ StubReturn(1, vc); |
| 8915 // We don't have to reverse the optimistic neg since the only case |
| 8916 // where we fall through is the minimum negative Smi, which is the case |
| 8917 // where the neg leaves the register unchanged. |
| 8918 __ jmp(&slow); // Go slow on max negative Smi. |
| 8919 } |
8912 | 8920 |
8913 __ bind(&try_float); | 8921 __ bind(&try_float); |
8914 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); | 8922 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); |
8915 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 8923 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
8916 __ cmp(r1, heap_number_map); | 8924 __ cmp(r1, heap_number_map); |
8917 __ b(ne, &slow); | 8925 __ b(ne, &slow); |
8918 // r0 is a heap number. Get a new heap number in r1. | 8926 // r0 is a heap number. Get a new heap number in r1. |
8919 if (overwrite_) { | 8927 if (overwrite_ == UNARY_OVERWRITE) { |
8920 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 8928 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
8921 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. | 8929 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. |
8922 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 8930 __ str(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
8923 } else { | 8931 } else { |
8924 __ AllocateHeapNumber(r1, r2, r3, r6, &slow); | 8932 __ AllocateHeapNumber(r1, r2, r3, r6, &slow); |
8925 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | 8933 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); |
8926 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 8934 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
8927 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); | 8935 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); |
8928 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. | 8936 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. |
8929 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); | 8937 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); |
(...skipping 12 matching lines...) Expand all Loading... |
8942 // Do the bitwise operation (move negated) and check if the result | 8950 // Do the bitwise operation (move negated) and check if the result |
8943 // fits in a smi. | 8951 // fits in a smi. |
8944 Label try_float; | 8952 Label try_float; |
8945 __ mvn(r1, Operand(r1)); | 8953 __ mvn(r1, Operand(r1)); |
8946 __ add(r2, r1, Operand(0x40000000), SetCC); | 8954 __ add(r2, r1, Operand(0x40000000), SetCC); |
8947 __ b(mi, &try_float); | 8955 __ b(mi, &try_float); |
8948 __ mov(r0, Operand(r1, LSL, kSmiTagSize)); | 8956 __ mov(r0, Operand(r1, LSL, kSmiTagSize)); |
8949 __ b(&done); | 8957 __ b(&done); |
8950 | 8958 |
8951 __ bind(&try_float); | 8959 __ bind(&try_float); |
8952 if (!overwrite_) { | 8960 if (overwrite_ != UNARY_OVERWRITE) {
8953 // Allocate a fresh heap number, but don't overwrite r0 until | 8961 // Allocate a fresh heap number, but don't overwrite r0 until |
8954 // we're sure we can do it without going through the slow case | 8962 // we're sure we can do it without going through the slow case |
8955 // that needs the value in r0. | 8963 // that needs the value in r0. |
8956 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); | 8964 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); |
8957 __ mov(r0, Operand(r2)); | 8965 __ mov(r0, Operand(r2)); |
8958 } | 8966 } |
8959 | 8967 |
8960 if (CpuFeatures::IsSupported(VFP3)) { | 8968 if (CpuFeatures::IsSupported(VFP3)) { |
8961 // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. | 8969 // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. |
8962 CpuFeatures::Scope scope(VFP3); | 8970 CpuFeatures::Scope scope(VFP3); |
(...skipping 2236 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
11199 __ bind(&string_add_runtime); | 11207 __ bind(&string_add_runtime); |
11200 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); | 11208 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); |
11201 } | 11209 } |
11202 | 11210 |
11203 | 11211 |
11204 #undef __ | 11212 #undef __ |
11205 | 11213 |
11206 } } // namespace v8::internal | 11214 } } // namespace v8::internal |
11207 | 11215 |
11208 #endif // V8_TARGET_ARCH_ARM | 11216 #endif // V8_TARGET_ARCH_ARM |
OLD | NEW |