OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 127 matching lines...) |
138 | 138 |
139 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 139 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
140 Label* slow, | 140 Label* slow, |
141 Condition cond); | 141 Condition cond); |
142 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 142 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
143 Register lhs, | 143 Register lhs, |
144 Register rhs, | 144 Register rhs, |
145 Label* lhs_not_nan, | 145 Label* lhs_not_nan, |
146 Label* slow, | 146 Label* slow, |
147 bool strict); | 147 bool strict); |
148 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond); | |
149 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 148 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
150 Register lhs, | 149 Register lhs, |
151 Register rhs); | 150 Register rhs); |
152 | 151 |
153 | 152 |
154 // Check if the operand is a heap number. | 153 // Check if the operand is a heap number. |
155 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand, | 154 static void EmitCheckForHeapNumber(MacroAssembler* masm, Register operand, |
156 Register scratch1, Register scratch2, | 155 Register scratch1, Register scratch2, |
157 Label* not_a_heap_number) { | 156 Label* not_a_heap_number) { |
158 __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset)); | 157 __ ldr(scratch1, FieldMemOperand(operand, HeapObject::kMapOffset)); |
(...skipping 349 matching lines...) |
508 exponent, | 507 exponent, |
509 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); | 508 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); |
510 __ Ret(); | 509 __ Ret(); |
511 } | 510 } |
512 | 511 |
513 | 512 |
514 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, | 513 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, |
515 FloatingPointHelper::Destination destination, | 514 FloatingPointHelper::Destination destination, |
516 Register scratch1, | 515 Register scratch1, |
517 Register scratch2) { | 516 Register scratch2) { |
518 if (CpuFeatures::IsSupported(VFP2)) { | 517 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); |
519 CpuFeatureScope scope(masm, VFP2); | 518 __ vmov(d7.high(), scratch1); |
520 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); | 519 __ vcvt_f64_s32(d7, d7.high()); |
521 __ vmov(d7.high(), scratch1); | 520 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); |
522 __ vcvt_f64_s32(d7, d7.high()); | 521 __ vmov(d6.high(), scratch1); |
523 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); | 522 __ vcvt_f64_s32(d6, d6.high()); |
524 __ vmov(d6.high(), scratch1); | 523 if (destination == kCoreRegisters) { |
525 __ vcvt_f64_s32(d6, d6.high()); | 524 __ vmov(r2, r3, d7); |
526 if (destination == kCoreRegisters) { | 525 __ vmov(r0, r1, d6); |
527 __ vmov(r2, r3, d7); | |
528 __ vmov(r0, r1, d6); | |
529 } | |
530 } else { | |
531 ASSERT(destination == kCoreRegisters); | |
532 // Write Smi from r0 to r3 and r2 in double format. | |
533 __ mov(scratch1, Operand(r0)); | |
534 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); | |
535 __ push(lr); | |
536 __ Call(stub1.GetCode(masm->isolate())); | |
537 // Write Smi from r1 to r1 and r0 in double format. | |
538 __ mov(scratch1, Operand(r1)); | |
539 ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2); | |
540 __ Call(stub2.GetCode(masm->isolate())); | |
541 __ pop(lr); | |
542 } | 526 } |
543 } | 527 } |
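
Note on the surviving VFP path above: a smi keeps its payload in the upper 31 bits with a zero tag bit, so each load is an arithmetic shift right by kSmiTagSize followed by a signed int-to-double conversion. A plain-C++ sketch of the per-operand computation (illustrative only, not V8 API):

    #include <cstdint>

    // Mirrors the mov/vmov/vcvt_f64_s32 sequence in the stub:
    // untag (ASR #kSmiTagSize, which is 1), then convert.
    double SmiToDouble(int32_t tagged_smi) {
      int32_t untagged = tagged_smi >> 1;     // ASR #kSmiTagSize
      return static_cast<double>(untagged);   // vcvt_f64_s32
    }
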
544 | 528 |
545 | 529 |
546 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, | 530 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, |
547 Destination destination, | 531 Destination destination, |
548 Register object, | 532 Register object, |
549 DwVfpRegister dst, | 533 DwVfpRegister dst, |
550 Register dst1, | 534 Register dst1, |
551 Register dst2, | 535 Register dst2, |
552 Register heap_number_map, | 536 Register heap_number_map, |
553 Register scratch1, | 537 Register scratch1, |
554 Register scratch2, | 538 Register scratch2, |
555 Label* not_number) { | 539 Label* not_number) { |
556 __ AssertRootValue(heap_number_map, | 540 __ AssertRootValue(heap_number_map, |
557 Heap::kHeapNumberMapRootIndex, | 541 Heap::kHeapNumberMapRootIndex, |
558 "HeapNumberMap register clobbered."); | 542 "HeapNumberMap register clobbered."); |
559 | 543 |
560 Label is_smi, done; | 544 Label is_smi, done; |
561 | 545 |
562 // Smi-check | 546 // Smi-check |
563 __ UntagAndJumpIfSmi(scratch1, object, &is_smi); | 547 __ UntagAndJumpIfSmi(scratch1, object, &is_smi); |
564 // Heap number check | 548 // Heap number check |
565 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); | 549 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
566 | 550 |
567 // Handle loading a double from a heap number. | 551 // Handle loading a double from a heap number. |
568 if (CpuFeatures::IsSupported(VFP2) && | 552 if (destination == kVFPRegisters) { |
569 destination == kVFPRegisters) { | |
570 CpuFeatureScope scope(masm, VFP2); | |
571 // Load the double from tagged HeapNumber to double register. | 553 // Load the double from tagged HeapNumber to double register. |
572 __ sub(scratch1, object, Operand(kHeapObjectTag)); | 554 __ sub(scratch1, object, Operand(kHeapObjectTag)); |
573 __ vldr(dst, scratch1, HeapNumber::kValueOffset); | 555 __ vldr(dst, scratch1, HeapNumber::kValueOffset); |
574 } else { | 556 } else { |
575 ASSERT(destination == kCoreRegisters); | 557 ASSERT(destination == kCoreRegisters); |
576 // Load the double from heap number to dst1 and dst2 in double format. | 558 // Load the double from heap number to dst1 and dst2 in double format. |
577 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); | 559 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); |
578 } | 560 } |
579 __ jmp(&done); | 561 __ jmp(&done); |
580 | 562 |
581 // Handle loading a double from a smi. | 563 // Handle loading a double from a smi. |
582 __ bind(&is_smi); | 564 __ bind(&is_smi); |
583 if (CpuFeatures::IsSupported(VFP2)) { | 565 // Convert smi to double using VFP instructions. |
584 CpuFeatureScope scope(masm, VFP2); | 566 __ vmov(dst.high(), scratch1); |
585 // Convert smi to double using VFP instructions. | 567 __ vcvt_f64_s32(dst, dst.high()); |
586 __ vmov(dst.high(), scratch1); | 568 if (destination == kCoreRegisters) { |
587 __ vcvt_f64_s32(dst, dst.high()); | 569 // Load the converted smi to dst1 and dst2 in double format. |
588 if (destination == kCoreRegisters) { | 570 __ vmov(dst1, dst2, dst); |
589 // Load the converted smi to dst1 and dst2 in double format. | |
590 __ vmov(dst1, dst2, dst); | |
591 } | |
592 } else { | |
593 ASSERT(destination == kCoreRegisters); | |
594 // Write smi to dst1 and dst2 double format. | |
595 __ mov(scratch1, Operand(object)); | |
596 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); | |
597 __ push(lr); | |
598 __ Call(stub.GetCode(masm->isolate())); | |
599 __ pop(lr); | |
600 } | 571 } |
601 | 572 |
602 __ bind(&done); | 573 __ bind(&done); |
603 } | 574 } |
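
The control flow above collapses to a two-way dispatch on the tag: smis are untagged and converted, heap numbers have their stored IEEE-754 payload loaded directly, and anything else escapes to not_number. A hedged C++ sketch (TaggedNumber and its fields are stand-ins for illustration, not V8's real object model):

    #include <cstdint>

    struct TaggedNumber {
      bool is_smi;          // low tag bit clear in the real encoding
      int32_t smi_value;    // valid when is_smi
      bool is_heap_number;  // map equals HeapNumberMap in the real encoding
      double heap_value;    // payload at HeapNumber::kValueOffset
    };

    // Returns false where the stub would jump to the not_number label.
    bool LoadNumber(const TaggedNumber& v, double* dst) {
      if (v.is_smi) { *dst = static_cast<double>(v.smi_value); return true; }
      if (v.is_heap_number) { *dst = v.heap_value; return true; }
      return false;
    }
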
604 | 575 |
605 | 576 |
606 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, | 577 void FloatingPointHelper::ConvertNumberToInt32(MacroAssembler* masm, |
607 Register object, | 578 Register object, |
608 Register dst, | 579 Register dst, |
609 Register heap_number_map, | 580 Register heap_number_map, |
(...skipping 26 matching lines...) |
636 Register dst_mantissa, | 607 Register dst_mantissa, |
637 Register dst_exponent, | 608 Register dst_exponent, |
638 Register scratch2, | 609 Register scratch2, |
639 SwVfpRegister single_scratch) { | 610 SwVfpRegister single_scratch) { |
640 ASSERT(!int_scratch.is(scratch2)); | 611 ASSERT(!int_scratch.is(scratch2)); |
641 ASSERT(!int_scratch.is(dst_mantissa)); | 612 ASSERT(!int_scratch.is(dst_mantissa)); |
642 ASSERT(!int_scratch.is(dst_exponent)); | 613 ASSERT(!int_scratch.is(dst_exponent)); |
643 | 614 |
644 Label done; | 615 Label done; |
645 | 616 |
646 if (CpuFeatures::IsSupported(VFP2)) { | 617 __ vmov(single_scratch, int_scratch); |
647 CpuFeatureScope scope(masm, VFP2); | 618 __ vcvt_f64_s32(double_dst, single_scratch); |
648 __ vmov(single_scratch, int_scratch); | 619 if (destination == kCoreRegisters) { |
649 __ vcvt_f64_s32(double_dst, single_scratch); | 620 __ vmov(dst_mantissa, dst_exponent, double_dst); |
650 if (destination == kCoreRegisters) { | |
651 __ vmov(dst_mantissa, dst_exponent, double_dst); | |
652 } | |
653 } else { | |
654 Label fewer_than_20_useful_bits; | |
655 // Expected output: | |
656 // | dst_exponent | dst_mantissa | | |
657 // | s | exp | mantissa | | |
658 | |
659 // Check for zero. | |
660 __ cmp(int_scratch, Operand::Zero()); | |
661 __ mov(dst_exponent, int_scratch); | |
662 __ mov(dst_mantissa, int_scratch); | |
663 __ b(eq, &done); | |
664 | |
665 // Preload the sign of the value. | |
666 __ and_(dst_exponent, int_scratch, Operand(HeapNumber::kSignMask), SetCC); | |
667 // Get the absolute value of the object (as an unsigned integer). | |
668 __ rsb(int_scratch, int_scratch, Operand::Zero(), SetCC, mi); | |
669 | |
670 // Get mantissa[51:20]. | |
671 | |
672 // Get the position of the first set bit. | |
673 __ CountLeadingZeros(dst_mantissa, int_scratch, scratch2); | |
674 __ rsb(dst_mantissa, dst_mantissa, Operand(31)); | |
675 | |
676 // Set the exponent. | |
677 __ add(scratch2, dst_mantissa, Operand(HeapNumber::kExponentBias)); | |
678 __ Bfi(dst_exponent, scratch2, scratch2, | |
679 HeapNumber::kExponentShift, HeapNumber::kExponentBits); | |
680 | |
681 // Clear the first non null bit. | |
682 __ mov(scratch2, Operand(1)); | |
683 __ bic(int_scratch, int_scratch, Operand(scratch2, LSL, dst_mantissa)); | |
684 | |
685 __ cmp(dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord)); | |
686 // Get the number of bits to set in the lower part of the mantissa. | |
687 __ sub(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord), | |
688 SetCC); | |
689 __ b(mi, &fewer_than_20_useful_bits); | |
690 // Set the higher 20 bits of the mantissa. | |
691 __ orr(dst_exponent, dst_exponent, Operand(int_scratch, LSR, scratch2)); | |
692 __ rsb(scratch2, scratch2, Operand(32)); | |
693 __ mov(dst_mantissa, Operand(int_scratch, LSL, scratch2)); | |
694 __ b(&done); | |
695 | |
696 __ bind(&fewer_than_20_useful_bits); | |
697 __ rsb(scratch2, dst_mantissa, Operand(HeapNumber::kMantissaBitsInTopWord)); | |
698 __ mov(scratch2, Operand(int_scratch, LSL, scratch2)); | |
699 __ orr(dst_exponent, dst_exponent, scratch2); | |
700 // Set dst_mantissa to 0. | |
701 __ mov(dst_mantissa, Operand::Zero()); | |
702 } | 621 } |
703 __ bind(&done); | 622 __ bind(&done); |
704 } | 623 } |
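
The deleted core-register branch is the most instructive part of this hunk: it assembled the IEEE-754 encoding by hand with CountLeadingZeros, Bfi, and shifts. A C++ sketch of that construction, under the same layout constants (kExponentBias == 1023, kMantissaBitsInTopWord == 20); __builtin_clz assumes GCC/Clang and a non-zero argument:

    #include <cstdint>

    void IntToDoubleBits(int32_t value, uint32_t* hi, uint32_t* lo) {
      if (value == 0) { *hi = 0; *lo = 0; return; }   // the early b(eq, &done)
      uint32_t sign = value < 0 ? 0x80000000u : 0u;   // kSignMask
      uint32_t mag = value < 0 ? 0u - static_cast<uint32_t>(value)
                               : static_cast<uint32_t>(value);
      int top_bit = 31 - __builtin_clz(mag);          // CountLeadingZeros + rsb
      uint32_t exponent = static_cast<uint32_t>(top_bit) + 1023u;
      mag &= ~(1u << top_bit);                        // clear the implicit bit
      int shift = top_bit - 20;                       // vs. kMantissaBitsInTopWord
      if (shift > 0) {                                // bits spill into the low word
        *hi = sign | (exponent << 20) | (mag >> shift);
        *lo = mag << (32 - shift);
      } else {                                        // fewer_than_20_useful_bits
        *hi = sign | (exponent << 20) | (mag << -shift);
        *lo = 0;
      }
    }
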
705 | 624 |
706 | 625 |
707 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, | 626 void FloatingPointHelper::LoadNumberAsInt32Double(MacroAssembler* masm, |
708 Register object, | 627 Register object, |
709 Destination destination, | 628 Destination destination, |
710 DwVfpRegister double_dst, | 629 DwVfpRegister double_dst, |
711 DwVfpRegister double_scratch, | 630 DwVfpRegister double_scratch, |
(...skipping 18 matching lines...) |
730 dst_exponent, scratch2, single_scratch); | 649 dst_exponent, scratch2, single_scratch); |
731 __ b(&done); | 650 __ b(&done); |
732 | 651 |
733 __ bind(&obj_is_not_smi); | 652 __ bind(&obj_is_not_smi); |
734 __ AssertRootValue(heap_number_map, | 653 __ AssertRootValue(heap_number_map, |
735 Heap::kHeapNumberMapRootIndex, | 654 Heap::kHeapNumberMapRootIndex, |
736 "HeapNumberMap register clobbered."); | 655 "HeapNumberMap register clobbered."); |
737 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); | 656 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_int32); |
738 | 657 |
739 // Load the number. | 658 // Load the number. |
740 if (CpuFeatures::IsSupported(VFP2)) { | 659 // Load the double value. |
741 CpuFeatureScope scope(masm, VFP2); | 660 __ sub(scratch1, object, Operand(kHeapObjectTag)); |
742 // Load the double value. | 661 __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); |
743 __ sub(scratch1, object, Operand(kHeapObjectTag)); | |
744 __ vldr(double_dst, scratch1, HeapNumber::kValueOffset); | |
745 | 662 |
746 __ TestDoubleIsInt32(double_dst, double_scratch); | 663 __ TestDoubleIsInt32(double_dst, double_scratch); |
747 // Jump to not_int32 if the operation did not succeed. | 664 // Jump to not_int32 if the operation did not succeed. |
748 __ b(ne, not_int32); | 665 __ b(ne, not_int32); |
749 | 666 |
750 if (destination == kCoreRegisters) { | 667 if (destination == kCoreRegisters) { |
751 __ vmov(dst_mantissa, dst_exponent, double_dst); | 668 __ vmov(dst_mantissa, dst_exponent, double_dst); |
752 } | |
753 | |
754 } else { | |
755 ASSERT(!scratch1.is(object) && !scratch2.is(object)); | |
756 // Load the double value in the destination registers. | |
757 bool save_registers = object.is(dst_mantissa) || object.is(dst_exponent); | |
758 if (save_registers) { | |
759 // Save both output registers, because the other one probably holds | |
760 // an important value too. | |
761 __ Push(dst_exponent, dst_mantissa); | |
762 } | |
763 __ Ldrd(dst_mantissa, dst_exponent, | |
764 FieldMemOperand(object, HeapNumber::kValueOffset)); | |
765 | |
766 // Check for 0 and -0. | |
767 Label zero; | |
768 __ bic(scratch1, dst_exponent, Operand(HeapNumber::kSignMask)); | |
769 __ orr(scratch1, scratch1, Operand(dst_mantissa)); | |
770 __ cmp(scratch1, Operand::Zero()); | |
771 __ b(eq, &zero); | |
772 | |
773 // Check that the value can be exactly represented by a 32-bit integer. | |
774 // Jump to not_int32 if that's not the case. | |
775 Label restore_input_and_miss; | |
776 DoubleIs32BitInteger(masm, dst_exponent, dst_mantissa, scratch1, scratch2, | |
777 &restore_input_and_miss); | |
778 | |
779 // dst_* were trashed. Reload the double value. | |
780 if (save_registers) { | |
781 __ Pop(dst_exponent, dst_mantissa); | |
782 } | |
783 __ Ldrd(dst_mantissa, dst_exponent, | |
784 FieldMemOperand(object, HeapNumber::kValueOffset)); | |
785 __ b(&done); | |
786 | |
787 __ bind(&restore_input_and_miss); | |
788 if (save_registers) { | |
789 __ Pop(dst_exponent, dst_mantissa); | |
790 } | |
791 __ b(not_int32); | |
792 | |
793 __ bind(&zero); | |
794 if (save_registers) { | |
795 __ Drop(2); | |
796 } | |
797 } | 669 } |
798 | |
799 __ bind(&done); | 670 __ bind(&done); |
800 } | 671 } |
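
TestDoubleIsInt32 in the surviving path does the round-trip test that the deleted DoubleIs32BitInteger code did with bit surgery: convert to int32 and back, then compare. A C++ sketch of that check (the explicit range guard stands in for VFP's saturating vcvt, since an out-of-range cast is undefined behaviour in C++):

    #include <cstdint>

    bool DoubleIsInt32(double value) {
      if (!(value >= -2147483648.0 && value < 2147483648.0)) return false;
      int32_t as_int = static_cast<int32_t>(value);   // vcvt_s32_f64
      // FP compare, matching VFPCompareAndSetFlags: -0.0 passes (it equals
      // +0.0), and NaN already failed the range test above.
      return static_cast<double>(as_int) == value;
    }
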
801 | 672 |
802 | 673 |
803 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, | 674 void FloatingPointHelper::LoadNumberAsInt32(MacroAssembler* masm, |
804 Register object, | 675 Register object, |
805 Register dst, | 676 Register dst, |
806 Register heap_number_map, | 677 Register heap_number_map, |
807 Register scratch1, | 678 Register scratch1, |
808 Register scratch2, | 679 Register scratch2, |
(...skipping 12 matching lines...) |
821 __ UntagAndJumpIfSmi(dst, object, &done); | 692 __ UntagAndJumpIfSmi(dst, object, &done); |
822 | 693 |
823 __ AssertRootValue(heap_number_map, | 694 __ AssertRootValue(heap_number_map, |
824 Heap::kHeapNumberMapRootIndex, | 695 Heap::kHeapNumberMapRootIndex, |
825 "HeapNumberMap register clobbered."); | 696 "HeapNumberMap register clobbered."); |
826 | 697 |
827 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); | 698 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, &maybe_undefined); |
828 | 699 |
829 // Object is a heap number. | 700 // Object is a heap number. |
830 // Convert the floating point value to a 32-bit integer. | 701 // Convert the floating point value to a 32-bit integer. |
831 if (CpuFeatures::IsSupported(VFP2)) { | 702 // Load the double value. |
832 CpuFeatureScope scope(masm, VFP2); | 703 __ sub(scratch1, object, Operand(kHeapObjectTag)); |
| 704 __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset); |
833 | 705 |
834 // Load the double value. | 706 __ TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1); |
835 __ sub(scratch1, object, Operand(kHeapObjectTag)); | 707 // Jump to not_int32 if the operation did not succeed. |
836 __ vldr(double_scratch0, scratch1, HeapNumber::kValueOffset); | 708 __ b(ne, not_int32); |
837 | |
838 __ TryDoubleToInt32Exact(dst, double_scratch0, double_scratch1); | |
839 // Jump to not_int32 if the operation did not succeed. | |
840 __ b(ne, not_int32); | |
841 } else { | |
842 // Load the double value in the destination registers. | |
843 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | |
844 __ ldr(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset)); | |
845 | |
846 // Check for 0 and -0. | |
847 __ bic(dst, scratch1, Operand(HeapNumber::kSignMask)); | |
848 __ orr(dst, scratch2, Operand(dst)); | |
849 __ cmp(dst, Operand::Zero()); | |
850 __ b(eq, &done); | |
851 | |
852 DoubleIs32BitInteger(masm, scratch1, scratch2, dst, scratch3, not_int32); | |
853 | |
854 // Registers state after DoubleIs32BitInteger. | |
855 // dst: mantissa[51:20]. | |
856 // scratch2: 1 | |
857 | |
858 // Shift back the higher bits of the mantissa. | |
859 __ mov(dst, Operand(dst, LSR, scratch3)); | |
860 // Set the implicit first bit. | |
861 __ rsb(scratch3, scratch3, Operand(32)); | |
862 __ orr(dst, dst, Operand(scratch2, LSL, scratch3)); | |
863 // Set the sign. | |
864 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset)); | |
865 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | |
866 __ rsb(dst, dst, Operand::Zero(), LeaveCC, mi); | |
867 } | |
868 __ b(&done); | 709 __ b(&done); |
869 | 710 |
870 __ bind(&maybe_undefined); | 711 __ bind(&maybe_undefined); |
871 __ CompareRoot(object, Heap::kUndefinedValueRootIndex); | 712 __ CompareRoot(object, Heap::kUndefinedValueRootIndex); |
872 __ b(ne, not_int32); | 713 __ b(ne, not_int32); |
873 // |undefined| is truncated to 0. | 714 // |undefined| is truncated to 0. |
874 __ mov(dst, Operand(Smi::FromInt(0))); | 715 __ mov(dst, Operand(Smi::FromInt(0))); |
875 // Fall through. | 716 // Fall through. |
876 | 717 |
877 __ bind(&done); | 718 __ bind(&done); |
(...skipping 73 matching lines...) |
951 | 792 |
952 // Assert that heap_number_result is callee-saved. | 793 // Assert that heap_number_result is callee-saved. |
953 // We currently always use r5 to pass it. | 794 // We currently always use r5 to pass it. |
954 ASSERT(heap_number_result.is(r5)); | 795 ASSERT(heap_number_result.is(r5)); |
955 | 796 |
956 // Push the current return address before the C call. Return will be | 797 // Push the current return address before the C call. Return will be |
957 // through pop(pc) below. | 798 // through pop(pc) below. |
958 __ push(lr); | 799 __ push(lr); |
959 __ PrepareCallCFunction(0, 2, scratch); | 800 __ PrepareCallCFunction(0, 2, scratch); |
960 if (masm->use_eabi_hardfloat()) { | 801 if (masm->use_eabi_hardfloat()) { |
961 CpuFeatureScope scope(masm, VFP2); | |
962 __ vmov(d0, r0, r1); | 802 __ vmov(d0, r0, r1); |
963 __ vmov(d1, r2, r3); | 803 __ vmov(d1, r2, r3); |
964 } | 804 } |
965 { | 805 { |
966 AllowExternalCallThatCantCauseGC scope(masm); | 806 AllowExternalCallThatCantCauseGC scope(masm); |
967 __ CallCFunction( | 807 __ CallCFunction( |
968 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); | 808 ExternalReference::double_fp_operation(op, masm->isolate()), 0, 2); |
969 } | 809 } |
970 // Store answer in the overwritable heap number. Double returned in | 810 // Store answer in the overwritable heap number. Double returned in |
971 // registers r0 and r1 or in d0. | 811 // registers r0 and r1 or in d0. |
972 if (masm->use_eabi_hardfloat()) { | 812 if (masm->use_eabi_hardfloat()) { |
973 CpuFeatureScope scope(masm, VFP2); | |
974 __ vstr(d0, | 813 __ vstr(d0, |
975 FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); | 814 FieldMemOperand(heap_number_result, HeapNumber::kValueOffset)); |
976 } else { | 815 } else { |
977 __ Strd(r0, r1, FieldMemOperand(heap_number_result, | 816 __ Strd(r0, r1, FieldMemOperand(heap_number_result, |
978 HeapNumber::kValueOffset)); | 817 HeapNumber::kValueOffset)); |
979 } | 818 } |
980 // Place heap_number_result in r0 and return to the pushed return address. | 819 // Place heap_number_result in r0 and return to the pushed return address. |
981 __ mov(r0, Operand(heap_number_result)); | 820 __ mov(r0, Operand(heap_number_result)); |
982 __ pop(pc); | 821 __ pop(pc); |
983 } | 822 } |
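
The conditional vmov/vstr pairs above are pure calling-convention plumbing: under the softfloat ARM EABI a double argument travels in a core-register pair (r0/r1, r2/r3) and returns in r0/r1, while under hardfloat it travels in d0/d1 and returns in d0. The C target itself is ordinary scalar code; a hedged sketch of what it computes (the op dispatch here is illustrative, the real routine is selected by ExternalReference::double_fp_operation):

    #include <cmath>

    double DoubleFpOperation(char op, double left, double right) {
      switch (op) {
        case '+': return left + right;
        case '-': return left - right;
        case '*': return left * right;
        case '/': return left / right;
        // MOD always lands here: VFP has no remainder instruction.
        case '%': return std::fmod(left, right);
        default:  return 0.0;  // unreachable in the stub's usage
      }
    }
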
(...skipping 192 matching lines...) |
1176 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); | 1015 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); |
1177 } | 1016 } |
1178 __ Ret(ne); | 1017 __ Ret(ne); |
1179 } else { | 1018 } else { |
1180 // Smi compared non-strictly with a non-Smi non-heap-number. Call | 1019 // Smi compared non-strictly with a non-Smi non-heap-number. Call |
1181 // the runtime. | 1020 // the runtime. |
1182 __ b(ne, slow); | 1021 __ b(ne, slow); |
1183 } | 1022 } |
1184 | 1023 |
1185 // Lhs is a smi, rhs is a number. | 1024 // Lhs is a smi, rhs is a number. |
1186 if (CpuFeatures::IsSupported(VFP2)) { | 1025 // Convert lhs to a double in d7. |
1187 // Convert lhs to a double in d7. | 1026 __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); |
1188 CpuFeatureScope scope(masm, VFP2); | 1027 // Load the double from rhs, tagged HeapNumber r0, to d6. |
1189 __ SmiToDoubleVFPRegister(lhs, d7, r7, s15); | 1028 __ sub(r7, rhs, Operand(kHeapObjectTag)); |
1190 // Load the double from rhs, tagged HeapNumber r0, to d6. | 1029 __ vldr(d6, r7, HeapNumber::kValueOffset); |
1191 __ sub(r7, rhs, Operand(kHeapObjectTag)); | |
1192 __ vldr(d6, r7, HeapNumber::kValueOffset); | |
1193 } else { | |
1194 __ push(lr); | |
1195 // Convert lhs to a double in r2, r3. | |
1196 __ mov(r7, Operand(lhs)); | |
1197 ConvertToDoubleStub stub1(r3, r2, r7, r6); | |
1198 __ Call(stub1.GetCode(masm->isolate())); | |
1199 // Load rhs to a double in r0, r1. | |
1200 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | |
1201 __ pop(lr); | |
1202 } | |
1203 | 1030 |
1204 // We now have both loaded as doubles but we can skip the lhs nan check | 1031 // We now have both loaded as doubles but we can skip the lhs nan check |
1205 // since it's a smi. | 1032 // since it's a smi. |
1206 __ jmp(lhs_not_nan); | 1033 __ jmp(lhs_not_nan); |
1207 | 1034 |
1208 __ bind(&rhs_is_smi); | 1035 __ bind(&rhs_is_smi); |
1209 // Rhs is a smi. Check whether the non-smi lhs is a heap number. | 1036 // Rhs is a smi. Check whether the non-smi lhs is a heap number. |
1210 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); | 1037 __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE); |
1211 if (strict) { | 1038 if (strict) { |
1212 // If lhs is not a number and rhs is a smi then strict equality cannot | 1039 // If lhs is not a number and rhs is a smi then strict equality cannot |
1213 // succeed. Return non-equal. | 1040 // succeed. Return non-equal. |
1214 // If lhs is r0 then there is already a non zero value in it. | 1041 // If lhs is r0 then there is already a non zero value in it. |
1215 if (!lhs.is(r0)) { | 1042 if (!lhs.is(r0)) { |
1216 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); | 1043 __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne); |
1217 } | 1044 } |
1218 __ Ret(ne); | 1045 __ Ret(ne); |
1219 } else { | 1046 } else { |
1220 // Smi compared non-strictly with a non-smi non-heap-number. Call | 1047 // Smi compared non-strictly with a non-smi non-heap-number. Call |
1221 // the runtime. | 1048 // the runtime. |
1222 __ b(ne, slow); | 1049 __ b(ne, slow); |
1223 } | 1050 } |
1224 | 1051 |
1225 // Rhs is a smi, lhs is a heap number. | 1052 // Rhs is a smi, lhs is a heap number. |
1226 if (CpuFeatures::IsSupported(VFP2)) { | 1053 // Load the double from lhs, tagged HeapNumber r1, to d7. |
1227 CpuFeatureScope scope(masm, VFP2); | 1054 __ sub(r7, lhs, Operand(kHeapObjectTag)); |
1228 // Load the double from lhs, tagged HeapNumber r1, to d7. | 1055 __ vldr(d7, r7, HeapNumber::kValueOffset); |
1229 __ sub(r7, lhs, Operand(kHeapObjectTag)); | 1056 // Convert rhs to a double in d6. |
1230 __ vldr(d7, r7, HeapNumber::kValueOffset); | 1057 __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); |
1231 // Convert rhs to a double in d6. | |
1232 __ SmiToDoubleVFPRegister(rhs, d6, r7, s13); | |
1233 } else { | |
1234 __ push(lr); | |
1235 // Load lhs to a double in r2, r3. | |
1236 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | |
1237 // Convert rhs to a double in r0, r1. | |
1238 __ mov(r7, Operand(rhs)); | |
1239 ConvertToDoubleStub stub2(r1, r0, r7, r6); | |
1240 __ Call(stub2.GetCode(masm->isolate())); | |
1241 __ pop(lr); | |
1242 } | |
1243 // Fall through to both_loaded_as_doubles. | 1058 // Fall through to both_loaded_as_doubles. |
1244 } | 1059 } |
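
Both early Ret(ne) paths above rest on the same observation: in a strict comparison a smi can only equal a heap number, so any other heap object yields NOT_EQUAL without loading a single double. A minimal C++ sketch (is_heap_number/value are illustrative stand-ins, not V8 API):

    #include <cstdint>

    struct HeapOperand { bool is_heap_number; double value; };

    bool StrictEqualsSmi(int32_t smi_value, const HeapOperand& obj) {
      if (!obj.is_heap_number) return false;  // the Ret(ne) shortcut
      return static_cast<double>(smi_value) == obj.value;
    }
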
1245 | 1060 |
1246 | 1061 |
1247 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) { | 1062 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) { |
1248 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | 1063 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
1249 Register rhs_exponent = exp_first ? r0 : r1; | 1064 Register rhs_exponent = exp_first ? r0 : r1; |
1250 Register lhs_exponent = exp_first ? r2 : r3; | 1065 Register lhs_exponent = exp_first ? r2 : r3; |
1251 Register rhs_mantissa = exp_first ? r1 : r0; | 1066 Register rhs_mantissa = exp_first ? r1 : r0; |
1252 Register lhs_mantissa = exp_first ? r3 : r2; | 1067 Register lhs_mantissa = exp_first ? r3 : r2; |
(...skipping 36 matching lines...) |
1289 } else { | 1104 } else { |
1290 __ mov(r0, Operand(LESS)); | 1105 __ mov(r0, Operand(LESS)); |
1291 } | 1106 } |
1292 __ Ret(); | 1107 __ Ret(); |
1293 | 1108 |
1294 __ bind(&neither_is_nan); | 1109 __ bind(&neither_is_nan); |
1295 } | 1110 } |
1296 | 1111 |
1297 | 1112 |
1298 // See comment at call site. | 1113 // See comment at call site. |
1299 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, | |
1300 Condition cond) { | |
1301 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | |
1302 Register rhs_exponent = exp_first ? r0 : r1; | |
1303 Register lhs_exponent = exp_first ? r2 : r3; | |
1304 Register rhs_mantissa = exp_first ? r1 : r0; | |
1305 Register lhs_mantissa = exp_first ? r3 : r2; | |
1306 | |
1307 // r0, r1, r2, r3 have the two doubles. Neither is a NaN. | |
1308 if (cond == eq) { | |
1309 // Doubles are not equal unless they have the same bit pattern. | |
1310 // Exception: 0 and -0. | |
1311 __ cmp(rhs_mantissa, Operand(lhs_mantissa)); | |
1312 __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne); | |
1313 // Return non-zero if the numbers are unequal. | |
1314 __ Ret(ne); | |
1315 | |
1316 __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC); | |
1317 // If exponents are equal then return 0. | |
1318 __ Ret(eq); | |
1319 | |
1320 // Exponents are unequal. The only way we can return that the numbers | |
1321 // are equal is if one is -0 and the other is 0. We already dealt | |
1322 // with the case where both are -0 or both are 0. | |
1323 // We start by seeing if the mantissas (that are equal) or the bottom | |
1324 // 31 bits of the rhs exponent are non-zero. If so we return not | |
1325 // equal. | |
1326 __ orr(r4, lhs_mantissa, Operand(lhs_exponent, LSL, kSmiTagSize), SetCC); | |
1327 __ mov(r0, Operand(r4), LeaveCC, ne); | |
1328 __ Ret(ne); | |
1329 // Now they are equal if and only if the lhs exponent is zero in its | |
1330 // low 31 bits. | |
1331 __ mov(r0, Operand(rhs_exponent, LSL, kSmiTagSize)); | |
1332 __ Ret(); | |
1333 } else { | |
1334 // Call a native function to do a comparison between two non-NaNs. | |
1335 // Call C routine that may not cause GC or other trouble. | |
1336 __ push(lr); | |
1337 __ PrepareCallCFunction(0, 2, r5); | |
1338 if (masm->use_eabi_hardfloat()) { | |
1339 CpuFeatureScope scope(masm, VFP2); | |
1340 __ vmov(d0, r0, r1); | |
1341 __ vmov(d1, r2, r3); | |
1342 } | |
1343 | |
1344 AllowExternalCallThatCantCauseGC scope(masm); | |
1345 __ CallCFunction(ExternalReference::compare_doubles(masm->isolate()), | |
1346 0, 2); | |
1347 __ pop(pc); // Return. | |
1348 } | |
1349 } | |
1350 | |
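
The equality half of the routine deleted above encodes a fact worth keeping in mind when reading the replacement VFP compare: two non-NaN doubles are == exactly when their bit patterns match, except that 0.0 and -0.0 compare equal while differing in the sign bit. As a worked C++ example:

    #include <cstdint>
    #include <cstring>

    bool NonNanDoublesEqual(double a, double b) {
      uint64_t ab, bb;
      std::memcpy(&ab, &a, sizeof(ab));
      std::memcpy(&bb, &b, sizeof(bb));
      if (ab == bb) return true;         // identical bit patterns
      // Remaining equal case: {+0.0, -0.0}. Shifting out the sign bits
      // leaves zero only if both operands are some flavour of zero.
      return ((ab | bb) << 1) == 0;
    }
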
1351 | |
1352 // See comment at call site. | |
1353 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 1114 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
1354 Register lhs, | 1115 Register lhs, |
1355 Register rhs) { | 1116 Register rhs) { |
1356 ASSERT((lhs.is(r0) && rhs.is(r1)) || | 1117 ASSERT((lhs.is(r0) && rhs.is(r1)) || |
1357 (lhs.is(r1) && rhs.is(r0))); | 1118 (lhs.is(r1) && rhs.is(r0))); |
1358 | 1119 |
1359 // If either operand is a JS object or an oddball value, then they are | 1120 // If either operand is a JS object or an oddball value, then they are |
1360 // not equal since their pointers are different. | 1121 // not equal since their pointers are different. |
1361 // There is no test for undetectability in strict equality. | 1122 // There is no test for undetectability in strict equality. |
1362 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); | 1123 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); |
(...skipping 42 matching lines...) |
1405 (lhs.is(r1) && rhs.is(r0))); | 1166 (lhs.is(r1) && rhs.is(r0))); |
1406 | 1167 |
1407 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); | 1168 __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE); |
1408 __ b(ne, not_heap_numbers); | 1169 __ b(ne, not_heap_numbers); |
1409 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); | 1170 __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset)); |
1410 __ cmp(r2, r3); | 1171 __ cmp(r2, r3); |
1411 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. | 1172 __ b(ne, slow); // First was a heap number, second wasn't. Go slow case. |
1412 | 1173 |
1413 // Both are heap numbers. Load them up then jump to the code we have | 1174 // Both are heap numbers. Load them up then jump to the code we have |
1414 // for that. | 1175 // for that. |
1415 if (CpuFeatures::IsSupported(VFP2)) { | 1176 __ sub(r7, rhs, Operand(kHeapObjectTag)); |
1416 CpuFeatureScope scope(masm, VFP2); | 1177 __ vldr(d6, r7, HeapNumber::kValueOffset); |
1417 __ sub(r7, rhs, Operand(kHeapObjectTag)); | 1178 __ sub(r7, lhs, Operand(kHeapObjectTag)); |
1418 __ vldr(d6, r7, HeapNumber::kValueOffset); | 1179 __ vldr(d7, r7, HeapNumber::kValueOffset); |
1419 __ sub(r7, lhs, Operand(kHeapObjectTag)); | |
1420 __ vldr(d7, r7, HeapNumber::kValueOffset); | |
1421 } else { | |
1422 __ Ldrd(r2, r3, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | |
1423 __ Ldrd(r0, r1, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | |
1424 } | |
1425 __ jmp(both_loaded_as_doubles); | 1180 __ jmp(both_loaded_as_doubles); |
1426 } | 1181 } |
1427 | 1182 |
1428 | 1183 |
1429 // Fast negative check for internalized-to-internalized equality. | 1184 // Fast negative check for internalized-to-internalized equality. |
1430 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, | 1185 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, |
1431 Register lhs, | 1186 Register lhs, |
1432 Register rhs, | 1187 Register rhs, |
1433 Label* possible_strings, | 1188 Label* possible_strings, |
1434 Label* not_both_strings) { | 1189 Label* not_both_strings) { |
(...skipping 60 matching lines...) |
1495 | 1250 |
1496 // Calculate the entry in the number string cache. The hash value in the | 1251 // Calculate the entry in the number string cache. The hash value in the |
1497 // number string cache for smis is just the smi value, and the hash for | 1252 // number string cache for smis is just the smi value, and the hash for |
1498 // doubles is the xor of the upper and lower words. See | 1253 // doubles is the xor of the upper and lower words. See |
1499 // Heap::GetNumberStringCache. | 1254 // Heap::GetNumberStringCache. |
1500 Isolate* isolate = masm->isolate(); | 1255 Isolate* isolate = masm->isolate(); |
1501 Label is_smi; | 1256 Label is_smi; |
1502 Label load_result_from_cache; | 1257 Label load_result_from_cache; |
1503 if (!object_is_smi) { | 1258 if (!object_is_smi) { |
1504 __ JumpIfSmi(object, &is_smi); | 1259 __ JumpIfSmi(object, &is_smi); |
1505 if (CpuFeatures::IsSupported(VFP2)) { | 1260 __ CheckMap(object, |
1506 CpuFeatureScope scope(masm, VFP2); | 1261 scratch1, |
1507 __ CheckMap(object, | 1262 Heap::kHeapNumberMapRootIndex, |
1508 scratch1, | 1263 not_found, |
1509 Heap::kHeapNumberMapRootIndex, | 1264 DONT_DO_SMI_CHECK); |
1510 not_found, | |
1511 DONT_DO_SMI_CHECK); | |
1512 | 1265 |
1513 STATIC_ASSERT(8 == kDoubleSize); | 1266 STATIC_ASSERT(8 == kDoubleSize); |
1514 __ add(scratch1, | 1267 __ add(scratch1, |
1515 object, | 1268 object, |
1516 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); | 1269 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); |
1517 __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); | 1270 __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); |
1518 __ eor(scratch1, scratch1, Operand(scratch2)); | 1271 __ eor(scratch1, scratch1, Operand(scratch2)); |
1519 __ and_(scratch1, scratch1, Operand(mask)); | 1272 __ and_(scratch1, scratch1, Operand(mask)); |
1520 | 1273 |
1521 // Calculate address of entry in string cache: each entry consists | 1274 // Calculate address of entry in string cache: each entry consists |
1522 // of two pointer sized fields. | 1275 // of two pointer sized fields. |
1523 __ add(scratch1, | 1276 __ add(scratch1, |
1524 number_string_cache, | 1277 number_string_cache, |
1525 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); | 1278 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); |
1526 | 1279 |
1527 Register probe = mask; | 1280 Register probe = mask; |
1528 __ ldr(probe, | 1281 __ ldr(probe, |
1529 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | 1282 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); |
1530 __ JumpIfSmi(probe, not_found); | 1283 __ JumpIfSmi(probe, not_found); |
1531 __ sub(scratch2, object, Operand(kHeapObjectTag)); | 1284 __ sub(scratch2, object, Operand(kHeapObjectTag)); |
1532 __ vldr(d0, scratch2, HeapNumber::kValueOffset); | 1285 __ vldr(d0, scratch2, HeapNumber::kValueOffset); |
1533 __ sub(probe, probe, Operand(kHeapObjectTag)); | 1286 __ sub(probe, probe, Operand(kHeapObjectTag)); |
1534 __ vldr(d1, probe, HeapNumber::kValueOffset); | 1287 __ vldr(d1, probe, HeapNumber::kValueOffset); |
1535 __ VFPCompareAndSetFlags(d0, d1); | 1288 __ VFPCompareAndSetFlags(d0, d1); |
1536 __ b(ne, not_found); // The cache did not contain this value. | 1289 __ b(ne, not_found); // The cache did not contain this value. |
1537 __ b(&load_result_from_cache); | 1290 __ b(&load_result_from_cache); |
1538 } else { | |
1539 __ b(not_found); | |
1540 } | |
1541 } | 1291 } |
1542 | 1292 |
1543 __ bind(&is_smi); | 1293 __ bind(&is_smi); |
1544 Register scratch = scratch1; | 1294 Register scratch = scratch1; |
1545 __ and_(scratch, mask, Operand(object, ASR, 1)); | 1295 __ and_(scratch, mask, Operand(object, ASR, 1)); |
1546 // Calculate address of entry in string cache: each entry consists | 1296 // Calculate address of entry in string cache: each entry consists |
1547 // of two pointer sized fields. | 1297 // of two pointer sized fields. |
1548 __ add(scratch, | 1298 __ add(scratch, |
1549 number_string_cache, | 1299 number_string_cache, |
1550 Operand(scratch, LSL, kPointerSizeLog2 + 1)); | 1300 Operand(scratch, LSL, kPointerSizeLog2 + 1)); |
(...skipping 94 matching lines...) |
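
As described in the number-string-cache hunk above, the hash is the smi value for smis and the xor of the two 32-bit halves for doubles, masked down to the cache size; the masked hash then indexes pairs of (number, string) fields. A C++ sketch of the index computation (mask is assumed to be the number of cache entries minus one, as in the stub):

    #include <cstdint>
    #include <cstring>

    uint32_t SmiCacheIndex(int32_t untagged_smi, uint32_t mask) {
      return static_cast<uint32_t>(untagged_smi) & mask;  // hash == the value
    }

    uint32_t DoubleCacheIndex(double value, uint32_t mask) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t lo = static_cast<uint32_t>(bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      return (lo ^ hi) & mask;  // xor of upper and lower words
    }
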
1645 // In cases 3 and 4 we have found out we were dealing with a number-number | 1395 // In cases 3 and 4 we have found out we were dealing with a number-number |
1646 // comparison. If VFP3 is supported the double values of the numbers have | 1396 // comparison. If VFP3 is supported the double values of the numbers have |
1647 // been loaded into d7 and d6. Otherwise, the double values have been loaded | 1397 // been loaded into d7 and d6. Otherwise, the double values have been loaded |
1648 // into r0, r1, r2, and r3. | 1398 // into r0, r1, r2, and r3. |
1649 EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict()); | 1399 EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict()); |
1650 | 1400 |
1651 __ bind(&both_loaded_as_doubles); | 1401 __ bind(&both_loaded_as_doubles); |
1652 // The arguments have been converted to doubles and stored in d6 and d7, if | 1402 // The arguments have been converted to doubles and stored in d6 and d7, if |
1653 // VFP3 is supported, or in r0, r1, r2, and r3. | 1403 // VFP3 is supported, or in r0, r1, r2, and r3. |
1654 Isolate* isolate = masm->isolate(); | 1404 Isolate* isolate = masm->isolate(); |
1655 if (CpuFeatures::IsSupported(VFP2)) { | 1405 __ bind(&lhs_not_nan); |
1656 __ bind(&lhs_not_nan); | 1406 Label no_nan; |
1657 CpuFeatureScope scope(masm, VFP2); | 1407 // ARMv7 VFP3 instructions to implement double precision comparison. |
1658 Label no_nan; | 1408 __ VFPCompareAndSetFlags(d7, d6); |
1659 // ARMv7 VFP3 instructions to implement double precision comparison. | 1409 Label nan; |
1660 __ VFPCompareAndSetFlags(d7, d6); | 1410 __ b(vs, &nan); |
1661 Label nan; | 1411 __ mov(r0, Operand(EQUAL), LeaveCC, eq); |
1662 __ b(vs, &nan); | 1412 __ mov(r0, Operand(LESS), LeaveCC, lt); |
1663 __ mov(r0, Operand(EQUAL), LeaveCC, eq); | 1413 __ mov(r0, Operand(GREATER), LeaveCC, gt); |
1664 __ mov(r0, Operand(LESS), LeaveCC, lt); | 1414 __ Ret(); |
1665 __ mov(r0, Operand(GREATER), LeaveCC, gt); | |
1666 __ Ret(); | |
1667 | 1415 |
1668 __ bind(&nan); | 1416 __ bind(&nan); |
1669 // If one of the sides was a NaN then the v flag is set. Load r0 with | 1417 // If one of the sides was a NaN then the v flag is set. Load r0 with |
1670 // whatever it takes to make the comparison fail, since comparisons with NaN | 1418 // whatever it takes to make the comparison fail, since comparisons with NaN |
1671 // always fail. | 1419 // always fail. |
1672 if (cc == lt || cc == le) { | 1420 if (cc == lt || cc == le) { |
1673 __ mov(r0, Operand(GREATER)); | 1421 __ mov(r0, Operand(GREATER)); |
1674 } else { | |
1675 __ mov(r0, Operand(LESS)); | |
1676 } | |
1677 __ Ret(); | |
1678 } else { | 1422 } else { |
1679 // Checks for NaN in the doubles we have loaded. Can return the answer or | 1423 __ mov(r0, Operand(LESS)); |
1680 // fall through if neither is a NaN. Also binds lhs_not_nan. | |
1681 EmitNanCheck(masm, &lhs_not_nan, cc); | |
1682 // Compares two doubles in r0, r1, r2, r3 that are not NaNs. Returns the | |
1683 // answer. Never falls through. | |
1684 EmitTwoNonNanDoubleComparison(masm, cc); | |
1685 } | 1424 } |
| 1425 __ Ret(); |
1686 | 1426 |
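
The constant loads above implement JavaScript's three-way comparison contract: r0 ends up holding LESS, EQUAL, or GREATER, and when either operand is NaN the stub deliberately loads whichever answer makes the pending relation fail, since every relational comparison with NaN is false. In C++ terms (sketch; the enum values mirror the stub's constants):

    #include <cmath>

    enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };

    CompareResult CompareDoubles(double lhs, double rhs, bool cc_is_lt_or_le) {
      if (std::isnan(lhs) || std::isnan(rhs)) {
        // GREATER fails lt/le, LESS fails gt/ge: the test always fails.
        return cc_is_lt_or_le ? GREATER : LESS;
      }
      if (lhs == rhs) return EQUAL;
      return lhs < rhs ? LESS : GREATER;
    }
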
1687 __ bind(¬_smis); | 1427 __ bind(¬_smis); |
1688 // At this point we know we are dealing with two different objects, | 1428 // At this point we know we are dealing with two different objects, |
1689 // and neither of them is a Smi. The objects are in rhs_ and lhs_. | 1429 // and neither of them is a Smi. The objects are in rhs_ and lhs_. |
1690 if (strict()) { | 1430 if (strict()) { |
1691 // This returns non-equal for some object types, or falls through if it | 1431 // This returns non-equal for some object types, or falls through if it |
1692 // was not lucky. | 1432 // was not lucky. |
1693 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); | 1433 EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); |
1694 } | 1434 } |
1695 | 1435 |
(...skipping 76 matching lines...) |
1772 } | 1512 } |
1773 | 1513 |
1774 | 1514 |
1775 // The stub expects its argument in the tos_ register and returns its result in | 1515 // The stub expects its argument in the tos_ register and returns its result in |
1776 // it, too: zero for false, and a non-zero value for true. | 1516 // it, too: zero for false, and a non-zero value for true. |
1777 void ToBooleanStub::Generate(MacroAssembler* masm) { | 1517 void ToBooleanStub::Generate(MacroAssembler* masm) { |
1778 // This stub overrides SometimesSetsUpAFrame() to return false. That means | 1518 // This stub overrides SometimesSetsUpAFrame() to return false. That means |
1779 // we cannot call anything that could cause a GC from this stub. | 1519 // we cannot call anything that could cause a GC from this stub. |
1780 Label patch; | 1520 Label patch; |
1781 const Register map = r9.is(tos_) ? r7 : r9; | 1521 const Register map = r9.is(tos_) ? r7 : r9; |
1782 const Register temp = map; | |
1783 | 1522 |
1784 // undefined -> false. | 1523 // undefined -> false. |
1785 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false); | 1524 CheckOddball(masm, UNDEFINED, Heap::kUndefinedValueRootIndex, false); |
1786 | 1525 |
1787 // Boolean -> its value. | 1526 // Boolean -> its value. |
1788 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); | 1527 CheckOddball(masm, BOOLEAN, Heap::kFalseValueRootIndex, false); |
1789 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); | 1528 CheckOddball(masm, BOOLEAN, Heap::kTrueValueRootIndex, true); |
1790 | 1529 |
1791 // 'null' -> false. | 1530 // 'null' -> false. |
1792 CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false); | 1531 CheckOddball(masm, NULL_TYPE, Heap::kNullValueRootIndex, false); |
(...skipping 22 matching lines...) |
1815 | 1554 |
1816 if (types_.Contains(SPEC_OBJECT)) { | 1555 if (types_.Contains(SPEC_OBJECT)) { |
1817 // Spec object -> true. | 1556 // Spec object -> true. |
1818 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); | 1557 __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE); |
1819 // tos_ contains the correct non-zero return value already. | 1558 // tos_ contains the correct non-zero return value already. |
1820 __ Ret(ge); | 1559 __ Ret(ge); |
1821 } | 1560 } |
1822 | 1561 |
1823 if (types_.Contains(STRING)) { | 1562 if (types_.Contains(STRING)) { |
1824 // String value -> false iff empty. | 1563 // String value -> false iff empty. |
1825 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); | 1564 __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE); |
1826 __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt); | 1565 __ ldr(tos_, FieldMemOperand(tos_, String::kLengthOffset), lt); |
1827 __ Ret(lt); // the string length is OK as the return value | 1566 __ Ret(lt); // the string length is OK as the return value |
1828 } | 1567 } |
1829 | 1568 |
1830 if (types_.Contains(HEAP_NUMBER)) { | 1569 if (types_.Contains(HEAP_NUMBER)) { |
1831 // Heap number -> false iff +0, -0, or NaN. | 1570 // Heap number -> false iff +0, -0, or NaN. |
1832 Label not_heap_number; | 1571 Label not_heap_number; |
1833 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); | 1572 __ CompareRoot(map, Heap::kHeapNumberMapRootIndex); |
1834 __ b(ne, ¬_heap_number); | 1573 __ b(ne, ¬_heap_number); |
1835 | 1574 |
1836 if (CpuFeatures::IsSupported(VFP2)) { | 1575 __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset)); |
1837 CpuFeatureScope scope(masm, VFP2); | 1576 __ VFPCompareAndSetFlags(d1, 0.0); |
1838 | 1577 // "tos_" is a register, and contains a non zero value by default. |
1839 __ vldr(d1, FieldMemOperand(tos_, HeapNumber::kValueOffset)); | 1578 // Hence we only need to overwrite "tos_" with zero to return false for |
1840 __ VFPCompareAndSetFlags(d1, 0.0); | 1579 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. |
1841 // "tos_" is a register, and contains a non zero value by default. | 1580 __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO |
1842 // Hence we only need to overwrite "tos_" with zero to return false for | 1581 __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN |
1843 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. | |
1844 __ mov(tos_, Operand::Zero(), LeaveCC, eq); // for FP_ZERO | |
1845 __ mov(tos_, Operand::Zero(), LeaveCC, vs); // for FP_NAN | |
1846 } else { | |
1847 Label done, not_nan, not_zero; | |
1848 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset)); | |
1849 // -0 maps to false: | |
1850 __ bic( | |
1851 temp, temp, Operand(HeapNumber::kSignMask, RelocInfo::NONE32), SetCC); | |
1852 __ b(ne, ¬_zero); | |
1853 // If exponent word is zero then the answer depends on the mantissa word. | |
1854 __ ldr(tos_, FieldMemOperand(tos_, HeapNumber::kMantissaOffset)); | |
1855 __ jmp(&done); | |
1856 | |
1857 // Check for NaN. | |
1858 __ bind(¬_zero); | |
1859 // We already zeroed the sign bit, now shift out the mantissa so we only | |
1860 // have the exponent left. | |
1861 __ mov(temp, Operand(temp, LSR, HeapNumber::kMantissaBitsInTopWord)); | |
1862 unsigned int shifted_exponent_mask = | |
1863 HeapNumber::kExponentMask >> HeapNumber::kMantissaBitsInTopWord; | |
1864 __ cmp(temp, Operand(shifted_exponent_mask, RelocInfo::NONE32)); | |
1865 __ b(ne, ¬_nan); // If exponent is not 0x7ff then it can't be a NaN. | |
1866 | |
1867 // Reload exponent word. | |
1868 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kExponentOffset)); | |
1869 __ tst(temp, Operand(HeapNumber::kMantissaMask, RelocInfo::NONE32)); | |
1870 // If mantissa is not zero then we have a NaN, so return 0. | |
1871 __ mov(tos_, Operand::Zero(), LeaveCC, ne); | |
1872 __ b(ne, &done); | |
1873 | |
1874 // Load mantissa word. | |
1875 __ ldr(temp, FieldMemOperand(tos_, HeapNumber::kMantissaOffset)); | |
1876 __ cmp(temp, Operand::Zero()); | |
1877 // If mantissa is not zero then we have a NaN, so return 0. | |
1878 __ mov(tos_, Operand::Zero(), LeaveCC, ne); | |
1879 __ b(ne, &done); | |
1880 | |
1881 __ bind(¬_nan); | |
1882 __ mov(tos_, Operand(1, RelocInfo::NONE32)); | |
1883 __ bind(&done); | |
1884 } | |
1885 __ Ret(); | 1582 __ Ret(); |
1886 __ bind(¬_heap_number); | 1583 __ bind(¬_heap_number); |
1887 } | 1584 } |
1888 | 1585 |
1889 __ bind(&patch); | 1586 __ bind(&patch); |
1890 GenerateTypeTransition(masm); | 1587 GenerateTypeTransition(masm); |
1891 } | 1588 } |
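
The heap-number arm above is JavaScript ToBoolean for numbers: false exactly for +0, -0, and NaN, which is why one VFP compare against 0.0 plus the eq (FP_ZERO) and vs (FP_NAN) conditional moves suffice. Equivalent C++ (sketch):

    #include <cmath>

    bool NumberToBoolean(double value) {
      // value != 0.0 rejects both +0.0 and -0.0 (they compare equal to 0.0);
      // NaN needs the explicit check because NaN != 0.0 evaluates to true.
      return value != 0.0 && !std::isnan(value);
    }
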
1892 | 1589 |
1893 | 1590 |
1894 void ToBooleanStub::CheckOddball(MacroAssembler* masm, | 1591 void ToBooleanStub::CheckOddball(MacroAssembler* masm, |
(...skipping 32 matching lines...) |
1927 | 1624 |
1928 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { | 1625 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { |
1929 // We don't allow a GC during a store buffer overflow so there is no need to | 1626 // We don't allow a GC during a store buffer overflow so there is no need to |
1930 // store the registers in any particular way, but we do have to store and | 1627 // store the registers in any particular way, but we do have to store and |
1931 // restore them. | 1628 // restore them. |
1932 __ stm(db_w, sp, kCallerSaved | lr.bit()); | 1629 __ stm(db_w, sp, kCallerSaved | lr.bit()); |
1933 | 1630 |
1934 const Register scratch = r1; | 1631 const Register scratch = r1; |
1935 | 1632 |
1936 if (save_doubles_ == kSaveFPRegs) { | 1633 if (save_doubles_ == kSaveFPRegs) { |
1937 CpuFeatureScope scope(masm, VFP2); | |
1938 // Check CPU flags for number of registers, setting the Z condition flag. | 1634 // Check CPU flags for number of registers, setting the Z condition flag. |
1939 __ CheckFor32DRegs(scratch); | 1635 __ CheckFor32DRegs(scratch); |
1940 | 1636 |
1941 __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters)); | 1637 __ sub(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters)); |
1942 for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) { | 1638 for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) { |
1943 DwVfpRegister reg = DwVfpRegister::from_code(i); | 1639 DwVfpRegister reg = DwVfpRegister::from_code(i); |
1944 __ vstr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne); | 1640 __ vstr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne); |
1945 } | 1641 } |
1946 } | 1642 } |
1947 const int argument_count = 1; | 1643 const int argument_count = 1; |
1948 const int fp_argument_count = 0; | 1644 const int fp_argument_count = 0; |
1949 | 1645 |
1950 AllowExternalCallThatCantCauseGC scope(masm); | 1646 AllowExternalCallThatCantCauseGC scope(masm); |
1951 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); | 1647 __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); |
1952 __ mov(r0, Operand(ExternalReference::isolate_address())); | 1648 __ mov(r0, Operand(ExternalReference::isolate_address())); |
1953 __ CallCFunction( | 1649 __ CallCFunction( |
1954 ExternalReference::store_buffer_overflow_function(masm->isolate()), | 1650 ExternalReference::store_buffer_overflow_function(masm->isolate()), |
1955 argument_count); | 1651 argument_count); |
1956 if (save_doubles_ == kSaveFPRegs) { | 1652 if (save_doubles_ == kSaveFPRegs) { |
1957 CpuFeatureScope scope(masm, VFP2); | |
1958 | |
1959 // Check CPU flags for number of registers, setting the Z condition flag. | 1653 // Check CPU flags for number of registers, setting the Z condition flag. |
1960 __ CheckFor32DRegs(scratch); | 1654 __ CheckFor32DRegs(scratch); |
1961 | 1655 |
1962 for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) { | 1656 for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; i++) { |
1963 DwVfpRegister reg = DwVfpRegister::from_code(i); | 1657 DwVfpRegister reg = DwVfpRegister::from_code(i); |
1964 __ vldr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne); | 1658 __ vldr(reg, MemOperand(sp, i * kDoubleSize), i < 16 ? al : ne); |
1965 } | 1659 } |
1966 __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters)); | 1660 __ add(sp, sp, Operand(kDoubleSize * DwVfpRegister::kMaxNumRegisters)); |
1967 } | 1661 } |
1968 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). | 1662 __ ldm(ia_w, sp, kCallerSaved | pc.bit()); // Also pop pc to get Ret(0). |
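
The save/restore loops above use a small predication trick: CheckFor32DRegs sets the Z flag on CPUs with only 16 D-registers, so the stores and loads for d16..d31 are issued with an ne condition and quietly become no-ops there. A sketch of the effective logic (has_32_dregs stands in for that flag):

    const int kMaxNumDRegisters = 32;

    void SaveDRegisters(double* slots, const double* dregs, bool has_32_dregs) {
      for (int i = 0; i < kMaxNumDRegisters; i++) {
        if (i < 16 || has_32_dregs) {  // 'al' for d0..d15, 'ne' for d16..d31
          slots[i] = dregs[i];
        }
      }
    }
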
(...skipping 204 matching lines...) |
2173 // Push the 31 high bits (bit 0 cleared to look like a smi). | 1867 // Push the 31 high bits (bit 0 cleared to look like a smi). |
2174 __ bic(r1, r1, Operand(1)); | 1868 __ bic(r1, r1, Operand(1)); |
2175 __ Push(r2, r1); | 1869 __ Push(r2, r1); |
2176 __ CallRuntime(Runtime::kNumberAlloc, 0); | 1870 __ CallRuntime(Runtime::kNumberAlloc, 0); |
2177 __ Pop(r2, r1); // Restore the result. | 1871 __ Pop(r2, r1); // Restore the result. |
2178 __ orr(r1, r1, Operand(r2, LSR, 31)); | 1872 __ orr(r1, r1, Operand(r2, LSR, 31)); |
2179 } | 1873 } |
2180 __ bind(&heapnumber_allocated); | 1874 __ bind(&heapnumber_allocated); |
2181 } | 1875 } |
2182 | 1876 |
2183 if (CpuFeatures::IsSupported(VFP2)) { | 1877 __ vmov(s0, r1); |
2184 // Convert the int32 in r1 to the heap number in r0. r2 is corrupted. | 1878 __ vcvt_f64_s32(d0, s0); |
2185 CpuFeatureScope scope(masm, VFP2); | 1879 __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
2186 __ vmov(s0, r1); | 1880 __ Ret(); |
2187 __ vcvt_f64_s32(d0, s0); | |
2188 __ vstr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); | |
2189 __ Ret(); | |
2190 } else { | |
2191 // WriteInt32ToHeapNumberStub does not trigger GC, so we do not | |
2192 // have to set up a frame. | |
2193 WriteInt32ToHeapNumberStub stub(r1, r0, r2); | |
2194 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | |
2195 } | |
2196 } | 1881 } |
2197 | 1882 |
2198 | 1883 |
2199 // TODO(svenpanne): Use virtual functions instead of switch. | 1884 // TODO(svenpanne): Use virtual functions instead of switch. |
2200 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { | 1885 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { |
2201 switch (op_) { | 1886 switch (op_) { |
2202 case Token::SUB: | 1887 case Token::SUB: |
2203 GenerateGenericStubSub(masm); | 1888 GenerateGenericStubSub(masm); |
2204 break; | 1889 break; |
2205 case Token::BIT_NOT: | 1890 case Token::BIT_NOT: |
(...skipping 35 matching lines...) |
2241 case Token::BIT_NOT: | 1926 case Token::BIT_NOT: |
2242 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); | 1927 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); |
2243 break; | 1928 break; |
2244 default: | 1929 default: |
2245 UNREACHABLE(); | 1930 UNREACHABLE(); |
2246 } | 1931 } |
2247 } | 1932 } |
2248 | 1933 |
2249 | 1934 |
2250 void BinaryOpStub::Initialize() { | 1935 void BinaryOpStub::Initialize() { |
2251 platform_specific_bit_ = CpuFeatures::IsSupported(VFP2); | 1936 platform_specific_bit_ = true; // VFP2 is a base requirement for V8 |
2252 } | 1937 } |
2253 | 1938 |
2254 | 1939 |
2255 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 1940 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
2256 Label get_result; | 1941 Label get_result; |
2257 | 1942 |
2258 __ Push(r1, r0); | 1943 __ Push(r1, r0); |
2259 | 1944 |
2260 __ mov(r2, Operand(Smi::FromInt(MinorKey()))); | 1945 __ mov(r2, Operand(Smi::FromInt(MinorKey()))); |
2261 __ push(r2); | 1946 __ push(r2); |
(...skipping 258 matching lines...) |
2520 | 2205 |
2521 switch (op) { | 2206 switch (op) { |
2522 case Token::ADD: | 2207 case Token::ADD: |
2523 case Token::SUB: | 2208 case Token::SUB: |
2524 case Token::MUL: | 2209 case Token::MUL: |
2525 case Token::DIV: | 2210 case Token::DIV: |
2526 case Token::MOD: { | 2211 case Token::MOD: { |
2527 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 | 2212 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 |
2528 // depending on whether VFP3 is available or not. | 2213 // depending on whether the operation is MOD or not. |
2529 FloatingPointHelper::Destination destination = | 2214 FloatingPointHelper::Destination destination = |
2530 CpuFeatures::IsSupported(VFP2) && | |
2531 op != Token::MOD ? | 2215 op != Token::MOD ? |
2532 FloatingPointHelper::kVFPRegisters : | 2216 FloatingPointHelper::kVFPRegisters : |
2533 FloatingPointHelper::kCoreRegisters; | 2217 FloatingPointHelper::kCoreRegisters; |
2534 | 2218 |
2535 // Allocate new heap number for result. | 2219 // Allocate new heap number for result. |
2536 Register result = r5; | 2220 Register result = r5; |
2537 BinaryOpStub_GenerateHeapResultAllocation( | 2221 BinaryOpStub_GenerateHeapResultAllocation( |
2538 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); | 2222 masm, result, heap_number_map, scratch1, scratch2, gc_required, mode); |
2539 | 2223 |
2540 // Load the operands. | 2224 // Load the operands. |
(...skipping 23 matching lines...) |
2564 masm, destination, left, d6, r0, r1, heap_number_map, | 2248 masm, destination, left, d6, r0, r1, heap_number_map, |
2565 scratch1, scratch2, fail); | 2249 scratch1, scratch2, fail); |
2566 } | 2250 } |
2567 } | 2251 } |
2568 | 2252 |
2569 // Calculate the result. | 2253 // Calculate the result. |
2570 if (destination == FloatingPointHelper::kVFPRegisters) { | 2254 if (destination == FloatingPointHelper::kVFPRegisters) { |
2571 // Using VFP registers: | 2255 // Using VFP registers: |
2572 // d6: Left value | 2256 // d6: Left value |
2573 // d7: Right value | 2257 // d7: Right value |
2574 CpuFeatureScope scope(masm, VFP2); | |
2575 switch (op) { | 2258 switch (op) { |
2576 case Token::ADD: | 2259 case Token::ADD: |
2577 __ vadd(d5, d6, d7); | 2260 __ vadd(d5, d6, d7); |
2578 break; | 2261 break; |
2579 case Token::SUB: | 2262 case Token::SUB: |
2580 __ vsub(d5, d6, d7); | 2263 __ vsub(d5, d6, d7); |
2581 break; | 2264 break; |
2582 case Token::MUL: | 2265 case Token::MUL: |
2583 __ vmul(d5, d6, d7); | 2266 __ vmul(d5, d6, d7); |
2584 break; | 2267 break; |
(...skipping 70 matching lines...)
2655 __ mov(r2, Operand(r3, ASR, r2)); | 2338 __ mov(r2, Operand(r3, ASR, r2)); |
2656 break; | 2339 break; |
2657 case Token::SHR: | 2340 case Token::SHR: |
2658 // Use only the 5 least significant bits of the shift count. | 2341 // Use only the 5 least significant bits of the shift count. |
2659 __ GetLeastBitsFromInt32(r2, r2, 5); | 2342 __ GetLeastBitsFromInt32(r2, r2, 5); |
2660 __ mov(r2, Operand(r3, LSR, r2), SetCC); | 2343 __ mov(r2, Operand(r3, LSR, r2), SetCC); |
2661 // SHR is special because it is required to produce a positive answer. | 2344 // SHR is special because it is required to produce a positive answer. |
2662 // The code below for writing into heap numbers isn't capable of | 2345 // A negative (mi) result cannot be tagged as a smi, so fall through |
2663 // writing the register as an unsigned int so we go to slow case if we | 2346 // to result_not_a_smi, where the value is written into a heap number |
2664 // hit this case. | 2347 // as an unsigned int. |
2665 if (CpuFeatures::IsSupported(VFP2)) { | 2348 __ b(mi, &result_not_a_smi); |
2666 __ b(mi, &result_not_a_smi); | |
2667 } else { | |
2668 __ b(mi, not_numbers); | |
2669 } | |
2670 break; | 2349 break; |
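For context on why only SHR needs this escape hatch: JavaScript's >>> is an unsigned shift, so even a zero-bit shift of a negative input yields a value above INT32_MAX, which fits neither a smi nor a signed int32. A minimal standalone C++ illustration (not V8 code):

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t input = -1;                                    // all 32 bits set
      uint32_t shifted = static_cast<uint32_t>(input) >> 0;  // JS: (-1) >>> 0
      // 4294967295 is representable neither as a smi nor as int32_t,
      // so the stub must box the result in a heap number instead.
      std::printf("%u\n", shifted);                          // prints 4294967295
      return 0;
    }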
2671 case Token::SHL: | 2350 case Token::SHL: |
2672 // Use only the 5 least significant bits of the shift count. | 2351 // Use only the 5 least significant bits of the shift count. |
2673 __ GetLeastBitsFromInt32(r2, r2, 5); | 2352 __ GetLeastBitsFromInt32(r2, r2, 5); |
2674 __ mov(r2, Operand(r3, LSL, r2)); | 2353 __ mov(r2, Operand(r3, LSL, r2)); |
2675 break; | 2354 break; |
2676 default: | 2355 default: |
2677 UNREACHABLE(); | 2356 UNREACHABLE(); |
2678 } | 2357 } |
2679 | 2358 |
(...skipping 15 matching lines...)
2695 mode); | 2374 mode); |
2696 } | 2375 } |
2697 | 2376 |
2698 // r2: Answer as signed int32. | 2377 // r2: Answer as signed int32. |
2699 // r5: Heap number to write answer into. | 2378 // r5: Heap number to write answer into. |
2700 | 2379 |
2701 // Nothing can go wrong now, so move the heap number to r0, which is the | 2380 // Nothing can go wrong now, so move the heap number to r0, which is the |
2702 // result. | 2381 // result. |
2703 __ mov(r0, Operand(r5)); | 2382 __ mov(r0, Operand(r5)); |
2704 | 2383 |
2705 if (CpuFeatures::IsSupported(VFP2)) { | 2384 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As |
2706 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As | 2385 // mentioned above SHR needs to always produce a positive result. |
2707 // mentioned above SHR needs to always produce a positive result. | 2386 __ vmov(s0, r2); |
2708 CpuFeatureScope scope(masm, VFP2); | 2387 if (op == Token::SHR) { |
2709 __ vmov(s0, r2); | 2388 __ vcvt_f64_u32(d0, s0); |
2710 if (op == Token::SHR) { | |
2711 __ vcvt_f64_u32(d0, s0); | |
2712 } else { | |
2713 __ vcvt_f64_s32(d0, s0); | |
2714 } | |
2715 __ sub(r3, r0, Operand(kHeapObjectTag)); | |
2716 __ vstr(d0, r3, HeapNumber::kValueOffset); | |
2717 __ Ret(); | |
2718 } else { | 2389 } else { |
2719 // Tail call that writes the int32 in r2 to the heap number in r0, using | 2390 __ vcvt_f64_s32(d0, s0); |
2720 // r3 as scratch. r0 is preserved and returned. | |
2721 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | |
2722 __ TailCallStub(&stub); | |
2723 } | 2391 } |
| 2392 __ sub(r3, r0, Operand(kHeapObjectTag)); |
| 2393 __ vstr(d0, r3, HeapNumber::kValueOffset); |
| 2394 __ Ret(); |
2724 break; | 2395 break; |
2725 } | 2396 } |
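The untag-then-store pattern just above (sub of kHeapObjectTag, then vstr at HeapNumber::kValueOffset) relies on V8's pointer tagging: heap object pointers carry a low tag bit, so the raw address is recovered by subtracting the tag. A hedged sketch of that arithmetic, with the offset parameter left symbolic:

    #include <cstdint>

    const intptr_t kHeapObjectTag = 1;  // low-bit tag on heap object pointers

    // Returns the address of the double payload inside a tagged HeapNumber.
    double* HeapNumberValueSlot(intptr_t tagged_ptr, int value_offset) {
      return reinterpret_cast<double*>(tagged_ptr - kHeapObjectTag + value_offset);
    }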
2726 default: | 2397 default: |
2727 UNREACHABLE(); | 2398 UNREACHABLE(); |
2728 } | 2399 } |
2729 } | 2400 } |
2730 | 2401 |
2731 | 2402 |
2732 // Generate the smi code. If the operation on smis is successful, this return is | 2403 // Generate the smi code. If the operation on smis is successful, this return is |
2733 // generated. If the result is not a smi and heap number allocation is not | 2404 // generated. If the result is not a smi and heap number allocation is not |
(...skipping 125 matching lines...)
2859 // again if this changes. | 2530 // again if this changes. |
2860 if (left_type_ == BinaryOpIC::SMI) { | 2531 if (left_type_ == BinaryOpIC::SMI) { |
2861 __ JumpIfNotSmi(left, &transition); | 2532 __ JumpIfNotSmi(left, &transition); |
2862 } | 2533 } |
2863 if (right_type_ == BinaryOpIC::SMI) { | 2534 if (right_type_ == BinaryOpIC::SMI) { |
2864 __ JumpIfNotSmi(right, &transition); | 2535 __ JumpIfNotSmi(right, &transition); |
2865 } | 2536 } |
2866 // Load both operands and check that they are 32-bit integer. | 2537 // Load both operands and check that they are 32-bit integer. |
2867 // Jump to type transition if they are not. The registers r0 and r1 (right | 2538 // Jump to type transition if they are not. The registers r0 and r1 (right |
2868 // and left) are preserved for the runtime call. | 2539 // and left) are preserved for the runtime call. |
2869 FloatingPointHelper::Destination destination = | 2540 FloatingPointHelper::Destination destination = (op_ != Token::MOD) |
2870 (CpuFeatures::IsSupported(VFP2) && op_ != Token::MOD) | |
2871 ? FloatingPointHelper::kVFPRegisters | 2541 ? FloatingPointHelper::kVFPRegisters |
2872 : FloatingPointHelper::kCoreRegisters; | 2542 : FloatingPointHelper::kCoreRegisters; |
2873 | 2543 |
2874 FloatingPointHelper::LoadNumberAsInt32Double(masm, | 2544 FloatingPointHelper::LoadNumberAsInt32Double(masm, |
2875 right, | 2545 right, |
2876 destination, | 2546 destination, |
2877 d7, | 2547 d7, |
2878 d8, | 2548 d8, |
2879 r2, | 2549 r2, |
2880 r3, | 2550 r3, |
2881 heap_number_map, | 2551 heap_number_map, |
2882 scratch1, | 2552 scratch1, |
2883 scratch2, | 2553 scratch2, |
2884 s0, | 2554 s0, |
2885 &transition); | 2555 &transition); |
2886 FloatingPointHelper::LoadNumberAsInt32Double(masm, | 2556 FloatingPointHelper::LoadNumberAsInt32Double(masm, |
2887 left, | 2557 left, |
2888 destination, | 2558 destination, |
2889 d6, | 2559 d6, |
2890 d8, | 2560 d8, |
2891 r4, | 2561 r4, |
2892 r5, | 2562 r5, |
2893 heap_number_map, | 2563 heap_number_map, |
2894 scratch1, | 2564 scratch1, |
2895 scratch2, | 2565 scratch2, |
2896 s0, | 2566 s0, |
2897 &transition); | 2567 &transition); |
2898 | 2568 |
2899 if (destination == FloatingPointHelper::kVFPRegisters) { | 2569 if (destination == FloatingPointHelper::kVFPRegisters) { |
2900 CpuFeatureScope scope(masm, VFP2); | |
2901 Label return_heap_number; | 2570 Label return_heap_number; |
2902 switch (op_) { | 2571 switch (op_) { |
2903 case Token::ADD: | 2572 case Token::ADD: |
2904 __ vadd(d5, d6, d7); | 2573 __ vadd(d5, d6, d7); |
2905 break; | 2574 break; |
2906 case Token::SUB: | 2575 case Token::SUB: |
2907 __ vsub(d5, d6, d7); | 2576 __ vsub(d5, d6, d7); |
2908 break; | 2577 break; |
2909 case Token::MUL: | 2578 case Token::MUL: |
2910 __ vmul(d5, d6, d7); | 2579 __ vmul(d5, d6, d7); |
(...skipping 147 matching lines...)
3058 __ and_(r2, r2, Operand(0x1f)); | 2727 __ and_(r2, r2, Operand(0x1f)); |
3059 __ mov(r2, Operand(r3, ASR, r2)); | 2728 __ mov(r2, Operand(r3, ASR, r2)); |
3060 break; | 2729 break; |
3061 case Token::SHR: | 2730 case Token::SHR: |
3062 __ and_(r2, r2, Operand(0x1f)); | 2731 __ and_(r2, r2, Operand(0x1f)); |
3063 __ mov(r2, Operand(r3, LSR, r2), SetCC); | 2732 __ mov(r2, Operand(r3, LSR, r2), SetCC); |
3064 // SHR is special because it is required to produce a positive answer. | 2733 // SHR is special because it is required to produce a positive answer. |
3065 // We only get a negative result if the shift value (r2) is 0. | 2734 // We only get a negative result if the shift value (r2) is 0. |
3066 // This result cannot be represented as a signed 32-bit integer, try | 2735 // This result cannot be represented as a signed 32-bit integer, try |
3067 // to return a heap number if we can. | 2736 // to return a heap number if we can. |
3068 // The non vfp2 code does not support this special case, so jump to | 2737 __ b(mi, (result_type_ <= BinaryOpIC::INT32) |
3069 // runtime if we don't support it. | 2738 ? &transition |
3070 if (CpuFeatures::IsSupported(VFP2)) { | 2739 : &return_heap_number); |
3071 __ b(mi, (result_type_ <= BinaryOpIC::INT32) | |
3072 ? &transition | |
3073 : &return_heap_number); | |
3074 } else { | |
3075 __ b(mi, (result_type_ <= BinaryOpIC::INT32) | |
3076 ? &transition | |
3077 : &call_runtime); | |
3078 } | |
3079 break; | 2740 break; |
3080 case Token::SHL: | 2741 case Token::SHL: |
3081 __ and_(r2, r2, Operand(0x1f)); | 2742 __ and_(r2, r2, Operand(0x1f)); |
3082 __ mov(r2, Operand(r3, LSL, r2)); | 2743 __ mov(r2, Operand(r3, LSL, r2)); |
3083 break; | 2744 break; |
3084 default: | 2745 default: |
3085 UNREACHABLE(); | 2746 UNREACHABLE(); |
3086 } | 2747 } |
3087 | 2748 |
3088 // Check if the result fits in a smi. | 2749 // Check if the result fits in a smi. |
3089 __ add(scratch1, r2, Operand(0x40000000), SetCC); | 2750 __ add(scratch1, r2, Operand(0x40000000), SetCC); |
3090 // If not try to return a heap number. (We know the result is an int32.) | 2751 // If not try to return a heap number. (We know the result is an int32.) |
3091 __ b(mi, &return_heap_number); | 2752 __ b(mi, &return_heap_number); |
3092 // Tag the result and return. | 2753 // Tag the result and return. |
3093 __ SmiTag(r0, r2); | 2754 __ SmiTag(r0, r2); |
3094 __ Ret(); | 2755 __ Ret(); |
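The add-0x40000000-and-branch-on-mi idiom above is the standard smi range check: on 32-bit ARM a smi payload is a 31-bit signed integer, i.e. [-2^30, 2^30 - 1], and adding 2^30 maps exactly that interval onto the non-negative int32 range. A small sketch (the helper is assumed, not a V8 declaration):

    #include <cstdint>

    // True iff v can be tagged as a 31-bit smi on 32-bit ARM.
    bool FitsInSmi(int32_t v) {
      uint32_t biased = static_cast<uint32_t>(v) + 0x40000000u;  // v + 2^30
      // In range:  [-2^30, 2^30-1] + 2^30  ->  [0, 2^31-1], sign bit clear.
      // Out of range: the sign bit ends up set, which is what mi tests.
      return static_cast<int32_t>(biased) >= 0;
    }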
3095 | 2756 |
3096 __ bind(&return_heap_number); | 2757 __ bind(&return_heap_number); |
3097 heap_number_result = r5; | 2758 heap_number_result = r5; |
3098 BinaryOpStub_GenerateHeapResultAllocation(masm, | 2759 BinaryOpStub_GenerateHeapResultAllocation(masm, |
3099 heap_number_result, | 2760 heap_number_result, |
3100 heap_number_map, | 2761 heap_number_map, |
3101 scratch1, | 2762 scratch1, |
3102 scratch2, | 2763 scratch2, |
3103 &call_runtime, | 2764 &call_runtime, |
3104 mode_); | 2765 mode_); |
3105 | 2766 |
3106 if (CpuFeatures::IsSupported(VFP2)) { | 2767 if (op_ != Token::SHR) { |
3107 CpuFeatureScope scope(masm, VFP2); | 2768 // Convert the result to a floating point value. |
3108 if (op_ != Token::SHR) { | 2769 __ vmov(double_scratch.low(), r2); |
3109 // Convert the result to a floating point value. | 2770 __ vcvt_f64_s32(double_scratch, double_scratch.low()); |
3110 __ vmov(double_scratch.low(), r2); | 2771 } else { |
3111 __ vcvt_f64_s32(double_scratch, double_scratch.low()); | 2772 // The result must be interpreted as an unsigned 32-bit integer. |
3112 } else { | 2773 __ vmov(double_scratch.low(), r2); |
3113 // The result must be interpreted as an unsigned 32-bit integer. | 2774 __ vcvt_f64_u32(double_scratch, double_scratch.low()); |
3114 __ vmov(double_scratch.low(), r2); | 2775 } |
3115 __ vcvt_f64_u32(double_scratch, double_scratch.low()); | |
3116 } | |
3117 | 2776 |
3118 // Store the result. | 2777 // Store the result. |
3119 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); | 2778 __ sub(r0, heap_number_result, Operand(kHeapObjectTag)); |
3120 __ vstr(double_scratch, r0, HeapNumber::kValueOffset); | 2779 __ vstr(double_scratch, r0, HeapNumber::kValueOffset); |
3121 __ mov(r0, heap_number_result); | 2780 __ mov(r0, heap_number_result); |
3122 __ Ret(); | 2781 __ Ret(); |
3123 } else { | |
3124 // Tail call that writes the int32 in r2 to the heap number in r0, using | |
3125 // r3 as scratch. r0 is preserved and returned. | |
3126 __ mov(r0, r5); | |
3127 WriteInt32ToHeapNumberStub stub(r2, r0, r3); | |
3128 __ TailCallStub(&stub); | |
3129 } | |
3130 | 2782 |
3131 break; | 2783 break; |
3132 } | 2784 } |
3133 | 2785 |
3134 default: | 2786 default: |
3135 UNREACHABLE(); | 2787 UNREACHABLE(); |
3136 } | 2788 } |
3137 | 2789 |
3138 // We never expect DIV to yield an integer result, so we always generate | 2790 // We never expect DIV to yield an integer result, so we always generate |
3139 // type transition code for DIV operations expecting an integer result: the | 2791 // type transition code for DIV operations expecting an integer result: the |
(...skipping 159 matching lines...)
3299 | 2951 |
3300 Label input_not_smi; | 2952 Label input_not_smi; |
3301 Label loaded; | 2953 Label loaded; |
3302 Label calculate; | 2954 Label calculate; |
3303 Label invalid_cache; | 2955 Label invalid_cache; |
3304 const Register scratch0 = r9; | 2956 const Register scratch0 = r9; |
3305 const Register scratch1 = r7; | 2957 const Register scratch1 = r7; |
3306 const Register cache_entry = r0; | 2958 const Register cache_entry = r0; |
3307 const bool tagged = (argument_type_ == TAGGED); | 2959 const bool tagged = (argument_type_ == TAGGED); |
3308 | 2960 |
3309 if (CpuFeatures::IsSupported(VFP2)) { | 2961 if (tagged) { |
3310 CpuFeatureScope scope(masm, VFP2); | 2962 // Argument is a number and is on stack and in r0. |
3311 if (tagged) { | 2963 // Load argument and check if it is a smi. |
3312 // Argument is a number and is on stack and in r0. | 2964 __ JumpIfNotSmi(r0, &input_not_smi); |
3313 // Load argument and check if it is a smi. | |
3314 __ JumpIfNotSmi(r0, &input_not_smi); | |
3315 | 2965 |
3316 // Input is a smi. Convert to double and load the low and high words | 2966 // Input is a smi. Convert to double and load the low and high words |
3317 // of the double into r2, r3. | 2967 // of the double into r2, r3. |
3318 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); | 2968 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); |
3319 __ b(&loaded); | 2969 __ b(&loaded); |
3320 | 2970 |
3321 __ bind(&input_not_smi); | 2971 __ bind(&input_not_smi); |
3322 // Check if input is a HeapNumber. | 2972 // Check if input is a HeapNumber. |
3323 __ CheckMap(r0, | 2973 __ CheckMap(r0, |
3324 r1, | 2974 r1, |
3325 Heap::kHeapNumberMapRootIndex, | 2975 Heap::kHeapNumberMapRootIndex, |
3326 &calculate, | 2976 &calculate, |
3327 DONT_DO_SMI_CHECK); | 2977 DONT_DO_SMI_CHECK); |
3328 // Input is a HeapNumber. Load it to a double register and store the | 2978 // Input is a HeapNumber. Load it to a double register and store the |
3329 // low and high words into r2, r3. | 2979 // low and high words into r2, r3. |
3330 __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); | 2980 __ vldr(d0, FieldMemOperand(r0, HeapNumber::kValueOffset)); |
3331 __ vmov(r2, r3, d0); | 2981 __ vmov(r2, r3, d0); |
3332 } else { | 2982 } else { |
3333 // Input is untagged double in d2. Output goes to d2. | 2983 // Input is untagged double in d2. Output goes to d2. |
3334 __ vmov(r2, r3, d2); | 2984 __ vmov(r2, r3, d2); |
3335 } | 2985 } |
3336 __ bind(&loaded); | 2986 __ bind(&loaded); |
3337 // r2 = low 32 bits of double value | 2987 // r2 = low 32 bits of double value |
3338 // r3 = high 32 bits of double value | 2988 // r3 = high 32 bits of double value |
3339 // Compute hash (the shifts are arithmetic): | 2989 // Compute hash (the shifts are arithmetic): |
3340 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); | 2990 // h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h = h & (cacheSize - 1); |
3341 __ eor(r1, r2, Operand(r3)); | 2991 __ eor(r1, r2, Operand(r3)); |
3342 __ eor(r1, r1, Operand(r1, ASR, 16)); | 2992 __ eor(r1, r1, Operand(r1, ASR, 16)); |
3343 __ eor(r1, r1, Operand(r1, ASR, 8)); | 2993 __ eor(r1, r1, Operand(r1, ASR, 8)); |
3344 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); | 2994 ASSERT(IsPowerOf2(TranscendentalCache::SubCache::kCacheSize)); |
3345 __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); | 2995 __ And(r1, r1, Operand(TranscendentalCache::SubCache::kCacheSize - 1)); |
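The five instructions above implement the cache hash h = (low ^ high); h ^= h >> 16; h ^= h >> 8; h &= size - 1 on the raw bits of the double. A standalone C++ rendering (function name and parameters assumed; word order as on little-endian ARM, though XOR makes it immaterial here):

    #include <cstdint>
    #include <cstring>

    uint32_t TranscendentalHash(double value, uint32_t cache_size /* power of 2 */) {
      uint32_t parts[2];
      std::memcpy(parts, &value, sizeof parts);  // parts[0]=low, parts[1]=high word
      int32_t h = static_cast<int32_t>(parts[0] ^ parts[1]);
      h ^= h >> 16;  // arithmetic shifts, matching the ASR operands above
      h ^= h >> 8;
      return static_cast<uint32_t>(h) & (cache_size - 1);
    }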
3346 | 2996 |
3347 // r2 = low 32 bits of double value. | 2997 // r2 = low 32 bits of double value. |
3348 // r3 = high 32 bits of double value. | 2998 // r3 = high 32 bits of double value. |
3349 // r1 = TranscendentalCache::hash(double value). | 2999 // r1 = TranscendentalCache::hash(double value). |
3350 Isolate* isolate = masm->isolate(); | 3000 Isolate* isolate = masm->isolate(); |
3351 ExternalReference cache_array = | 3001 ExternalReference cache_array = |
3352 ExternalReference::transcendental_cache_array_address(isolate); | 3002 ExternalReference::transcendental_cache_array_address(isolate); |
3353 __ mov(cache_entry, Operand(cache_array)); | 3003 __ mov(cache_entry, Operand(cache_array)); |
3354 // cache_entry points to cache array. | 3004 // cache_entry points to cache array. |
3355 int cache_array_index | 3005 int cache_array_index |
3356 = type_ * sizeof(isolate->transcendental_cache()->caches_[0]); | 3006 = type_ * sizeof(isolate->transcendental_cache()->caches_[0]); |
3357 __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index)); | 3007 __ ldr(cache_entry, MemOperand(cache_entry, cache_array_index)); |
3358 // r0 points to the cache for the type type_. | 3008 // r0 points to the cache for the type type_. |
3359 // If NULL, the cache hasn't been initialized yet, so go through runtime. | 3009 // If NULL, the cache hasn't been initialized yet, so go through runtime. |
3360 __ cmp(cache_entry, Operand::Zero()); | 3010 __ cmp(cache_entry, Operand::Zero()); |
3361 __ b(eq, &invalid_cache); | 3011 __ b(eq, &invalid_cache); |
3362 | 3012 |
3363 #ifdef DEBUG | 3013 #ifdef DEBUG |
3364 // Check that the layout of cache elements matches expectations. | 3014 // Check that the layout of cache elements matches expectations. |
3365 { TranscendentalCache::SubCache::Element test_elem[2]; | 3015 { TranscendentalCache::SubCache::Element test_elem[2]; |
3366 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); | 3016 char* elem_start = reinterpret_cast<char*>(&test_elem[0]); |
3367 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); | 3017 char* elem2_start = reinterpret_cast<char*>(&test_elem[1]); |
3368 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); | 3018 char* elem_in0 = reinterpret_cast<char*>(&(test_elem[0].in[0])); |
3369 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); | 3019 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); |
3370 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); | 3020 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); |
3371 CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer. | 3021 CHECK_EQ(12, elem2_start - elem_start); // Two uint32_t's and a pointer. |
3372 CHECK_EQ(0, elem_in0 - elem_start); | 3022 CHECK_EQ(0, elem_in0 - elem_start); |
3373 CHECK_EQ(kIntSize, elem_in1 - elem_start); | 3023 CHECK_EQ(kIntSize, elem_in1 - elem_start); |
3374 CHECK_EQ(2 * kIntSize, elem_out - elem_start); | 3024 CHECK_EQ(2 * kIntSize, elem_out - elem_start); |
3375 } | 3025 } |
3376 #endif | 3026 #endif |
3377 | 3027 |
3378 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. | 3028 // Find the address of the r1'st entry in the cache, i.e., &r0[r1*12]. |
3379 __ add(r1, r1, Operand(r1, LSL, 1)); | 3029 __ add(r1, r1, Operand(r1, LSL, 1)); |
3380 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); | 3030 __ add(cache_entry, cache_entry, Operand(r1, LSL, 2)); |
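The two adds above are strength-reduced index arithmetic: each cache element is 12 bytes (two uint32 inputs plus an output pointer), and 12*h is built as (h + (h << 1)) << 2 so no multiply instruction is needed. As a sketch:

    #include <cstdint>

    uintptr_t EntryAddress(uintptr_t cache_base, uint32_t h) {
      uint32_t h3 = h + (h << 1);     // h * 3   (add r1, r1, r1, LSL #1)
      return cache_base + (h3 << 2);  // h * 12  (add r0, r0, r1, LSL #2)
    }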
3381 // Check if cache matches: Double value is stored in uint32_t[2] array. | 3031 // Check if cache matches: Double value is stored in uint32_t[2] array. |
3382 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); | 3032 __ ldm(ia, cache_entry, r4.bit() | r5.bit() | r6.bit()); |
3383 __ cmp(r2, r4); | 3033 __ cmp(r2, r4); |
3384 __ cmp(r3, r5, eq); | 3034 __ cmp(r3, r5, eq); |
3385 __ b(ne, &calculate); | 3035 __ b(ne, &calculate); |
3386 // Cache hit. Load result, cleanup and return. | 3036 // Cache hit. Load result, cleanup and return. |
3387 Counters* counters = masm->isolate()->counters(); | 3037 Counters* counters = masm->isolate()->counters(); |
3388 __ IncrementCounter( | 3038 __ IncrementCounter( |
3389 counters->transcendental_cache_hit(), 1, scratch0, scratch1); | 3039 counters->transcendental_cache_hit(), 1, scratch0, scratch1); |
3390 if (tagged) { | 3040 if (tagged) { |
3391 // Pop input value from stack and load result into r0. | 3041 // Pop input value from stack and load result into r0. |
3392 __ pop(); | 3042 __ pop(); |
3393 __ mov(r0, Operand(r6)); | 3043 __ mov(r0, Operand(r6)); |
3394 } else { | 3044 } else { |
3395 // Load result into d2. | 3045 // Load result into d2. |
3396 __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); | 3046 __ vldr(d2, FieldMemOperand(r6, HeapNumber::kValueOffset)); |
3397 } | 3047 } |
3398 __ Ret(); | 3048 __ Ret(); |
3399 } // if (CpuFeatures::IsSupported(VFP3)) | |
3400 | 3049 |
3401 __ bind(&calculate); | 3050 __ bind(&calculate); |
3402 Counters* counters = masm->isolate()->counters(); | |
3403 __ IncrementCounter( | 3051 __ IncrementCounter( |
3404 counters->transcendental_cache_miss(), 1, scratch0, scratch1); | 3052 counters->transcendental_cache_miss(), 1, scratch0, scratch1); |
3405 if (tagged) { | 3053 if (tagged) { |
3406 __ bind(&invalid_cache); | 3054 __ bind(&invalid_cache); |
3407 ExternalReference runtime_function = | 3055 ExternalReference runtime_function = |
3408 ExternalReference(RuntimeFunction(), masm->isolate()); | 3056 ExternalReference(RuntimeFunction(), masm->isolate()); |
3409 __ TailCallExternalReference(runtime_function, 1, 1); | 3057 __ TailCallExternalReference(runtime_function, 1, 1); |
3410 } else { | 3058 } else { |
3411 ASSERT(CpuFeatures::IsSupported(VFP2)); | |
3412 CpuFeatureScope scope(masm, VFP2); | |
3413 | |
3414 Label no_update; | 3059 Label no_update; |
3415 Label skip_cache; | 3060 Label skip_cache; |
3416 | 3061 |
3417 // Call C function to calculate the result and update the cache. | 3062 // Call C function to calculate the result and update the cache. |
3418 // r0: precalculated cache entry address. | 3063 // r0: precalculated cache entry address. |
3419 // r2 and r3: parts of the double value. | 3064 // r2 and r3: parts of the double value. |
3420 // Store r0, r2 and r3 on stack for later before calling C function. | 3065 // Store r0, r2 and r3 on stack for later before calling C function. |
3421 __ Push(r3, r2, cache_entry); | 3066 __ Push(r3, r2, cache_entry); |
3422 GenerateCallCFunction(masm, scratch0); | 3067 GenerateCallCFunction(masm, scratch0); |
3423 __ GetCFunctionDoubleResult(d2); | 3068 __ GetCFunctionDoubleResult(d2); |
(...skipping 39 matching lines...)
3463 __ push(scratch0); | 3108 __ push(scratch0); |
3464 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); | 3109 __ CallRuntimeSaveDoubles(Runtime::kAllocateInNewSpace); |
3465 } | 3110 } |
3466 __ Ret(); | 3111 __ Ret(); |
3467 } | 3112 } |
3468 } | 3113 } |
3469 | 3114 |
3470 | 3115 |
3471 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, | 3116 void TranscendentalCacheStub::GenerateCallCFunction(MacroAssembler* masm, |
3472 Register scratch) { | 3117 Register scratch) { |
3473 ASSERT(masm->IsEnabled(VFP2)); | |
3474 Isolate* isolate = masm->isolate(); | 3118 Isolate* isolate = masm->isolate(); |
3475 | 3119 |
3476 __ push(lr); | 3120 __ push(lr); |
3477 __ PrepareCallCFunction(0, 1, scratch); | 3121 __ PrepareCallCFunction(0, 1, scratch); |
3478 if (masm->use_eabi_hardfloat()) { | 3122 if (masm->use_eabi_hardfloat()) { |
3479 __ vmov(d0, d2); | 3123 __ vmov(d0, d2); |
3480 } else { | 3124 } else { |
3481 __ vmov(r0, r1, d2); | 3125 __ vmov(r0, r1, d2); |
3482 } | 3126 } |
3483 AllowExternalCallThatCantCauseGC scope(masm); | 3127 AllowExternalCallThatCantCauseGC scope(masm); |
(...skipping 40 matching lines...)
3524 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); | 3168 __ TailCallRuntime(Runtime::kStackGuard, 0, 1); |
3525 } | 3169 } |
3526 | 3170 |
3527 | 3171 |
3528 void InterruptStub::Generate(MacroAssembler* masm) { | 3172 void InterruptStub::Generate(MacroAssembler* masm) { |
3529 __ TailCallRuntime(Runtime::kInterrupt, 0, 1); | 3173 __ TailCallRuntime(Runtime::kInterrupt, 0, 1); |
3530 } | 3174 } |
3531 | 3175 |
3532 | 3176 |
3533 void MathPowStub::Generate(MacroAssembler* masm) { | 3177 void MathPowStub::Generate(MacroAssembler* masm) { |
3534 CpuFeatureScope vfp2_scope(masm, VFP2); | |
3535 const Register base = r1; | 3178 const Register base = r1; |
3536 const Register exponent = r2; | 3179 const Register exponent = r2; |
3537 const Register heapnumbermap = r5; | 3180 const Register heapnumbermap = r5; |
3538 const Register heapnumber = r0; | 3181 const Register heapnumber = r0; |
3539 const DwVfpRegister double_base = d1; | 3182 const DwVfpRegister double_base = d1; |
3540 const DwVfpRegister double_exponent = d2; | 3183 const DwVfpRegister double_exponent = d2; |
3541 const DwVfpRegister double_result = d3; | 3184 const DwVfpRegister double_result = d3; |
3542 const DwVfpRegister double_scratch = d0; | 3185 const DwVfpRegister double_scratch = d0; |
3543 const SwVfpRegister single_scratch = s0; | 3186 const SwVfpRegister single_scratch = s0; |
3544 const Register scratch = r9; | 3187 const Register scratch = r9; |
(...skipping 198 matching lines...)
3743 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { | 3386 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { |
3744 CEntryStub::GenerateAheadOfTime(isolate); | 3387 CEntryStub::GenerateAheadOfTime(isolate); |
3745 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); | 3388 WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); |
3746 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); | 3389 StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); |
3747 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); | 3390 StubFailureTrampolineStub::GenerateAheadOfTime(isolate); |
3748 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); | 3391 RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate); |
3749 } | 3392 } |
3750 | 3393 |
3751 | 3394 |
3752 void CodeStub::GenerateFPStubs(Isolate* isolate) { | 3395 void CodeStub::GenerateFPStubs(Isolate* isolate) { |
3753 SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2) | 3396 SaveFPRegsMode mode = kSaveFPRegs; |
3754 ? kSaveFPRegs | |
3755 : kDontSaveFPRegs; | |
3756 CEntryStub save_doubles(1, mode); | 3397 CEntryStub save_doubles(1, mode); |
3757 StoreBufferOverflowStub stub(mode); | 3398 StoreBufferOverflowStub stub(mode); |
3758 // These stubs might already be in the snapshot, detect that and don't | 3399 // These stubs might already be in the snapshot, detect that and don't |
3759 // regenerate, which would lead to code stub initialization state being messed | 3400 // regenerate, which would lead to code stub initialization state being messed |
3760 // up. | 3401 // up. |
3761 Code* save_doubles_code; | 3402 Code* save_doubles_code; |
3762 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { | 3403 if (!save_doubles.FindCodeInCache(&save_doubles_code, isolate)) { |
3763 save_doubles_code = *save_doubles.GetCode(isolate); | 3404 save_doubles_code = *save_doubles.GetCode(isolate); |
3764 } | 3405 } |
3765 Code* store_buffer_overflow_code; | 3406 Code* store_buffer_overflow_code; |
(...skipping 241 matching lines...)
4007 // r3: argc | 3648 // r3: argc |
4008 // [sp+0]: argv | 3649 // [sp+0]: argv |
4009 | 3650 |
4010 Label invoke, handler_entry, exit; | 3651 Label invoke, handler_entry, exit; |
4011 | 3652 |
4012 // Called from C, so do not pop argc and args on exit (preserve sp) | 3653 // Called from C, so do not pop argc and args on exit (preserve sp) |
4013 // No need to save register-passed args | 3654 // No need to save register-passed args |
4014 // Save callee-saved registers (incl. cp and fp), sp, and lr | 3655 // Save callee-saved registers (incl. cp and fp), sp, and lr |
4015 __ stm(db_w, sp, kCalleeSaved | lr.bit()); | 3656 __ stm(db_w, sp, kCalleeSaved | lr.bit()); |
4016 | 3657 |
4017 if (CpuFeatures::IsSupported(VFP2)) { | 3658 // Save callee-saved vfp registers. |
4018 CpuFeatureScope scope(masm, VFP2); | 3659 __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); |
4019 // Save callee-saved vfp registers. | 3660 // Set up the reserved register for 0.0. |
4020 __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); | 3661 __ vmov(kDoubleRegZero, 0.0); |
4021 // Set up the reserved register for 0.0. | |
4022 __ vmov(kDoubleRegZero, 0.0); | |
4023 } | |
4024 | 3662 |
4025 // Get address of argv, see stm above. | 3663 // Get address of argv, see stm above. |
4026 // r0: code entry | 3664 // r0: code entry |
4027 // r1: function | 3665 // r1: function |
4028 // r2: receiver | 3666 // r2: receiver |
4029 // r3: argc | 3667 // r3: argc |
4030 | 3668 |
4031 // Set up argv in r4. | 3669 // Set up argv in r4. |
4032 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; | 3670 int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; |
4033 if (CpuFeatures::IsSupported(VFP2)) { | 3671 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; |
4034 offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; | |
4035 } | |
4036 __ ldr(r4, MemOperand(sp, offset_to_argv)); | 3672 __ ldr(r4, MemOperand(sp, offset_to_argv)); |
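The offset_to_argv arithmetic just above skips everything the prologue pushed: the integer callee-saved registers plus lr from the stm, and the callee-saved d-registers from the vstm. A sketch with the counts left symbolic (the concrete values live in V8's ARM frame constants, assumed here):

    int OffsetToArgv(int num_callee_saved, int num_double_callee_saved) {
      const int kPointerSize = 4;  // 32-bit ARM
      const int kDoubleSize = 8;
      return (num_callee_saved + 1) * kPointerSize      // +1 for the saved lr
             + num_double_callee_saved * kDoubleSize;   // doubles saved by vstm
    }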
4037 | 3673 |
4038 // Push a frame with special values setup to mark it as an entry frame. | 3674 // Push a frame with special values setup to mark it as an entry frame. |
4039 // r0: code entry | 3675 // r0: code entry |
4040 // r1: function | 3676 // r1: function |
4041 // r2: receiver | 3677 // r2: receiver |
4042 // r3: argc | 3678 // r3: argc |
4043 // r4: argv | 3679 // r4: argv |
4044 Isolate* isolate = masm->isolate(); | 3680 Isolate* isolate = masm->isolate(); |
4045 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. | 3681 __ mov(r8, Operand(-1)); // Push a bad frame pointer to fail if it is used. |
(...skipping 115 matching lines...)
4161 // Reset the stack to the callee saved registers. | 3797 // Reset the stack to the callee saved registers. |
4162 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 3798 __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); |
4163 | 3799 |
4164 // Restore callee-saved registers and return. | 3800 // Restore callee-saved registers and return. |
4165 #ifdef DEBUG | 3801 #ifdef DEBUG |
4166 if (FLAG_debug_code) { | 3802 if (FLAG_debug_code) { |
4167 __ mov(lr, Operand(pc)); | 3803 __ mov(lr, Operand(pc)); |
4168 } | 3804 } |
4169 #endif | 3805 #endif |
4170 | 3806 |
4171 if (CpuFeatures::IsSupported(VFP2)) { | 3807 // Restore callee-saved vfp registers. |
4172 CpuFeatureScope scope(masm, VFP2); | 3808 __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); |
4173 // Restore callee-saved vfp registers. | |
4174 __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); | |
4175 } | |
4176 | 3809 |
4177 __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); | 3810 __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); |
4178 } | 3811 } |
4179 | 3812 |
4180 | 3813 |
4181 // Uses registers r0 to r4. | 3814 // Uses registers r0 to r4. |
4182 // Expected input (depending on whether args are in registers or on the stack): | 3815 // Expected input (depending on whether args are in registers or on the stack): |
4183 // * object: r0 or at sp + 1 * kPointerSize. | 3816 // * object: r0 or at sp + 1 * kPointerSize. |
4184 // * function: r1 or at sp. | 3817 // * function: r1 or at sp. |
4185 // | 3818 // |
(...skipping 2684 matching lines...)
6870 Label miss; | 6503 Label miss; |
6871 | 6504 |
6872 if (left_ == CompareIC::SMI) { | 6505 if (left_ == CompareIC::SMI) { |
6873 __ JumpIfNotSmi(r1, &miss); | 6506 __ JumpIfNotSmi(r1, &miss); |
6874 } | 6507 } |
6875 if (right_ == CompareIC::SMI) { | 6508 if (right_ == CompareIC::SMI) { |
6876 __ JumpIfNotSmi(r0, &miss); | 6509 __ JumpIfNotSmi(r0, &miss); |
6877 } | 6510 } |
6878 | 6511 |
6879 // Inlining the double comparison and falling back to the general compare | 6512 // Inlining the double comparison and falling back to the general compare |
6880 // stub if NaN is involved or VFP2 is unsupported. | 6513 // stub if NaN is involved. |
6881 if (CpuFeatures::IsSupported(VFP2)) { | 6514 // Load left and right operand. |
6882 CpuFeatureScope scope(masm, VFP2); | 6515 Label done, left, left_smi, right_smi; |
| 6516 __ JumpIfSmi(r0, &right_smi); |
| 6517 __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, |
| 6518 DONT_DO_SMI_CHECK); |
| 6519 __ sub(r2, r0, Operand(kHeapObjectTag)); |
| 6520 __ vldr(d1, r2, HeapNumber::kValueOffset); |
| 6521 __ b(&left); |
| 6522 __ bind(&right_smi); |
| 6523 __ SmiUntag(r2, r0); // Can't clobber r0 yet. |
| 6524 SwVfpRegister single_scratch = d2.low(); |
| 6525 __ vmov(single_scratch, r2); |
| 6526 __ vcvt_f64_s32(d1, single_scratch); |
6883 | 6527 |
6884 // Load left and right operand. | 6528 __ bind(&left); |
6885 Label done, left, left_smi, right_smi; | 6529 __ JumpIfSmi(r1, &left_smi); |
6886 __ JumpIfSmi(r0, &right_smi); | 6530 __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, |
6887 __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, | 6531 DONT_DO_SMI_CHECK); |
6888 DONT_DO_SMI_CHECK); | 6532 __ sub(r2, r1, Operand(kHeapObjectTag)); |
6889 __ sub(r2, r0, Operand(kHeapObjectTag)); | 6533 __ vldr(d0, r2, HeapNumber::kValueOffset); |
6890 __ vldr(d1, r2, HeapNumber::kValueOffset); | 6534 __ b(&done); |
6891 __ b(&left); | 6535 __ bind(&left_smi); |
6892 __ bind(&right_smi); | 6536 __ SmiUntag(r2, r1); // Can't clobber r1 yet. |
6893 __ SmiUntag(r2, r0); // Can't clobber r0 yet. | 6537 single_scratch = d3.low(); |
6894 SwVfpRegister single_scratch = d2.low(); | 6538 __ vmov(single_scratch, r2); |
6895 __ vmov(single_scratch, r2); | 6539 __ vcvt_f64_s32(d0, single_scratch); |
6896 __ vcvt_f64_s32(d1, single_scratch); | |
6897 | 6540 |
6898 __ bind(&left); | 6541 __ bind(&done); |
6899 __ JumpIfSmi(r1, &left_smi); | 6542 // Compare operands. |
6900 __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, | 6543 __ VFPCompareAndSetFlags(d0, d1); |
6901 DONT_DO_SMI_CHECK); | |
6902 __ sub(r2, r1, Operand(kHeapObjectTag)); | |
6903 __ vldr(d0, r2, HeapNumber::kValueOffset); | |
6904 __ b(&done); | |
6905 __ bind(&left_smi); | |
6906 __ SmiUntag(r2, r1); // Can't clobber r1 yet. | |
6907 single_scratch = d3.low(); | |
6908 __ vmov(single_scratch, r2); | |
6909 __ vcvt_f64_s32(d0, single_scratch); | |
6910 | 6544 |
6911 __ bind(&done); | 6545 // Don't base result on status bits when a NaN is involved. |
6912 // Compare operands. | 6546 __ b(vs, &unordered); |
6913 __ VFPCompareAndSetFlags(d0, d1); | |
6914 | 6547 |
6915 // Don't base result on status bits when a NaN is involved. | 6548 // Return a result of -1, 0, or 1, based on status bits. |
6916 __ b(vs, &unordered); | 6549 __ mov(r0, Operand(EQUAL), LeaveCC, eq); |
6917 | 6550 __ mov(r0, Operand(LESS), LeaveCC, lt); |
6918 // Return a result of -1, 0, or 1, based on status bits. | 6551 __ mov(r0, Operand(GREATER), LeaveCC, gt); |
6919 __ mov(r0, Operand(EQUAL), LeaveCC, eq); | 6552 __ Ret(); |
6920 __ mov(r0, Operand(LESS), LeaveCC, lt); | |
6921 __ mov(r0, Operand(GREATER), LeaveCC, gt); | |
6922 __ Ret(); | |
6923 } | |
6924 | 6553 |
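What the VFP sequence above computes, in scalar terms: each operand is either a smi (untag to int32, convert to double) or a heap number (load the double directly); the compare then yields -1/0/1 via the conditional moves, with NaN (the vs branch) deferring to the generic stub. A hedged scalar sketch (the enum and function are illustrative, not V8 declarations):

    #include <cmath>

    enum Result { kLess = -1, kEqual = 0, kGreater = 1, kUnordered = 2 };

    Result CompareDoubles(double left, double right) {
      if (std::isnan(left) || std::isnan(right)) return kUnordered;  // vs branch
      if (left == right) return kEqual;   // mov r0, EQUAL,   eq
      return left < right ? kLess        // mov r0, LESS,    lt
                          : kGreater;    // mov r0, GREATER, gt
    }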
6925 __ bind(&unordered); | 6554 __ bind(&unordered); |
6926 __ bind(&generic_stub); | 6555 __ bind(&generic_stub); |
6927 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, | 6556 ICCompareStub stub(op_, CompareIC::GENERIC, CompareIC::GENERIC, |
6928 CompareIC::GENERIC); | 6557 CompareIC::GENERIC); |
6929 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | 6558 __ Jump(stub.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
6930 | 6559 |
6931 __ bind(&maybe_undefined1); | 6560 __ bind(&maybe_undefined1); |
6932 if (Token::IsOrderedRelationalCompareOp(op_)) { | 6561 if (Token::IsOrderedRelationalCompareOp(op_)) { |
6933 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); | 6562 __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); |
(...skipping 611 matching lines...)
7545 entry->value, | 7174 entry->value, |
7546 entry->address, | 7175 entry->address, |
7547 entry->action, | 7176 entry->action, |
7548 kDontSaveFPRegs); | 7177 kDontSaveFPRegs); |
7549 stub.GetCode(isolate)->set_is_pregenerated(true); | 7178 stub.GetCode(isolate)->set_is_pregenerated(true); |
7550 } | 7179 } |
7551 } | 7180 } |
7552 | 7181 |
7553 | 7182 |
7554 bool CodeStub::CanUseFPRegisters() { | 7183 bool CodeStub::CanUseFPRegisters() { |
7555 return CpuFeatures::IsSupported(VFP2); | 7184 return true; // VFP2 is a base requirement for V8 |
7556 } | 7185 } |
7557 | 7186 |
7558 | 7187 |
7559 // Takes the input in 3 registers: address_ value_ and object_. A pointer to | 7188 // Takes the input in 3 registers: address_ value_ and object_. A pointer to |
7560 // the value has just been written into the object, now this stub makes sure | 7189 // the value has just been written into the object, now this stub makes sure |
7561 // we keep the GC informed. The word in the object where the value has been | 7190 // we keep the GC informed. The word in the object where the value has been |
7562 // written is in the address register. | 7191 // written is in the address register. |
7563 void RecordWriteStub::Generate(MacroAssembler* masm) { | 7192 void RecordWriteStub::Generate(MacroAssembler* masm) { |
7564 Label skip_to_incremental_noncompacting; | 7193 Label skip_to_incremental_noncompacting; |
7565 Label skip_to_incremental_compacting; | 7194 Label skip_to_incremental_compacting; |
(...skipping 324 matching lines...)
7890 | 7519 |
7891 __ Pop(lr, r5, r1); | 7520 __ Pop(lr, r5, r1); |
7892 __ Ret(); | 7521 __ Ret(); |
7893 } | 7522 } |
7894 | 7523 |
7895 #undef __ | 7524 #undef __ |
7896 | 7525 |
7897 } } // namespace v8::internal | 7526 } } // namespace v8::internal |
7898 | 7527 |
7899 #endif // V8_TARGET_ARCH_ARM | 7528 #endif // V8_TARGET_ARCH_ARM |