OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #if V8_TARGET_ARCH_MIPS | 7 #if V8_TARGET_ARCH_MIPS |
8 | 8 |
9 #include "src/codegen.h" | 9 #include "src/codegen.h" |
10 #include "src/macro-assembler.h" | 10 #include "src/macro-assembler.h" |
(...skipping 637 matching lines...)
648 masm->set_has_frame(false); | 648 masm->set_has_frame(false); |
649 } | 649 } |
650 | 650 |
651 | 651 |
652 // ------------------------------------------------------------------------- | 652 // ------------------------------------------------------------------------- |
653 // Code generators | 653 // Code generators |
654 | 654 |
655 #define __ ACCESS_MASM(masm) | 655 #define __ ACCESS_MASM(masm) |
656 | 656 |
657 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( | 657 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( |
658 MacroAssembler* masm, AllocationSiteMode mode, | 658 MacroAssembler* masm, |
| 659 Register receiver, |
| 660 Register key, |
| 661 Register value, |
| 662 Register target_map, |
| 663 AllocationSiteMode mode, |
659 Label* allocation_memento_found) { | 664 Label* allocation_memento_found) { |
660 // ----------- S t a t e ------------- | 665 Register scratch_elements = t0; |
661 // -- a0 : value | 666 ASSERT(!AreAliased(receiver, key, value, target_map, |
662 // -- a1 : key | 667 scratch_elements)); |
663 // -- a2 : receiver | 668 |
664 // -- ra : return address | |
665 // -- a3 : target map, scratch for subsequent call | |
666 // -- t0 : scratch (elements) | |
667 // ----------------------------------- | |
668 if (mode == TRACK_ALLOCATION_SITE) { | 669 if (mode == TRACK_ALLOCATION_SITE) { |
669 ASSERT(allocation_memento_found != NULL); | 670 ASSERT(allocation_memento_found != NULL); |
670 __ JumpIfJSArrayHasAllocationMemento(a2, t0, allocation_memento_found); | 671 __ JumpIfJSArrayHasAllocationMemento( |
| 672 receiver, scratch_elements, allocation_memento_found); |
671 } | 673 } |
672 | 674 |
673 // Set transitioned map. | 675 // Set transitioned map. |
674 __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset)); | 676 __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
675 __ RecordWriteField(a2, | 677 __ RecordWriteField(receiver, |
676 HeapObject::kMapOffset, | 678 HeapObject::kMapOffset, |
677 a3, | 679 target_map, |
678 t5, | 680 t5, |
679 kRAHasNotBeenSaved, | 681 kRAHasNotBeenSaved, |
680 kDontSaveFPRegs, | 682 kDontSaveFPRegs, |
681 EMIT_REMEMBERED_SET, | 683 EMIT_REMEMBERED_SET, |
682 OMIT_SMI_CHECK); | 684 OMIT_SMI_CHECK); |
683 } | 685 } |
684 | 686 |
685 | 687 |
686 void ElementsTransitionGenerator::GenerateSmiToDouble( | 688 void ElementsTransitionGenerator::GenerateSmiToDouble( |
687 MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { | 689 MacroAssembler* masm, |
688 // ----------- S t a t e ------------- | 690 Register receiver, |
689 // -- a0 : value | 691 Register key, |
690 // -- a1 : key | 692 Register value, |
691 // -- a2 : receiver | 693 Register target_map, |
692 // -- ra : return address | 694 AllocationSiteMode mode, |
693 // -- a3 : target map, scratch for subsequent call | 695 Label* fail) { |
694 // -- t0 : scratch (elements) | 696 // Register ra contains the return address. |
695 // ----------------------------------- | |
696 Label loop, entry, convert_hole, gc_required, only_change_map, done; | 697 Label loop, entry, convert_hole, gc_required, only_change_map, done; |
| 698 Register elements = t0; |
| 699 Register length = t1; |
| 700 Register array = t2; |
| 701 Register array_end = array; |
| 702 |
| 703 // target_map parameter can be clobbered. |
| 704 Register scratch1 = target_map; |
| 705 Register scratch2 = t5; |
| 706 Register scratch3 = t3; |
| 707 |
| 708 // Verify input registers don't conflict with locals. |
| 709 ASSERT(!AreAliased(receiver, key, value, target_map, |
| 710 elements, length, array, scratch2)); |
697 | 711 |
698 Register scratch = t6; | 712 Register scratch = t6; |
699 | 713 |
700 if (mode == TRACK_ALLOCATION_SITE) { | 714 if (mode == TRACK_ALLOCATION_SITE) { |
701 __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail); | 715 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); |
702 } | 716 } |
703 | 717 |
704 // Check for empty arrays, which only require a map transition and no changes | 718 // Check for empty arrays, which only require a map transition and no changes |
705 // to the backing store. | 719 // to the backing store. |
706 __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset)); | 720 __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
707 __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex); | 721 __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex); |
708 __ Branch(&only_change_map, eq, at, Operand(t0)); | 722 __ Branch(&only_change_map, eq, at, Operand(elements)); |
709 | 723 |
710 __ push(ra); | 724 __ push(ra); |
711 __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset)); | 725 __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
712 // t0: source FixedArray | 726 // elements: source FixedArray |
713 // t1: number of elements (smi-tagged) | 727 // length: number of elements (smi-tagged) |
714 | 728 |
715 // Allocate new FixedDoubleArray. | 729 // Allocate new FixedDoubleArray. |
716 __ sll(scratch, t1, 2); | 730 __ sll(scratch, length, 2); |
717 __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize); | 731 __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize); |
718 __ Allocate(scratch, t2, t3, t5, &gc_required, DOUBLE_ALIGNMENT); | 732 __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT); |
719 // t2: destination FixedDoubleArray, not tagged as heap object | 733 // array: destination FixedDoubleArray, not tagged as heap object |
720 | 734 |
721 // Set destination FixedDoubleArray's length and map. | 735 // Set destination FixedDoubleArray's length and map. |
722 __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex); | 736 __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex); |
723 __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset)); | 737 __ sw(length, MemOperand(array, FixedDoubleArray::kLengthOffset)); |
724 __ sw(t5, MemOperand(t2, HeapObject::kMapOffset)); | |
725 // Update receiver's map. | 738 __ sw(scratch2, MemOperand(array, HeapObject::kMapOffset)); |
 | 739 // Update receiver's map. |
726 | 740 |
727 __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset)); | 741 __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
728 __ RecordWriteField(a2, | 742 __ RecordWriteField(receiver, |
729 HeapObject::kMapOffset, | 743 HeapObject::kMapOffset, |
730 a3, | 744 target_map, |
731 t5, | 745 scratch2, |
732 kRAHasBeenSaved, | 746 kRAHasBeenSaved, |
733 kDontSaveFPRegs, | 747 kDontSaveFPRegs, |
734 OMIT_REMEMBERED_SET, | 748 OMIT_REMEMBERED_SET, |
735 OMIT_SMI_CHECK); | 749 OMIT_SMI_CHECK); |
736 // Replace receiver's backing store with newly created FixedDoubleArray. | 750 // Replace receiver's backing store with newly created FixedDoubleArray. |
737 __ Addu(a3, t2, Operand(kHeapObjectTag)); | 751 __ Addu(scratch1, array, Operand(kHeapObjectTag)); |
738 __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset)); | 752 __ sw(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
739 __ RecordWriteField(a2, | 753 __ RecordWriteField(receiver, |
740 JSObject::kElementsOffset, | 754 JSObject::kElementsOffset, |
741 a3, | 755 scratch1, |
742 t5, | 756 scratch2, |
743 kRAHasBeenSaved, | 757 kRAHasBeenSaved, |
744 kDontSaveFPRegs, | 758 kDontSaveFPRegs, |
745 EMIT_REMEMBERED_SET, | 759 EMIT_REMEMBERED_SET, |
746 OMIT_SMI_CHECK); | 760 OMIT_SMI_CHECK); |
747 | 761 |
748 | 762 |
749 // Prepare for conversion loop. | 763 // Prepare for conversion loop. |
750 __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 764 __ Addu(scratch1, elements, |
751 __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize)); | 765 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
752 __ sll(t2, t1, 2); | 766 __ Addu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize)); |
753 __ Addu(t2, t2, t3); | 767 __ sll(at, length, 2); |
754 __ li(t0, Operand(kHoleNanLower32)); | 768 __ Addu(array_end, scratch3, at); |
755 __ li(t1, Operand(kHoleNanUpper32)); | |
756 // t0: kHoleNanLower32 | |
757 // t1: kHoleNanUpper32 | |
758 // t2: end of destination FixedDoubleArray, not tagged | |
759 // t3: begin of FixedDoubleArray element fields, not tagged | |
760 | 769 |
761 __ Branch(&entry); | 770 // Repurpose registers no longer in use. |
| 771 Register hole_lower = elements; |
| 772 Register hole_upper = length; |
| 773 |
| 774 __ li(hole_lower, Operand(kHoleNanLower32)); |
| 775 // scratch1: begin of source FixedArray element fields, not tagged |
| 776 // hole_lower: kHoleNanLower32 |
| 777 // hole_upper: kHoleNanUpper32 |
| 778 // array_end: end of destination FixedDoubleArray, not tagged |
| 779 // scratch3: begin of FixedDoubleArray element fields, not tagged |
| 780 __ Branch(USE_DELAY_SLOT, &entry); |
| 781 __ li(hole_upper, Operand(kHoleNanUpper32)); // In delay slot. |
762 | 782 |
763 __ bind(&only_change_map); | 783 __ bind(&only_change_map); |
764 __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset)); | 784 __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
765 __ RecordWriteField(a2, | 785 __ RecordWriteField(receiver, |
766 HeapObject::kMapOffset, | 786 HeapObject::kMapOffset, |
767 a3, | 787 target_map, |
768 t5, | 788 scratch2, |
769 kRAHasNotBeenSaved, | 789 kRAHasNotBeenSaved, |
770 kDontSaveFPRegs, | 790 kDontSaveFPRegs, |
771 OMIT_REMEMBERED_SET, | 791 OMIT_REMEMBERED_SET, |
772 OMIT_SMI_CHECK); | 792 OMIT_SMI_CHECK); |
773 __ Branch(&done); | 793 __ Branch(&done); |
774 | 794 |
775 // Call into runtime if GC is required. | 795 // Call into runtime if GC is required. |
776 __ bind(&gc_required); | 796 __ bind(&gc_required); |
 | 797 __ lw(ra, MemOperand(sp, 0)); |
777 __ pop(ra); | 798 __ Branch(USE_DELAY_SLOT, fail); |
778 __ Branch(fail); | |
779 | 799 __ addiu(sp, sp, kPointerSize); // In delay slot. |
780 // Convert and copy elements. | 800 // Convert and copy elements. |
781 __ bind(&loop); | 801 __ bind(&loop); |
782 __ lw(t5, MemOperand(a3)); | 802 __ lw(scratch2, MemOperand(scratch1)); |
783 __ Addu(a3, a3, kIntSize); | 803 __ Addu(scratch1, scratch1, kIntSize); |
784 // t5: current element | 804 // scratch2: current element |
785 __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole); | 805 __ UntagAndJumpIfNotSmi(scratch2, scratch2, &convert_hole); |
786 | 806 |
787 // Normal smi, convert to double and store. | 807 // Normal smi, convert to double and store. |
788 __ mtc1(t5, f0); | 808 __ mtc1(scratch2, f0); |
789 __ cvt_d_w(f0, f0); | 809 __ cvt_d_w(f0, f0); |
790 __ sdc1(f0, MemOperand(t3)); | 810 __ sdc1(f0, MemOperand(scratch3)); |
791 __ Addu(t3, t3, kDoubleSize); | 811 __ Branch(USE_DELAY_SLOT, &entry); |
792 | 812 __ addiu(scratch3, scratch3, kDoubleSize); // In delay slot. |
793 __ Branch(&entry); | |
794 | 813 |
795 // Hole found, store the-hole NaN. | 814 // Hole found, store the-hole NaN. |
796 __ bind(&convert_hole); | 815 __ bind(&convert_hole); |
797 if (FLAG_debug_code) { | 816 if (FLAG_debug_code) { |
798 // Restore a "smi-untagged" heap object. | 817 // Restore a "smi-untagged" heap object. |
799 __ SmiTag(t5); | 818 __ SmiTag(scratch2); |
800 __ Or(t5, t5, Operand(1)); | 819 __ Or(scratch2, scratch2, Operand(1)); |
801 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); | 820 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
802 __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5)); | 821 __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2)); |
803 } | 822 } |
804 __ sw(t0, MemOperand(t3, Register::kMantissaOffset)); // mantissa | 823 // mantissa |
805 __ sw(t1, MemOperand(t3, Register::kExponentOffset)); // exponent | 824 __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset)); |
806 __ Addu(t3, t3, kDoubleSize); | 825 // exponent |
| 826 __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset)); |
 | 827 __ addiu(scratch3, scratch3, kDoubleSize); |
 | 828 |
807 | 829 __ bind(&entry); |
808 __ bind(&entry); | 830 __ Branch(&loop, lt, scratch3, Operand(array_end)); |
809 __ Branch(&loop, lt, t3, Operand(t2)); | |
810 | 831 |
811 __ pop(ra); | 832 __ pop(ra); |
812 __ bind(&done); | 833 __ bind(&done); |
813 } | 834 } |
814 | 835 |
815 | 836 |
816 void ElementsTransitionGenerator::GenerateDoubleToObject( | 837 void ElementsTransitionGenerator::GenerateDoubleToObject( |
817 MacroAssembler* masm, AllocationSiteMode mode, Label* fail) { | 838 MacroAssembler* masm, |
818 // ----------- S t a t e ------------- | 839 Register receiver, |
819 // -- a0 : value | 840 Register key, |
820 // -- a1 : key | 841 Register value, |
821 // -- a2 : receiver | 842 Register target_map, |
822 // -- ra : return address | 843 AllocationSiteMode mode, |
823 // -- a3 : target map, scratch for subsequent call | 844 Label* fail) { |
824 // -- t0 : scratch (elements) | 845 // Register ra contains the return address. |
825 // ----------------------------------- | |
826 Label entry, loop, convert_hole, gc_required, only_change_map; | 846 Label entry, loop, convert_hole, gc_required, only_change_map; |
| 847 Register elements = t0; |
| 848 Register array = t2; |
| 849 Register length = t1; |
| 850 Register scratch = t5; |
| 851 |
| 852 // Verify input registers don't conflict with locals. |
| 853 ASSERT(!AreAliased(receiver, key, value, target_map, |
| 854 elements, array, length, scratch)); |
827 | 855 |
828 if (mode == TRACK_ALLOCATION_SITE) { | 856 if (mode == TRACK_ALLOCATION_SITE) { |
829 __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail); | 857 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail); |
830 } | 858 } |
831 | 859 |
832 // Check for empty arrays, which only require a map transition and no changes | 860 // Check for empty arrays, which only require a map transition and no changes |
833 // to the backing store. | 861 // to the backing store. |
834 __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset)); | 862 __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
835 __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex); | 863 __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex); |
836 __ Branch(&only_change_map, eq, at, Operand(t0)); | 864 __ Branch(&only_change_map, eq, at, Operand(elements)); |
837 | 865 |
838 __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit()); | 866 __ MultiPush( |
| 867 value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit()); |
839 | 868 |
840 __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset)); | 869 __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
841 // t0: source FixedArray | 870 // elements: source FixedArray |
842 // t1: number of elements (smi-tagged) | 871 // length: number of elements (smi-tagged) |
843 | 872 |
844 // Allocate new FixedArray. | 873 // Allocate new FixedArray. |
845 __ sll(a0, t1, 1); | 874 // Re-use value and target_map registers, as they have been saved on the |
846 __ Addu(a0, a0, FixedDoubleArray::kHeaderSize); | 875 // stack. |
847 __ Allocate(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS); | 876 Register array_size = value; |
848 // t2: destination FixedArray, not tagged as heap object | 877 Register allocate_scratch = target_map; |
| 878 __ sll(array_size, length, 1); |
| 879 __ Addu(array_size, array_size, FixedDoubleArray::kHeaderSize); |
| 880 __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required, |
| 881 NO_ALLOCATION_FLAGS); |
| 882 // array: destination FixedArray, not tagged as heap object |
849 // Set destination FixedDoubleArray's length and map. | 883 // Set destination FixedArray's length and map. |
850 __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex); | 884 __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex); |
851 __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset)); | 885 __ sw(length, MemOperand(array, FixedDoubleArray::kLengthOffset)); |
852 __ sw(t5, MemOperand(t2, HeapObject::kMapOffset)); | 886 __ sw(scratch, MemOperand(array, HeapObject::kMapOffset)); |
853 | 887 |
854 // Prepare for conversion loop. | 888 // Prepare for conversion loop. |
855 __ Addu(t0, t0, Operand( | 889 Register src_elements = elements; |
| 890 Register dst_elements = target_map; |
| 891 Register dst_end = length; |
| 892 Register heap_number_map = scratch; |
| 893 __ Addu(src_elements, src_elements, Operand( |
856 FixedDoubleArray::kHeaderSize - kHeapObjectTag | 894 FixedDoubleArray::kHeaderSize - kHeapObjectTag |
857 + Register::kExponentOffset)); | 895 + Register::kExponentOffset)); |
858 __ Addu(a3, t2, Operand(FixedArray::kHeaderSize)); | 896 __ Addu(dst_elements, array, Operand(FixedArray::kHeaderSize)); |
859 __ Addu(t2, t2, Operand(kHeapObjectTag)); | 897 __ Addu(array, array, Operand(kHeapObjectTag)); |
860 __ sll(t1, t1, 1); | 898 __ sll(dst_end, dst_end, 1); |
861 __ Addu(t1, a3, t1); | 899 __ Addu(dst_end, dst_elements, dst_end); |
862 __ LoadRoot(t3, Heap::kTheHoleValueRootIndex); | 900 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
863 __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex); | |
864 // Using offsetted addresses. | 901 // Using offsetted addresses. |
865 // a3: begin of destination FixedArray element fields, not tagged | 902 // dst_elements: begin of destination FixedArray element fields, not tagged |
866 // t0: begin of source FixedDoubleArray element fields, not tagged, | 903 // src_elements: begin of source FixedDoubleArray element fields, not tagged, |
867 // points to the exponent | 904 // points to the exponent |
868 // t1: end of destination FixedArray, not tagged | 905 // dst_end: end of destination FixedArray, not tagged |
869 // t2: destination FixedArray | 906 // array: destination FixedArray |
870 // t3: the-hole pointer | 907 // heap_number_map: heap number map |
871 // t5: heap number map | |
872 __ Branch(&entry); | 908 __ Branch(&entry); |
873 | 909 |
874 // Call into runtime if GC is required. | 910 // Call into runtime if GC is required. |
875 __ bind(&gc_required); | 911 __ bind(&gc_required); |
876 __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit()); | 912 __ MultiPop( |
| 913 value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit()); |
877 | 914 |
878 __ Branch(fail); | 915 __ Branch(fail); |
879 | 916 |
880 __ bind(&loop); | 917 __ bind(&loop); |
881 __ lw(a1, MemOperand(t0)); | 918 Register upper_bits = key; |
882 __ Addu(t0, t0, kDoubleSize); | 919 __ lw(upper_bits, MemOperand(src_elements)); |
883 // a1: current element's upper 32 bit | 920 __ Addu(src_elements, src_elements, kDoubleSize); |
884 // t0: address of next element's upper 32 bit | 921 // upper_bits: current element's upper 32 bits |
 | 922 // src_elements: address of next element's upper 32 bits |
885 __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32)); | 923 __ Branch(&convert_hole, eq, upper_bits, Operand(kHoleNanUpper32)); |
886 | 924 |
887 // Non-hole double, copy value into a heap number. | 925 // Non-hole double, copy value into a heap number. |
888 __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required); | 926 Register heap_number = receiver; |
889 // a2: new heap number | 927 Register scratch2 = value; |
890 // Load mantissa of current element, t0 point to exponent of next element. | 928 Register scratch3 = t6; |
891 __ lw(a0, MemOperand(t0, (Register::kMantissaOffset | 929 __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map, |
| 930 &gc_required); |
| 931 // heap_number: new heap number |
 | 932 // Load mantissa of current element; src_elements |
 | 933 // points to the exponent of the next element. |
| 934 __ lw(scratch2, MemOperand(src_elements, (Register::kMantissaOffset |
892 - Register::kExponentOffset - kDoubleSize))); | 935 - Register::kExponentOffset - kDoubleSize))); |
893 __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset)); | 936 __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset)); |
894 __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset)); | 937 __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset)); |
895 __ mov(a0, a3); | 938 __ mov(scratch2, dst_elements); |
896 __ sw(a2, MemOperand(a3)); | 939 __ sw(heap_number, MemOperand(dst_elements)); |
897 __ Addu(a3, a3, kIntSize); | 940 __ Addu(dst_elements, dst_elements, kIntSize); |
898 __ RecordWrite(t2, | 941 __ RecordWrite(array, |
899 a0, | 942 scratch2, |
900 a2, | 943 heap_number, |
901 kRAHasBeenSaved, | 944 kRAHasBeenSaved, |
902 kDontSaveFPRegs, | 945 kDontSaveFPRegs, |
903 EMIT_REMEMBERED_SET, | 946 EMIT_REMEMBERED_SET, |
904 OMIT_SMI_CHECK); | 947 OMIT_SMI_CHECK); |
905 __ Branch(&entry); | 948 __ Branch(&entry); |
906 | 949 |
907 // Replace the-hole NaN with the-hole pointer. | 950 // Replace the-hole NaN with the-hole pointer. |
908 __ bind(&convert_hole); | 951 __ bind(&convert_hole); |
909 __ sw(t3, MemOperand(a3)); | 952 __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex); |
910 __ Addu(a3, a3, kIntSize); | 953 __ sw(scratch2, MemOperand(dst_elements)); |
| 954 __ Addu(dst_elements, dst_elements, kIntSize); |
911 | 955 |
912 __ bind(&entry); | 956 __ bind(&entry); |
913 __ Branch(&loop, lt, a3, Operand(t1)); | 957 __ Branch(&loop, lt, dst_elements, Operand(dst_end)); |
914 | 958 |
915 __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit()); | 959 __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit()); |
916 // Replace receiver's backing store with newly created and filled FixedArray. | 960 // Replace receiver's backing store with newly created and filled FixedArray. |
917 __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset)); | 961 __ sw(array, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
918 __ RecordWriteField(a2, | 962 __ RecordWriteField(receiver, |
919 JSObject::kElementsOffset, | 963 JSObject::kElementsOffset, |
920 t2, | 964 array, |
921 t5, | 965 scratch, |
922 kRAHasBeenSaved, | 966 kRAHasBeenSaved, |
923 kDontSaveFPRegs, | 967 kDontSaveFPRegs, |
924 EMIT_REMEMBERED_SET, | 968 EMIT_REMEMBERED_SET, |
925 OMIT_SMI_CHECK); | 969 OMIT_SMI_CHECK); |
926 __ pop(ra); | 970 __ pop(ra); |
927 | 971 |
928 __ bind(&only_change_map); | 972 __ bind(&only_change_map); |
929 // Update receiver's map. | 973 // Update receiver's map. |
930 __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset)); | 974 __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
931 __ RecordWriteField(a2, | 975 __ RecordWriteField(receiver, |
932 HeapObject::kMapOffset, | 976 HeapObject::kMapOffset, |
933 a3, | 977 target_map, |
934 t5, | 978 scratch, |
935 kRAHasNotBeenSaved, | 979 kRAHasNotBeenSaved, |
936 kDontSaveFPRegs, | 980 kDontSaveFPRegs, |
937 OMIT_REMEMBERED_SET, | 981 OMIT_REMEMBERED_SET, |
938 OMIT_SMI_CHECK); | 982 OMIT_SMI_CHECK); |
939 } | 983 } |
940 | 984 |
941 | 985 |
942 void StringCharLoadGenerator::Generate(MacroAssembler* masm, | 986 void StringCharLoadGenerator::Generate(MacroAssembler* masm, |
943 Register string, | 987 Register string, |
944 Register index, | 988 Register index, |
(...skipping 251 matching lines...)
1196 patcher.masm()->nop(); // Pad the empty space. | 1240 patcher.masm()->nop(); // Pad the empty space. |
1197 } | 1241 } |
1198 } | 1242 } |
1199 | 1243 |
1200 | 1244 |
1201 #undef __ | 1245 #undef __ |
1202 | 1246 |
1203 } } // namespace v8::internal | 1247 } } // namespace v8::internal |
1204 | 1248 |
1205 #endif // V8_TARGET_ARCH_MIPS | 1249 #endif // V8_TARGET_ARCH_MIPS |