Chromium Code Reviews

Unified Diff: src/mips64/codegen-mips64.cc

Issue 393693003: MIPS: Use register parameters in ElementsTransitionGenerator. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Add mips64 port. Created 6 years, 5 months ago
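This patch changes the three ElementsTransitionGenerator entry points so that the receiver, key, value and transition map arrive as explicit Register parameters instead of the fixed-register convention the deleted state comments documented (a0 = value, a1 = key, a2 = receiver, a3 = target map, a4 = elements scratch). For orientation, a sketch of the resulting signatures as reconstructed from the definitions in the diff below; the shared declarations themselves are assumed to live in src/codegen.h, which is not part of this file:

  // Sketch only, reconstructed from the definitions in the diff below.
  // The 'static' keyword and the src/codegen.h location are assumptions.
  static void GenerateMapChangeElementsTransition(
      MacroAssembler* masm, Register receiver, Register key, Register value,
      Register target_map, AllocationSiteMode mode,
      Label* allocation_memento_found);
  static void GenerateSmiToDouble(
      MacroAssembler* masm, Register receiver, Register key, Register value,
      Register target_map, AllocationSiteMode mode, Label* fail);
  static void GenerateDoubleToObject(
      MacroAssembler* masm, Register receiver, Register key, Register value,
      Register target_map, AllocationSiteMode mode, Label* fail);

Inside the mips64 bodies, the scratch registers that used to be implicit (a4, a5, a6, a7, t1, ...) are bound to named Register locals, and ASSERT(!AreAliased(...)) guards against callers passing aliasing registers.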
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/v8.h"
 
 #if V8_TARGET_ARCH_MIPS64
 
 #include "src/codegen.h"
 #include "src/macro-assembler.h"
(...skipping 533 matching lines...)
   masm->set_has_frame(false);
 }
 
 
 // -------------------------------------------------------------------------
 // Code generators
 
 #define __ ACCESS_MASM(masm)
 
 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm, AllocationSiteMode mode,
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
     Label* allocation_memento_found) {
-  // ----------- S t a t e -------------
-  //  -- a0      : value
-  //  -- a1      : key
-  //  -- a2      : receiver
-  //  -- ra      : return address
-  //  -- a3      : target map, scratch for subsequent call
-  //  -- a4      : scratch (elements)
-  // -----------------------------------
+  Register scratch_elements = a4;
+  ASSERT(!AreAliased(receiver, key, value, target_map,
+                     scratch_elements));
+
   if (mode == TRACK_ALLOCATION_SITE) {
-    ASSERT(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(a2, a4, allocation_memento_found);
+    __ JumpIfJSArrayHasAllocationMemento(
+        receiver, scratch_elements, allocation_memento_found);
   }
 
   // Set transitioned map.
-  __ sd(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ RecordWriteField(a2,
                       HeapObject::kMapOffset,
-                      a3,
+                      target_map,
                       t1,
                       kRAHasNotBeenSaved,
                       kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
 }
 
 
 void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- a0      : value
-  //  -- a1      : key
-  //  -- a2      : receiver
-  //  -- ra      : return address
-  //  -- a3      : target map, scratch for subsequent call
-  //  -- a4      : scratch (elements)
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Register ra contains the return address.
   Label loop, entry, convert_hole, gc_required, only_change_map, done;
+  Register elements = a4;
+  Register length = a5;
+  Register array = a6;
+  Register array_end = array;
+
+  // target_map parameter can be clobbered.
+  Register scratch1 = target_map;
+  Register scratch2 = t1;
+  Register scratch3 = a7;
+
+  // Verify input registers don't conflict with locals.
+  ASSERT(!AreAliased(receiver, key, value, target_map,
+                     elements, length, array, scratch2));
 
   Register scratch = t2;
   if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(a2, a4, fail);
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
   // to the backing store.
-  __ ld(a4, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(&only_change_map, eq, at, Operand(a4));
+  __ Branch(&only_change_map, eq, at, Operand(elements));
 
   __ push(ra);
-  __ ld(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
-  // a4: source FixedArray
-  // a5: number of elements (smi-tagged)
+  __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // elements: source FixedArray
+  // length: number of elements (smi-tagged)
 
   // Allocate new FixedDoubleArray.
-  __ SmiScale(scratch, a5, kDoubleSizeLog2);
+  __ SmiScale(scratch, length, kDoubleSizeLog2);
   __ Daddu(scratch, scratch, FixedDoubleArray::kHeaderSize);
-  __ Allocate(scratch, a6, a7, t1, &gc_required, DOUBLE_ALIGNMENT);
-  // a6: destination FixedDoubleArray, not tagged as heap object
+  __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+  // array: destination FixedDoubleArray, not tagged as heap object
 
   // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(t1, Heap::kFixedDoubleArrayMapRootIndex);
-  __ sd(a5, MemOperand(a6, FixedDoubleArray::kLengthOffset));
-  __ sd(t1, MemOperand(a6, HeapObject::kMapOffset));
+  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
+  __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
   // Update receiver's map.
+  __ sd(scratch2, MemOperand(array, HeapObject::kMapOffset));
 
-  __ sd(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
+  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      a3,
-                      t1,
+                      target_map,
+                      scratch2,
                       kRAHasBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ Daddu(a3, a6, Operand(kHeapObjectTag));
-  __ sd(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
-  __ RecordWriteField(a2,
+  __ Daddu(scratch1, array, Operand(kHeapObjectTag));
+  __ sd(scratch1, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ RecordWriteField(receiver,
                       JSObject::kElementsOffset,
-                      a3,
-                      t1,
+                      scratch1,
+                      scratch2,
                       kRAHasBeenSaved,
                       kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
 
 
   // Prepare for conversion loop.
-  __ Daddu(a3, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Daddu(a7, a6, Operand(FixedDoubleArray::kHeaderSize));
-  __ SmiScale(a6, a5, kDoubleSizeLog2);
-  __ Daddu(a6, a6, a7);
-  __ li(a4, Operand(kHoleNanLower32));
-  __ li(a5, Operand(kHoleNanUpper32));
-  // a4: kHoleNanLower32
-  // a5: kHoleNanUpper32
-  // a6: end of destination FixedDoubleArray, not tagged
-  // a7: begin of FixedDoubleArray element fields, not tagged
+  __ Daddu(scratch1, elements,
+           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Daddu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
+  __ SmiScale(array_end, length, kDoubleSizeLog2);
+  __ Daddu(array_end, array_end, scratch3);
 
+  // Repurpose registers no longer in use.
+  Register hole_lower = elements;
+  Register hole_upper = length;
+  __ li(hole_lower, Operand(kHoleNanLower32));
+  __ li(hole_upper, Operand(kHoleNanUpper32));
+  // scratch1: begin of source FixedArray element fields, not tagged
+  // hole_lower: kHoleNanLower32
+  // hole_upper: kHoleNanUpper32
+  // array_end: end of destination FixedDoubleArray, not tagged
+  // scratch3: begin of FixedDoubleArray element fields, not tagged
   __ Branch(&entry);
 
   __ bind(&only_change_map);
-  __ sd(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
+  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      a3,
-                      t1,
+                      target_map,
+                      scratch2,
                       kRAHasNotBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   __ Branch(&done);
 
   // Call into runtime if GC is required.
   __ bind(&gc_required);
   __ pop(ra);
   __ Branch(fail);
 
   // Convert and copy elements.
   __ bind(&loop);
-  __ ld(t1, MemOperand(a3));
-  __ Daddu(a3, a3, kIntSize);
-  // t1: current element
-  __ JumpIfNotSmi(t1, &convert_hole);
-  __ SmiUntag(t1);
+  __ ld(scratch2, MemOperand(scratch1));
+  __ Daddu(scratch1, scratch1, kIntSize);
+  // scratch2: current element
+  __ JumpIfNotSmi(scratch2, &convert_hole);
+  __ SmiUntag(scratch2);
 
   // Normal smi, convert to double and store.
-  __ mtc1(t1, f0);
+  __ mtc1(scratch2, f0);
   __ cvt_d_w(f0, f0);
-  __ sdc1(f0, MemOperand(a7));
-  __ Daddu(a7, a7, kDoubleSize);
+  __ sdc1(f0, MemOperand(scratch3));
+  __ Daddu(scratch3, scratch3, kDoubleSize);
 
   __ Branch(&entry);
 
   // Hole found, store the-hole NaN.
   __ bind(&convert_hole);
   if (FLAG_debug_code) {
     // Restore a "smi-untagged" heap object.
-    __ Or(t1, t1, Operand(1));
+    __ Or(scratch2, scratch2, Operand(1));
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t1));
+    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
   }
-  __ sw(a4, MemOperand(a7));  // mantissa
-  __ sw(a5, MemOperand(a7, kIntSize));  // exponent
-  __ Daddu(a7, a7, kDoubleSize);
+  // mantissa
+  __ sw(hole_lower, MemOperand(scratch3));
+  // exponent
+  __ sw(hole_upper, MemOperand(scratch3, kIntSize));
+  __ Daddu(scratch3, scratch3, kDoubleSize);
 
   __ bind(&entry);
-  __ Branch(&loop, lt, a7, Operand(a6));
+  __ Branch(&loop, lt, scratch3, Operand(array_end));
 
   __ pop(ra);
   __ bind(&done);
 }
 
 
 void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- a0      : value
-  //  -- a1      : key
-  //  -- a2      : receiver
-  //  -- ra      : return address
-  //  -- a3      : target map, scratch for subsequent call
-  //  -- a4      : scratch (elements)
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Register ra contains the return address.
   Label entry, loop, convert_hole, gc_required, only_change_map;
+  Register elements = a4;
+  Register array = a6;
+  Register length = a5;
+  Register scratch = t1;
+
+  // Verify input registers don't conflict with locals.
+  ASSERT(!AreAliased(receiver, key, value, target_map,
+                     elements, array, length, scratch));
   if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(a2, a4, fail);
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
   // to the backing store.
-  __ ld(a4, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(&only_change_map, eq, at, Operand(a4));
+  __ Branch(&only_change_map, eq, at, Operand(elements));
 
-  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+  __ MultiPush(
+      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
 
-  __ ld(a5, FieldMemOperand(a4, FixedArray::kLengthOffset));
-  // a4: source FixedArray
-  // a5: number of elements (smi-tagged)
+  __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // elements: source FixedArray
+  // length: number of elements (smi-tagged)
 
   // Allocate new FixedArray.
-  __ SmiScale(a0, a5, kPointerSizeLog2);
-  __ Daddu(a0, a0, FixedDoubleArray::kHeaderSize);
-  __ Allocate(a0, a6, a7, t1, &gc_required, NO_ALLOCATION_FLAGS);
-  // a6: destination FixedArray, not tagged as heap object
+  // Re-use value and target_map registers, as they have been saved on the
+  // stack.
+  Register array_size = value;
+  Register allocate_scratch = target_map;
+  __ SmiScale(array_size, length, kPointerSizeLog2);
+  __ Daddu(array_size, array_size, FixedDoubleArray::kHeaderSize);
+  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
+              NO_ALLOCATION_FLAGS);
+  // array: destination FixedArray, not tagged as heap object
   // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex);
-  __ sd(a5, MemOperand(a6, FixedDoubleArray::kLengthOffset));
-  __ sd(t1, MemOperand(a6, HeapObject::kMapOffset));
+  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+  __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+  __ sd(scratch, MemOperand(array, HeapObject::kMapOffset));
 
   // Prepare for conversion loop.
-  __ Daddu(a4, a4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
-  __ Daddu(a3, a6, Operand(FixedArray::kHeaderSize));
-  __ Daddu(a6, a6, Operand(kHeapObjectTag));
-  __ SmiScale(a5, a5, kPointerSizeLog2);
-  __ Daddu(a5, a3, a5);
-  __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
-  __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
+  Register src_elements = elements;
+  Register dst_elements = target_map;
+  Register dst_end = length;
+  Register heap_number_map = scratch;
+  __ Daddu(src_elements, src_elements,
+      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+  __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
+  __ Daddu(array, array, Operand(kHeapObjectTag));
+  __ SmiScale(dst_end, dst_end, kPointerSizeLog2);
+  __ Daddu(dst_end, dst_elements, dst_end);
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   // Using offsetted addresses.
-  // a3: begin of destination FixedArray element fields, not tagged
-  // a4: begin of source FixedDoubleArray element fields, not tagged, +4
-  // a5: end of destination FixedArray, not tagged
-  // a6: destination FixedArray
-  // a7: the-hole pointer
-  // t1: heap number map
+  // dst_elements: begin of destination FixedArray element fields, not tagged
+  // src_elements: begin of source FixedDoubleArray element fields, not tagged,
+  //               points to the exponent
+  // dst_end: end of destination FixedArray, not tagged
+  // array: destination FixedArray
+  // heap_number_map: heap number map
   __ Branch(&entry);
 
   // Call into runtime if GC is required.
   __ bind(&gc_required);
-  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+  __ MultiPop(
+      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
 
   __ Branch(fail);
 
   __ bind(&loop);
-  __ lw(a1, MemOperand(a4));
-  __ Daddu(a4, a4, kDoubleSize);
-  // a1: current element's upper 32 bit
-  // a4: address of next element's upper 32 bit
+  Register upper_bits = key;
+  __ lw(upper_bits, MemOperand(src_elements));
+  __ Daddu(src_elements, src_elements, kDoubleSize);
+  // upper_bits: current element's upper 32 bit
+  // src_elements: address of next element's upper 32 bit
   __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
 
   // Non-hole double, copy value into a heap number.
-  __ AllocateHeapNumber(a2, a0, t2, t1, &gc_required);
-  // a2: new heap number
-  __ lw(a0, MemOperand(a4, -12));
-  __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
-  __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
-  __ mov(a0, a3);
-  __ sd(a2, MemOperand(a3));
-  __ Daddu(a3, a3, kPointerSize);
-  __ RecordWrite(a6,
-                 a0,
-                 a2,
+  Register heap_number = receiver;
+  Register scratch2 = value;
+  Register scratch3 = t2;
+  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
+                        &gc_required);
+  // heap_number: new heap number
+  // Load mantissa of current element, src_elements
+  // point to exponent of next element.
+  __ lw(scratch2, MemOperand(heap_number, -12));
+  __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
+  __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
+  __ mov(scratch2, dst_elements);
+  __ sd(heap_number, MemOperand(dst_elements));
+  __ Daddu(dst_elements, dst_elements, kPointerSize);
+  __ RecordWrite(array,
+                 scratch2,
+                 heap_number,
                  kRAHasBeenSaved,
                  kDontSaveFPRegs,
                  EMIT_REMEMBERED_SET,
                  OMIT_SMI_CHECK);
   __ Branch(&entry);
 
   // Replace the-hole NaN with the-hole pointer.
   __ bind(&convert_hole);
-  __ sd(a7, MemOperand(a3));
-  __ Daddu(a3, a3, kPointerSize);
+  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
+  __ sd(scratch2, MemOperand(dst_elements));
+  __ Daddu(dst_elements, dst_elements, kPointerSize);
 
   __ bind(&entry);
-  __ Branch(&loop, lt, a3, Operand(a5));
+  __ Branch(&loop, lt, dst_elements, Operand(dst_end));
 
-  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
+  __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
   // Replace receiver's backing store with newly created and filled FixedArray.
-  __ sd(a6, FieldMemOperand(a2, JSObject::kElementsOffset));
-  __ RecordWriteField(a2,
+  __ sd(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ RecordWriteField(receiver,
                       JSObject::kElementsOffset,
-                      a6,
-                      t1,
+                      array,
+                      scratch,
                       kRAHasBeenSaved,
                       kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   __ pop(ra);
 
   __ bind(&only_change_map);
   // Update receiver's map.
-  __ sd(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
+  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      a3,
-                      t1,
+                      target_map,
+                      scratch,
                       kRAHasNotBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
 }
 
 
 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                        Register string,
                                        Register index,
(...skipping 252 matching lines...)
     patcher.masm()->nop();  // Pad the empty space.
   }
 }
 
 
 #undef __
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS64
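
For completeness, a hedged sketch of what a call site looks like with the new parameter list; the register choices and the label name here are illustrative assumptions (the real callers, for example in src/mips64/ic-mips64.cc from this same issue, are not shown on this page) and simply mirror the old fixed convention:

  // Illustrative only: the label and the particular registers are assumptions,
  // not taken from this file.
  Label memento_found;
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, a2 /* receiver */, a1 /* key */, a0 /* value */,
      a3 /* target map */, TRACK_ALLOCATION_SITE, &memento_found);

Passing a4 (the generator's internal scratch_elements) for any of these parameters would trip the AreAliased check added in this patch.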