OLD | NEW |
---|---|
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2006-2008 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 24 matching lines...) Expand all Loading... | |
35 namespace v8 { | 35 namespace v8 { |
36 namespace internal { | 36 namespace internal { |
37 | 37 |
38 | 38 |
39 // ---------------------------------------------------------------------------- | 39 // ---------------------------------------------------------------------------- |
40 // Static IC stub generators. | 40 // Static IC stub generators. |
41 // | 41 // |
42 | 42 |
43 #define __ ACCESS_MASM(masm) | 43 #define __ ACCESS_MASM(masm) |
44 | 44 |
45 | |
46 // Helper function used from LoadIC/CallIC GenerateNormal. | 45 // Helper function used from LoadIC/CallIC GenerateNormal. |
47 static void GenerateDictionaryLoad(MacroAssembler* masm, | 46 static void GenerateDictionaryLoad(MacroAssembler* masm, |
48 Label* miss, | 47 Label* miss, |
49 Register t0, | 48 Register t0, |
50 Register t1) { | 49 Register t1) { |
51 // Register use: | 50 // Register use: |
52 // | 51 // |
53 // t0 - used to hold the property dictionary. | 52 // t0 - used to hold the property dictionary. |
54 // | 53 // |
55 // t1 - initially the receiver | 54 // t1 - initially the receiver |
(...skipping 494 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
550 __ TailCallRuntime(Runtime::kGetProperty, 2, 1); | 549 __ TailCallRuntime(Runtime::kGetProperty, 2, 1); |
551 } | 550 } |
552 | 551 |
553 | 552 |
554 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { | 553 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { |
555 // ---------- S t a t e -------------- | 554 // ---------- S t a t e -------------- |
556 // -- lr : return address | 555 // -- lr : return address |
557 // -- sp[0] : key | 556 // -- sp[0] : key |
558 // -- sp[4] : receiver | 557 // -- sp[4] : receiver |
559 // ----------------------------------- | 558 // ----------------------------------- |
560 Label slow, fast; | 559 Label slow, fast, check_pixel_array; |
561 | 560 |
562 // Get the key and receiver object from the stack. | 561 // Get the key and receiver object from the stack. |
563 __ ldm(ia, sp, r0.bit() | r1.bit()); | 562 __ ldm(ia, sp, r0.bit() | r1.bit()); |
564 | 563 |
565 // Check that the object isn't a smi. | 564 // Check that the object isn't a smi. |
566 __ BranchOnSmi(r1, &slow); | 565 __ BranchOnSmi(r1, &slow); |
567 // Get the map of the receiver. | 566 // Get the map of the receiver. |
568 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); | 567 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); |
569 // Check bit field. | 568 // Check bit field. |
570 __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); | 569 __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); |
(...skipping 17 matching lines...) Expand all Loading... | |
588 // Check that the object is in fast mode (not dictionary). | 587 // Check that the object is in fast mode (not dictionary). |
589 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); | 588 __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); |
590 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | 589 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); |
591 __ cmp(r3, ip); | 590 __ cmp(r3, ip); |
592 __ b(ne, &slow); | 591 __ b(ne, &slow); |
593 // Check that the key (index) is within bounds. | 592 // Check that the key (index) is within bounds. |
594 __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset)); | 593 __ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset)); |
595 __ cmp(r0, Operand(r3)); | 594 __ cmp(r0, Operand(r3)); |
596 __ b(lo, &fast); | 595 __ b(lo, &fast); |
597 | 596 |
597 // Check whether the elements is a pixel array. | |
598 __ bind(&check_pixel_array); | |
599 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); | |
600 __ cmp(r3, ip); | |
601 __ b(ne, &slow); | |
602 __ ldr(ip, FieldMemOperand(r1, PixelArray::kLengthOffset)); | |
603 __ cmp(r0, ip); | |
604 __ b(hs, &slow); | |
605 __ ldr(ip, FieldMemOperand(r1, PixelArray::kExternalPointerOffset)); | |
606 __ ldrb(r0, MemOperand(ip, r0)); | |
607 __ mov(r0, Operand(r0, LSL, kSmiTagSize)); // Tag result as smi. | |
608 __ Ret(); | |
609 | |
598 // Slow case: Push extra copies of the arguments (2). | 610 // Slow case: Push extra copies of the arguments (2). |
599 __ bind(&slow); | 611 __ bind(&slow); |
600 __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1); | 612 __ IncrementCounter(&Counters::keyed_load_generic_slow, 1, r0, r1); |
601 GenerateRuntimeGetProperty(masm); | 613 GenerateRuntimeGetProperty(masm); |
602 | 614 |
603 // Fast case: Do the load. | 615 // Fast case: Do the load. |
604 __ bind(&fast); | 616 __ bind(&fast); |
605 __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 617 __ add(r3, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
606 __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2)); | 618 __ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2)); |
607 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 619 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
(...skipping 10 matching lines...) Expand all Loading... | |
618 // ---------- S t a t e -------------- | 630 // ---------- S t a t e -------------- |
619 // -- lr : return address | 631 // -- lr : return address |
620 // -- sp[0] : key | 632 // -- sp[0] : key |
621 // -- sp[4] : receiver | 633 // -- sp[4] : receiver |
622 // ----------------------------------- | 634 // ----------------------------------- |
623 | 635 |
624 GenerateGeneric(masm); | 636 GenerateGeneric(masm); |
625 } | 637 } |
626 | 638 |
627 | 639 |
640 // Convert unsigned integer with specified number of leading zeroes in binary | |
641 // representation to IEEE 754 double. | |
642 // Integer to convert is passed in register hiword. | |
643 // Resulting double is returned in registers hiword:loword. | |
644 // This function does not work correctly for 0. | |
645 static void GenerateUInt2Double(MacroAssembler* masm, | |
646 Register hiword, | |
647 Register loword, | |
648 Register scratch, | |
649 int leading_zeroes) { | |
650 const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; | |
Mads Ager (chromium)
2010/03/23 11:46:54
meaningfull -> meaningful
| |
651 const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; | |
652 | |
653 const int mantissa_shift_for_hi_word = | |
654 meaningful_bits - HeapNumber::kMantissaBitsInTopWord; | |
655 | |
656 const int mantissa_shift_for_lo_word = | |
657 kBitsPerInt - mantissa_shift_for_hi_word; | |
658 | |
659 __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); | |
660 if (mantissa_shift_for_hi_word > 0) { | |
661 __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); | |
662 __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word)); | |
663 } else { | |
664 __ mov(loword, Operand(0)); | |
665 __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word)); | |
666 } | |
667 | |
668 // If least significant bit of biased exponent was not 1 it was corrupted | |
669 // by most significant bit of mantissa so we should fix that. | |
670 if (!(biased_exponent & 1)) { | |
671 __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); | |
672 } | |
673 } | |
674 | |
675 | |
628 void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, | 676 void KeyedLoadIC::GenerateExternalArray(MacroAssembler* masm, |
629 ExternalArrayType array_type) { | 677 ExternalArrayType array_type) { |
630 // TODO(476): port specialized code. | 678 // ---------- S t a t e -------------- |
631 GenerateGeneric(masm); | 679 // -- lr : return address |
680 // -- sp[0] : key | |
681 // -- sp[4] : receiver | |
682 // ----------------------------------- | |
683 Label slow, failed_allocation; | |
684 | |
685 // Get the key and receiver object from the stack. | |
686 __ ldm(ia, sp, r0.bit() | r1.bit()); | |
687 | |
688 // r0: key | |
689 // r1: receiver object | |
690 | |
691 // Check that the object isn't a smi | |
692 __ BranchOnSmi(r1, &slow); | |
693 | |
694 // Check that the key is a smi. | |
695 __ BranchOnNotSmi(r0, &slow); | |
696 | |
697 // Check that the object is a JS object. Load map into r2. | |
Mads Ager (chromium)
2010/03/23 11:46:54
End comment with period.
| |
698 __ CompareObjectType(r1, r2, r3, FIRST_JS_OBJECT_TYPE); | |
699 __ b(lt, &slow); | |
700 | |
701 // Check that the receiver does not require access checks. We need | |
702 // to check this explicitly since this generic stub does not perform | |
703 // map checks. | |
704 __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset)); | |
705 __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded)); | |
706 __ b(ne, &slow); | |
707 | |
708 // Check that the elements array is the appropriate type of | |
709 // ExternalArray. | |
710 // r0: index (as a smi) | |
711 // r1: JSObject | |
712 __ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset)); | |
713 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); | |
714 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); | |
715 __ cmp(r2, ip); | |
716 __ b(ne, &slow); | |
717 | |
718 // Check that the index is in range. | |
719 | |
Mads Ager (chromium)
2010/03/23 11:46:54
Remove empty line?
| |
720 __ ldr(ip, FieldMemOperand(r1, ExternalArray::kLengthOffset)); | |
721 __ cmp(r1, Operand(r0, ASR, kSmiTagSize)); | |
722 // Unsigned comparison catches both negative and too-large values. | |
723 __ b(lo, &slow); | |
724 | |
725 // r0: index (smi) | |
726 // r1: elements array | |
727 __ ldr(r1, FieldMemOperand(r1, ExternalArray::kExternalPointerOffset)); | |
728 // r1: base pointer of external storage | |
729 | |
730 // We are not untagging smi key and instead work with it | |
731 // as if it was premultiplied by 2. | |
732 ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); | |
733 | |
734 switch (array_type) { | |
735 case kExternalByteArray: | |
736 __ ldrsb(r0, MemOperand(r1, r0, LSR, 1)); | |
737 break; | |
738 case kExternalUnsignedByteArray: | |
739 __ ldrb(r0, MemOperand(r1, r0, LSR, 1)); | |
740 break; | |
741 case kExternalShortArray: | |
742 __ ldrsh(r0, MemOperand(r1, r0, LSL, 0)); | |
743 break; | |
744 case kExternalUnsignedShortArray: | |
745 __ ldrh(r0, MemOperand(r1, r0, LSL, 0)); | |
746 break; | |
747 case kExternalIntArray: | |
748 case kExternalUnsignedIntArray: | |
749 __ ldr(r0, MemOperand(r1, r0, LSL, 1)); | |
750 break; | |
751 case kExternalFloatArray: | |
752 if (CpuFeatures::IsSupported(VFP3)) { | |
753 CpuFeatures::Scope scope(VFP3); | |
754 __ add(r0, r1, Operand(r0, LSL, 1)); | |
755 __ vldr(s0, r0, 0); | |
756 } else { | |
757 __ ldr(r0, MemOperand(r1, r0, LSL, 1)); | |
758 } | |
759 break; | |
760 default: | |
761 UNREACHABLE(); | |
762 break; | |
763 } | |
764 | |
765 // For integer array types: | |
766 // r0: value | |
767 // For floating-point array type | |
768 // s0: value (if VFP3 is supported) | |
769 // r0: value (if VFP3 is not supported) | |
770 | |
771 if (array_type == kExternalIntArray) { | |
772 // For the Int and UnsignedInt array types, we need to see whether | |
773 // the value can be represented in a Smi. If not, we need to convert | |
774 // it to a HeapNumber. | |
775 Label box_int; | |
776 __ cmp(r0, Operand(0xC0000000)); | |
777 __ b(mi, &box_int); | |
778 __ mov(r0, Operand(r0, LSL, kSmiTagSize)); | |
779 __ Ret(); | |
780 | |
781 __ bind(&box_int); | |
782 | |
783 __ mov(r1, r0); | |
784 // Allocate a HeapNumber for the int and perform int-to-double | |
785 // conversion. | |
786 __ AllocateHeapNumber(r0, r3, r4, &slow); | |
787 | |
788 if (CpuFeatures::IsSupported(VFP3)) { | |
789 CpuFeatures::Scope scope(VFP3); | |
790 __ vmov(s0, r1); | |
791 __ vcvt_f64_s32(d0, s0); | |
792 __ sub(r1, r0, Operand(kHeapObjectTag)); | |
793 __ vstr(d0, r1, HeapNumber::kValueOffset); | |
794 __ Ret(); | |
795 } else { | |
796 WriteInt32ToHeapNumberStub stub(r1, r0, r3); | |
797 __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | |
Mads Ager (chromium)
2010/03/23 11:46:54
Please use "__ TailCallStub(&stub);"
| |
798 } | |
799 } else if (array_type == kExternalUnsignedIntArray) { | |
800 // The test is different for unsigned int values. Since we need | |
801 // the value to be in the range of a positive smi, we can't | |
802 // handle either of the top two bits being set in the value. | |
803 if (CpuFeatures::IsSupported(VFP3)) { | |
804 CpuFeatures::Scope scope(VFP3); | |
805 Label box_int, done; | |
806 __ tst(r0, Operand(0xC0000000)); | |
807 __ b(ne, &box_int); | |
808 | |
809 __ mov(r0, Operand(r0, LSL, kSmiTagSize)); | |
810 __ Ret(); | |
811 | |
812 __ bind(&box_int); | |
813 __ vmov(s0, r0); | |
814 __ AllocateHeapNumber(r0, r1, r2, &slow); | |
815 | |
816 __ vcvt_f64_u32(d0, s0); | |
817 __ sub(r1, r0, Operand(kHeapObjectTag)); | |
818 __ vstr(d0, r1, HeapNumber::kValueOffset); | |
819 __ Ret(); | |
820 } else { | |
821 // Check whether unsigned integer fits into smi. | |
822 Label box_int_0, box_int_1, done; | |
823 __ tst(r0, Operand(0x80000000)); | |
824 __ b(ne, &box_int_0); | |
825 __ tst(r0, Operand(0x40000000)); | |
826 __ b(ne, &box_int_1); | |
827 | |
828 // Tag integer as smi and return it. | |
829 __ mov(r0, Operand(r0, LSL, kSmiTagSize)); | |
830 __ Ret(); | |
831 | |
832 __ bind(&box_int_0); | |
833 // Integer does not have leading zeros. | |
834 GenerateUInt2Double(masm, r0, r1, r2, 0); | |
835 __ b(&done); | |
836 | |
837 __ bind(&box_int_1); | |
838 // Integer has one leading zero. | |
839 GenerateUInt2Double(masm, r0, r1, r2, 1); | |
840 | |
841 __ bind(&done); | |
842 // Integer was converted to double in registers r0:r1. | |
843 // Wrap it into a HeapNumber. | |
844 __ AllocateHeapNumber(r2, r3, r5, &slow); | |
845 | |
846 __ str(r0, FieldMemOperand(r2, HeapNumber::kExponentOffset)); | |
847 __ str(r1, FieldMemOperand(r2, HeapNumber::kMantissaOffset)); | |
848 | |
849 __ mov(r0, r2); | |
850 | |
851 __ Ret(); | |
852 } | |
853 } else if (array_type == kExternalFloatArray) { | |
854 // For the floating-point array type, we need to always allocate a | |
855 // HeapNumber. | |
856 if (CpuFeatures::IsSupported(VFP3)) { | |
857 CpuFeatures::Scope scope(VFP3); | |
858 __ AllocateHeapNumber(r0, r1, r2, &slow); | |
859 __ vcvt_f64_f32(d0, s0); | |
860 __ sub(r1, r0, Operand(kHeapObjectTag)); | |
861 __ vstr(d0, r1, HeapNumber::kValueOffset); | |
862 __ Ret(); | |
863 } else { | |
864 __ AllocateHeapNumber(r3, r1, r2, &slow); | |
865 // VFP is not available, do manual single to double conversion. | |
866 | |
867 // r0: floating point value (binary32) | |
868 | |
869 // Extract mantissa to r1. | |
870 __ and_(r1, r0, Operand(kBinary32MantissaMask)); | |
871 | |
872 // Extract exponent to r2. | |
873 __ mov(r2, Operand(r0, LSR, kBinary32MantissaBits)); | |
874 __ and_(r2, r2, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); | |
875 | |
876 Label exponent_rebiased; | |
877 __ teq(r2, Operand(0x00)); | |
878 __ b(eq, &exponent_rebiased); | |
879 | |
880 __ teq(r2, Operand(0xff)); | |
881 __ mov(r2, Operand(0x7ff), LeaveCC, eq); | |
882 __ b(eq, &exponent_rebiased); | |
883 | |
884 // Rebias exponent. | |
885 __ add(r2, | |
886 r2, | |
887 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); | |
888 | |
889 __ bind(&exponent_rebiased); | |
890 __ and_(r0, r0, Operand(kBinary32SignMask)); | |
891 __ orr(r0, r0, Operand(r2, LSL, HeapNumber::kMantissaBitsInTopWord)); | |
892 | |
893 // Shift mantissa. | |
894 static const int kMantissaShiftForHiWord = | |
895 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; | |
896 | |
897 static const int kMantissaShiftForLoWord = | |
898 kBitsPerInt - kMantissaShiftForHiWord; | |
899 | |
900 __ orr(r0, r0, Operand(r1, LSR, kMantissaShiftForHiWord)); | |
901 __ mov(r1, Operand(r1, LSL, kMantissaShiftForLoWord)); | |
902 | |
903 __ str(r0, FieldMemOperand(r3, HeapNumber::kExponentOffset)); | |
904 __ str(r1, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); | |
905 __ mov(r0, r3); | |
906 __ Ret(); | |
907 } | |
908 | |
909 } else { | |
910 __ mov(r0, Operand(r0, LSL, kSmiTagSize)); | |
911 __ Ret(); | |
912 } | |
913 | |
914 // Slow case: Load name and receiver from stack and jump to runtime. | |
915 __ bind(&slow); | |
916 __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r0, r1); | |
917 GenerateRuntimeGetProperty(masm); | |
632 } | 918 } |
633 | 919 |
634 | 920 |
635 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { | 921 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { |
636 // ---------- S t a t e -------------- | 922 // ---------- S t a t e -------------- |
637 // -- lr : return address | 923 // -- lr : return address |
638 // -- sp[0] : key | 924 // -- sp[0] : key |
639 // -- sp[4] : receiver | 925 // -- sp[4] : receiver |
640 // ----------------------------------- | 926 // ----------------------------------- |
641 Label slow; | 927 Label slow; |
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
702 } | 988 } |
703 | 989 |
704 | 990 |
705 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { | 991 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm) { |
706 // ---------- S t a t e -------------- | 992 // ---------- S t a t e -------------- |
707 // -- r0 : value | 993 // -- r0 : value |
708 // -- lr : return address | 994 // -- lr : return address |
709 // -- sp[0] : key | 995 // -- sp[0] : key |
710 // -- sp[1] : receiver | 996 // -- sp[1] : receiver |
711 // ----------------------------------- | 997 // ----------------------------------- |
712 Label slow, fast, array, extra, exit; | 998 Label slow, fast, array, extra, exit, check_pixel_array; |
713 | 999 |
714 // Get the key and the object from the stack. | 1000 // Get the key and the object from the stack. |
715 __ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver | 1001 __ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver |
716 // Check that the key is a smi. | 1002 // Check that the key is a smi. |
717 __ tst(r1, Operand(kSmiTagMask)); | 1003 __ tst(r1, Operand(kSmiTagMask)); |
718 __ b(ne, &slow); | 1004 __ b(ne, &slow); |
719 // Check that the object isn't a smi. | 1005 // Check that the object isn't a smi. |
720 __ tst(r3, Operand(kSmiTagMask)); | 1006 __ tst(r3, Operand(kSmiTagMask)); |
721 __ b(eq, &slow); | 1007 __ b(eq, &slow); |
722 // Get the map of the object. | 1008 // Get the map of the object. |
(...skipping 12 matching lines...) Expand all Loading... | |
735 __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE)); | 1021 __ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE)); |
736 __ b(lt, &slow); | 1022 __ b(lt, &slow); |
737 | 1023 |
738 | 1024 |
739 // Object case: Check key against length in the elements array. | 1025 // Object case: Check key against length in the elements array. |
740 __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset)); | 1026 __ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset)); |
741 // Check that the object is in fast mode (not dictionary). | 1027 // Check that the object is in fast mode (not dictionary). |
742 __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); | 1028 __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); |
743 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | 1029 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); |
744 __ cmp(r2, ip); | 1030 __ cmp(r2, ip); |
745 __ b(ne, &slow); | 1031 __ b(ne, &check_pixel_array); |
746 // Untag the key (for checking against untagged length in the fixed array). | 1032 // Untag the key (for checking against untagged length in the fixed array). |
747 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); | 1033 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); |
748 // Compute address to store into and check array bounds. | 1034 // Compute address to store into and check array bounds. |
749 __ add(r2, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 1035 __ add(r2, r3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
750 __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2)); | 1036 __ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2)); |
751 __ ldr(ip, FieldMemOperand(r3, FixedArray::kLengthOffset)); | 1037 __ ldr(ip, FieldMemOperand(r3, FixedArray::kLengthOffset)); |
752 __ cmp(r1, Operand(ip)); | 1038 __ cmp(r1, Operand(ip)); |
753 __ b(lo, &fast); | 1039 __ b(lo, &fast); |
754 | 1040 |
755 | 1041 |
756 // Slow case: | 1042 // Slow case: |
757 __ bind(&slow); | 1043 __ bind(&slow); |
758 GenerateRuntimeSetProperty(masm); | 1044 GenerateRuntimeSetProperty(masm); |
759 | 1045 |
1046 // Check whether the elements is a pixel array. | |
1047 // r0: value | |
1048 // r1: index (as a smi), zero-extended. | |
1049 // r3: elements array | |
1050 __ bind(&check_pixel_array); | |
1051 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); | |
1052 __ cmp(r2, ip); | |
1053 __ b(ne, &slow); | |
1054 // Check that the value is a smi. If a conversion is needed call into the | |
1055 // runtime to convert and clamp. | |
1056 __ BranchOnNotSmi(r0, &slow); | |
1057 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the key. | |
1058 __ ldr(ip, FieldMemOperand(r3, PixelArray::kLengthOffset)); | |
1059 __ cmp(r1, Operand(ip)); | |
1060 __ b(hs, &slow); | |
1061 __ mov(r4, r0); // Save the value. | |
1062 __ mov(r0, Operand(r0, ASR, kSmiTagSize)); // Untag the value. | |
1063 { // Clamp the value to [0..255]. | |
1064 Label done; | |
1065 __ tst(r0, Operand(0xFFFFFF00)); | |
1066 __ b(eq, &done); | |
1067 __ mov(r0, Operand(0), LeaveCC, mi); // 0 if negative. | |
1068 __ mov(r0, Operand(255), LeaveCC, pl); // 255 if positive. | |
1069 __ bind(&done); | |
1070 } | |
1071 __ ldr(r2, FieldMemOperand(r3, PixelArray::kExternalPointerOffset)); | |
1072 __ strb(r0, MemOperand(r2, r1)); | |
1073 __ mov(r0, Operand(r4)); // Return the original value. | |
1074 __ Ret(); | |
1075 | |
1076 | |
760 // Extra capacity case: Check if there is extra capacity to | 1077 // Extra capacity case: Check if there is extra capacity to |
761 // perform the store and update the length. Used for adding one | 1078 // perform the store and update the length. Used for adding one |
762 // element to the array by writing to array[array.length]. | 1079 // element to the array by writing to array[array.length]. |
763 // r0 == value, r1 == key, r2 == elements, r3 == object | 1080 // r0 == value, r1 == key, r2 == elements, r3 == object |
764 __ bind(&extra); | 1081 __ bind(&extra); |
765 __ b(ne, &slow); // do not leave holes in the array | 1082 __ b(ne, &slow); // do not leave holes in the array |
766 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // untag | 1083 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // untag |
767 __ ldr(ip, FieldMemOperand(r2, Array::kLengthOffset)); | 1084 __ ldr(ip, FieldMemOperand(r2, Array::kLengthOffset)); |
768 __ cmp(r1, Operand(ip)); | 1085 __ cmp(r1, Operand(ip)); |
769 __ b(hs, &slow); | 1086 __ b(hs, &slow); |
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
812 __ b(eq, &exit); | 1129 __ b(eq, &exit); |
813 // Update write barrier for the elements array address. | 1130 // Update write barrier for the elements array address. |
814 __ sub(r1, r2, Operand(r3)); | 1131 __ sub(r1, r2, Operand(r3)); |
815 __ RecordWrite(r3, r1, r2); | 1132 __ RecordWrite(r3, r1, r2); |
816 | 1133 |
817 __ bind(&exit); | 1134 __ bind(&exit); |
818 __ Ret(); | 1135 __ Ret(); |
819 } | 1136 } |
820 | 1137 |
821 | 1138 |
1139 // Convert int passed in register ival to IEEE 754 single precision | |
1140 // floating point value and store it into register fval. | |
1141 // If VFP3 is available use it for conversion. | |
1142 static void ConvertIntToFloat(MacroAssembler* masm, | |
1143 Register ival, | |
1144 Register fval, | |
1145 Register scratch1, | |
1146 Register scratch2) { | |
1147 if (CpuFeatures::IsSupported(VFP3)) { | |
1148 CpuFeatures::Scope scope(VFP3); | |
1149 __ vmov(s0, ival); | |
1150 __ vcvt_f32_s32(s0, s0); | |
1151 __ vmov(fval, s0); | |
1152 } else { | |
1153 Label not_special, done; | |
1154 // Move sign bit from source to destination. This works because the sign | |
1155 // bit in the exponent word of the double has the same position and polarity | |
1156 // as the 2's complement sign bit in a Smi. | |
1157 ASSERT(kBinary32SignMask == 0x80000000u); | |
1158 | |
1159 __ and_(fval, ival, Operand(kBinary32SignMask), SetCC); | |
1160 // Negate value if it is negative. | |
1161 __ rsb(ival, ival, Operand(0), LeaveCC, ne); | |
1162 | |
1163 // We have -1, 0 or 1, which we treat specially. | |
1164 __ cmp(ival, Operand(1)); | |
1165 __ b(gt, ¬_special); | |
1166 | |
1167 // For 1 or -1 we need to or in the 0 exponent (biased). | |
1168 static const uint32_t exponent_word_for_1 = | |
1169 kBinary32ExponentBias << kBinary32ExponentShift; | |
1170 | |
1171 __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq); | |
1172 __ b(&done); | |
1173 | |
1174 __ bind(¬_special); | |
1175 // Count leading zeros. | |
1176 // Gets the wrong answer for 0, but we already checked for that case above. | |
1177 Register zeros = scratch2; | |
1178 __ CountLeadingZeros(ival, scratch1, zeros); | |
1179 | |
1180 // Compute exponent and or it into the exponent register. | |
1181 __ rsb(scratch1, | |
1182 zeros, | |
1183 Operand((kBitsPerInt - 1) + kBinary32ExponentBias)); | |
1184 | |
1185 __ orr(fval, | |
1186 fval, | |
1187 Operand(scratch1, LSL, kBinary32ExponentShift)); | |
1188 | |
1189 // Shift up the source chopping the top bit off. | |
1190 __ add(zeros, zeros, Operand(1)); | |
1191 // This wouldn't work for 1 and -1 as the shift would be 32 which means 0. | |
1192 __ mov(ival, Operand(ival, LSL, zeros)); | |
1193 // And the top (top 20 bits). | |
1194 __ orr(fval, | |
1195 fval, | |
1196 Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); | |
1197 | |
1198 __ bind(&done); | |
1199 } | |
1200 } | |
1201 | |
1202 | |
1203 static bool IsElementTypeSigned(ExternalArrayType array_type) { | |
1204 switch (array_type) { | |
1205 case kExternalByteArray: | |
1206 case kExternalShortArray: | |
1207 case kExternalIntArray: | |
1208 return true; | |
1209 | |
1210 case kExternalUnsignedByteArray: | |
1211 case kExternalUnsignedShortArray: | |
1212 case kExternalUnsignedIntArray: | |
1213 return false; | |
1214 | |
1215 default: | |
1216 UNREACHABLE(); | |
1217 return false; | |
1218 } | |
1219 } | |
1220 | |
1221 | |
822 void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, | 1222 void KeyedStoreIC::GenerateExternalArray(MacroAssembler* masm, |
823 ExternalArrayType array_type) { | 1223 ExternalArrayType array_type) { |
824 // TODO(476): port specialized code. | 1224 // ---------- S t a t e -------------- |
825 GenerateGeneric(masm); | 1225 // -- r0 : value |
1226 // -- lr : return address | |
1227 // -- sp[0] : key | |
1228 // -- sp[1] : receiver | |
1229 // ----------------------------------- | |
1230 Label slow, check_heap_number; | |
1231 | |
1232 // Get the key and the object from the stack. | |
1233 __ ldm(ia, sp, r1.bit() | r2.bit()); // r1 = key, r2 = receiver | |
1234 | |
1235 // Check that the object isn't a smi. | |
1236 __ BranchOnSmi(r2, &slow); | |
1237 | |
1238 // Check that the object is a JS object. Load map into r3 | |
1239 __ CompareObjectType(r2, r3, r4, FIRST_JS_OBJECT_TYPE); | |
1240 __ b(le, &slow); | |
1241 | |
1242 // Check that the receiver does not require access checks. We need | |
1243 // to do this because this generic stub does not perform map checks. | |
1244 __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset)); | |
1245 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded)); | |
1246 __ b(ne, &slow); | |
1247 | |
1248 // Check that the key is a smi. | |
1249 __ BranchOnNotSmi(r1, &slow); | |
1250 | |
1251 // Check that the elements array is the appropriate type of | |
1252 // ExternalArray. | |
1253 // r0: value | |
1254 // r1: index (smi) | |
1255 // r2: object | |
1256 __ ldr(r2, FieldMemOperand(r2, JSObject::kElementsOffset)); | |
1257 __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); | |
1258 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type)); | |
1259 __ cmp(r3, ip); | |
1260 __ b(ne, &slow); | |
1261 | |
1262 // Check that the index is in range. | |
1263 __ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Untag the index. | |
1264 __ ldr(ip, FieldMemOperand(r2, ExternalArray::kLengthOffset)); | |
1265 __ cmp(r1, ip); | |
1266 // Unsigned comparison catches both negative and too-large values. | |
1267 __ b(hs, &slow); | |
1268 | |
1269 // Handle both smis and HeapNumbers in the fast path. Go to the | |
1270 // runtime for all other kinds of values. | |
1271 // r0: value | |
1272 // r1: index (integer) | |
1273 // r2: array | |
1274 __ BranchOnNotSmi(r0, &check_heap_number); | |
1275 __ mov(r3, Operand(r0, ASR, kSmiTagSize)); // Untag the value. | |
1276 __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset)); | |
1277 | |
1278 // r1: index (integer) | |
1279 // r2: base pointer of external storage | |
1280 // r3: value (integer) | |
1281 switch (array_type) { | |
1282 case kExternalByteArray: | |
1283 case kExternalUnsignedByteArray: | |
1284 __ strb(r3, MemOperand(r2, r1, LSL, 0)); | |
1285 break; | |
1286 case kExternalShortArray: | |
1287 case kExternalUnsignedShortArray: | |
1288 __ strh(r3, MemOperand(r2, r1, LSL, 1)); | |
1289 break; | |
1290 case kExternalIntArray: | |
1291 case kExternalUnsignedIntArray: | |
1292 __ str(r3, MemOperand(r2, r1, LSL, 2)); | |
1293 break; | |
1294 case kExternalFloatArray: | |
1295 // Need to perform int-to-float conversion. | |
1296 ConvertIntToFloat(masm, r3, r4, r5, r6); | |
1297 __ str(r4, MemOperand(r2, r1, LSL, 2)); | |
1298 break; | |
1299 default: | |
1300 UNREACHABLE(); | |
1301 break; | |
1302 } | |
1303 | |
1304 // r0: value | |
1305 __ Ret(); | |
1306 | |
1307 | |
1308 // r0: value | |
1309 // r1: index (integer) | |
1310 // r2: external array object | |
1311 __ bind(&check_heap_number); | |
1312 __ CompareObjectType(r0, r3, r4, HEAP_NUMBER_TYPE); | |
1313 __ b(ne, &slow); | |
1314 | |
1315 __ ldr(r2, FieldMemOperand(r2, ExternalArray::kExternalPointerOffset)); | |
1316 | |
1317 // The WebGL specification leaves the behavior of storing NaN and | |
1318 // +/-Infinity into integer arrays basically undefined. For more | |
1319 // reproducible behavior, convert these to zero. | |
1320 if (CpuFeatures::IsSupported(VFP3)) { | |
1321 CpuFeatures::Scope scope(VFP3); | |
1322 | |
1323 // vldr requires offset to be a multiple of 4 so we can not | |
1324 // include -kHeapObjectTag into it. | |
1325 __ sub(r3, r0, Operand(kHeapObjectTag)); | |
1326 __ vldr(d0, r3, HeapNumber::kValueOffset); | |
1327 | |
1328 if (array_type == kExternalFloatArray) { | |
1329 __ vcvt_f32_f64(s0, d0); | |
1330 __ vmov(r3, s0); | |
1331 __ str(r3, MemOperand(r2, r1, LSL, 2)); | |
1332 } else { | |
1333 Label done; | |
1334 | |
1335 // Need to perform float-to-int conversion. | |
1336 // Test for NaN. | |
1337 __ vcmp(d0, d0); | |
1338 // Move vector status bits to normal status bits. | |
1339 __ vmrs(v8::internal::pc); | |
1340 __ mov(r3, Operand(0), LeaveCC, vs); // NaN converts to 0 | |
1341 __ b(vs, &done); | |
1342 | |
1343 // Test whether exponent equal to 0x7FF (infinity or NaN) | |
1344 __ vmov(r4, r3, d0); | |
1345 __ mov(r5, Operand(0x7FF00000)); | |
1346 __ and_(r3, r3, Operand(r5)); | |
1347 __ teq(r3, Operand(r5)); | |
1348 __ mov(r3, Operand(0), LeaveCC, eq); | |
1349 | |
1350         // Not infinity or NaN; simply convert to int. | |
1351 if (IsElementTypeSigned(array_type)) { | |
1352 __ vcvt_s32_f64(s0, d0, ne); | |
1353 } else { | |
1354 __ vcvt_u32_f64(s0, d0, ne); | |
1355 } | |
1356 | |
1357 __ vmov(r3, s0, ne); | |
1358 | |
1359 __ bind(&done); | |
1360 switch (array_type) { | |
1361 case kExternalByteArray: | |
1362 case kExternalUnsignedByteArray: | |
1363 __ strb(r3, MemOperand(r2, r1, LSL, 0)); | |
1364 break; | |
1365 case kExternalShortArray: | |
1366 case kExternalUnsignedShortArray: | |
1367 __ strh(r3, MemOperand(r2, r1, LSL, 1)); | |
1368 break; | |
1369 case kExternalIntArray: | |
1370 case kExternalUnsignedIntArray: | |
1371 __ str(r3, MemOperand(r2, r1, LSL, 2)); | |
1372 break; | |
1373 default: | |
1374 UNREACHABLE(); | |
1375 break; | |
1376 } | |
1377 } | |
1378 | |
1379 // r0: original value | |
1380 __ Ret(); | |
1381 } else { | |
1382     // VFP3 is not available; do manual conversions. | |
1383 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | |
1384 __ ldr(r4, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | |
1385 | |
1386 if (array_type == kExternalFloatArray) { | |
1387 Label done, nan_or_infinity_or_zero; | |
1388 static const int kMantissaInHiWordShift = | |
1389 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; | |
1390 | |
1391 static const int kMantissaInLoWordShift = | |
1392 kBitsPerInt - kMantissaInHiWordShift; | |
1393 | |
1394 // Test for all special exponent values: zeros, subnormal numbers, NaNs | |
1395 // and infinities. All these should be converted to 0. | |
1396 __ mov(r5, Operand(HeapNumber::kExponentMask)); | |
1397 __ and_(r6, r3, Operand(r5), SetCC); | |
1398 __ b(eq, &nan_or_infinity_or_zero); | |
1399 | |
1400 __ teq(r6, Operand(r5)); | |
1401 __ mov(r6, Operand(kBinary32ExponentMask), LeaveCC, eq); | |
1402 __ b(eq, &nan_or_infinity_or_zero); | |
1403 | |
1404 // Rebias exponent. | |
1405 __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift)); | |
1406 __ add(r6, | |
1407 r6, | |
1408 Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); | |
1409 | |
1410 __ cmp(r6, Operand(kBinary32MaxExponent)); | |
1411 __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, gt); | |
1412 __ orr(r3, r3, Operand(kBinary32ExponentMask), LeaveCC, gt); | |
1413 __ b(gt, &done); | |
1414 | |
1415 __ cmp(r6, Operand(kBinary32MinExponent)); | |
1416 __ and_(r3, r3, Operand(HeapNumber::kSignMask), LeaveCC, lt); | |
1417 __ b(lt, &done); | |
1418 | |
1419 __ and_(r7, r3, Operand(HeapNumber::kSignMask)); | |
1420 __ and_(r3, r3, Operand(HeapNumber::kMantissaMask)); | |
1421 __ orr(r7, r7, Operand(r3, LSL, kMantissaInHiWordShift)); | |
1422 __ orr(r7, r7, Operand(r4, LSR, kMantissaInLoWordShift)); | |
1423 __ orr(r3, r7, Operand(r6, LSL, kBinary32ExponentShift)); | |
1424 | |
1425 __ bind(&done); | |
1426 __ str(r3, MemOperand(r2, r1, LSL, 2)); | |
1427 __ Ret(); | |
1428 | |
1429 __ bind(&nan_or_infinity_or_zero); | |
1430 __ and_(r7, r3, Operand(HeapNumber::kSignMask)); | |
1431 __ and_(r3, r3, Operand(HeapNumber::kMantissaMask)); | |
1432 __ orr(r6, r6, r7); | |
1433 __ orr(r6, r6, Operand(r3, LSL, kMantissaInHiWordShift)); | |
1434 __ orr(r3, r6, Operand(r4, LSR, kMantissaInLoWordShift)); | |
1435 __ b(&done); | |
1436 } else { | |
1437 bool is_signed_type = IsElementTypeSigned(array_type); | |
1438 int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; | |
1439 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; | |
1440 | |
1441 Label done, sign; | |
1442 | |
1443 // Test for all special exponent values: zeros, subnormal numbers, NaNs | |
1444 // and infinities. All these should be converted to 0. | |
1445 __ mov(r5, Operand(HeapNumber::kExponentMask)); | |
1446 __ and_(r6, r3, Operand(r5), SetCC); | |
1447 __ mov(r3, Operand(0), LeaveCC, eq); | |
1448 __ b(eq, &done); | |
1449 | |
1450 __ teq(r6, Operand(r5)); | |
1451 __ mov(r3, Operand(0), LeaveCC, eq); | |
1452 __ b(eq, &done); | |
1453 | |
1454 // Unbias exponent. | |
1455 __ mov(r6, Operand(r6, LSR, HeapNumber::kExponentShift)); | |
1456 __ sub(r6, r6, Operand(HeapNumber::kExponentBias), SetCC); | |
1457     // If the exponent is negative, then the result is 0. | |
1458 __ mov(r3, Operand(0), LeaveCC, mi); | |
1459 __ b(mi, &done); | |
1460 | |
1461     // If the exponent is too big, then the result is the minimal value. | |
1462 __ cmp(r6, Operand(meaningfull_bits - 1)); | |
1463 __ mov(r3, Operand(min_value), LeaveCC, ge); | |
1464 __ b(ge, &done); | |
1465 | |
1466 __ and_(r5, r3, Operand(HeapNumber::kSignMask), SetCC); | |
1467 __ and_(r3, r3, Operand(HeapNumber::kMantissaMask)); | |
1468 __ orr(r3, r3, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); | |
1469 | |
1470 __ rsb(r6, r6, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); | |
1471 __ mov(r3, Operand(r3, LSR, r6), LeaveCC, pl); | |
1472 __ b(pl, &sign); | |
1473 | |
1474 __ rsb(r6, r6, Operand(0)); | |
1475 __ mov(r3, Operand(r3, LSL, r6)); | |
1476 __ rsb(r6, r6, Operand(meaningfull_bits)); | |
1477 __ orr(r3, r3, Operand(r4, LSR, r6)); | |
1478 | |
1479 __ bind(&sign); | |
1480 __ teq(r5, Operand(0)); | |
1481 __ rsb(r3, r3, Operand(0), LeaveCC, ne); | |
1482 | |
1483 __ bind(&done); | |
1484 switch (array_type) { | |
1485 case kExternalByteArray: | |
1486 case kExternalUnsignedByteArray: | |
1487 __ strb(r3, MemOperand(r2, r1, LSL, 0)); | |
1488 break; | |
1489 case kExternalShortArray: | |
1490 case kExternalUnsignedShortArray: | |
1491 __ strh(r3, MemOperand(r2, r1, LSL, 1)); | |
1492 break; | |
1493 case kExternalIntArray: | |
1494 case kExternalUnsignedIntArray: | |
1495 __ str(r3, MemOperand(r2, r1, LSL, 2)); | |
1496 break; | |
1497 default: | |
1498 UNREACHABLE(); | |
1499 break; | |
1500 } | |
1501 } | |
1502 } | |
1503 | |
1504 // Slow case: call runtime. | |
1505 __ bind(&slow); | |
1506 GenerateRuntimeSetProperty(masm); | |
826 } | 1507 } |
827 | 1508 |
828 | 1509 |
829 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { | 1510 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { |
830 // ----------- S t a t e ------------- | 1511 // ----------- S t a t e ------------- |
831 // -- r0 : value | 1512 // -- r0 : value |
832 // -- r1 : receiver | 1513 // -- r1 : receiver |
833 // -- r2 : name | 1514 // -- r2 : name |
834 // -- lr : return address | 1515 // -- lr : return address |
835 // ----------------------------------- | 1516 // ----------------------------------- |
(...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
907 __ bind(&miss); | 1588 __ bind(&miss); |
908 | 1589 |
909 GenerateMiss(masm); | 1590 GenerateMiss(masm); |
910 } | 1591 } |
911 | 1592 |
912 | 1593 |
913 #undef __ | 1594 #undef __ |
914 | 1595 |
915 | 1596 |
916 } } // namespace v8::internal | 1597 } } // namespace v8::internal |
OLD | NEW |