OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1035 matching lines...)
1046 __ orr(fval, | 1046 __ orr(fval, |
1047 fval, | 1047 fval, |
1048 Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); | 1048 Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); |
1049 | 1049 |
1050 __ bind(&done); | 1050 __ bind(&done); |
1051 __ str(fval, MemOperand(dst, wordoffset, LSL, 2)); | 1051 __ str(fval, MemOperand(dst, wordoffset, LSL, 2)); |
1052 } | 1052 } |
1053 } | 1053 } |
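
Note: lines 1046-1051 are the tail of the int-to-binary32 store path. The top bits of the integer mantissa in ival are shifted down by kBitsPerInt - kBinary32MantissaBits and OR'd into fval, which already holds the sign and biased exponent, and the finished binary32 is stored at a 4-byte-scaled word offset. A minimal standalone sketch of the same composition in plain C++ (the helper name and the memcpy-based reinterpretation are ours, not V8's):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Compose an IEEE 754 binary32 from a sign bit, a biased exponent and a
    // top-aligned mantissa word, mirroring the orr/LSR idiom above.
    float ComposeBinary32(uint32_t sign, uint32_t biased_exponent,
                          uint32_t mantissa_top_aligned) {
      const int kBinary32MantissaBits = 23;
      const int kBitsPerInt = 32;
      uint32_t fval = (sign << 31) | (biased_exponent << kBinary32MantissaBits);
      // Keep only the 23 most significant mantissa bits, as the LSR does.
      fval |= mantissa_top_aligned >> (kBitsPerInt - kBinary32MantissaBits);
      float result;
      memcpy(&result, &fval, sizeof result);  // bit-exact reinterpretation
      return result;
    }

    int main() {
      // 1.5f: sign 0, biased exponent 127 (i.e. 2^0), fraction 0.5.
      printf("%g\n", ComposeBinary32(0, 127, 0x80000000u));  // prints 1.5
    }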
1054 | 1054 |
1055 | 1055 |
1056 // Convert an unsigned integer with the specified number of leading zeroes | 
1057 // in its binary representation to an IEEE 754 double. | 
1058 // The integer to convert is passed in register hiword. | 
1059 // The resulting double is returned in registers hiword:loword. | 
1060 // This function does not work correctly for 0. | 
1061 static void GenerateUInt2Double(MacroAssembler* masm, | |
1062 Register hiword, | |
1063 Register loword, | |
1064 Register scratch, | |
1065 int leading_zeroes) { | |
1066 const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; | |
1067 const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; | |
1068 | |
1069 const int mantissa_shift_for_hi_word = | |
1070 meaningful_bits - HeapNumber::kMantissaBitsInTopWord; | |
1071 | |
1072 const int mantissa_shift_for_lo_word = | |
1073 kBitsPerInt - mantissa_shift_for_hi_word; | |
1074 | |
1075 __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); | |
1076 if (mantissa_shift_for_hi_word > 0) { | |
1077 __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); | |
1078 __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word)); | |
1079 } else { | |
1080 __ mov(loword, Operand(0, RelocInfo::NONE)); | |
1081 __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word)); | |
1082 } | |
1083 | |
1084 // If the least significant bit of the biased exponent was not 1, it was | 
1085 // corrupted by the most significant bit of the mantissa, so fix it up. | 
1086 if (!(biased_exponent & 1)) { | |
1087 __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); | |
1088 } | |
1089 } | |
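
Note: the deleted GenerateUInt2Double (its only callers, the non-VFP unsigned-int load paths, are deleted further down) builds a binary64 with nothing but shifts and ORs: the exponent comes from the position of the integer's top set bit, the integer is shifted so that bit lands exactly on the exponent's least significant bit (standing in for the mantissa's implicit leading 1), and that bit is cleared again when the exponent's own LSB should be 0. A hedged C++ re-creation covering the cases the callers actually use (leading_zeroes of 0 or 1, so the hi-word shift is positive; the constant and helper names are ours):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Returns the binary64 bit pattern as hiword:loword packed in a uint64_t.
    uint64_t UInt2DoubleBits(uint32_t value, int leading_zeroes) {
      const int kExponentBias = 1023;         // HeapNumber::kExponentBias
      const int kExponentShift = 20;          // HeapNumber::kExponentShift
      const int kMantissaBitsInTopWord = 20;
      const int meaningful_bits = 32 - leading_zeroes - 1;
      const int biased_exponent = kExponentBias + meaningful_bits;
      const int hi_shift = meaningful_bits - kMantissaBitsInTopWord;
      assert(hi_shift > 0 && hi_shift < 32);  // holds for leading_zeroes 0, 1

      uint32_t scratch = (uint32_t)biased_exponent << kExponentShift;
      uint32_t loword = value << (32 - hi_shift);
      uint32_t hiword = scratch | (value >> hi_shift);
      // The integer's top set bit landed on the exponent's least significant
      // bit, where it plays the role of the implicit leading 1. If that
      // exponent bit should be 0, clear it again (the bic above).
      if (!(biased_exponent & 1)) hiword &= ~(1u << kExponentShift);
      return ((uint64_t)hiword << 32) | loword;
    }

    int main() {
      // 0x80000000 has no leading zeroes; expect the bit pattern of 2^31.
      printf("%llx\n", (unsigned long long)UInt2DoubleBits(0x80000000u, 0));
      // Prints 41e0000000000000, i.e. 2147483648.0.
    }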
1090 | |
1091 | |
1092 #undef __ | 1056 #undef __ |
1093 #define __ ACCESS_MASM(masm()) | 1057 #define __ ACCESS_MASM(masm()) |
1094 | 1058 |
1095 | 1059 |
1096 Register StubCompiler::CheckPrototypes(Handle<JSObject> object, | 1060 Register StubCompiler::CheckPrototypes(Handle<JSObject> object, |
1097 Register object_reg, | 1061 Register object_reg, |
1098 Handle<JSObject> holder, | 1062 Handle<JSObject> holder, |
1099 Register holder_reg, | 1063 Register holder_reg, |
1100 Register scratch1, | 1064 Register scratch1, |
1101 Register scratch2, | 1065 Register scratch2, |
(...skipping 2173 matching lines...)
3275 | 3239 |
3276 | 3240 |
3277 Handle<Code> KeyedLoadStubCompiler::CompileLoadElement( | 3241 Handle<Code> KeyedLoadStubCompiler::CompileLoadElement( |
3278 Handle<Map> receiver_map) { | 3242 Handle<Map> receiver_map) { |
3279 // ----------- S t a t e ------------- | 3243 // ----------- S t a t e ------------- |
3280 // -- lr : return address | 3244 // -- lr : return address |
3281 // -- r0 : key | 3245 // -- r0 : key |
3282 // -- r1 : receiver | 3246 // -- r1 : receiver |
3283 // ----------------------------------- | 3247 // ----------------------------------- |
3284 ElementsKind elements_kind = receiver_map->elements_kind(); | 3248 ElementsKind elements_kind = receiver_map->elements_kind(); |
3285 Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode(); | 3249 if (receiver_map->has_fast_elements() || |
3286 | 3250 receiver_map->has_external_array_elements()) { |
3287 __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK); | 3251 Handle<Code> stub = KeyedLoadFastElementStub( |
| 3252 receiver_map->instance_type() == JS_ARRAY_TYPE, |
| 3253 elements_kind).GetCode(); |
| 3254 __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK); |
| 3255 } else { |
| 3256 Handle<Code> stub = |
| 3257 KeyedLoadDictionaryElementStub().GetCode(); |
| 3258 __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK); |
| 3259 } |
3288 | 3260 |
3289 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss(); | 3261 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss(); |
3290 __ Jump(ic, RelocInfo::CODE_TARGET); | 3262 __ Jump(ic, RelocInfo::CODE_TARGET); |
3291 | 3263 |
3292 // Return the generated code. | 3264 // Return the generated code. |
3293 return GetCode(Code::NORMAL, factory()->empty_string()); | 3265 return GetCode(Code::NORMAL, factory()->empty_string()); |
3294 } | 3266 } |
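
Note: the replacement splits element loads by kind: fast and external-array receivers get a KeyedLoadFastElementStub specialized on elements_kind (and on whether the receiver is a JSArray), dictionary-mode receivers get KeyedLoadDictionaryElementStub, and DispatchMap emits the map check that tail-jumps to the chosen stub or falls through to the miss handler. A conceptual model of that dispatch, using illustrative types rather than V8's real classes (V8 emits this as machine code, and DO_SMI_CHECK additionally routes smi receivers to the miss handler, omitted here):

    #include <cstdio>

    struct Map {};                         // hidden class
    struct HeapObject { Map* map; };
    typedef void (*Stub)(HeapObject* receiver, int key);

    void DispatchMap(HeapObject* receiver, int key,
                     Map* expected_map, Stub specialized, Stub miss) {
      if (receiver->map == expected_map) {
        specialized(receiver, key);        // tail-jump to the compiled stub
      } else {
        miss(receiver, key);               // KeyedLoadIC_Miss
      }
    }

    void FastLoad(HeapObject*, int) { puts("fast element load"); }
    void Miss(HeapObject*, int)     { puts("miss"); }

    int main() {
      Map array_map;
      HeapObject receiver = { &array_map };
      DispatchMap(&receiver, 0, &array_map, FastLoad, Miss);  // fast path
    }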
3295 | 3267 |
3296 | 3268 |
3297 Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic( | 3269 Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic( |
(...skipping 384 matching lines...)
3682 __ TrySmiTag(scratch0, fail, scratch1); | 3654 __ TrySmiTag(scratch0, fail, scratch1); |
3683 __ mov(key, scratch0); | 3655 __ mov(key, scratch0); |
3684 __ bind(&key_ok); | 3656 __ bind(&key_ok); |
3685 } else { | 3657 } else { |
3686 // Check that the key is a smi. | 3658 // Check that the key is a smi. |
3687 __ JumpIfNotSmi(key, fail); | 3659 __ JumpIfNotSmi(key, fail); |
3688 } | 3660 } |
3689 } | 3661 } |
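
Note: GenerateSmiKeyCheck ends with JumpIfNotSmi, which on 32-bit ARM is just a test of the low bit: a smi is the integer shifted left by one with a 0 tag bit, so a tagged smi equals twice its value. The external-array loads below lean on this directly instead of untagging; the scaled addressing in the switch at line 3728 folds the factor of two into the shift. A small model, assuming 32-bit smis (kSmiTag == 0, kSmiTagSize == 1):

    #include <cassert>
    #include <cstdint>

    // A smi is the value shifted left once, so its low bit is always 0.
    static inline bool IsSmi(uint32_t tagged) { return (tagged & 1) == 0; }
    static inline uint32_t SmiTag(int32_t value) { return (uint32_t)value << 1; }

    int main() {
      int32_t key = 7;
      uint32_t tagged = SmiTag(key);               // 14: "premultiplied by 2"
      assert(IsSmi(tagged));
      // The loads never untag; they fold the factor of 2 into the shift:
      assert(tagged >> 1 == (uint32_t)key);        // byte elements: LSR #1
      assert(tagged == (uint32_t)key * 2);         // 16-bit elements: as-is
      assert(tagged << 1 == (uint32_t)key * 4);    // 32-bit elements: LSL #1
      return 0;
    }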
3690 | 3662 |
3691 | 3663 |
3692 void KeyedLoadStubCompiler::GenerateLoadExternalArray( | |
3693 MacroAssembler* masm, | |
3694 ElementsKind elements_kind) { | |
3695 // ---------- S t a t e -------------- | |
3696 // -- lr : return address | |
3697 // -- r0 : key | |
3698 // -- r1 : receiver | |
3699 // ----------------------------------- | |
3700 Label miss_force_generic, slow, failed_allocation; | |
3701 | |
3702 Register key = r0; | |
3703 Register receiver = r1; | |
3704 | |
3705 // This stub is meant to be tail-jumped to; the caller must already | 
3706 // have verified that the receiver is not a smi. | 
3707 | |
3708 // Check that the key is a smi or a heap number convertible to a smi. | |
3709 GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic); | |
3710 | |
3711 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); | |
3712 // r3: elements array | |
3713 | |
3714 // Check that the index is in range. | |
3715 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); | |
3716 __ cmp(key, ip); | |
3717 // Unsigned comparison catches both negative and too-large values. | |
3718 __ b(hs, &miss_force_generic); | |
3719 | |
3720 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); | |
3721 // r3: base pointer of external storage | |
3722 | |
3723 // We do not untag the smi key; instead we work with it | 
3724 // as if it were premultiplied by 2. | 
3725 STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); | |
3726 | |
3727 Register value = r2; | |
3728 switch (elements_kind) { | |
3729 case EXTERNAL_BYTE_ELEMENTS: | |
3730 __ ldrsb(value, MemOperand(r3, key, LSR, 1)); | |
3731 break; | |
3732 case EXTERNAL_PIXEL_ELEMENTS: | |
3733 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | |
3734 __ ldrb(value, MemOperand(r3, key, LSR, 1)); | |
3735 break; | |
3736 case EXTERNAL_SHORT_ELEMENTS: | |
3737 __ ldrsh(value, MemOperand(r3, key, LSL, 0)); | |
3738 break; | |
3739 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: | |
3740 __ ldrh(value, MemOperand(r3, key, LSL, 0)); | |
3741 break; | |
3742 case EXTERNAL_INT_ELEMENTS: | |
3743 case EXTERNAL_UNSIGNED_INT_ELEMENTS: | |
3744 __ ldr(value, MemOperand(r3, key, LSL, 1)); | |
3745 break; | |
3746 case EXTERNAL_FLOAT_ELEMENTS: | |
3747 if (CpuFeatures::IsSupported(VFP2)) { | |
3748 CpuFeatures::Scope scope(VFP2); | |
3749 __ add(r2, r3, Operand(key, LSL, 1)); | |
3750 __ vldr(s0, r2, 0); | |
3751 } else { | |
3752 __ ldr(value, MemOperand(r3, key, LSL, 1)); | |
3753 } | |
3754 break; | |
3755 case EXTERNAL_DOUBLE_ELEMENTS: | |
3756 if (CpuFeatures::IsSupported(VFP2)) { | |
3757 CpuFeatures::Scope scope(VFP2); | |
3758 __ add(r2, r3, Operand(key, LSL, 2)); | |
3759 __ vldr(d0, r2, 0); | |
3760 } else { | |
3761 __ add(r4, r3, Operand(key, LSL, 2)); | |
3762 // r4: pointer to the beginning of the double we want to load. | |
3763 __ ldr(r2, MemOperand(r4, 0)); | |
3764 __ ldr(r3, MemOperand(r4, Register::kSizeInBytes)); | |
3765 } | |
3766 break; | |
3767 case FAST_ELEMENTS: | |
3768 case FAST_SMI_ELEMENTS: | |
3769 case FAST_DOUBLE_ELEMENTS: | |
3770 case FAST_HOLEY_ELEMENTS: | |
3771 case FAST_HOLEY_SMI_ELEMENTS: | |
3772 case FAST_HOLEY_DOUBLE_ELEMENTS: | |
3773 case DICTIONARY_ELEMENTS: | |
3774 case NON_STRICT_ARGUMENTS_ELEMENTS: | |
3775 UNREACHABLE(); | |
3776 break; | |
3777 } | |
3778 | |
3779 // For integer array types: | 
3780 // r2: value | 
3781 // For the float array type: | 
3782 // s0: value (if VFP2 is supported) | 
3783 // r2: value (if VFP2 is not supported) | 
3784 // For the double array type: | 
3785 // d0: value (if VFP2 is supported) | 
3786 // r2/r3: value (if VFP2 is not supported) | 
3787 | |
3788 if (elements_kind == EXTERNAL_INT_ELEMENTS) { | |
3789 // For the Int and UnsignedInt array types, we need to see whether | |
3790 // the value can be represented in a Smi. If not, we need to convert | |
3791 // it to a HeapNumber. | |
3792 Label box_int; | |
3793 __ cmp(value, Operand(0xC0000000)); | |
3794 __ b(mi, &box_int); | |
3795 // Tag integer as smi and return it. | |
3796 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | |
3797 __ Ret(); | |
3798 | |
3799 __ bind(&box_int); | |
3800 if (CpuFeatures::IsSupported(VFP2)) { | |
3801 CpuFeatures::Scope scope(VFP2); | |
3802 // Allocate a HeapNumber for the result and perform int-to-double | |
3803 // conversion. Don't touch r0 or r1 as they are needed if allocation | |
3804 // fails. | |
3805 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
3806 | |
3807 __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT); | |
3808 // Now we can use r0 for the result as key is not needed any more. | |
3809 __ add(r0, r5, Operand(kHeapObjectTag)); | |
3810 __ vmov(s0, value); | |
3811 __ vcvt_f64_s32(d0, s0); | |
3812 __ vstr(d0, r5, HeapNumber::kValueOffset); | |
3813 __ Ret(); | |
3814 } else { | |
3815 // Allocate a HeapNumber for the result and perform int-to-double | |
3816 // conversion. Don't touch r0 or r1 as they are needed if allocation | |
3817 // fails. | |
3818 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
3819 __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT); | |
3820 // Now we can use r0 for the result as key is not needed any more. | |
3821 __ mov(r0, r5); | |
3822 Register dst1 = r1; | |
3823 Register dst2 = r3; | |
3824 FloatingPointHelper::Destination dest = | |
3825 FloatingPointHelper::kCoreRegisters; | |
3826 FloatingPointHelper::ConvertIntToDouble(masm, | |
3827 value, | |
3828 dest, | |
3829 d0, | |
3830 dst1, | |
3831 dst2, | |
3832 r9, | |
3833 s0); | |
3834 __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | |
3835 __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | |
3836 __ Ret(); | |
3837 } | |
3838 } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) { | |
3839 // The test is different for unsigned int values. Since we need | |
3840 // the value to be in the range of a positive smi, we can't | |
3841 // handle either of the top two bits being set in the value. | |
3842 if (CpuFeatures::IsSupported(VFP2)) { | |
3843 CpuFeatures::Scope scope(VFP2); | |
3844 Label box_int, done; | |
3845 __ tst(value, Operand(0xC0000000)); | |
3846 __ b(ne, &box_int); | |
3847 // Tag integer as smi and return it. | |
3848 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | |
3849 __ Ret(); | |
3850 | |
3851 __ bind(&box_int); | |
3852 __ vmov(s0, value); | |
3853 // Allocate a HeapNumber for the result and perform int-to-double | |
3854 // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all | |
3855 // registers - also when jumping due to exhausted young space. | |
3856 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
3857 __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); | |
3858 | |
3859 __ vcvt_f64_u32(d0, s0); | |
3860 __ vstr(d0, r2, HeapNumber::kValueOffset); | |
3861 | |
3862 __ add(r0, r2, Operand(kHeapObjectTag)); | |
3863 __ Ret(); | |
3864 } else { | |
3865 // Check whether unsigned integer fits into smi. | |
3866 Label box_int_0, box_int_1, done; | |
3867 __ tst(value, Operand(0x80000000)); | |
3868 __ b(ne, &box_int_0); | |
3869 __ tst(value, Operand(0x40000000)); | |
3870 __ b(ne, &box_int_1); | |
3871 // Tag integer as smi and return it. | |
3872 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | |
3873 __ Ret(); | |
3874 | |
3875 Register hiword = value; // r2. | |
3876 Register loword = r3; | |
3877 | |
3878 __ bind(&box_int_0); | |
3879 // Integer does not have leading zeros. | |
3880 GenerateUInt2Double(masm, hiword, loword, r4, 0); | |
3881 __ b(&done); | |
3882 | |
3883 __ bind(&box_int_1); | |
3884 // Integer has one leading zero. | |
3885 GenerateUInt2Double(masm, hiword, loword, r4, 1); | |
3886 | |
3887 | |
3888 __ bind(&done); | |
3889 // Integer was converted to double in registers hiword:loword. | |
3890 // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber | |
3891 // clobbers all registers - also when jumping due to exhausted young | |
3892 // space. | |
3893 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
3894 __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT); | |
3895 | |
3896 __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); | |
3897 __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); | |
3898 | |
3899 __ mov(r0, r4); | |
3900 __ Ret(); | |
3901 } | |
3902 } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | |
3903 // For the floating-point array type, we need to always allocate a | |
3904 // HeapNumber. | |
3905 if (CpuFeatures::IsSupported(VFP2)) { | |
3906 CpuFeatures::Scope scope(VFP2); | |
3907 // Allocate a HeapNumber for the result. Don't use r0 and r1 as | |
3908 // AllocateHeapNumber clobbers all registers - also when jumping due to | |
3909 // exhausted young space. | |
3910 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
3911 __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); | |
3912 __ vcvt_f64_f32(d0, s0); | |
3913 __ vstr(d0, r2, HeapNumber::kValueOffset); | |
3914 | |
3915 __ add(r0, r2, Operand(kHeapObjectTag)); | |
3916 __ Ret(); | |
3917 } else { | |
3918 // Allocate a HeapNumber for the result. Don't use r0 and r1 as | |
3919 // AllocateHeapNumber clobbers all registers - also when jumping due to | |
3920 // exhausted young space. | |
3921 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
3922 __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT); | |
3923 // VFP is not available, do manual single to double conversion. | |
3924 | |
3925 // r2: floating point value (binary32) | |
3926 // r3: heap number for result | |
3927 | |
3928 // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to | |
3929 // the slow case from here. | |
3930 __ and_(r0, value, Operand(kBinary32MantissaMask)); | |
3931 | |
3932 // Extract exponent to r1. OK to clobber r1 now as there are no jumps to | |
3933 // the slow case from here. | |
3934 __ mov(r1, Operand(value, LSR, kBinary32MantissaBits)); | |
3935 __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); | |
3936 | |
3937 Label exponent_rebiased; | |
3938 __ teq(r1, Operand(0x00)); | |
3939 __ b(eq, &exponent_rebiased); | |
3940 | |
3941 __ teq(r1, Operand(0xff)); | |
3942 __ mov(r1, Operand(0x7ff), LeaveCC, eq); | |
3943 __ b(eq, &exponent_rebiased); | |
3944 | |
3945 // Rebias exponent. | |
3946 __ add(r1, | |
3947 r1, | |
3948 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); | |
3949 | |
3950 __ bind(&exponent_rebiased); | |
3951 __ and_(r2, value, Operand(kBinary32SignMask)); | |
3952 value = no_reg; | |
3953 __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord)); | |
3954 | |
3955 // Shift mantissa. | |
3956 static const int kMantissaShiftForHiWord = | |
3957 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; | |
3958 | |
3959 static const int kMantissaShiftForLoWord = | |
3960 kBitsPerInt - kMantissaShiftForHiWord; | |
3961 | |
3962 __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord)); | |
3963 __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord)); | |
3964 | |
3965 __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset)); | |
3966 __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); | |
3967 | |
3968 __ mov(r0, r3); | |
3969 __ Ret(); | |
3970 } | |
3971 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | |
3972 if (CpuFeatures::IsSupported(VFP2)) { | |
3973 CpuFeatures::Scope scope(VFP2); | |
3974 // Allocate a HeapNumber for the result. Don't use r0 and r1 as | |
3975 // AllocateHeapNumber clobbers all registers - also when jumping due to | |
3976 // exhausted young space. | |
3977 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
3978 __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); | |
3979 __ vstr(d0, r2, HeapNumber::kValueOffset); | |
3980 | |
3981 __ add(r0, r2, Operand(kHeapObjectTag)); | |
3982 __ Ret(); | |
3983 } else { | |
3984 // Allocate a HeapNumber for the result. Don't use r0 and r1 as | |
3985 // AllocateHeapNumber clobbers all registers - also when jumping due to | |
3986 // exhausted young space. | |
3987 __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex); | |
3988 __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT); | |
3989 | |
3990 __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); | |
3991 __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset)); | |
3992 __ mov(r0, r4); | |
3993 __ Ret(); | |
3994 } | |
3995 | |
3996 } else { | |
3997 // Tag integer as smi and return it. | |
3998 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | |
3999 __ Ret(); | |
4000 } | |
4001 | |
4002 // Slow case, key and receiver still in r0 and r1. | |
4003 __ bind(&slow); | |
4004 __ IncrementCounter( | |
4005 masm->isolate()->counters()->keyed_load_external_array_slow(), | |
4006 1, r2, r3); | |
4007 | |
4008 // ---------- S t a t e -------------- | |
4009 // -- lr : return address | |
4010 // -- r0 : key | |
4011 // -- r1 : receiver | |
4012 // ----------------------------------- | |
4013 | |
4014 __ Push(r1, r0); | |
4015 | |
4016 __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); | |
4017 | |
4018 __ bind(&miss_force_generic); | |
4019 Handle<Code> stub = | |
4020 masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); | |
4021 __ Jump(stub, RelocInfo::CODE_TARGET); | |
4022 } | |
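
Note: two idioms in this deleted load stub deserve unpacking. First, the smi-range checks: a 31-bit smi holds signed values in [-2^30, 2^30), so a signed int fits exactly when its top two bits are equal (the cmp #0xC0000000 / b mi pair at line 3793) and an unsigned int fits exactly when its top two bits are clear (the tst #0xC0000000 at line 3845). Second, the non-VFP float path widens binary32 to binary64 by rebiasing the exponent from 127 to 1023, passing exponent 0 through unchanged (zero; denormals are not renormalized) and mapping 0xFF to 0x7FF for infinities and NaNs, then left-aligning the 23 mantissa bits into the 52-bit field split across two words. A standalone sketch of both, with our own helper names:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // cmp value, #0xC0000000 / b mi: in range iff value + 2^30 has bit 31
    // clear, i.e. value is in [-2^30, 2^30).
    bool SignedFitsSmi(int32_t v) {
      return ((uint32_t)v + 0x40000000u) < 0x80000000u;
    }

    // tst value, #0xC0000000: in range iff neither top bit is set.
    bool UnsignedFitsSmi(uint32_t v) { return (v & 0xC0000000u) == 0; }

    // Widen a binary32 bit pattern to binary64 the way the non-VFP path
    // does: rebias the exponent, left-align the mantissa across two words.
    uint64_t Float32BitsToFloat64Bits(uint32_t f) {
      const int kMantissaBits32 = 23, kMantissaBitsInTopWord = 20;
      const int kHiShift = kMantissaBits32 - kMantissaBitsInTopWord;  // 3
      uint32_t mantissa = f & 0x007FFFFFu;
      uint32_t exponent = (f >> kMantissaBits32) & 0xFFu;
      if (exponent == 0xFFu) {
        exponent = 0x7FFu;                      // infinity or NaN
      } else if (exponent != 0) {
        exponent += -127 + 1023;                // rebias
      }
      uint32_t hi = (f & 0x80000000u)                     // sign
                  | (exponent << kMantissaBitsInTopWord)  // exponent
                  | (mantissa >> kHiShift);               // top 20 bits
      uint32_t lo = mantissa << (32 - kHiShift);          // bottom 3 bits
      return ((uint64_t)hi << 32) | lo;
    }

    int main() {
      float f = 1.5f;
      uint32_t bits;
      memcpy(&bits, &f, sizeof bits);
      uint64_t dbits = Float32BitsToFloat64Bits(bits);
      double d;
      memcpy(&d, &dbits, sizeof d);
      printf("%g %d %d\n", d, SignedFitsSmi(1 << 30), UnsignedFitsSmi(3u));
      // Prints: 1.5 0 1
    }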
4023 | |
4024 | |
4025 void KeyedStoreStubCompiler::GenerateStoreExternalArray( | 3664 void KeyedStoreStubCompiler::GenerateStoreExternalArray( |
4026 MacroAssembler* masm, | 3665 MacroAssembler* masm, |
4027 ElementsKind elements_kind) { | 3666 ElementsKind elements_kind) { |
4028 // ---------- S t a t e -------------- | 3667 // ---------- S t a t e -------------- |
4029 // -- r0 : value | 3668 // -- r0 : value |
4030 // -- r1 : key | 3669 // -- r1 : key |
4031 // -- r2 : receiver | 3670 // -- r2 : receiver |
4032 // -- lr : return address | 3671 // -- lr : return address |
4033 // ----------------------------------- | 3672 // ----------------------------------- |
4034 Label slow, check_heap_number, miss_force_generic; | 3673 Label slow, check_heap_number, miss_force_generic; |
(...skipping 324 matching lines...)
4359 // -- r0 : key | 3998 // -- r0 : key |
4360 // -- r1 : receiver | 3999 // -- r1 : receiver |
4361 // ----------------------------------- | 4000 // ----------------------------------- |
4362 | 4001 |
4363 Handle<Code> miss_ic = | 4002 Handle<Code> miss_ic = |
4364 masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); | 4003 masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); |
4365 __ Jump(miss_ic, RelocInfo::CODE_TARGET); | 4004 __ Jump(miss_ic, RelocInfo::CODE_TARGET); |
4366 } | 4005 } |
4367 | 4006 |
4368 | 4007 |
4369 void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) { | |
4370 // ----------- S t a t e ------------- | |
4371 // -- lr : return address | |
4372 // -- r0 : key | |
4373 // -- r1 : receiver | |
4374 // ----------------------------------- | |
4375 Label miss_force_generic; | |
4376 | |
4377 // This stub is meant to be tail-jumped to; the caller must already | 
4378 // have verified that the receiver is not a smi. | 
4379 | |
4380 // Check that the key is a smi or a heap number convertible to a smi. | |
4381 GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic); | |
4382 | |
4383 // Get the elements array. | |
4384 __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); | |
4385 __ AssertFastElements(r2); | |
4386 | |
4387 // Check that the key is within bounds. | |
4388 __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset)); | |
4389 __ cmp(r0, Operand(r3)); | |
4390 __ b(hs, &miss_force_generic); | |
4391 | |
4392 // Load the result and make sure it's not the hole. | |
4393 __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
4394 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); | |
4395 __ ldr(r4, | |
4396 MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
4397 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | |
4398 __ cmp(r4, ip); | |
4399 __ b(eq, &miss_force_generic); | |
4400 __ mov(r0, r4); | |
4401 __ Ret(); | |
4402 | |
4403 __ bind(&miss_force_generic); | |
4404 Handle<Code> stub = | |
4405 masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); | |
4406 __ Jump(stub, RelocInfo::CODE_TARGET); | |
4407 } | |
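
Note: the bounds check here compares the smi-tagged key directly against the FixedArray length, which is itself stored as a smi, so no untagging is needed; and because the comparison branches on hs (unsigned greater-or-equal), a negative key turns into a huge unsigned value and fails the same single test. A small model, assuming 32-bit smis:

    #include <cassert>
    #include <cstdint>

    // Model of the cmp / b(hs) pair above: one unsigned comparison of two
    // tagged smis rejects both negative keys and keys >= length.
    static inline bool InBounds(int32_t key, int32_t length) {
      uint32_t tagged_key = (uint32_t)key << 1;     // smi tag
      uint32_t tagged_len = (uint32_t)length << 1;
      return tagged_key < tagged_len;               // b(hs, &miss) on failure
    }

    int main() {
      assert(InBounds(0, 4) && InBounds(3, 4));
      assert(!InBounds(4, 4));
      assert(!InBounds(-1, 4));  // tagged -1 is 0xFFFFFFFE: rejected
      return 0;
    }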
4408 | |
4409 | |
4410 void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( | |
4411 MacroAssembler* masm) { | |
4412 // ----------- S t a t e ------------- | |
4413 // -- lr : return address | |
4414 // -- r0 : key | |
4415 // -- r1 : receiver | |
4416 // ----------------------------------- | |
4417 Label miss_force_generic, slow_allocate_heapnumber; | |
4418 | |
4419 Register key_reg = r0; | |
4420 Register receiver_reg = r1; | |
4421 Register elements_reg = r2; | |
4422 Register heap_number_reg = r2; | |
4423 Register indexed_double_offset = r3; | |
4424 Register scratch = r4; | |
4425 Register scratch2 = r5; | |
4426 Register scratch3 = r6; | |
4427 Register heap_number_map = r7; | |
4428 | |
4429 // This stub is meant to be tail-jumped to; the caller must already | 
4430 // have verified that the receiver is not a smi. | 
4431 | |
4432 // Check that the key is a smi or a heap number convertible to a smi. | |
4433 GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic); | |
4434 | |
4435 // Get the elements array. | |
4436 __ ldr(elements_reg, | |
4437 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); | |
4438 | |
4439 // Check that the key is within bounds. | |
4440 __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); | |
4441 __ cmp(key_reg, Operand(scratch)); | |
4442 __ b(hs, &miss_force_generic); | |
4443 | |
4444 // Load the upper word of the double in the fixed array and test for NaN. | |
4445 __ add(indexed_double_offset, elements_reg, | |
4446 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); | |
4447 uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32); | |
4448 __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset)); | |
4449 __ cmp(scratch, Operand(kHoleNanUpper32)); | |
4450 __ b(&miss_force_generic, eq); | |
4451 | |
4452 // Non-NaN. Allocate a new heap number and copy the double value into it. | |
4453 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | |
4454 __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3, | |
4455 heap_number_map, &slow_allocate_heapnumber, TAG_RESULT); | |
4456 | |
4457 // No need to reload the upper 32 bits of the double; they are already | 
4458 // in scratch. | 
4459 __ str(scratch, FieldMemOperand(heap_number_reg, | |
4460 HeapNumber::kExponentOffset)); | |
4461 __ ldr(scratch, FieldMemOperand(indexed_double_offset, | |
4462 FixedArray::kHeaderSize)); | |
4463 __ str(scratch, FieldMemOperand(heap_number_reg, | |
4464 HeapNumber::kMantissaOffset)); | |
4465 | |
4466 __ mov(r0, heap_number_reg); | |
4467 __ Ret(); | |
4468 | |
4469 __ bind(&slow_allocate_heapnumber); | |
4470 Handle<Code> slow_ic = | |
4471 masm->isolate()->builtins()->KeyedLoadIC_Slow(); | |
4472 __ Jump(slow_ic, RelocInfo::CODE_TARGET); | |
4473 | |
4474 __ bind(&miss_force_generic); | |
4475 Handle<Code> miss_ic = | |
4476 masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); | |
4477 __ Jump(miss_ic, RelocInfo::CODE_TARGET); | |
4478 } | |
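
Note: GenerateLoadFastDoubleElement distinguishes holes from real elements by bit pattern: FixedDoubleArray stores the hole as a NaN whose upper 32 bits ordinary arithmetic will not produce, so a single word compare suffices (kHoleNanLower32 appears above only inside sizeof, to compute the upper word's offset). A sketch under that assumption; the stand-in bit patterns below are ours for illustration, the real constants are V8's kHoleNanUpper32/kHoleNanLower32 in globals.h:

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Illustrative stand-ins for V8's hole-NaN constants.
    const uint32_t kHoleNanUpper32 = 0x7FF7FFFFu;
    const uint64_t kHoleNanBits =
        ((uint64_t)kHoleNanUpper32 << 32) | 0xFFFFFFFFu;

    // The stub reads only the upper word of the element and compares it
    // against the hole pattern (the cmp / b(eq) pair above).
    bool IsTheHole(double element) {
      uint64_t bits;
      memcpy(&bits, &element, sizeof bits);
      return (uint32_t)(bits >> 32) == kHoleNanUpper32;
    }

    int main() {
      double hole;
      memcpy(&hole, &kHoleNanBits, sizeof hole);
      printf("%d %d %d\n", IsTheHole(hole), IsTheHole(1.5),
             IsTheHole(std::nan("")));
      // Prints: 1 0 0. An ordinary quiet NaN does not match the hole.
    }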
4479 | |
4480 | |
4481 void KeyedStoreStubCompiler::GenerateStoreFastElement( | 4008 void KeyedStoreStubCompiler::GenerateStoreFastElement( |
4482 MacroAssembler* masm, | 4009 MacroAssembler* masm, |
4483 bool is_js_array, | 4010 bool is_js_array, |
4484 ElementsKind elements_kind, | 4011 ElementsKind elements_kind, |
4485 KeyedAccessGrowMode grow_mode) { | 4012 KeyedAccessGrowMode grow_mode) { |
4486 // ----------- S t a t e ------------- | 4013 // ----------- S t a t e ------------- |
4487 // -- r0 : value | 4014 // -- r0 : value |
4488 // -- r1 : key | 4015 // -- r1 : key |
4489 // -- r2 : receiver | 4016 // -- r2 : receiver |
4490 // -- lr : return address | 4017 // -- lr : return address |
(...skipping 296 matching lines...)
4787 __ Jump(ic_slow, RelocInfo::CODE_TARGET); | 4314 __ Jump(ic_slow, RelocInfo::CODE_TARGET); |
4788 } | 4315 } |
4789 } | 4316 } |
4790 | 4317 |
4791 | 4318 |
4792 #undef __ | 4319 #undef __ |
4793 | 4320 |
4794 } } // namespace v8::internal | 4321 } } // namespace v8::internal |
4795 | 4322 |
4796 #endif // V8_TARGET_ARCH_ARM | 4323 #endif // V8_TARGET_ARCH_ARM |