OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 1035 matching lines...)
1046 __ orr(fval, | 1046 __ orr(fval, |
1047 fval, | 1047 fval, |
1048 Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); | 1048 Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); |
1049 | 1049 |
1050 __ bind(&done); | 1050 __ bind(&done); |
1051 __ str(fval, MemOperand(dst, wordoffset, LSL, 2)); | 1051 __ str(fval, MemOperand(dst, wordoffset, LSL, 2)); |
1052 } | 1052 } |
1053 } | 1053 } |
1054 | 1054 |
1055 | 1055 |
| 1056 // Convert unsigned integer with specified number of leading zeroes in binary |
| 1057 // representation to IEEE 754 double. |
| 1058 // Integer to convert is passed in register hiword. |
| 1059 // Resulting double is returned in registers hiword:loword. |
| 1060 // This function does not work correctly for 0. |
| 1061 static void GenerateUInt2Double(MacroAssembler* masm, |
| 1062 Register hiword, |
| 1063 Register loword, |
| 1064 Register scratch, |
| 1065 int leading_zeroes) { |
| 1066 const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; |
| 1067 const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; |
| 1068 |
| 1069 const int mantissa_shift_for_hi_word = |
| 1070 meaningful_bits - HeapNumber::kMantissaBitsInTopWord; |
| 1071 |
| 1072 const int mantissa_shift_for_lo_word = |
| 1073 kBitsPerInt - mantissa_shift_for_hi_word; |
| 1074 |
| 1075 __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); |
| 1076 if (mantissa_shift_for_hi_word > 0) { |
| 1077 __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); |
| 1078 __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word)); |
| 1079 } else { |
| 1080 __ mov(loword, Operand(0, RelocInfo::NONE)); |
| 1081 __ orr(hiword, scratch, Operand(hiword, LSL, -mantissa_shift_for_hi_word)); |
| 1082 } |
| 1083 |
| 1084 // If the least significant bit of the biased exponent was not 1, it |
| 1085 // was corrupted by the most significant bit of the mantissa; fix it. |
| 1086 if (!(biased_exponent & 1)) { |
| 1087 __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); |
| 1088 } |
| 1089 } |
| 1090 |
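A host-side model may help when reviewing GenerateUInt2Double. The sketch below assumes the usual constants (HeapNumber::kExponentBias == 1023, kMantissaBitsInTopWord == 20, kBitsPerInt == 32) and, like the stub, does not handle 0. The helper name, the GCC/Clang __builtin_clz intrinsic, and the check in main are illustrative only, not part of this patch:

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Build the IEEE 754 bit pattern of a nonzero uint32_t whose number of
    // leading zeroes is already known.
    uint64_t UInt2DoubleBits(uint32_t value, int leading_zeroes) {
      assert(value != 0);  // like the stub, 0 is not handled
      const int meaningful_bits = 32 - leading_zeroes - 1;
      const uint64_t biased_exponent = 1023 + meaningful_bits;
      // Drop the implicit leading 1, then left-align what remains of the
      // value in the 52-bit mantissa field.
      const uint32_t mantissa_mask = (1u << meaningful_bits) - 1;
      const uint64_t mantissa =
          (uint64_t)(value & mantissa_mask) << (52 - meaningful_bits);
      return (biased_exponent << 52) | mantissa;
    }

    int main() {
      for (uint32_t v : {1u, 2u, 5u, 0x40000001u, 0xFFFFFFFFu}) {
        uint64_t bits = UInt2DoubleBits(v, __builtin_clz(v));
        double d;
        std::memcpy(&d, &bits, sizeof d);
        assert(d == (double)v);
        std::printf("%u -> %.17g\n", v, d);
      }
    }

The model also shows why the stub needs the final bic: the emitted code never masks off the integer's leading 1 bit, which in both shift branches lands exactly on the exponent's least significant bit. When the biased exponent is odd the stray bit is absorbed by the orr; when it is even, the bic clears it.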
| 1091 |
1056 #undef __ | 1092 #undef __ |
1057 #define __ ACCESS_MASM(masm()) | 1093 #define __ ACCESS_MASM(masm()) |
1058 | 1094 |
1059 | 1095 |
1060 Register StubCompiler::CheckPrototypes(Handle<JSObject> object, | 1096 Register StubCompiler::CheckPrototypes(Handle<JSObject> object, |
1061 Register object_reg, | 1097 Register object_reg, |
1062 Handle<JSObject> holder, | 1098 Handle<JSObject> holder, |
1063 Register holder_reg, | 1099 Register holder_reg, |
1064 Register scratch1, | 1100 Register scratch1, |
1065 Register scratch2, | 1101 Register scratch2, |
(...skipping 2210 matching lines...)
3276 | 3312 |
3277 | 3313 |
3278 Handle<Code> KeyedLoadStubCompiler::CompileLoadElement( | 3314 Handle<Code> KeyedLoadStubCompiler::CompileLoadElement( |
3279 Handle<Map> receiver_map) { | 3315 Handle<Map> receiver_map) { |
3280 // ----------- S t a t e ------------- | 3316 // ----------- S t a t e ------------- |
3281 // -- lr : return address | 3317 // -- lr : return address |
3282 // -- r0 : key | 3318 // -- r0 : key |
3283 // -- r1 : receiver | 3319 // -- r1 : receiver |
3284 // ----------------------------------- | 3320 // ----------------------------------- |
3285 ElementsKind elements_kind = receiver_map->elements_kind(); | 3321 ElementsKind elements_kind = receiver_map->elements_kind(); |
3286 if (receiver_map->has_fast_elements() || | 3322 Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode(); |
3287 receiver_map->has_external_array_elements()) { | 3323 |
3288 Handle<Code> stub = KeyedLoadFastElementStub( | 3324 __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK); |
3289 receiver_map->instance_type() == JS_ARRAY_TYPE, | |
3290 elements_kind).GetCode(); | |
3291 __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK); | |
3292 } else { | |
3293 Handle<Code> stub = | |
3294 KeyedLoadDictionaryElementStub().GetCode(); | |
3295 __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK); | |
3296 } | |
3297 | 3325 |
3298 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss(); | 3326 Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss(); |
3299 __ Jump(ic, RelocInfo::CODE_TARGET); | 3327 __ Jump(ic, RelocInfo::CODE_TARGET); |
3300 | 3328 |
3301 // Return the generated code. | 3329 // Return the generated code. |
3302 return GetCode(Code::NORMAL, factory()->empty_string()); | 3330 return GetCode(Code::NORMAL, factory()->empty_string()); |
3303 } | 3331 } |
3304 | 3332 |
3305 | 3333 |
3306 Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic( | 3334 Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic( |
(...skipping 384 matching lines...)
3691 __ TrySmiTag(scratch0, fail, scratch1); | 3719 __ TrySmiTag(scratch0, fail, scratch1); |
3692 __ mov(key, scratch0); | 3720 __ mov(key, scratch0); |
3693 __ bind(&key_ok); | 3721 __ bind(&key_ok); |
3694 } else { | 3722 } else { |
3695 // Check that the key is a smi. | 3723 // Check that the key is a smi. |
3696 __ JumpIfNotSmi(key, fail); | 3724 __ JumpIfNotSmi(key, fail); |
3697 } | 3725 } |
3698 } | 3726 } |
3699 | 3727 |
3700 | 3728 |
| 3729 void KeyedLoadStubCompiler::GenerateLoadExternalArray( |
| 3730 MacroAssembler* masm, |
| 3731 ElementsKind elements_kind) { |
| 3732 // ---------- S t a t e -------------- |
| 3733 // -- lr : return address |
| 3734 // -- r0 : key |
| 3735 // -- r1 : receiver |
| 3736 // ----------------------------------- |
| 3737 Label miss_force_generic, slow, failed_allocation; |
| 3738 |
| 3739 Register key = r0; |
| 3740 Register receiver = r1; |
| 3741 |
| 3742 // This stub is meant to be tail-jumped to; the receiver must already |
| 3743 // have been verified by the caller to not be a smi. |
| 3744 |
| 3745 // Check that the key is a smi or a heap number convertible to a smi. |
| 3746 GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic); |
| 3747 |
| 3748 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 3749 // r3: elements array |
| 3750 |
| 3751 // Check that the index is in range. |
| 3752 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); |
| 3753 __ cmp(key, ip); |
| 3754 // Unsigned comparison catches both negative and too-large values. |
| 3755 __ b(hs, &miss_force_generic); |
| 3756 |
| 3757 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); |
| 3758 // r3: base pointer of external storage |
| 3759 |
| 3760 // We do not untag the smi key; instead we work with it as if it |
| 3761 // were premultiplied by 2. |
| 3762 STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); |
| 3763 |
| 3764 Register value = r2; |
| 3765 switch (elements_kind) { |
| 3766 case EXTERNAL_BYTE_ELEMENTS: |
| 3767 __ ldrsb(value, MemOperand(r3, key, LSR, 1)); |
| 3768 break; |
| 3769 case EXTERNAL_PIXEL_ELEMENTS: |
| 3770 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
| 3771 __ ldrb(value, MemOperand(r3, key, LSR, 1)); |
| 3772 break; |
| 3773 case EXTERNAL_SHORT_ELEMENTS: |
| 3774 __ ldrsh(value, MemOperand(r3, key, LSL, 0)); |
| 3775 break; |
| 3776 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
| 3777 __ ldrh(value, MemOperand(r3, key, LSL, 0)); |
| 3778 break; |
| 3779 case EXTERNAL_INT_ELEMENTS: |
| 3780 case EXTERNAL_UNSIGNED_INT_ELEMENTS: |
| 3781 __ ldr(value, MemOperand(r3, key, LSL, 1)); |
| 3782 break; |
| 3783 case EXTERNAL_FLOAT_ELEMENTS: |
| 3784 if (CpuFeatures::IsSupported(VFP2)) { |
| 3785 CpuFeatures::Scope scope(VFP2); |
| 3786 __ add(r2, r3, Operand(key, LSL, 1)); |
| 3787 __ vldr(s0, r2, 0); |
| 3788 } else { |
| 3789 __ ldr(value, MemOperand(r3, key, LSL, 1)); |
| 3790 } |
| 3791 break; |
| 3792 case EXTERNAL_DOUBLE_ELEMENTS: |
| 3793 if (CpuFeatures::IsSupported(VFP2)) { |
| 3794 CpuFeatures::Scope scope(VFP2); |
| 3795 __ add(r2, r3, Operand(key, LSL, 2)); |
| 3796 __ vldr(d0, r2, 0); |
| 3797 } else { |
| 3798 __ add(r4, r3, Operand(key, LSL, 2)); |
| 3799 // r4: pointer to the beginning of the double we want to load. |
| 3800 __ ldr(r2, MemOperand(r4, 0)); |
| 3801 __ ldr(r3, MemOperand(r4, Register::kSizeInBytes)); |
| 3802 } |
| 3803 break; |
| 3804 case FAST_ELEMENTS: |
| 3805 case FAST_SMI_ELEMENTS: |
| 3806 case FAST_DOUBLE_ELEMENTS: |
| 3807 case FAST_HOLEY_ELEMENTS: |
| 3808 case FAST_HOLEY_SMI_ELEMENTS: |
| 3809 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 3810 case DICTIONARY_ELEMENTS: |
| 3811 case NON_STRICT_ARGUMENTS_ELEMENTS: |
| 3812 UNREACHABLE(); |
| 3813 break; |
| 3814 } |
| 3815 |
| 3816 // For integer array types: |
| 3817 // r2: value |
| 3818 // For float array type: |
| 3819 // s0: value (if VFP2 is supported) |
| 3820 // r2: value (if VFP2 is not supported) |
| 3821 // For double array type: |
| 3822 // d0: value (if VFP2 is supported) |
| 3823 // r2/r3: value (if VFP2 is not supported) |
| 3824 |
| 3825 if (elements_kind == EXTERNAL_INT_ELEMENTS) { |
| 3826 // For the Int array type, we need to check whether the value can be |
| 3827 // represented as a smi. If not, we need to convert it to a |
| 3828 // HeapNumber. |
| 3829 Label box_int; |
| 3830 __ cmp(value, Operand(0xC0000000)); |
| 3831 __ b(mi, &box_int); |
| 3832 // Tag integer as smi and return it. |
| 3833 __ mov(r0, Operand(value, LSL, kSmiTagSize)); |
| 3834 __ Ret(); |
| 3835 |
| 3836 __ bind(&box_int); |
| 3837 if (CpuFeatures::IsSupported(VFP2)) { |
| 3838 CpuFeatures::Scope scope(VFP2); |
| 3839 // Allocate a HeapNumber for the result and perform int-to-double |
| 3840 // conversion. Don't touch r0 or r1 as they are needed if allocation |
| 3841 // fails. |
| 3842 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 3843 |
| 3844 __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT); |
| 3845 // Now we can use r0 for the result as the key is not needed any more. |
| 3846 __ add(r0, r5, Operand(kHeapObjectTag)); |
| 3847 __ vmov(s0, value); |
| 3848 __ vcvt_f64_s32(d0, s0); |
| 3849 __ vstr(d0, r5, HeapNumber::kValueOffset); |
| 3850 __ Ret(); |
| 3851 } else { |
| 3852 // Allocate a HeapNumber for the result and perform int-to-double |
| 3853 // conversion. Don't touch r0 or r1 as they are needed if allocation |
| 3854 // fails. |
| 3855 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 3856 __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT); |
| 3857 // Now we can use r0 for the result as the key is not needed any more. |
| 3858 __ mov(r0, r5); |
| 3859 Register dst_mantissa = r1; |
| 3860 Register dst_exponent = r3; |
| 3861 FloatingPointHelper::Destination dest = |
| 3862 FloatingPointHelper::kCoreRegisters; |
| 3863 FloatingPointHelper::ConvertIntToDouble(masm, |
| 3864 value, |
| 3865 dest, |
| 3866 d0, |
| 3867 dst_mantissa, |
| 3868 dst_exponent, |
| 3869 r9, |
| 3870 s0); |
| 3871 __ str(dst_mantissa, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); |
| 3872 __ str(dst_exponent, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
| 3873 __ Ret(); |
| 3874 } |
| 3875 } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) { |
| 3876 // The test is different for unsigned int values. Since the value must |
| 3877 // be in the range of a positive smi, neither of the top two bits may |
| 3878 // be set. |
| 3879 if (CpuFeatures::IsSupported(VFP2)) { |
| 3880 CpuFeatures::Scope scope(VFP2); |
| 3881 Label box_int, done; |
| 3882 __ tst(value, Operand(0xC0000000)); |
| 3883 __ b(ne, &box_int); |
| 3884 // Tag integer as smi and return it. |
| 3885 __ mov(r0, Operand(value, LSL, kSmiTagSize)); |
| 3886 __ Ret(); |
| 3887 |
| 3888 __ bind(&box_int); |
| 3889 __ vmov(s0, value); |
| 3890 // Allocate a HeapNumber for the result and perform int-to-double |
| 3891 // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all |
| 3892 // registers, including when jumping due to exhausted young space. |
| 3893 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 3894 __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); |
| 3895 |
| 3896 __ vcvt_f64_u32(d0, s0); |
| 3897 __ vstr(d0, r2, HeapNumber::kValueOffset); |
| 3898 |
| 3899 __ add(r0, r2, Operand(kHeapObjectTag)); |
| 3900 __ Ret(); |
| 3901 } else { |
| 3902 // Check whether unsigned integer fits into smi. |
| 3903 Label box_int_0, box_int_1, done; |
| 3904 __ tst(value, Operand(0x80000000)); |
| 3905 __ b(ne, &box_int_0); |
| 3906 __ tst(value, Operand(0x40000000)); |
| 3907 __ b(ne, &box_int_1); |
| 3908 // Tag integer as smi and return it. |
| 3909 __ mov(r0, Operand(value, LSL, kSmiTagSize)); |
| 3910 __ Ret(); |
| 3911 |
| 3912 Register hiword = value; // r2. |
| 3913 Register loword = r3; |
| 3914 |
| 3915 __ bind(&box_int_0); |
| 3916 // Integer does not have leading zeros. |
| 3917 GenerateUInt2Double(masm, hiword, loword, r4, 0); |
| 3918 __ b(&done); |
| 3919 |
| 3920 __ bind(&box_int_1); |
| 3921 // Integer has one leading zero. |
| 3922 GenerateUInt2Double(masm, hiword, loword, r4, 1); |
| 3923 |
| 3924 |
| 3925 __ bind(&done); |
| 3926 // Integer was converted to double in registers hiword:loword. |
| 3927 // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber |
| 3928 // clobbers all registers, including when jumping due to exhausted |
| 3929 // young space. |
| 3930 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 3931 __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT); |
| 3932 |
| 3933 __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); |
| 3934 __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); |
| 3935 |
| 3936 __ mov(r0, r4); |
| 3937 __ Ret(); |
| 3938 } |
| 3939 } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| 3940 // For the floating-point array type, we need to always allocate a |
| 3941 // HeapNumber. |
| 3942 if (CpuFeatures::IsSupported(VFP2)) { |
| 3943 CpuFeatures::Scope scope(VFP2); |
| 3944 // Allocate a HeapNumber for the result. Don't use r0 and r1 as |
| 3945 // AllocateHeapNumber clobbers all registers, including when jumping |
| 3946 // due to exhausted young space. |
| 3947 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 3948 __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); |
| 3949 __ vcvt_f64_f32(d0, s0); |
| 3950 __ vstr(d0, r2, HeapNumber::kValueOffset); |
| 3951 |
| 3952 __ add(r0, r2, Operand(kHeapObjectTag)); |
| 3953 __ Ret(); |
| 3954 } else { |
| 3955 // Allocate a HeapNumber for the result. Don't use r0 and r1 as |
| 3956 // AllocateHeapNumber clobbers all registers, including when jumping |
| 3957 // due to exhausted young space. |
| 3958 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 3959 __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT); |
| 3960 // VFP is not available; do manual single-to-double conversion. |
| 3961 |
| 3962 // r2: floating point value (binary32) |
| 3963 // r3: heap number for result |
| 3964 |
| 3965 // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to |
| 3966 // the slow case from here. |
| 3967 __ and_(r0, value, Operand(kBinary32MantissaMask)); |
| 3968 |
| 3969 // Extract exponent to r1. OK to clobber r1 now as there are no jumps to |
| 3970 // the slow case from here. |
| 3971 __ mov(r1, Operand(value, LSR, kBinary32MantissaBits)); |
| 3972 __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); |
| 3973 |
| 3974 Label exponent_rebiased; |
| 3975 __ teq(r1, Operand(0x00)); |
| 3976 __ b(eq, &exponent_rebiased); |
| 3977 |
| 3978 __ teq(r1, Operand(0xff)); |
| 3979 __ mov(r1, Operand(0x7ff), LeaveCC, eq); |
| 3980 __ b(eq, &exponent_rebiased); |
| 3981 |
| 3982 // Rebias exponent. |
| 3983 __ add(r1, |
| 3984 r1, |
| 3985 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); |
| 3986 |
| 3987 __ bind(&exponent_rebiased); |
| 3988 __ and_(r2, value, Operand(kBinary32SignMask)); |
| 3989 value = no_reg; |
| 3990 __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord)); |
| 3991 |
| 3992 // Shift mantissa. |
| 3993 static const int kMantissaShiftForHiWord = |
| 3994 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; |
| 3995 |
| 3996 static const int kMantissaShiftForLoWord = |
| 3997 kBitsPerInt - kMantissaShiftForHiWord; |
| 3998 |
| 3999 __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord)); |
| 4000 __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord)); |
| 4001 |
| 4002 __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset)); |
| 4003 __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); |
| 4004 |
| 4005 __ mov(r0, r3); |
| 4006 __ Ret(); |
| 4007 } |
| 4008 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
| 4009 if (CpuFeatures::IsSupported(VFP2)) { |
| 4010 CpuFeatures::Scope scope(VFP2); |
| 4011 // Allocate a HeapNumber for the result. Don't use r0 and r1 as |
| 4012 // AllocateHeapNumber clobbers all registers, including when jumping |
| 4013 // due to exhausted young space. |
| 4014 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 4015 __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); |
| 4016 __ vstr(d0, r2, HeapNumber::kValueOffset); |
| 4017 |
| 4018 __ add(r0, r2, Operand(kHeapObjectTag)); |
| 4019 __ Ret(); |
| 4020 } else { |
| 4021 // Allocate a HeapNumber for the result. Don't use r0 and r1 as |
| 4022 // AllocateHeapNumber clobbers all registers, including when jumping |
| 4023 // due to exhausted young space. |
| 4024 __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex); |
| 4025 __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT); |
| 4026 |
| 4027 __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); |
| 4028 __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset)); |
| 4029 __ mov(r0, r4); |
| 4030 __ Ret(); |
| 4031 } |
| 4032 |
| 4033 } else { |
| 4034 // Tag integer as smi and return it. |
| 4035 __ mov(r0, Operand(value, LSL, kSmiTagSize)); |
| 4036 __ Ret(); |
| 4037 } |
| 4038 |
| 4039 // Slow case, key and receiver still in r0 and r1. |
| 4040 __ bind(&slow); |
| 4041 __ IncrementCounter( |
| 4042 masm->isolate()->counters()->keyed_load_external_array_slow(), |
| 4043 1, r2, r3); |
| 4044 |
| 4045 // ---------- S t a t e -------------- |
| 4046 // -- lr : return address |
| 4047 // -- r0 : key |
| 4048 // -- r1 : receiver |
| 4049 // ----------------------------------- |
| 4050 |
| 4051 __ Push(r1, r0); |
| 4052 |
| 4053 __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); |
| 4054 |
| 4055 __ bind(&miss_force_generic); |
| 4056 Handle<Code> stub = |
| 4057 masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); |
| 4058 __ Jump(stub, RelocInfo::CODE_TARGET); |
| 4059 } |
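Reviewer note on the address modes in the switch above (key LSR 1 for byte elements, LSL 0 for shorts, LSL 1 for words): because a smi is the index shifted left by one, the tagged key can be rescaled to a byte offset with a single shift, with no separate untagging step. A minimal sketch of the same arithmetic, assuming kSmiTag == 0 and kSmiTagSize == 1 as asserted above; the helper name is hypothetical:

    #include <cstdint>

    // Byte offset of element `tagged_key` in an array whose elements are
    // 1 << log2_size bytes wide, computed directly from the smi-tagged key.
    uintptr_t ExternalElementOffset(uint32_t tagged_key, int log2_size) {
      const int shift = log2_size - 1;  // bytes: -1, shorts: 0, ints: +1
      return shift >= 0 ? (uintptr_t)tagged_key << shift
                        : (uintptr_t)tagged_key >> -shift;
    }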
| 4060 |
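The two smi range tests above are also worth spelling out. For EXTERNAL_INT_ELEMENTS, cmp value, #0xC0000000 subtracts -2^30, so the N flag afterwards is bit 31 of value + 2^30, and "mi" means the value is outside the 31-bit smi range; for EXTERNAL_UNSIGNED_INT_ELEMENTS the result must additionally be a positive smi, so neither of the top two bits may be set. A sketch of both predicates (function names assumed):

    #include <cstdint>

    // Signed case: a 32-bit integer fits in a 31-bit smi exactly when its
    // top two bits agree, i.e. when adding 2^30 leaves bit 31 clear.
    bool FitsInSmi(int32_t value) {
      return ((uint32_t)value + 0x40000000u) < 0x80000000u;
    }

    // Unsigned case: the value must also be non-negative as a smi, so both
    // top bits must be clear (what "tst value, #0xC0000000" checks).
    bool FitsInPositiveSmi(uint32_t value) {
      return (value & 0xC0000000u) == 0;
    }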
| 4061 |
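Finally, the non-VFP EXTERNAL_FLOAT_ELEMENTS path widens binary32 to binary64 by hand: split the word into sign, exponent, and mantissa; pass exponents 0 and 0xFF straight through (zero and Inf/NaN); rebias everything else from 127 to 1023; and left-align the 23 mantissa bits in the 52-bit field. A bit-level sketch of the same transformation (function name assumed; like the stub, it passes a zero exponent through unchanged, so binary32 denormals are not rescaled):

    #include <cstdint>

    uint64_t Float32BitsToFloat64Bits(uint32_t f) {
      const uint32_t mantissa = f & 0x007FFFFFu;    // kBinary32MantissaMask
      const uint32_t exponent = (f >> 23) & 0xFFu;  // kBinary32MantissaBits
      const uint64_t sign = (uint64_t)(f & 0x80000000u) << 32;
      uint64_t exp64;
      if (exponent == 0) {
        exp64 = 0;                     // zero (and, as in the stub, denormals)
      } else if (exponent == 0xFF) {
        exp64 = 0x7FF;                 // infinity and NaN
      } else {
        exp64 = exponent - 127u + 1023u;  // -kBinary32ExponentBias + 1023
      }
      // The 23 mantissa bits land at the top of the 52-bit field: shift by 29.
      return sign | (exp64 << 52) | ((uint64_t)mantissa << 29);
    }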
3701 void KeyedStoreStubCompiler::GenerateStoreExternalArray( | 4062 void KeyedStoreStubCompiler::GenerateStoreExternalArray( |
3702 MacroAssembler* masm, | 4063 MacroAssembler* masm, |
3703 ElementsKind elements_kind) { | 4064 ElementsKind elements_kind) { |
3704 // ---------- S t a t e -------------- | 4065 // ---------- S t a t e -------------- |
3705 // -- r0 : value | 4066 // -- r0 : value |
3706 // -- r1 : key | 4067 // -- r1 : key |
3707 // -- r2 : receiver | 4068 // -- r2 : receiver |
3708 // -- lr : return address | 4069 // -- lr : return address |
3709 // ----------------------------------- | 4070 // ----------------------------------- |
3710 Label slow, check_heap_number, miss_force_generic; | 4071 Label slow, check_heap_number, miss_force_generic; |
(...skipping 324 matching lines...)
4035 // -- r0 : key | 4396 // -- r0 : key |
4036 // -- r1 : receiver | 4397 // -- r1 : receiver |
4037 // ----------------------------------- | 4398 // ----------------------------------- |
4038 | 4399 |
4039 Handle<Code> miss_ic = | 4400 Handle<Code> miss_ic = |
4040 masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); | 4401 masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric(); |
4041 __ Jump(miss_ic, RelocInfo::CODE_TARGET); | 4402 __ Jump(miss_ic, RelocInfo::CODE_TARGET); |
4042 } | 4403 } |
4043 | 4404 |
4044 | 4405 |
| 4406 void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) { |
| 4407 // ----------- S t a t e ------------- |
| 4408 // -- lr : return address |
| 4409 // -- r0 : key |
| 4410 // -- r1 : receiver |
| 4411 // ----------------------------------- |
| 4412 Label miss_force_generic; |
| 4413 |
| 4414 // This stub is meant to be tail-jumped to; the receiver must already |
| 4415 // have been verified by the caller to not be a smi. |
| 4416 |
| 4417 // Check that the key is a smi or a heap number convertible to a smi. |
| 4418 GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic); |
| 4419 |
| 4420 // Get the elements array. |
| 4421 __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); |
| 4422 __ AssertFastElements(r2); |
| 4423 |
| 4424 // Check that the key is within bounds. |
| 4425 __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset)); |
| 4426 __ cmp(r0, Operand(r3)); |
| 4427 __ b(hs, &miss_force_generic); |
| 4428 |
| 4429 // Load the result and make sure it's not the hole. |
| 4430 __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 4431 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); |
| 4432 __ ldr(r4, |
| 4433 MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); |
| 4434 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 4435 __ cmp(r4, ip); |
| 4436 __ b(eq, &miss_force_generic); |
| 4437 __ mov(r0, r4); |
| 4438 __ Ret(); |
| 4439 |
| 4440 __ bind(&miss_force_generic); |
| 4441 Handle<Code> stub = |
| 4442 masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); |
| 4443 __ Jump(stub, RelocInfo::CODE_TARGET); |
| 4444 } |
| 4445 |
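The fast-element load above boils down to a bounds check plus a hole check, with both key and length kept smi-tagged: an unsigned compare of the tagged values orders them the same way and rejects negative keys for free, since a negative smi has bit 31 set. A schematic equivalent (the struct and the hole sentinel are stand-ins, not V8 types):

    #include <cstdint>

    struct FixedArrayRef {
      uint32_t tagged_length;  // smi: length << 1
      const intptr_t* slots;
    };
    const intptr_t kTheHoleStandIn = static_cast<intptr_t>(-1);  // placeholder

    bool TryFastLoad(const FixedArrayRef& a, uint32_t tagged_key,
                     intptr_t* out) {
      if (tagged_key >= a.tagged_length) return false;  // miss_force_generic
      const intptr_t v = a.slots[tagged_key >> 1];      // untag to index
      if (v == kTheHoleStandIn) return false;           // holes also miss
      *out = v;
      return true;
    }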
| 4446 |
| 4447 void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement( |
| 4448 MacroAssembler* masm) { |
| 4449 // ----------- S t a t e ------------- |
| 4450 // -- lr : return address |
| 4451 // -- r0 : key |
| 4452 // -- r1 : receiver |
| 4453 // ----------------------------------- |
| 4454 Label miss_force_generic, slow_allocate_heapnumber; |
| 4455 |
| 4456 Register key_reg = r0; |
| 4457 Register receiver_reg = r1; |
| 4458 Register elements_reg = r2; |
| 4459 Register heap_number_reg = r2; |
| 4460 Register indexed_double_offset = r3; |
| 4461 Register scratch = r4; |
| 4462 Register scratch2 = r5; |
| 4463 Register scratch3 = r6; |
| 4464 Register heap_number_map = r7; |
| 4465 |
| 4466 // This stub is meant to be tail-jumped to; the receiver must already |
| 4467 // have been verified by the caller to not be a smi. |
| 4468 |
| 4469 // Check that the key is a smi or a heap number convertible to a smi. |
| 4470 GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic); |
| 4471 |
| 4472 // Get the elements array. |
| 4473 __ ldr(elements_reg, |
| 4474 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); |
| 4475 |
| 4476 // Check that the key is within bounds. |
| 4477 __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); |
| 4478 __ cmp(key_reg, Operand(scratch)); |
| 4479 __ b(hs, &miss_force_generic); |
| 4480 |
| 4481 // Load the upper word of the double in the fixed array; test for the hole. |
| 4482 __ add(indexed_double_offset, elements_reg, |
| 4483 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); |
| 4484 uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32); |
| 4485 __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset)); |
| 4486 __ cmp(scratch, Operand(kHoleNanUpper32)); |
| 4487 __ b(&miss_force_generic, eq); |
| 4488 |
| 4489 // Non-NaN. Allocate a new heap number and copy the double value into it. |
| 4490 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 4491 __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3, |
| 4492 heap_number_map, &slow_allocate_heapnumber, TAG_RESULT); |
| 4493 |
| 4494 // No need to reload the upper 32 bits of the double; they are already |
| 4495 // in scratch. |
| 4496 __ str(scratch, FieldMemOperand(heap_number_reg, |
| 4497 HeapNumber::kExponentOffset)); |
| 4498 __ ldr(scratch, FieldMemOperand(indexed_double_offset, |
| 4499 FixedArray::kHeaderSize)); |
| 4500 __ str(scratch, FieldMemOperand(heap_number_reg, |
| 4501 HeapNumber::kMantissaOffset)); |
| 4502 |
| 4503 __ mov(r0, heap_number_reg); |
| 4504 __ Ret(); |
| 4505 |
| 4506 __ bind(&slow_allocate_heapnumber); |
| 4507 Handle<Code> slow_ic = |
| 4508 masm->isolate()->builtins()->KeyedLoadIC_Slow(); |
| 4509 __ Jump(slow_ic, RelocInfo::CODE_TARGET); |
| 4510 |
| 4511 __ bind(&miss_force_generic); |
| 4512 Handle<Code> miss_ic = |
| 4513 masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); |
| 4514 __ Jump(miss_ic, RelocInfo::CODE_TARGET); |
| 4515 } |
| 4516 |
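GenerateLoadFastDoubleElement only ever compares the upper 32 bits of the element against kHoleNanUpper32: the hole is one reserved NaN bit pattern, and its top word alone identifies it, so the lower word never needs to be loaded for the test. A sketch of the check; the constant's value here is an assumption for illustration, not taken from this patch:

    #include <cstdint>
    #include <cstring>

    // Assumed upper word of the reserved hole NaN, for illustration only.
    const uint32_t kAssumedHoleNanUpper32 = 0x7FF7FFFFu;

    bool IsHole(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return (uint32_t)(bits >> 32) == kAssumedHoleNanUpper32;
    }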
| 4517 |
4045 void KeyedStoreStubCompiler::GenerateStoreFastElement( | 4518 void KeyedStoreStubCompiler::GenerateStoreFastElement( |
4046 MacroAssembler* masm, | 4519 MacroAssembler* masm, |
4047 bool is_js_array, | 4520 bool is_js_array, |
4048 ElementsKind elements_kind, | 4521 ElementsKind elements_kind, |
4049 KeyedAccessGrowMode grow_mode) { | 4522 KeyedAccessGrowMode grow_mode) { |
4050 // ----------- S t a t e ------------- | 4523 // ----------- S t a t e ------------- |
4051 // -- r0 : value | 4524 // -- r0 : value |
4052 // -- r1 : key | 4525 // -- r1 : key |
4053 // -- r2 : receiver | 4526 // -- r2 : receiver |
4054 // -- lr : return address | 4527 // -- lr : return address |
(...skipping 317 matching lines...)
4372 __ Jump(ic_slow, RelocInfo::CODE_TARGET); | 4845 __ Jump(ic_slow, RelocInfo::CODE_TARGET); |
4373 } | 4846 } |
4374 } | 4847 } |
4375 | 4848 |
4376 | 4849 |
4377 #undef __ | 4850 #undef __ |
4378 | 4851 |
4379 } } // namespace v8::internal | 4852 } } // namespace v8::internal |
4380 | 4853 |
4381 #endif // V8_TARGET_ARCH_ARM | 4854 #endif // V8_TARGET_ARCH_ARM |