Chromium Code Reviews

Unified Diff: src/mips/stub-cache-mips.cc

Issue 11801002: MIPS: Re-land Crankshaft-generated KeyedLoad stubs. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 11 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 1036 matching lines...)
 
     __ bind(&done);
 
     __ sll(scratch1, wordoffset, 2);
     __ addu(scratch1, dst, scratch1);
     __ sw(fval, MemOperand(scratch1, 0));
   }
 }
 
 
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This function does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
-                                Register hiword,
-                                Register loword,
-                                Register scratch,
-                                int leading_zeroes) {
-  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
-  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
-  const int mantissa_shift_for_hi_word =
-      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-
-  const int mantissa_shift_for_lo_word =
-      kBitsPerInt - mantissa_shift_for_hi_word;
-
-  __ li(scratch, biased_exponent << HeapNumber::kExponentShift);
-  if (mantissa_shift_for_hi_word > 0) {
-    __ sll(loword, hiword, mantissa_shift_for_lo_word);
-    __ srl(hiword, hiword, mantissa_shift_for_hi_word);
-    __ or_(hiword, scratch, hiword);
-  } else {
-    __ mov(loword, zero_reg);
-    __ sll(hiword, hiword, mantissa_shift_for_hi_word);
-    __ or_(hiword, scratch, hiword);
-  }
-
-  // If least significant bit of biased exponent was not 1 it was corrupted
-  // by most significant bit of mantissa so we should fix that.
-  if (!(biased_exponent & 1)) {
-    __ li(scratch, 1 << HeapNumber::kExponentShift);
-    __ nor(scratch, scratch, scratch);
-    __ and_(hiword, hiword, scratch);
-  }
-}
-
-
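Note: the deleted GenerateUInt2Double builds the IEEE 754 bit pattern by hand: the value's top set bit becomes the implicit leading 1, so the biased exponent is kExponentBias plus that bit's position, and the remaining bits are shifted into the mantissa split across the hi:lo words. A hypothetical host-side C++ sketch of the same bit math (constants inlined; value != 0, as the comment above requires):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Sketch of GenerateUInt2Double's construction for a 32-bit value with
    // a known number of leading zeros. The double's bias is 1023 and the hi
    // word holds 20 mantissa bits, as for HeapNumber.
    static uint64_t UInt2DoubleBits(uint32_t value, int leading_zeroes) {
      const int meaningful_bits = 32 - leading_zeroes - 1;
      const uint32_t biased_exponent = 1023 + meaningful_bits;
      const int hi_shift = meaningful_bits - 20;  // kMantissaBitsInTopWord
      uint32_t hi, lo;
      if (hi_shift > 0) {
        lo = value << (32 - hi_shift);
        hi = (biased_exponent << 20) | (value >> hi_shift);
      } else {
        lo = 0;
        hi = (biased_exponent << 20) | (value << -hi_shift);
      }
      // The value's top set bit landed on the exponent's least significant
      // bit; if that bit should be 0, clear it (the stub's final fix-up).
      if (!(biased_exponent & 1)) hi &= ~(1u << 20);
      return ((uint64_t)hi << 32) | lo;
    }

    int main() {
      uint32_t v = 0x80000001u;  // no leading zeros
      uint64_t bits = UInt2DoubleBits(v, 0);
      double d;
      std::memcpy(&d, &bits, sizeof d);
      std::printf("%u -> %.0f\n", v, d);  // 2147483649 -> 2147483649
      return 0;
    }
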
 #undef __
 #define __ ACCESS_MASM(masm())
 
 
 Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
                                        Register object_reg,
                                        Handle<JSObject> holder,
                                        Register holder_reg,
                                        Register scratch1,
                                        Register scratch2,
(...skipping 2202 matching lines...)
 
 
 Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
     Handle<Map> receiver_map) {
   // ----------- S t a t e -------------
   //  -- ra    : return address
   //  -- a0    : key
   //  -- a1    : receiver
   // -----------------------------------
   ElementsKind elements_kind = receiver_map->elements_kind();
-  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
-  __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
+  if (receiver_map->has_fast_elements() ||
+      receiver_map->has_external_array_elements()) {
+    Handle<Code> stub = KeyedLoadFastElementStub(
+        receiver_map->instance_type() == JS_ARRAY_TYPE,
+        elements_kind).GetCode();
+    __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
+  } else {
+    Handle<Code> stub =
+        KeyedLoadDictionaryElementStub().GetCode();
+    __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
+  }
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
   // Return the generated code.
   return GetCode(Code::NORMAL, factory()->empty_string());
 }
 
 
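Note: the rewritten CompileLoadElement is the heart of this patch: fast and external-array receivers now dispatch to a Crankshaft-generated KeyedLoadFastElementStub, while dictionary receivers get KeyedLoadDictionaryElementStub. DispatchMap emits a map check that tail-jumps to the stub on a match and falls through otherwise (here, into the KeyedLoadIC_Miss jump). A rough host-side C++ analogue of that pattern (all names invented for illustration):

    // Map-dispatch pattern: tail-call the specialized handler when the
    // receiver's hidden class ("map") matches, else take the generic path.
    struct Map { int id; };
    struct Object { const Map* map; };
    using Handler = int (*)(const Object&);

    int KeyedLoadMiss(const Object&) { return -1; }  // generic miss path

    int DispatchMap(const Object& receiver, const Map* expected,
                    Handler stub) {
      if (receiver.map == expected) return stub(receiver);  // "tail-jump"
      return KeyedLoadMiss(receiver);                       // fall-through
    }
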
 Handle<Code> KeyedLoadStubCompiler::CompileLoadPolymorphic(
(...skipping 391 matching lines...)
     __ SmiTagCheckOverflow(key, scratch0, scratch1);
     __ BranchOnOverflow(fail, scratch1);
     __ bind(&key_ok);
   } else {
     // Check that the key is a smi.
     __ JumpIfNotSmi(key, fail);
   }
 }
 
 
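Note: GenerateSmiKeyCheck above funnels heap-number keys through SmiTagCheckOverflow/BranchOnOverflow. A hypothetical host-side sketch of that tag-and-check, assuming 32-bit smis with kSmiTag == 0 and kSmiTagSize == 1:

    #include <cstdint>

    // Tagging doubles the value, so it succeeds exactly when the value
    // fits in 31 signed bits; otherwise the stub takes the fail path.
    static bool SmiTagCheckOverflow(int32_t value, int32_t* tagged) {
      int32_t t = (int32_t)((uint32_t)value << 1);
      if ((t >> 1) != value) return false;  // overflow
      *tagged = t;
      return true;
    }
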
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
-    MacroAssembler* masm,
-    ElementsKind elements_kind) {
-  // ---------- S t a t e --------------
-  //  -- ra    : return address
-  //  -- a0    : key
-  //  -- a1    : receiver
-  // -----------------------------------
-  Label miss_force_generic, slow, failed_allocation;
-
-  Register key = a0;
-  Register receiver = a1;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key, t0, t1, f2, f4, &miss_force_generic);
-
-  __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // a3: elements array
-
-  // Check that the index is in range.
-  __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
-  __ sra(t2, key, kSmiTagSize);
-  // Unsigned comparison catches both negative and too-large values.
-  __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
-
-  __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
-  // a3: base pointer of external storage
-
-  // We are not untagging smi key and instead work with it
-  // as if it was premultiplied by 2.
-  STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
-
-  Register value = a2;
-  switch (elements_kind) {
-    case EXTERNAL_BYTE_ELEMENTS:
-      __ srl(t2, key, 1);
-      __ addu(t3, a3, t2);
-      __ lb(value, MemOperand(t3, 0));
-      break;
-    case EXTERNAL_PIXEL_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      __ srl(t2, key, 1);
-      __ addu(t3, a3, t2);
-      __ lbu(value, MemOperand(t3, 0));
-      break;
-    case EXTERNAL_SHORT_ELEMENTS:
-      __ addu(t3, a3, key);
-      __ lh(value, MemOperand(t3, 0));
-      break;
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      __ addu(t3, a3, key);
-      __ lhu(value, MemOperand(t3, 0));
-      break;
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      __ sll(t2, key, 1);
-      __ addu(t3, a3, t2);
-      __ lw(value, MemOperand(t3, 0));
-      break;
-    case EXTERNAL_FLOAT_ELEMENTS:
-      __ sll(t3, t2, 2);
-      __ addu(t3, a3, t3);
-      if (CpuFeatures::IsSupported(FPU)) {
-        CpuFeatures::Scope scope(FPU);
-        __ lwc1(f0, MemOperand(t3, 0));
-      } else {
-        __ lw(value, MemOperand(t3, 0));
-      }
-      break;
-    case EXTERNAL_DOUBLE_ELEMENTS:
-      __ sll(t2, key, 2);
-      __ addu(t3, a3, t2);
-      if (CpuFeatures::IsSupported(FPU)) {
-        CpuFeatures::Scope scope(FPU);
-        __ ldc1(f0, MemOperand(t3, 0));
-      } else {
-        // t3: pointer to the beginning of the double we want to load.
-        __ lw(a2, MemOperand(t3, 0));
-        __ lw(a3, MemOperand(t3, Register::kSizeInBytes));
-      }
-      break;
-    case FAST_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
-    case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
-    case DICTIONARY_ELEMENTS:
-    case NON_STRICT_ARGUMENTS_ELEMENTS:
-      UNREACHABLE();
-      break;
-  }
-
-  // For integer array types:
-  // a2: value
-  // For float array type:
-  // f0: value (if FPU is supported)
-  // a2: value (if FPU is not supported)
-  // For double array type:
-  // f0: value (if FPU is supported)
-  // a2/a3: value (if FPU is not supported)
-
-  if (elements_kind == EXTERNAL_INT_ELEMENTS) {
-    // For the Int and UnsignedInt array types, we need to see whether
-    // the value can be represented in a Smi. If not, we need to convert
-    // it to a HeapNumber.
-    Label box_int;
-    __ Subu(t3, value, Operand(0xC0000000));  // Non-smi value gives neg result.
-    __ Branch(&box_int, lt, t3, Operand(zero_reg));
-    // Tag integer as smi and return it.
-    __ sll(v0, value, kSmiTagSize);
-    __ Ret();
-
-    __ bind(&box_int);
-
-    if (CpuFeatures::IsSupported(FPU)) {
-      CpuFeatures::Scope scope(FPU);
-      // Allocate a HeapNumber for the result and perform int-to-double
-      // conversion.
-      // The arm version uses a temporary here to save r0, but we don't need to
-      // (a0 is not modified).
-      __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(v0, a3, t0, t1, &slow, DONT_TAG_RESULT);
-      __ mtc1(value, f0);
-      __ cvt_d_w(f0, f0);
-      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
-      __ Addu(v0, v0, kHeapObjectTag);
-      __ Ret();
-    } else {
-      // Allocate a HeapNumber for the result and perform int-to-double
-      // conversion.
-      // The arm version uses a temporary here to save r0, but we don't need to
-      // (a0 is not modified).
-      __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(v0, a3, t0, t1, &slow, TAG_RESULT);
-      Register dst_mantissa = t2;
-      Register dst_exponent = t3;
-      FloatingPointHelper::Destination dest =
-          FloatingPointHelper::kCoreRegisters;
-      FloatingPointHelper::ConvertIntToDouble(masm,
-                                              value,
-                                              dest,
-                                              f0,
-                                              dst_mantissa,
-                                              dst_exponent,
-                                              t1,
-                                              f2);
-      __ sw(dst_mantissa, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
-      __ sw(dst_exponent, FieldMemOperand(v0, HeapNumber::kExponentOffset));
-      __ Ret();
-    }
-  } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
-    // The test is different for unsigned int values. Since we need
-    // the value to be in the range of a positive smi, we can't
-    // handle either of the top two bits being set in the value.
-    if (CpuFeatures::IsSupported(FPU)) {
-      CpuFeatures::Scope scope(FPU);
-      Label pl_box_int;
-      __ And(t2, value, Operand(0xC0000000));
-      __ Branch(&pl_box_int, ne, t2, Operand(zero_reg));
-
-      // It can fit in an Smi.
-      // Tag integer as smi and return it.
-      __ sll(v0, value, kSmiTagSize);
-      __ Ret();
-
-      __ bind(&pl_box_int);
-      // Allocate a HeapNumber for the result and perform int-to-double
-      // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
-      // registers - also when jumping due to exhausted young space.
-      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(v0, t2, t3, t6, &slow, DONT_TAG_RESULT);
-
-      // This is replaced by a macro:
-      // __ mtc1(value, f0);     // LS 32-bits.
-      // __ mtc1(zero_reg, f1);  // MS 32-bits are all zero.
-      // __ cvt_d_l(f0, f0); // Use 64 bit conv to get correct unsigned 32-bit.
-
-      __ Cvt_d_uw(f0, value, f22);
-
-      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
-
-      __ Addu(v0, v0, kHeapObjectTag);
-      __ Ret();
-    } else {
-      // Check whether unsigned integer fits into smi.
-      Label box_int_0, box_int_1, done;
-      __ And(t2, value, Operand(0x80000000));
-      __ Branch(&box_int_0, ne, t2, Operand(zero_reg));
-      __ And(t2, value, Operand(0x40000000));
-      __ Branch(&box_int_1, ne, t2, Operand(zero_reg));
-
-      // Tag integer as smi and return it.
-      __ sll(v0, value, kSmiTagSize);
-      __ Ret();
-
-      Register hiword = value;  // a2.
-      Register loword = a3;
-
-      __ bind(&box_int_0);
-      // Integer does not have leading zeros.
-      GenerateUInt2Double(masm, hiword, loword, t0, 0);
-      __ Branch(&done);
-
-      __ bind(&box_int_1);
-      // Integer has one leading zero.
-      GenerateUInt2Double(masm, hiword, loword, t0, 1);
-
-
-      __ bind(&done);
-      // Integer was converted to double in registers hiword:loword.
-      // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber
-      // clobbers all registers - also when jumping due to exhausted young
-      // space.
-      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(t2, t3, t5, t6, &slow, TAG_RESULT);
-
-      __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
-      __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));
-
-      __ mov(v0, t2);
-      __ Ret();
-    }
-  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-    // For the floating-point array type, we need to always allocate a
-    // HeapNumber.
-    if (CpuFeatures::IsSupported(FPU)) {
-      CpuFeatures::Scope scope(FPU);
-      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(v0, t3, t5, t6, &slow, DONT_TAG_RESULT);
-      // The float (single) value is already in fpu reg f0 (if we use float).
-      __ cvt_d_s(f0, f0);
-      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
-
-      __ Addu(v0, v0, kHeapObjectTag);
-      __ Ret();
-    } else {
-      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(v0, t3, t5, t6, &slow, TAG_RESULT);
-      // FPU is not available, do manual single to double conversion.
-
-      // a2: floating point value (binary32).
-      // v0: heap number for result
-
-      // Extract mantissa to t4.
-      __ And(t4, value, Operand(kBinary32MantissaMask));
-
-      // Extract exponent to t5.
-      __ srl(t5, value, kBinary32MantissaBits);
-      __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
-      Label exponent_rebiased;
-      __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg));
-
-      __ li(t0, 0x7ff);
-      __ Xor(t1, t5, Operand(0xFF));
-      __ Movz(t5, t0, t1);  // Set t5 to 0x7ff only if t5 is equal to 0xff.
-      __ Branch(&exponent_rebiased, eq, t1, Operand(zero_reg));
-
-      // Rebias exponent.
-      __ Addu(t5,
-              t5,
-              Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
-      __ bind(&exponent_rebiased);
-      __ And(a2, value, Operand(kBinary32SignMask));
-      value = no_reg;
-      __ sll(t0, t5, HeapNumber::kMantissaBitsInTopWord);
-      __ or_(a2, a2, t0);
-
-      // Shift mantissa.
-      static const int kMantissaShiftForHiWord =
-          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
-      static const int kMantissaShiftForLoWord =
-          kBitsPerInt - kMantissaShiftForHiWord;
-
-      __ srl(t0, t4, kMantissaShiftForHiWord);
-      __ or_(a2, a2, t0);
-      __ sll(a0, t4, kMantissaShiftForLoWord);
-
-      __ sw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
-      __ sw(a0, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
-      __ Ret();
-    }
-
-  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    if (CpuFeatures::IsSupported(FPU)) {
-      CpuFeatures::Scope scope(FPU);
-      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(v0, t3, t5, t6, &slow, DONT_TAG_RESULT);
-      // The double value is already in f0
-      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
-
-      __ Addu(v0, v0, kHeapObjectTag);
-      __ Ret();
-    } else {
-      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(v0, t3, t5, t6, &slow, TAG_RESULT);
-
-      __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
-      __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
-      __ Ret();
-    }
-
-  } else {
-    // Tag integer as smi and return it.
-    __ sll(v0, value, kSmiTagSize);
-    __ Ret();
-  }
-
-  // Slow case, key and receiver still in a0 and a1.
-  __ bind(&slow);
-  __ IncrementCounter(
-      masm->isolate()->counters()->keyed_load_external_array_slow(),
-      1, a2, a3);
-
-  // ---------- S t a t e --------------
-  //  -- ra    : return address
-  //  -- a0    : key
-  //  -- a1    : receiver
-  // -----------------------------------
-
-  __ Push(a1, a0);
-
-  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> stub =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
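Note: two things in the deleted GenerateLoadExternalArray are worth spelling out. First, the element addressing works on the still-tagged smi key (index premultiplied by 2, per the STATIC_ASSERT), so every element size is one shift away. A hypothetical C++ sketch:

    #include <cstdint>

    // Byte offset of element `index` when only the tagged key
    // (tagged_key == index * 2) is at hand.
    static uint32_t ElementByteOffset(uint32_t tagged_key, int element_size) {
      switch (element_size) {
        case 1:  return tagged_key >> 1;  // srl key, 1
        case 2:  return tagged_key;       // use key as-is
        case 4:  return tagged_key << 1;  // sll key, 1
        case 8:  return tagged_key << 2;  // sll key, 2
        default: return 0;                // unreachable here
      }
    }

Second, the non-FPU EXTERNAL_FLOAT_ELEMENTS path widens binary32 to binary64 in integer registers: re-bias the 8-bit exponent to 11 bits (keeping the all-zeros and all-ones patterns for zero and Inf/NaN, which is what the Movz trick handles) and shift the 23-bit mantissa up to 52 bits. A hypothetical host-side sketch (denormals ignored, as in the stub):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static uint64_t Binary32ToBinary64Bits(uint32_t f) {
      uint64_t sign = (uint64_t)(f >> 31) << 63;
      uint32_t exp = (f >> 23) & 0xFF;
      uint64_t mantissa = (uint64_t)(f & 0x7FFFFF) << (52 - 23);
      uint64_t exp64;
      if (exp == 0) {
        exp64 = 0;                 // zero stays zero
      } else if (exp == 0xFF) {
        exp64 = 0x7FF;             // Inf/NaN keep an all-ones exponent
      } else {
        exp64 = exp - 127 + 1023;  // re-bias: binary32 127 -> binary64 1023
      }
      return sign | (exp64 << 52) | mantissa;
    }

    int main() {
      float f = 3.5f;
      uint32_t b32;
      std::memcpy(&b32, &f, sizeof b32);
      uint64_t b64 = Binary32ToBinary64Bits(b32);
      double d;
      std::memcpy(&d, &b64, sizeof d);
      std::printf("%f -> %f\n", f, d);  // 3.500000 -> 3.500000
      return 0;
    }
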
 void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
   // ---------- S t a t e --------------
   //  -- a0    : value
   //  -- a1    : key
   //  -- a2    : receiver
   //  -- ra    : return address
   // -----------------------------------
 
(...skipping 379 matching lines...)
   //  -- a0    : key
   //  -- a1    : receiver
   // -----------------------------------
 
   Handle<Code> miss_ic =
       masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
   __ Jump(miss_ic, RelocInfo::CODE_TARGET);
 }
 
 
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ra    : return address
-  //  -- a0    : key
-  //  -- a1    : receiver
-  // -----------------------------------
-  Label miss_force_generic;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, a0, t0, t1, f2, f4, &miss_force_generic);
-
-  // Get the elements array.
-  __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
-  __ AssertFastElements(a2);
-
-  // Check that the key is within bounds.
-  __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
-  __ Branch(USE_DELAY_SLOT, &miss_force_generic, hs, a0, Operand(a3));
-
-  // Load the result and make sure it's not the hole.
-  __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t0, t0, a3);
-  __ lw(t0, MemOperand(t0));
-  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
-  __ Branch(&miss_force_generic, eq, t0, Operand(t1));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, t0);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> stub =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
-
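Note: with kSmiTag == 0, both the key and the FixedArray length above are tagged values (index * 2), so the single unsigned compare doubles as the bounds check, and a reserved "hole" object marks missing elements. A hypothetical host-side sketch of the deleted fast-element load:

    #include <cstdint>

    static const intptr_t kTheHole = -1;  // stand-in for the real hole object

    static bool LoadFastElement(const intptr_t* elements,
                                uint32_t tagged_length,
                                uint32_t tagged_key,
                                intptr_t* out) {
      if (tagged_key >= tagged_length) return false;  // miss: out of bounds
      intptr_t value = elements[tagged_key >> 1];     // untag to an index
      if (value == kTheHole) return false;            // miss: the hole
      *out = value;
      return true;
    }
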
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ra    : return address
-  //  -- a0    : key
-  //  -- a1    : receiver
-  // -----------------------------------
-  Label miss_force_generic, slow_allocate_heapnumber;
-
-  Register key_reg = a0;
-  Register receiver_reg = a1;
-  Register elements_reg = a2;
-  Register heap_number_reg = a2;
-  Register indexed_double_offset = a3;
-  Register scratch = t0;
-  Register scratch2 = t1;
-  Register scratch3 = t2;
-  Register heap_number_map = t3;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
-
-  // Get the elements array.
-  __ lw(elements_reg,
-        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
-  // Check that the key is within bounds.
-  __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
-  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
-
-  // Load the upper word of the double in the fixed array and test for NaN.
-  __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
-  __ Addu(indexed_double_offset, elements_reg, Operand(scratch2));
-  uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
-  __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32));
-
-  // Non-NaN. Allocate a new heap number and copy the double value into it.
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
-                        heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
-
-  // Don't need to reload the upper 32 bits of the double, it's already in
-  // scratch.
-  __ sw(scratch, FieldMemOperand(heap_number_reg,
-                                 HeapNumber::kExponentOffset));
-  __ lw(scratch, FieldMemOperand(indexed_double_offset,
-                                 FixedArray::kHeaderSize));
-  __ sw(scratch, FieldMemOperand(heap_number_reg,
-                                 HeapNumber::kMantissaOffset));
-
-  __ mov(v0, heap_number_reg);
-  __ Ret();
-
-  __ bind(&slow_allocate_heapnumber);
-  Handle<Code> slow_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_Slow();
-  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> miss_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
-
-
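Note: in a fast double array the hole is stored as one specific NaN bit pattern, so the deleted stub compares only the upper 32 bits of the element (the word at offset sizeof(kHoleNanLower32) on little-endian MIPS) against kHoleNanUpper32 to tell the hole apart from every real value, including ordinary NaNs. A hypothetical host-side sketch (sentinel value invented for illustration):

    #include <cstdint>
    #include <cstring>

    static const uint32_t kHoleNanUpper32 = 0x7FF7FFFF;  // assumed sentinel

    static bool IsTheHole(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      return (uint32_t)(bits >> 32) == kHoleNanUpper32;
    }
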
 void KeyedStoreStubCompiler::GenerateStoreFastElement(
     MacroAssembler* masm,
     bool is_js_array,
     ElementsKind elements_kind,
     KeyedAccessGrowMode grow_mode) {
   // ----------- S t a t e -------------
   //  -- a0    : value
   //  -- a1    : key
   //  -- a2    : receiver
   //  -- ra    : return address
(...skipping 314 matching lines...)
     __ Jump(ic_slow, RelocInfo::CODE_TARGET);
   }
 }
 
 
 #undef __
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_MIPS