Index: src/mips/stub-cache-mips.cc
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 323933b5de4cd10657a6a8f6b12dd16bb1293222..cea15f401dce3554f60a99bd1086203829a16279 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -1054,46 +1054,6 @@ static void StoreIntAsFloat(MacroAssembler* masm,
 }
 
 
-// Convert unsigned integer with specified number of leading zeroes in binary
-// representation to IEEE 754 double.
-// Integer to convert is passed in register hiword.
-// Resulting double is returned in registers hiword:loword.
-// This functions does not work correctly for 0.
-static void GenerateUInt2Double(MacroAssembler* masm,
-                                Register hiword,
-                                Register loword,
-                                Register scratch,
-                                int leading_zeroes) {
-  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
-  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
-
-  const int mantissa_shift_for_hi_word =
-      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
-
-  const int mantissa_shift_for_lo_word =
-      kBitsPerInt - mantissa_shift_for_hi_word;
-
-  __ li(scratch, biased_exponent << HeapNumber::kExponentShift);
-  if (mantissa_shift_for_hi_word > 0) {
-    __ sll(loword, hiword, mantissa_shift_for_lo_word);
-    __ srl(hiword, hiword, mantissa_shift_for_hi_word);
-    __ or_(hiword, scratch, hiword);
-  } else {
-    __ mov(loword, zero_reg);
-    __ sll(hiword, hiword, mantissa_shift_for_hi_word);
-    __ or_(hiword, scratch, hiword);
-  }
-
-  // If least significant bit of biased exponent was not 1 it was corrupted
-  // by most significant bit of mantissa so we should fix that.
-  if (!(biased_exponent & 1)) {
-    __ li(scratch, 1 << HeapNumber::kExponentShift);
-    __ nor(scratch, scratch, scratch);
-    __ and_(hiword, hiword, scratch);
-  }
-}
-
-
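
The removed helper is a straight IEEE 754 bit construction: for a non-zero 32-bit unsigned integer whose number of leading zeros is known at code-generation time, the biased exponent is HeapNumber::kExponentBias plus the index of the highest set bit, and the remaining bits are shifted into the top of the mantissa (the leading 1 is implicit, which is also why the stub needs the exponent-LSB fixup after ORing). A minimal host-side sketch of the same construction, using the standard binary64 constants instead of V8's HeapNumber definitions; like the stub, it is not valid for 0:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Builds the binary64 bit pattern for `value`, given the number of leading
// zeros in its 32-bit representation. Masking the mantissa before ORing
// removes the implicit leading 1 up front, which is the fixup the stub
// performed afterwards when the biased exponent's low bit was 0.
uint64_t UInt2DoubleBits(uint32_t value, int leading_zeroes) {
  const int kBitsPerInt = 32;
  const int kExponentBias = 1023;  // binary64 exponent bias
  const int kMantissaBits = 52;    // binary64 mantissa width
  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
  const uint64_t biased_exponent =
      static_cast<uint64_t>(kExponentBias + meaningful_bits);
  const uint64_t mantissa =
      (static_cast<uint64_t>(value) << (kMantissaBits - meaningful_bits)) &
      ((uint64_t{1} << kMantissaBits) - 1);
  return (biased_exponent << kMantissaBits) | mantissa;
}

int main() {
  uint32_t v = 0x40000001;  // exactly one leading zero
  uint64_t bits = UInt2DoubleBits(v, 1);
  double d;
  std::memcpy(&d, &bits, sizeof d);
  assert(d == static_cast<double>(v));
}
```
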
 #undef __
 #define __ ACCESS_MASM(masm())
 
@@ -3316,9 +3276,17 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
   //  -- a1    : receiver
   // -----------------------------------
   ElementsKind elements_kind = receiver_map->elements_kind();
-  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
-
-  __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
+  if (receiver_map->has_fast_elements() ||
+      receiver_map->has_external_array_elements()) {
+    Handle<Code> stub = KeyedLoadFastElementStub(
+        receiver_map->instance_type() == JS_ARRAY_TYPE,
+        elements_kind).GetCode();
+    __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
+  } else {
+    Handle<Code> stub =
+        KeyedLoadDictionaryElementStub().GetCode();
+    __ DispatchMap(a1, a2, receiver_map, stub, DO_SMI_CHECK);
+  }
 
   Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
   __ Jump(ic, RelocInfo::CODE_TARGET);
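
The hunk above replaces the single KeyedLoadElementStub with an explicit choice between two stubs. Restated as plain C++ for readability (this function is illustrative, not code in the tree); DispatchMap itself compares the receiver's map against receiver_map and tail-jumps to the chosen stub on a match, so a mismatch falls through to the KeyedLoadIC_Miss jump below it:

```cpp
// Illustrative restatement of the dispatch choice emitted above.
Handle<Code> SelectKeyedLoadStub(Handle<Map> receiver_map) {
  ElementsKind elements_kind = receiver_map->elements_kind();
  if (receiver_map->has_fast_elements() ||
      receiver_map->has_external_array_elements()) {
    // Fast and external-array kinds share one stub, parameterized by
    // whether the receiver is a JSArray and by the elements kind.
    return KeyedLoadFastElementStub(
        receiver_map->instance_type() == JS_ARRAY_TYPE,
        elements_kind).GetCode();
  }
  // Dictionary-mode elements get their own stub.
  return KeyedLoadDictionaryElementStub().GetCode();
}
```
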
@@ -3730,355 +3698,6 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
 }
 
 
-void KeyedLoadStubCompiler::GenerateLoadExternalArray(
-    MacroAssembler* masm,
-    ElementsKind elements_kind) {
-  // ---------- S t a t e --------------
-  //  -- ra    : return address
-  //  -- a0    : key
-  //  -- a1    : receiver
-  // -----------------------------------
-  Label miss_force_generic, slow, failed_allocation;
-
-  Register key = a0;
-  Register receiver = a1;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key, t0, t1, f2, f4, &miss_force_generic);
-
-  __ lw(a3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // a3: elements array
-
-  // Check that the index is in range.
-  __ lw(t1, FieldMemOperand(a3, ExternalArray::kLengthOffset));
-  __ sra(t2, key, kSmiTagSize);
-  // Unsigned comparison catches both negative and too-large values.
-  __ Branch(&miss_force_generic, Ugreater_equal, key, Operand(t1));
-
-  __ lw(a3, FieldMemOperand(a3, ExternalArray::kExternalPointerOffset));
-  // a3: base pointer of external storage
-
-  // We are not untagging smi key and instead work with it
-  // as if it was premultiplied by 2.
-  STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
-
-  Register value = a2;
-  switch (elements_kind) {
-    case EXTERNAL_BYTE_ELEMENTS:
-      __ srl(t2, key, 1);
-      __ addu(t3, a3, t2);
-      __ lb(value, MemOperand(t3, 0));
-      break;
-    case EXTERNAL_PIXEL_ELEMENTS:
-    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
-      __ srl(t2, key, 1);
-      __ addu(t3, a3, t2);
-      __ lbu(value, MemOperand(t3, 0));
-      break;
-    case EXTERNAL_SHORT_ELEMENTS:
-      __ addu(t3, a3, key);
-      __ lh(value, MemOperand(t3, 0));
-      break;
-    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
-      __ addu(t3, a3, key);
-      __ lhu(value, MemOperand(t3, 0));
-      break;
-    case EXTERNAL_INT_ELEMENTS:
-    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
-      __ sll(t2, key, 1);
-      __ addu(t3, a3, t2);
-      __ lw(value, MemOperand(t3, 0));
-      break;
-    case EXTERNAL_FLOAT_ELEMENTS:
-      __ sll(t3, t2, 2);
-      __ addu(t3, a3, t3);
-      if (CpuFeatures::IsSupported(FPU)) {
-        CpuFeatures::Scope scope(FPU);
-        __ lwc1(f0, MemOperand(t3, 0));
-      } else {
-        __ lw(value, MemOperand(t3, 0));
-      }
-      break;
-    case EXTERNAL_DOUBLE_ELEMENTS:
-      __ sll(t2, key, 2);
-      __ addu(t3, a3, t2);
-      if (CpuFeatures::IsSupported(FPU)) {
-        CpuFeatures::Scope scope(FPU);
-        __ ldc1(f0, MemOperand(t3, 0));
-      } else {
-        // t3: pointer to the beginning of the double we want to load.
-        __ lw(a2, MemOperand(t3, 0));
-        __ lw(a3, MemOperand(t3, Register::kSizeInBytes));
-      }
-      break;
-    case FAST_ELEMENTS:
-    case FAST_SMI_ELEMENTS:
-    case FAST_DOUBLE_ELEMENTS:
-    case FAST_HOLEY_ELEMENTS:
-    case FAST_HOLEY_SMI_ELEMENTS:
-    case FAST_HOLEY_DOUBLE_ELEMENTS:
-    case DICTIONARY_ELEMENTS:
-    case NON_STRICT_ARGUMENTS_ELEMENTS:
-      UNREACHABLE();
-      break;
-  }
-
-  // For integer array types:
-  // a2: value
-  // For float array type:
-  // f0: value (if FPU is supported)
-  // a2: value (if FPU is not supported)
-  // For double array type:
-  // f0: value (if FPU is supported)
-  // a2/a3: value (if FPU is not supported)
-
-  if (elements_kind == EXTERNAL_INT_ELEMENTS) {
-    // For the Int and UnsignedInt array types, we need to see whether
-    // the value can be represented in a Smi. If not, we need to convert
-    // it to a HeapNumber.
-    Label box_int;
-    __ Subu(t3, value, Operand(0xC0000000));  // Non-smi value gives neg result.
-    __ Branch(&box_int, lt, t3, Operand(zero_reg));
-    // Tag integer as smi and return it.
-    __ sll(v0, value, kSmiTagSize);
-    __ Ret();
-
-    __ bind(&box_int);
-
-    if (CpuFeatures::IsSupported(FPU)) {
-      CpuFeatures::Scope scope(FPU);
-      // Allocate a HeapNumber for the result and perform int-to-double
-      // conversion.
-      // The arm version uses a temporary here to save r0, but we don't need to
-      // (a0 is not modified).
-      __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(v0, a3, t0, t1, &slow, DONT_TAG_RESULT);
-      __ mtc1(value, f0);
-      __ cvt_d_w(f0, f0);
-      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
-      __ Addu(v0, v0, kHeapObjectTag);
-      __ Ret();
-    } else {
-      // Allocate a HeapNumber for the result and perform int-to-double
-      // conversion.
-      // The arm version uses a temporary here to save r0, but we don't need to
-      // (a0 is not modified).
-      __ LoadRoot(t1, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(v0, a3, t0, t1, &slow, TAG_RESULT);
-      Register dst_mantissa = t2;
-      Register dst_exponent = t3;
-      FloatingPointHelper::Destination dest =
-          FloatingPointHelper::kCoreRegisters;
-      FloatingPointHelper::ConvertIntToDouble(masm,
-                                              value,
-                                              dest,
-                                              f0,
-                                              dst_mantissa,
-                                              dst_exponent,
-                                              t1,
-                                              f2);
-      __ sw(dst_mantissa, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
-      __ sw(dst_exponent, FieldMemOperand(v0, HeapNumber::kExponentOffset));
-      __ Ret();
-    }
-  } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
-    // The test is different for unsigned int values. Since we need
-    // the value to be in the range of a positive smi, we can't
-    // handle either of the top two bits being set in the value.
-    if (CpuFeatures::IsSupported(FPU)) {
-      CpuFeatures::Scope scope(FPU);
-      Label pl_box_int;
-      __ And(t2, value, Operand(0xC0000000));
-      __ Branch(&pl_box_int, ne, t2, Operand(zero_reg));
-
-      // It can fit in an Smi.
-      // Tag integer as smi and return it.
-      __ sll(v0, value, kSmiTagSize);
-      __ Ret();
-
-      __ bind(&pl_box_int);
-      // Allocate a HeapNumber for the result and perform int-to-double
-      // conversion. Don't use a0 and a1 as AllocateHeapNumber clobbers all
-      // registers - also when jumping due to exhausted young space.
-      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(v0, t2, t3, t6, &slow, DONT_TAG_RESULT);
-
-      // This is replaced by a macro:
-      // __ mtc1(value, f0);     // LS 32-bits.
-      // __ mtc1(zero_reg, f1);  // MS 32-bits are all zero.
-      // __ cvt_d_l(f0, f0);     // Use 64 bit conv to get correct unsigned 32-bit.
-
-      __ Cvt_d_uw(f0, value, f22);
-
-      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
-
-      __ Addu(v0, v0, kHeapObjectTag);
-      __ Ret();
-    } else {
-      // Check whether unsigned integer fits into smi.
-      Label box_int_0, box_int_1, done;
-      __ And(t2, value, Operand(0x80000000));
-      __ Branch(&box_int_0, ne, t2, Operand(zero_reg));
-      __ And(t2, value, Operand(0x40000000));
-      __ Branch(&box_int_1, ne, t2, Operand(zero_reg));
-
-      // Tag integer as smi and return it.
-      __ sll(v0, value, kSmiTagSize);
-      __ Ret();
-
-      Register hiword = value;  // a2.
-      Register loword = a3;
-
-      __ bind(&box_int_0);
-      // Integer does not have leading zeros.
-      GenerateUInt2Double(masm, hiword, loword, t0, 0);
-      __ Branch(&done);
-
-      __ bind(&box_int_1);
-      // Integer has one leading zero.
-      GenerateUInt2Double(masm, hiword, loword, t0, 1);
-
-
-      __ bind(&done);
-      // Integer was converted to double in registers hiword:loword.
-      // Wrap it into a HeapNumber. Don't use a0 and a1 as AllocateHeapNumber
-      // clobbers all registers - also when jumping due to exhausted young
-      // space.
-      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(t2, t3, t5, t6, &slow, TAG_RESULT);
-
-      __ sw(hiword, FieldMemOperand(t2, HeapNumber::kExponentOffset));
-      __ sw(loword, FieldMemOperand(t2, HeapNumber::kMantissaOffset));
-
-      __ mov(v0, t2);
-      __ Ret();
-    }
-  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-    // For the floating-point array type, we need to always allocate a
-    // HeapNumber.
-    if (CpuFeatures::IsSupported(FPU)) {
-      CpuFeatures::Scope scope(FPU);
-      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(v0, t3, t5, t6, &slow, DONT_TAG_RESULT);
-      // The float (single) value is already in fpu reg f0 (if we use float).
-      __ cvt_d_s(f0, f0);
-      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
-
-      __ Addu(v0, v0, kHeapObjectTag);
-      __ Ret();
-    } else {
-      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(v0, t3, t5, t6, &slow, TAG_RESULT);
-      // FPU is not available, do manual single to double conversion.
-
-      // a2: floating point value (binary32).
-      // v0: heap number for result
-
-      // Extract mantissa to t4.
-      __ And(t4, value, Operand(kBinary32MantissaMask));
-
-      // Extract exponent to t5.
-      __ srl(t5, value, kBinary32MantissaBits);
-      __ And(t5, t5, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
-
-      Label exponent_rebiased;
-      __ Branch(&exponent_rebiased, eq, t5, Operand(zero_reg));
-
-      __ li(t0, 0x7ff);
-      __ Xor(t1, t5, Operand(0xFF));
-      __ Movz(t5, t0, t1);  // Set t5 to 0x7ff only if t5 is equal to 0xff.
-      __ Branch(&exponent_rebiased, eq, t1, Operand(zero_reg));
-
-      // Rebias exponent.
-      __ Addu(t5,
-              t5,
-              Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
-
-      __ bind(&exponent_rebiased);
-      __ And(a2, value, Operand(kBinary32SignMask));
-      value = no_reg;
-      __ sll(t0, t5, HeapNumber::kMantissaBitsInTopWord);
-      __ or_(a2, a2, t0);
-
-      // Shift mantissa.
-      static const int kMantissaShiftForHiWord =
-          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
-
-      static const int kMantissaShiftForLoWord =
-          kBitsPerInt - kMantissaShiftForHiWord;
-
-      __ srl(t0, t4, kMantissaShiftForHiWord);
-      __ or_(a2, a2, t0);
-      __ sll(a0, t4, kMantissaShiftForLoWord);
-
-      __ sw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
-      __ sw(a0, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
-      __ Ret();
-    }
-
-  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
-    if (CpuFeatures::IsSupported(FPU)) {
-      CpuFeatures::Scope scope(FPU);
-      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(v0, t3, t5, t6, &slow, DONT_TAG_RESULT);
-      // The double value is already in f0
-      __ sdc1(f0, MemOperand(v0, HeapNumber::kValueOffset));
-
-      __ Addu(v0, v0, kHeapObjectTag);
-      __ Ret();
-    } else {
-      // Allocate a HeapNumber for the result. Don't use a0 and a1 as
-      // AllocateHeapNumber clobbers all registers - also when jumping due to
-      // exhausted young space.
-      __ LoadRoot(t6, Heap::kHeapNumberMapRootIndex);
-      __ AllocateHeapNumber(v0, t3, t5, t6, &slow, TAG_RESULT);
-
-      __ sw(a2, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
-      __ sw(a3, FieldMemOperand(v0, HeapNumber::kExponentOffset));
-      __ Ret();
-    }
-
-  } else {
-    // Tag integer as smi and return it.
-    __ sll(v0, value, kSmiTagSize);
-    __ Ret();
-  }
-
-  // Slow case, key and receiver still in a0 and a1.
-  __ bind(&slow);
-  __ IncrementCounter(
-      masm->isolate()->counters()->keyed_load_external_array_slow(),
-      1, a2, a3);
-
-  // ---------- S t a t e --------------
-  //  -- ra    : return address
-  //  -- a0    : key
-  //  -- a1    : receiver
-  // -----------------------------------
-
-  __ Push(a1, a0);
-
-  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> stub =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(stub, RelocInfo::CODE_TARGET);
-}
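
Two bit tricks in the removed code are worth spelling out. The smi test `Subu(t3, value, Operand(0xC0000000))` computes value + 2^30 in two's complement; the result is non-negative exactly when value lies in the smi range [-2^30, 2^30), so a negative result means the value must be boxed. The non-FPU path then widens binary32 to binary64 purely with integer operations. A host-side sketch of that conversion, mirroring the stub's handling of exponent 0 (passed through) and 0xFF (mapped to 0x7FF for Inf/NaN); the constant names echo the kBinary32* constants used above:

```cpp
#include <cassert>
#include <cstdint>
#include <cstring>

// Widens a binary32 bit pattern to binary64 with shifts and masks only,
// as the removed non-FPU path did in MIPS registers.
uint64_t Float32ToFloat64Bits(uint32_t in) {
  const uint32_t kSignMask = 0x80000000u;
  const int kMantissaBits32 = 23;   // binary32 mantissa width
  const int kMantissaBits64 = 52;   // binary64 mantissa width
  const int kExponentBias32 = 127;
  const int kExponentBias64 = 1023;

  uint64_t sign = in & kSignMask;   // sign bit, still at bit 31
  uint32_t exponent = (in >> kMantissaBits32) & 0xFF;
  uint64_t mantissa = in & ((1u << kMantissaBits32) - 1);

  if (exponent == 0xFF) {
    exponent = 0x7FF;               // Inf/NaN: all exponent bits set
  } else if (exponent != 0) {
    exponent += kExponentBias64 - kExponentBias32;  // rebias 127 -> 1023
  }
  // The 23 binary32 mantissa bits occupy the top of the 52-bit field.
  return (sign << 32) | (static_cast<uint64_t>(exponent) << kMantissaBits64) |
         (mantissa << (kMantissaBits64 - kMantissaBits32));
}

int main() {
  float f = -1.5f;
  uint32_t in;
  std::memcpy(&in, &f, sizeof in);
  uint64_t out = Float32ToFloat64Bits(in);
  double d;
  std::memcpy(&d, &out, sizeof d);
  assert(d == -1.5);
}
```
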
-
-
 void KeyedStoreStubCompiler::GenerateStoreExternalArray(
     MacroAssembler* masm,
     ElementsKind elements_kind) {
@@ -4478,115 +4097,6 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
 }
 
 
-void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ra    : return address
-  //  -- a0    : key
-  //  -- a1    : receiver
-  // -----------------------------------
-  Label miss_force_generic;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, a0, t0, t1, f2, f4, &miss_force_generic);
-
-  // Get the elements array.
-  __ lw(a2, FieldMemOperand(a1, JSObject::kElementsOffset));
-  __ AssertFastElements(a2);
-
-  // Check that the key is within bounds.
-  __ lw(a3, FieldMemOperand(a2, FixedArray::kLengthOffset));
-  __ Branch(USE_DELAY_SLOT, &miss_force_generic, hs, a0, Operand(a3));
-
-  // Load the result and make sure it's not the hole.
-  __ Addu(a3, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t0, t0, a3);
-  __ lw(t0, MemOperand(t0));
-  __ LoadRoot(t1, Heap::kTheHoleValueRootIndex);
-  __ Branch(&miss_force_generic, eq, t0, Operand(t1));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, t0);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> stub =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(stub, RelocInfo::CODE_TARGET);
-}
-
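
The removed fast-element load never untags the key: a smi key already equals index * 2, so one further left shift by kPointerSizeLog2 - kSmiTagSize (2 - 1 on 32-bit MIPS) turns it into the byte offset index * 4 of a tagged pointer-sized element. A sketch of that address arithmetic and the hole check, with illustrative types; `elements` points at the first element (header already skipped) and `the_hole` stands in for the kTheHoleValueRootIndex sentinel:

```cpp
#include <cstdint>

// Returns false when the slot holds the hole, in which case the stub
// branched to miss_force_generic instead of returning a value.
bool LoadFastElement(const uint32_t* elements, uint32_t smi_key,
                     uint32_t the_hole, uint32_t* out) {
  // smi_key == index * 2, so one extra left shift yields index * 4,
  // the byte offset of a tagged 32-bit element.
  uint32_t byte_offset = smi_key << (2 - 1);
  uint32_t value = *reinterpret_cast<const uint32_t*>(
      reinterpret_cast<const uint8_t*>(elements) + byte_offset);
  if (value == the_hole) return false;
  *out = value;
  return true;
}
```
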
-
-
-void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ra    : return address
-  //  -- a0    : key
-  //  -- a1    : receiver
-  // -----------------------------------
-  Label miss_force_generic, slow_allocate_heapnumber;
-
-  Register key_reg = a0;
-  Register receiver_reg = a1;
-  Register elements_reg = a2;
-  Register heap_number_reg = a2;
-  Register indexed_double_offset = a3;
-  Register scratch = t0;
-  Register scratch2 = t1;
-  Register scratch3 = t2;
-  Register heap_number_map = t3;
-
-  // This stub is meant to be tail-jumped to, the receiver must already
-  // have been verified by the caller to not be a smi.
-
-  // Check that the key is a smi or a heap number convertible to a smi.
-  GenerateSmiKeyCheck(masm, key_reg, t0, t1, f2, f4, &miss_force_generic);
-
-  // Get the elements array.
-  __ lw(elements_reg,
-        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
-
-  // Check that the key is within bounds.
-  __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
-  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
-
-  // Load the upper word of the double in the fixed array and test for NaN.
-  __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
-  __ Addu(indexed_double_offset, elements_reg, Operand(scratch2));
-  uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
-  __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32));
-
-  // Non-NaN. Allocate a new heap number and copy the double value into it.
-  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
-                        heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
-
-  // Don't need to reload the upper 32 bits of the double, it's already in
-  // scratch.
-  __ sw(scratch, FieldMemOperand(heap_number_reg,
-                                 HeapNumber::kExponentOffset));
-  __ lw(scratch, FieldMemOperand(indexed_double_offset,
-                                 FixedArray::kHeaderSize));
-  __ sw(scratch, FieldMemOperand(heap_number_reg,
-                                 HeapNumber::kMantissaOffset));
-
-  __ mov(v0, heap_number_reg);
-  __ Ret();
-
-  __ bind(&slow_allocate_heapnumber);
-  Handle<Code> slow_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_Slow();
-  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
-
-  __ bind(&miss_force_generic);
-  Handle<Code> miss_ic =
-      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
-  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
-}
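
The removed double-element load recognizes holes without reading the full double: FAST_DOUBLE_ELEMENTS backing stores encode the hole as one specific NaN, so comparing only the upper 32 bits (sign, exponent, and top of the mantissa) against kHoleNanUpper32 suffices, and on little-endian MIPS that word sits sizeof(kHoleNanLower32) bytes past the element's start, which is what the upper_32_offset computation above expresses. A sketch under the assumption that kHoleNanUpper32 was 0x7FFFFFFF at the time; treat the constant as illustrative:

```cpp
#include <cstdint>
#include <cstring>

const uint32_t kHoleNanUpper32 = 0x7FFFFFFF;  // assumed value, see above

// Returns true if `element` carries the hole NaN's upper-word signature.
// Real holes must be routed back to the generic IC rather than boxed.
bool IsTheHole(double element) {
  uint64_t bits;
  std::memcpy(&bits, &element, sizeof bits);
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32;
}
```
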
-
-
 void KeyedStoreStubCompiler::GenerateStoreFastElement(
     MacroAssembler* masm,
     bool is_js_array,