| Index: src/arm/stub-cache-arm.cc
 | 
| diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
 | 
| index dd5db3054e9bf42db69cd5236a759fd438275d95..a194dfae5b0099a00e3ef77ec7be6c5d7ab7d5b4 100644
 | 
| --- a/src/arm/stub-cache-arm.cc
 | 
| +++ b/src/arm/stub-cache-arm.cc
 | 
| @@ -1053,6 +1053,42 @@ static void StoreIntAsFloat(MacroAssembler* masm,
 | 
|  }
 | 
|  
 | 
|  
 | 
| +// Convert unsigned integer with specified number of leading zeroes in binary
 | 
| +// representation to IEEE 754 double.
 | 
| +// Integer to convert is passed in register hiword.
 | 
| +// Resulting double is returned in registers hiword:loword.
 | 
| +// This function does not work correctly for 0.
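 | 
| +//
 | 
| +// For example, with leading_zeroes == 1 (bit 31 clear, bit 30 set):
 | 
| +//   meaningful_bits == 32 - 1 - 1 == 30 mantissa bits below the leading 1,
 | 
| +//   biased_exponent == 1023 + 30 == 1053, and
 | 
| +//   mantissa_shift_for_hi_word == 30 - 20 == 10, so the top 20 mantissa
 | 
| +//   bits land in hiword and the remaining 10 bits in the top of loword.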
 | 
| +static void GenerateUInt2Double(MacroAssembler* masm,
 | 
| +                                Register hiword,
 | 
| +                                Register loword,
 | 
| +                                Register scratch,
 | 
| +                                int leading_zeroes) {
 | 
| +  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
 | 
| +  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
 | 
| +
 | 
| +  const int mantissa_shift_for_hi_word =
 | 
| +      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
 | 
| +
 | 
| +  const int mantissa_shift_for_lo_word =
 | 
| +      kBitsPerInt - mantissa_shift_for_hi_word;
 | 
| +
 | 
| +  __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
 | 
| +  if (mantissa_shift_for_hi_word > 0) {
 | 
| +    __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
 | 
| +    __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
 | 
| +  } else {
 | 
| +    __ mov(loword, Operand(0, RelocInfo::NONE));
 | 
| +    __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
 | 
| +  }
 | 
| +
 | 
| +  // The integer's leading 1 bit (the implicit mantissa bit) was OR-ed into
 | 
| +  // the least significant bit of the exponent above. If the biased exponent
 | 
| +  // is even, that bit must be cleared again.
 | 
| +  if (!(biased_exponent & 1)) {
 | 
| +    __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
 | 
| +  }
 | 
| +}
 | 
| +
 | 
| +
 | 
|  #undef __
 | 
|  #define __ ACCESS_MASM(masm())
 | 
|  
 | 
| @@ -3283,17 +3319,9 @@ Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
 | 
|    //  -- r1    : receiver
 | 
|    // -----------------------------------
 | 
|    ElementsKind elements_kind = receiver_map->elements_kind();
 | 
| -  if (receiver_map->has_fast_elements() ||
 | 
| -      receiver_map->has_external_array_elements()) {
 | 
| -    Handle<Code> stub = KeyedLoadFastElementStub(
 | 
| -        receiver_map->instance_type() == JS_ARRAY_TYPE,
 | 
| -        elements_kind).GetCode();
 | 
| -    __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
 | 
| -  } else {
 | 
| -    Handle<Code> stub =
 | 
| -        KeyedLoadDictionaryElementStub().GetCode();
 | 
| -    __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
 | 
| -  }
 | 
| +  Handle<Code> stub = KeyedLoadElementStub(elements_kind).GetCode();
 | 
| +
 | 
| +  __ DispatchMap(r1, r2, receiver_map, stub, DO_SMI_CHECK);
 | 
|  
 | 
|    Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Miss();
 | 
|    __ Jump(ic, RelocInfo::CODE_TARGET);
 | 
| @@ -3698,6 +3726,339 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
 | 
|  }
 | 
|  
 | 
|  
 | 
| +void KeyedLoadStubCompiler::GenerateLoadExternalArray(
 | 
| +    MacroAssembler* masm,
 | 
| +    ElementsKind elements_kind) {
 | 
| +  // ---------- S t a t e --------------
 | 
| +  //  -- lr     : return address
 | 
| +  //  -- r0     : key
 | 
| +  //  -- r1     : receiver
 | 
| +  // -----------------------------------
 | 
| +  Label miss_force_generic, slow, failed_allocation;
 | 
| +
 | 
| +  Register key = r0;
 | 
| +  Register receiver = r1;
 | 
| +
 | 
| +  // This stub is meant to be tail-jumped to; the caller must already have
 | 
| +  // verified that the receiver is not a smi.
 | 
| +
 | 
| +  // Check that the key is a smi or a heap number convertible to a smi.
 | 
| +  GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic);
 | 
| +
 | 
| +  __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
 | 
| +  // r3: elements array
 | 
| +
 | 
| +  // Check that the index is in range.
 | 
| +  __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
 | 
| +  __ cmp(key, ip);
 | 
| +  // Unsigned comparison catches both negative and too-large values.
 | 
| +  __ b(hs, &miss_force_generic);
 | 
| +
 | 
| +  __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
 | 
| +  // r3: base pointer of external storage
 | 
| +
 | 
| +  // We do not untag the smi key; instead we work with it as if it were
 | 
| +  // premultiplied by 2.
 | 
| +  STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
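 | 
| +  // Hence a one-byte element at index i is addressed with key >> 1 == i, a
 | 
| +  // two-byte element with the untouched key == 2 * i, a four-byte element
 | 
| +  // with key << 1 == 4 * i and an eight-byte element with key << 2 == 8 * i.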
 | 
| +
 | 
| +  Register value = r2;
 | 
| +  switch (elements_kind) {
 | 
| +    case EXTERNAL_BYTE_ELEMENTS:
 | 
| +      __ ldrsb(value, MemOperand(r3, key, LSR, 1));
 | 
| +      break;
 | 
| +    case EXTERNAL_PIXEL_ELEMENTS:
 | 
| +    case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
 | 
| +      __ ldrb(value, MemOperand(r3, key, LSR, 1));
 | 
| +      break;
 | 
| +    case EXTERNAL_SHORT_ELEMENTS:
 | 
| +      __ ldrsh(value, MemOperand(r3, key, LSL, 0));
 | 
| +      break;
 | 
| +    case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
 | 
| +      __ ldrh(value, MemOperand(r3, key, LSL, 0));
 | 
| +      break;
 | 
| +    case EXTERNAL_INT_ELEMENTS:
 | 
| +    case EXTERNAL_UNSIGNED_INT_ELEMENTS:
 | 
| +      __ ldr(value, MemOperand(r3, key, LSL, 1));
 | 
| +      break;
 | 
| +    case EXTERNAL_FLOAT_ELEMENTS:
 | 
| +      if (CpuFeatures::IsSupported(VFP2)) {
 | 
| +        CpuFeatures::Scope scope(VFP2);
 | 
| +        __ add(r2, r3, Operand(key, LSL, 1));
 | 
| +        __ vldr(s0, r2, 0);
 | 
| +      } else {
 | 
| +        __ ldr(value, MemOperand(r3, key, LSL, 1));
 | 
| +      }
 | 
| +      break;
 | 
| +    case EXTERNAL_DOUBLE_ELEMENTS:
 | 
| +      if (CpuFeatures::IsSupported(VFP2)) {
 | 
| +        CpuFeatures::Scope scope(VFP2);
 | 
| +        __ add(r2, r3, Operand(key, LSL, 2));
 | 
| +        __ vldr(d0, r2, 0);
 | 
| +      } else {
 | 
| +        __ add(r4, r3, Operand(key, LSL, 2));
 | 
| +        // r4: pointer to the beginning of the double we want to load.
 | 
| +        __ ldr(r2, MemOperand(r4, 0));
 | 
| +        __ ldr(r3, MemOperand(r4, Register::kSizeInBytes));
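 | 
| +        // On little-endian ARM r2 now holds the mantissa (low) word and r3
 | 
| +        // the exponent (high) word of the double.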
 | 
| +      }
 | 
| +      break;
 | 
| +    case FAST_ELEMENTS:
 | 
| +    case FAST_SMI_ELEMENTS:
 | 
| +    case FAST_DOUBLE_ELEMENTS:
 | 
| +    case FAST_HOLEY_ELEMENTS:
 | 
| +    case FAST_HOLEY_SMI_ELEMENTS:
 | 
| +    case FAST_HOLEY_DOUBLE_ELEMENTS:
 | 
| +    case DICTIONARY_ELEMENTS:
 | 
| +    case NON_STRICT_ARGUMENTS_ELEMENTS:
 | 
| +      UNREACHABLE();
 | 
| +      break;
 | 
| +  }
 | 
| +
 | 
| +  // For integer array types:
 | 
| +  // r2: value
 | 
| +  // For float array type:
 | 
| +  // s0: value (if VFP2 is supported)
 | 
| +  // r2: value (if VFP2 is not supported)
 | 
| +  // For double array type:
 | 
| +  // d0: value (if VFP2 is supported)
 | 
| +  // r2/r3: value (if VFP2 is not supported)
 | 
| +
 | 
| +  if (elements_kind == EXTERNAL_INT_ELEMENTS) {
 | 
| +    // For the Int and UnsignedInt array types, we need to see whether
 | 
| +    // the value can be represented in a Smi. If not, we need to convert
 | 
| +    // it to a HeapNumber.
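 | 
| +    // A smi holds a signed 31-bit payload, i.e. values in
 | 
| +    // [-0x40000000, 0x40000000). Subtracting 0xC0000000 (== -0x40000000)
 | 
| +    // leaves the sign flag clear exactly for values in that range.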
 | 
| +    Label box_int;
 | 
| +    __ cmp(value, Operand(0xC0000000));
 | 
| +    __ b(mi, &box_int);
 | 
| +    // Tag integer as smi and return it.
 | 
| +    __ mov(r0, Operand(value, LSL, kSmiTagSize));
 | 
| +    __ Ret();
 | 
| +
 | 
| +    __ bind(&box_int);
 | 
| +    if (CpuFeatures::IsSupported(VFP2)) {
 | 
| +      CpuFeatures::Scope scope(VFP2);
 | 
| +      // Allocate a HeapNumber for the result and perform int-to-double
 | 
| +      // conversion.  Don't touch r0 or r1 as they are needed if allocation
 | 
| +      // fails.
 | 
| +      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
 | 
| +
 | 
| +      __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT);
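 | 
| +      // With DONT_TAG_RESULT r5 holds the untagged object address, so the
 | 
| +      // vstr below can use HeapNumber::kValueOffset directly; the tag is
 | 
| +      // added to the returned r0 separately.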
 | 
| +      // Now we can use r0 for the result as key is not needed any more.
 | 
| +      __ add(r0, r5, Operand(kHeapObjectTag));
 | 
| +      __ vmov(s0, value);
 | 
| +      __ vcvt_f64_s32(d0, s0);
 | 
| +      __ vstr(d0, r5, HeapNumber::kValueOffset);
 | 
| +      __ Ret();
 | 
| +    } else {
 | 
| +      // Allocate a HeapNumber for the result and perform int-to-double
 | 
| +      // conversion.  Don't touch r0 or r1 as they are needed if allocation
 | 
| +      // fails.
 | 
| +      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
 | 
| +      __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT);
 | 
| +      // Now we can use r0 for the result as key is not needed any more.
 | 
| +      __ mov(r0, r5);
 | 
| +      Register dst_mantissa = r1;
 | 
| +      Register dst_exponent = r3;
 | 
| +      FloatingPointHelper::Destination dest =
 | 
| +          FloatingPointHelper::kCoreRegisters;
 | 
| +      FloatingPointHelper::ConvertIntToDouble(masm,
 | 
| +                                              value,
 | 
| +                                              dest,
 | 
| +                                              d0,
 | 
| +                                              dst_mantissa,
 | 
| +                                              dst_exponent,
 | 
| +                                              r9,
 | 
| +                                              s0);
 | 
| +      __ str(dst_mantissa, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
 | 
| +      __ str(dst_exponent, FieldMemOperand(r0, HeapNumber::kExponentOffset));
 | 
| +      __ Ret();
 | 
| +    }
 | 
| +  } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) {
 | 
| +    // The test is different for unsigned int values. Since we need
 | 
| +    // the value to be in the range of a positive smi, we can't
 | 
| +    // handle either of the top two bits being set in the value.
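 | 
| +    // That is, the value fits in a positive smi only if it is below 2^30,
 | 
| +    // i.e. if (value & 0xC0000000) == 0.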
 | 
| +    if (CpuFeatures::IsSupported(VFP2)) {
 | 
| +      CpuFeatures::Scope scope(VFP2);
 | 
| +      Label box_int, done;
 | 
| +      __ tst(value, Operand(0xC0000000));
 | 
| +      __ b(ne, &box_int);
 | 
| +      // Tag integer as smi and return it.
 | 
| +      __ mov(r0, Operand(value, LSL, kSmiTagSize));
 | 
| +      __ Ret();
 | 
| +
 | 
| +      __ bind(&box_int);
 | 
| +      __ vmov(s0, value);
 | 
| +      // Allocate a HeapNumber for the result and perform int-to-double
 | 
| +      // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
 | 
| +      // registers - also when jumping due to exhausted young space.
 | 
| +      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
 | 
| +      __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
 | 
| +
 | 
| +      __ vcvt_f64_u32(d0, s0);
 | 
| +      __ vstr(d0, r2, HeapNumber::kValueOffset);
 | 
| +
 | 
| +      __ add(r0, r2, Operand(kHeapObjectTag));
 | 
| +      __ Ret();
 | 
| +    } else {
 | 
| +      // Check whether the unsigned integer fits into a smi.
 | 
| +      Label box_int_0, box_int_1, done;
 | 
| +      __ tst(value, Operand(0x80000000));
 | 
| +      __ b(ne, &box_int_0);
 | 
| +      __ tst(value, Operand(0x40000000));
 | 
| +      __ b(ne, &box_int_1);
 | 
| +      // Tag integer as smi and return it.
 | 
| +      __ mov(r0, Operand(value, LSL, kSmiTagSize));
 | 
| +      __ Ret();
 | 
| +
 | 
| +      Register hiword = value;  // r2.
 | 
| +      Register loword = r3;
 | 
| +
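 | 
| +      // Only values with bit 31 or bit 30 set reach the boxing labels, so
 | 
| +      // the leading-zero count is either 0 or 1.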
 | 
| +      __ bind(&box_int_0);
 | 
| +      // Integer does not have leading zeros.
 | 
| +      GenerateUInt2Double(masm, hiword, loword, r4, 0);
 | 
| +      __ b(&done);
 | 
| +
 | 
| +      __ bind(&box_int_1);
 | 
| +      // Integer has one leading zero.
 | 
| +      GenerateUInt2Double(masm, hiword, loword, r4, 1);
 | 
| +
 | 
| +
 | 
| +      __ bind(&done);
 | 
| +      // Integer was converted to double in registers hiword:loword.
 | 
| +      // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
 | 
| +      // clobbers all registers - also when jumping due to exhausted young
 | 
| +      // space.
 | 
| +      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
 | 
| +      __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT);
 | 
| +
 | 
| +      __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
 | 
| +      __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
 | 
| +
 | 
| +      __ mov(r0, r4);
 | 
| +      __ Ret();
 | 
| +    }
 | 
| +  } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
 | 
| +    // For the floating-point array type, we need to always allocate a
 | 
| +    // HeapNumber.
 | 
| +    if (CpuFeatures::IsSupported(VFP2)) {
 | 
| +      CpuFeatures::Scope scope(VFP2);
 | 
| +      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
 | 
| +      // AllocateHeapNumber clobbers all registers - also when jumping due to
 | 
| +      // exhausted young space.
 | 
| +      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
 | 
| +      __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
 | 
| +      __ vcvt_f64_f32(d0, s0);
 | 
| +      __ vstr(d0, r2, HeapNumber::kValueOffset);
 | 
| +
 | 
| +      __ add(r0, r2, Operand(kHeapObjectTag));
 | 
| +      __ Ret();
 | 
| +    } else {
 | 
| +      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
 | 
| +      // AllocateHeapNumber clobbers all registers - also when jumping due to
 | 
| +      // exhausted young space.
 | 
| +      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
 | 
| +      __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT);
 | 
| +      // VFP is not available, so do a manual single-to-double conversion.
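 | 
| +      // A binary32 value has 1 sign bit, 8 exponent bits (bias 127) and 23
 | 
| +      // mantissa bits; a binary64 value has 1 sign bit, 11 exponent bits
 | 
| +      // (bias 1023) and 52 mantissa bits. The exponent is rebiased by
 | 
| +      // 1023 - 127 == 896 and the 23 mantissa bits are shifted up by
 | 
| +      // 52 - 23 == 29: the top 20 bits land in the high word and the low 3
 | 
| +      // bits in the top of the low word.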
 | 
| +
 | 
| +      // r2: floating point value (binary32)
 | 
| +      // r3: heap number for result
 | 
| +
 | 
| +      // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
 | 
| +      // the slow case from here.
 | 
| +      __ and_(r0, value, Operand(kBinary32MantissaMask));
 | 
| +
 | 
| +      // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
 | 
| +      // the slow case from here.
 | 
| +      __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
 | 
| +      __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
 | 
| +
 | 
| +      Label exponent_rebiased;
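 | 
| +      // Exponent 0 (zero or a denormal) is kept as 0; exponent 0xff
 | 
| +      // (infinity or NaN) becomes the maximal double exponent 0x7ff. Both
 | 
| +      // cases skip the rebiasing below.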
 | 
| +      __ teq(r1, Operand(0x00));
 | 
| +      __ b(eq, &exponent_rebiased);
 | 
| +
 | 
| +      __ teq(r1, Operand(0xff));
 | 
| +      __ mov(r1, Operand(0x7ff), LeaveCC, eq);
 | 
| +      __ b(eq, &exponent_rebiased);
 | 
| +
 | 
| +      // Rebias exponent.
 | 
| +      __ add(r1,
 | 
| +             r1,
 | 
| +             Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
 | 
| +
 | 
| +      __ bind(&exponent_rebiased);
 | 
| +      __ and_(r2, value, Operand(kBinary32SignMask));
 | 
| +      value = no_reg;
 | 
| +      __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
 | 
| +
 | 
| +      // Shift mantissa.
 | 
| +      static const int kMantissaShiftForHiWord =
 | 
| +          kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
 | 
| +
 | 
| +      static const int kMantissaShiftForLoWord =
 | 
| +          kBitsPerInt - kMantissaShiftForHiWord;
 | 
| +
 | 
| +      __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
 | 
| +      __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
 | 
| +
 | 
| +      __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
 | 
| +      __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
 | 
| +
 | 
| +      __ mov(r0, r3);
 | 
| +      __ Ret();
 | 
| +    }
 | 
| +  } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
 | 
| +    if (CpuFeatures::IsSupported(VFP2)) {
 | 
| +      CpuFeatures::Scope scope(VFP2);
 | 
| +      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
 | 
| +      // AllocateHeapNumber clobbers all registers - also when jumping due to
 | 
| +      // exhausted young space.
 | 
| +      __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
 | 
| +      __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT);
 | 
| +      __ vstr(d0, r2, HeapNumber::kValueOffset);
 | 
| +
 | 
| +      __ add(r0, r2, Operand(kHeapObjectTag));
 | 
| +      __ Ret();
 | 
| +    } else {
 | 
| +      // Allocate a HeapNumber for the result. Don't use r0 and r1 as
 | 
| +      // AllocateHeapNumber clobbers all registers - also when jumping due to
 | 
| +      // exhausted young space.
 | 
| +      __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
 | 
| +      __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT);
 | 
| +
 | 
| +      __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
 | 
| +      __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset));
 | 
| +      __ mov(r0, r4);
 | 
| +      __ Ret();
 | 
| +    }
 | 
| +
 | 
| +  } else {
 | 
| +    // Tag integer as smi and return it.
 | 
| +    __ mov(r0, Operand(value, LSL, kSmiTagSize));
 | 
| +    __ Ret();
 | 
| +  }
 | 
| +
 | 
| +  // Slow case, key and receiver still in r0 and r1.
 | 
| +  __ bind(&slow);
 | 
| +  __ IncrementCounter(
 | 
| +      masm->isolate()->counters()->keyed_load_external_array_slow(),
 | 
| +      1, r2, r3);
 | 
| +
 | 
| +  // ---------- S t a t e --------------
 | 
| +  //  -- lr     : return address
 | 
| +  //  -- r0     : key
 | 
| +  //  -- r1     : receiver
 | 
| +  // -----------------------------------
 | 
| +
 | 
| +  __ Push(r1, r0);
 | 
| +
 | 
| +  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
 | 
| +
 | 
| +  __ bind(&miss_force_generic);
 | 
| +  Handle<Code> stub =
 | 
| +      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
 | 
| +  __ Jump(stub, RelocInfo::CODE_TARGET);
 | 
| +}
 | 
| +
 | 
| +
 | 
|  void KeyedStoreStubCompiler::GenerateStoreExternalArray(
 | 
|      MacroAssembler* masm,
 | 
|      ElementsKind elements_kind) {
 | 
| @@ -4042,6 +4403,118 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
 | 
|  }
 | 
|  
 | 
|  
 | 
| +void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
 | 
| +  // ----------- S t a t e -------------
 | 
| +  //  -- lr    : return address
 | 
| +  //  -- r0    : key
 | 
| +  //  -- r1    : receiver
 | 
| +  // -----------------------------------
 | 
| +  Label miss_force_generic;
 | 
| +
 | 
| +  // This stub is meant to be tail-jumped to; the caller must already have
 | 
| +  // verified that the receiver is not a smi.
 | 
| +
 | 
| +  // Check that the key is a smi or a heap number convertible to a smi.
 | 
| +  GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic);
 | 
| +
 | 
| +  // Get the elements array.
 | 
| +  __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset));
 | 
| +  __ AssertFastElements(r2);
 | 
| +
 | 
| +  // Check that the key is within bounds.
 | 
| +  __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
 | 
| +  __ cmp(r0, Operand(r3));
 | 
| +  __ b(hs, &miss_force_generic);
 | 
| +
 | 
| +  // Load the result and make sure it's not the hole.
 | 
| +  __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
 | 
| +  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
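 | 
| +  // The key is a smi, i.e. 2 * index, so shifting it left by
 | 
| +  // kPointerSizeLog2 - kSmiTagSize == 1 yields the byte offset
 | 
| +  // index * kPointerSize.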
 | 
| +  __ ldr(r4,
 | 
| +         MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize));
 | 
| +  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
 | 
| +  __ cmp(r4, ip);
 | 
| +  __ b(eq, &miss_force_generic);
 | 
| +  __ mov(r0, r4);
 | 
| +  __ Ret();
 | 
| +
 | 
| +  __ bind(&miss_force_generic);
 | 
| +  Handle<Code> stub =
 | 
| +      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
 | 
| +  __ Jump(stub, RelocInfo::CODE_TARGET);
 | 
| +}
 | 
| +
 | 
| +
 | 
| +void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
 | 
| +    MacroAssembler* masm) {
 | 
| +  // ----------- S t a t e -------------
 | 
| +  //  -- lr    : return address
 | 
| +  //  -- r0    : key
 | 
| +  //  -- r1    : receiver
 | 
| +  // -----------------------------------
 | 
| +  Label miss_force_generic, slow_allocate_heapnumber;
 | 
| +
 | 
| +  Register key_reg = r0;
 | 
| +  Register receiver_reg = r1;
 | 
| +  Register elements_reg = r2;
 | 
| +  Register heap_number_reg = r2;
 | 
| +  Register indexed_double_offset = r3;
 | 
| +  Register scratch = r4;
 | 
| +  Register scratch2 = r5;
 | 
| +  Register scratch3 = r6;
 | 
| +  Register heap_number_map = r7;
 | 
| +
 | 
| +  // This stub is meant to be tail-jumped to; the caller must already have
 | 
| +  // verified that the receiver is not a smi.
 | 
| +
 | 
| +  // Check that the key is a smi or a heap number convertible to a smi.
 | 
| +  GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic);
 | 
| +
 | 
| +  // Get the elements array.
 | 
| +  __ ldr(elements_reg,
 | 
| +         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
 | 
| +
 | 
| +  // Check that the key is within bounds.
 | 
| +  __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
 | 
| +  __ cmp(key_reg, Operand(scratch));
 | 
| +  __ b(hs, &miss_force_generic);
 | 
| +
 | 
| +  // Load the upper word of the double in the fixed array and test whether
 | 
| +  // it is the hole NaN.
 | 
| +  __ add(indexed_double_offset, elements_reg,
 | 
| +         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
 | 
| +  uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
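 | 
| +  // sizeof(kHoleNanLower32) == 4, so on little-endian ARM this addresses
 | 
| +  // the upper 32 bits of the double, which are compared against the hole
 | 
| +  // marker below.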
 | 
| +  __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
 | 
| +  __ cmp(scratch, Operand(kHoleNanUpper32));
 | 
| +  __ b(&miss_force_generic, eq);
 | 
| +
 | 
| +  // Non-NaN. Allocate a new heap number and copy the double value into it.
 | 
| +  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
 | 
| +  __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
 | 
| +                        heap_number_map, &slow_allocate_heapnumber, TAG_RESULT);
 | 
| +
 | 
| +  // There is no need to reload the upper 32 bits of the double; they are
 | 
| +  // already in scratch.
 | 
| +  __ str(scratch, FieldMemOperand(heap_number_reg,
 | 
| +                                  HeapNumber::kExponentOffset));
 | 
| +  __ ldr(scratch, FieldMemOperand(indexed_double_offset,
 | 
| +                                  FixedArray::kHeaderSize));
 | 
| +  __ str(scratch, FieldMemOperand(heap_number_reg,
 | 
| +                                  HeapNumber::kMantissaOffset));
 | 
| +
 | 
| +  __ mov(r0, heap_number_reg);
 | 
| +  __ Ret();
 | 
| +
 | 
| +  __ bind(&slow_allocate_heapnumber);
 | 
| +  Handle<Code> slow_ic =
 | 
| +      masm->isolate()->builtins()->KeyedLoadIC_Slow();
 | 
| +  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
 | 
| +
 | 
| +  __ bind(&miss_force_generic);
 | 
| +  Handle<Code> miss_ic =
 | 
| +      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
 | 
| +  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
 | 
| +}
 | 
| +
 | 
| +
 | 
|  void KeyedStoreStubCompiler::GenerateStoreFastElement(
 | 
|      MacroAssembler* masm,
 | 
|      bool is_js_array,
 | 
| 
 |