Index: src/arm/stub-cache-arm.cc
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 86e49716d3b45e3c91dc5b89b4a886a81b5ebdf4..2ca9c6b1c1fa305f1f8256ec5c8d6de9399e77a4 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -4168,6 +4168,77 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
 }
 
 
+void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- r0    : key
+  //  -- r1    : receiver
+  // -----------------------------------
+  Label miss_force_generic, slow_allocate_heapnumber;
+
+  Register key_reg = r0;
+  Register receiver_reg = r1;
+  Register elements_reg = r2;
+  Register heap_number_reg = r2;
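+  // Note that elements_reg and heap_number_reg alias r2: the elements
+  // pointer is only needed to compute indexed_double_offset, after which
+  // r2 is free to hold the result.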
+  Register indexed_double_offset = r3;
+  Register scratch = r4;
+  Register scratch2 = r5;
+  Register scratch3 = r6;
+  Register heap_number_map = r7;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  // Get the elements array.
+  __ ldr(elements_reg,
+         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+  // Check that the key is within bounds.
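+  // Both the key and the length are smis, so an unsigned comparison also
+  // catches negative keys: as smis their sign bit is set, making them
+  // compare above any valid length.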
+  __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  __ cmp(key_reg, Operand(scratch));
+  __ b(hs, &miss_force_generic);
+
+  // Load the upper word of the double in the fixed array and test for
+  // the hole NaN.
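+  // The key is a smi, i.e. the element index shifted left by kSmiTagSize,
+  // so shifting it left by kDoubleSizeLog2 - kSmiTagSize scales it to a
+  // byte offset of index * kDoubleSize.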
+  __ add(indexed_double_offset, elements_reg,
+         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
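+  // The hole is marked with a dedicated NaN bit pattern. Stores into fast
+  // double arrays canonicalize every other NaN (see
+  // GenerateStoreFastDoubleElement below), so comparing the upper word
+  // against kHoleNanUpper32 is sufficient to detect a hole.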
+  __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
+  __ cmp(scratch, Operand(kHoleNanUpper32));
+  __ b(eq, &miss_force_generic);
+
+  // Non-NaN. Allocate a new heap number and copy the double value into it.
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
+                        heap_number_map, &slow_allocate_heapnumber);
+
+  // There is no need to reload the upper 32 bits of the double; they are
+  // still in scratch.
+  __ str(scratch, FieldMemOperand(heap_number_reg,
+                                  HeapNumber::kExponentOffset));
+  __ ldr(scratch, FieldMemOperand(indexed_double_offset,
+                                  FixedArray::kHeaderSize));
+  __ str(scratch, FieldMemOperand(heap_number_reg,
+                                  HeapNumber::kMantissaOffset));
+
+  __ mov(r0, heap_number_reg);
+  __ Ret();
+
+  __ bind(&slow_allocate_heapnumber);
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
 void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
                                                        bool is_js_array) {
   // ----------- S t a t e -------------
@@ -4231,6 +4302,123 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
 }
 
 
+void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
+    MacroAssembler* masm,
+    bool is_js_array) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  //  -- r3    : scratch
+  //  -- r4    : scratch
+  //  -- r5    : scratch
+  //  -- r6    : scratch
+  //  -- r7    : scratch
+  // -----------------------------------
+  Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+
+  Register value_reg = r0;
+  Register key_reg = r1;
+  Register receiver_reg = r2;
+  Register scratch = r3;
+  Register elements_reg = r4;
+  Register mantissa_reg = r5;
+  Register exponent_reg = r6;
+  Register scratch4 = r7;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  __ ldr(elements_reg,
+         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+  // Check that the key is within bounds.
+  if (is_js_array) {
+    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+  } else {
+    __ ldr(scratch,
+           FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  }
+  // Compare smis; an unsigned comparison catches both negative and
+  // out-of-bounds indexes.
+  __ cmp(key_reg, scratch);
+  __ b(hs, &miss_force_generic);
+
+  // Handle smi values specially.
+  __ JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+  __ CheckMap(value_reg,
+              scratch,
+              masm->isolate()->factory()->heap_number_map(),
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
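+  // DONT_DO_SMI_CHECK is safe here: smi values were already diverted to
+  // smi_value above.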
+
+  // Check for NaN or +Infinity: both have an upper word greater than or
+  // equal (signed) to kNaNOrInfinityLowerBoundUpper32 (0x7ff00000).
+  __ mov(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+  __ ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+  __ cmp(exponent_reg, scratch);
+  __ b(ge, &maybe_nan);
+
+  __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+  __ bind(&have_double_value);
+  __ add(scratch, elements_reg,
+         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
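+  // Store the mantissa word at the element's base offset and the exponent
+  // word sizeof(kHoleNanLower32) == 4 bytes above it, the same word order
+  // that the hole check in GenerateLoadFastDoubleElement relies on.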
+  __ str(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ str(exponent_reg, FieldMemOperand(scratch, offset));
+  __ Ret();
+
+  __ bind(&maybe_nan);
+  // Could be NaN or Infinity. If the fraction is non-zero, it is NaN;
+  // otherwise it is Infinity, and the non-NaN code path applies.
+  __ b(gt, &is_nan);
+  __ ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+  __ cmp(mantissa_reg, Operand(0));
+  __ b(eq, &have_double_value);
+  __ bind(&is_nan);
+  // Load canonical NaN for storing into the double array.
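+  // Canonicalizing every NaN that is stored guarantees that no NaN produced
+  // by user code can alias the hole NaN pattern used to mark missing
+  // elements.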
+  __ mov(mantissa_reg, Operand(kCanonicalNonHoleNanLower32));
+  __ mov(exponent_reg, Operand(kCanonicalNonHoleNanUpper32));
+  __ jmp(&have_double_value);
+
+  __ bind(&smi_value);
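+  // The stores below use plain MemOperands rather than FieldMemOperands,
+  // so the heap object tag is stripped from elements_reg up front.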
+  __ add(scratch, elements_reg,
+         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  __ add(scratch, scratch,
+         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  // scratch is now the effective address of the double element.
+
+  FloatingPointHelper::Destination destination;
+  if (CpuFeatures::IsSupported(VFP3)) {
+    destination = FloatingPointHelper::kVFPRegisters;
+  } else {
+    destination = FloatingPointHelper::kCoreRegisters;
+  }
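+  // With VFP3 the conversion produces the double directly in d0 and a
+  // single vstr writes it out; without VFP3, ConvertIntToDouble leaves the
+  // result as a mantissa/exponent pair in core registers.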
+  __ SmiUntag(value_reg, value_reg);
+  FloatingPointHelper::ConvertIntToDouble(
+      masm, value_reg, destination,
+      d0, mantissa_reg, exponent_reg,  // These are: double_dst, dst1, dst2.
+      scratch4, s2);  // These are: scratch2, single_scratch.
+  if (destination == FloatingPointHelper::kVFPRegisters) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vstr(d0, scratch, 0);
+  } else {
+    __ str(mantissa_reg, MemOperand(scratch, 0));
+    __ str(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
+  }
+  __ Ret();
+
+  // Handle store cache miss, replacing the ic with the generic stub.
+  __ bind(&miss_force_generic);
+  Handle<Code> ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
 #undef __
 
 } } // namespace v8::internal