Index: src/mips/stub-cache-mips.cc
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
index 6c8e9612ec1bb226fcc83c689f3640aea5667b60..f1ffe9b63403714247ef4e06e7f4b4b77bba28c3 100644
--- a/src/mips/stub-cache-mips.cc
+++ b/src/mips/stub-cache-mips.cc
@@ -4231,7 +4231,70 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
 void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
     MacroAssembler* masm) {
-  UNIMPLEMENTED();
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  //  -- a0    : key
+  //  -- a1    : receiver
+  // -----------------------------------
+  Label miss_force_generic, slow_allocate_heapnumber;
+
+  Register key_reg = a0;
+  Register receiver_reg = a1;
+  Register elements_reg = a2;
+  Register heap_number_reg = a2;
+  Register indexed_double_offset = a3;
+  Register scratch = t0;
+  Register scratch2 = t1;
+  Register scratch3 = t2;
+  Register heap_number_map = t3;
+
+  // This stub is meant to be tail-jumped to; the caller must already have
+  // verified that the receiver is not a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  // Get the elements array.
+  __ lw(elements_reg,
+        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+  // Check that the key is within bounds.
+  __ lw(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+
+  // Load the upper word of the double in the fixed array and test for NaN.
+  __ sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+  __ Addu(indexed_double_offset, elements_reg, Operand(scratch2));
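+  // key_reg is a smi (index << kSmiTagSize), so the shift above scales it
+  // to a byte offset of index * kDoubleSize.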
+  uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32);
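+  // sizeof(kHoleNanLower32) is 4, so upper_32_offset addresses the upper
+  // (exponent) word of the double; the words are stored lower-word-first.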
+  __ lw(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset));
+  __ Branch(&miss_force_generic, eq, scratch, Operand(kHoleNanUpper32));
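+  // An upper word of kHoleNanUpper32 marks the hole; defer to the generic
+  // stub, which may need to look at the prototype chain.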
+
+  // Non-NaN. Allocate a new heap number and copy the double value into it.
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3,
+                        heap_number_map, &slow_allocate_heapnumber);
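+  // AllocateHeapNumber branches to slow_allocate_heapnumber when new space
+  // is exhausted; the slow IC below then allocates through the runtime.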
+
+  // The upper 32 bits of the double are already in scratch; no need to
+  // reload them.
+  __ sw(scratch, FieldMemOperand(heap_number_reg,
+                                 HeapNumber::kExponentOffset));
+  __ lw(scratch, FieldMemOperand(indexed_double_offset,
+                                 FixedArray::kHeaderSize));
+  __ sw(scratch, FieldMemOperand(heap_number_reg,
+                                 HeapNumber::kMantissaOffset));
+
+  __ mov(v0, heap_number_reg);
+  __ Ret();
+
+  __ bind(&slow_allocate_heapnumber);
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
 }
@@ -4301,7 +4364,120 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
 void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
     MacroAssembler* masm,
     bool is_js_array) {
-  UNIMPLEMENTED();
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  //  -- a1    : key
+  //  -- a2    : receiver
+  //  -- ra    : return address
+  //  -- a3    : scratch
+  //  -- t0    : scratch (elements_reg)
+  //  -- t1    : scratch (mantissa_reg)
+  //  -- t2    : scratch (exponent_reg)
+  //  -- t3    : scratch4
+  // -----------------------------------
+  Label miss_force_generic, smi_value, is_nan, maybe_nan, have_double_value;
+
+  Register value_reg = a0;
+  Register key_reg = a1;
+  Register receiver_reg = a2;
+  Register scratch = a3;
+  Register elements_reg = t0;
+  Register mantissa_reg = t1;
+  Register exponent_reg = t2;
+  Register scratch4 = t3;
+
+  // This stub is meant to be tail-jumped to; the caller must already have
+  // verified that the receiver is not a smi.
+  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  __ lw(elements_reg,
+        FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+  // Check that the key is within bounds.
+  if (is_js_array) {
+    __ lw(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+  } else {
+    __ lw(scratch,
+          FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  }
+  // Compare smis; an unsigned compare catches both negative and
+  // out-of-bounds indexes.
+  __ Branch(&miss_force_generic, hs, key_reg, Operand(scratch));
+
+  // Handle smi values specially.
+  __ JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+  __ CheckMap(value_reg,
+              scratch,
+              masm->isolate()->factory()->heap_number_map(),
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Check for NaN: all NaN values have an upper word greater (signed) than
+  // 0x7ff00000 in the exponent.
+  __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+  __ lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+  __ Branch(&maybe_nan, ge, exponent_reg, Operand(scratch));
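+  // exponent_reg >= kNaNOrInfinityLowerBoundUpper32 (signed) means the value
+  // may be a NaN or an Infinity; the maybe_nan block sorts that out.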
+
+  __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+  __ bind(&have_double_value);
+  __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+  __ Addu(scratch, elements_reg, Operand(scratch4));
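+  // Store the mantissa word at the element start and the exponent word
+  // sizeof(kHoleNanLower32) == 4 bytes above it, matching the layout the
+  // load stub reads.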
+  __ sw(mantissa_reg, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ sw(exponent_reg, FieldMemOperand(scratch, offset));
+  __ Ret();
+
+  __ bind(&maybe_nan);
+  // Could be NaN or Infinity. If the fraction is non-zero, it's a NaN;
+  // otherwise it's an Infinity, and the non-NaN code path applies.
+  __ li(scratch, Operand(kNaNOrInfinityLowerBoundUpper32));
+  __ Branch(&is_nan, gt, exponent_reg, Operand(scratch));
+  __ lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+  __ Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
+
+  __ bind(&is_nan);
+  // Load canonical NaN for storing into the double array.
+  uint64_t nan_int64 = BitCast<uint64_t>(
+      FixedDoubleArray::canonical_not_the_hole_nan_as_double());
+  __ li(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
+  __ li(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
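+  // The canonical NaN is split into its low and high 32-bit halves and
+  // stored through the regular have_double_value path.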
+  __ jmp(&have_double_value);
+
+  __ bind(&smi_value);
+  __ Addu(scratch, elements_reg,
+          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
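+  // elements_reg is a tagged pointer; subtracting kHeapObjectTag yields an
+  // untagged address, so plain MemOperand (not FieldMemOperand) is used for
+  // the stores below.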
+  __ sll(scratch4, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+  __ Addu(scratch, scratch, scratch4);
+  // scratch now holds the effective address of the double element.
+
+  FloatingPointHelper::Destination destination;
+  if (CpuFeatures::IsSupported(FPU)) {
+    destination = FloatingPointHelper::kFPURegisters;
+  } else {
+    destination = FloatingPointHelper::kCoreRegisters;
+  }
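+  // With an FPU the converted double is produced in f0; without one,
+  // ConvertIntToDouble leaves the two word halves in mantissa_reg and
+  // exponent_reg instead.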
+  __ SmiUntag(value_reg, value_reg);
+  FloatingPointHelper::ConvertIntToDouble(
+      masm, value_reg, destination,
+      f0, mantissa_reg, exponent_reg,  // These are: double_dst, dst1, dst2.
+      scratch4, f2);  // These are: scratch2, single_scratch.
+  if (destination == FloatingPointHelper::kFPURegisters) {
+    CpuFeatures::Scope scope(FPU);
+    __ sdc1(f0, MemOperand(scratch, 0));
+  } else {
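+    // No FPU: store the two 32-bit halves produced by ConvertIntToDouble.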
+    __ sw(mantissa_reg, MemOperand(scratch, 0));
+    __ sw(exponent_reg, MemOperand(scratch, Register::kSizeInBytes));
+  }
+  __ Ret();
+
+  // Handle a store cache miss by replacing the IC with the generic stub.
+  __ bind(&miss_force_generic);
+  Handle<Code> ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
 }