Index: src/arm/stub-cache-arm.cc
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
index 86e49716d3b45e3c91dc5b89b4a886a81b5ebdf4..a883b37dc07e72a04ef87a84eb4804493c1457a9 100644
--- a/src/arm/stub-cache-arm.cc
+++ b/src/arm/stub-cache-arm.cc
@@ -4168,6 +4168,70 @@ void KeyedLoadStubCompiler::GenerateLoadFastElement(MacroAssembler* masm) {
 }
+void KeyedLoadStubCompiler::GenerateLoadFastDoubleElement(
+    MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  //  -- r0    : key
+  //  -- r1    : receiver
+  // -----------------------------------
+  Label miss_force_generic, slow_allocate_heapnumber;
+
+  Register key_reg = r0;
+  Register receiver_reg = r1;
+  Register elements_reg = r2;
+  Register heap_number_reg = r2;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  // Get the elements array.
+  __ ldr(elements_reg,
+         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+  // Check that the key is within bounds.
+  __ ldr(r3, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
Mads Ager (chromium), 2011/07/12 12:03:26: In the store stub you have named the scratch regis…
danno, 2011/07/13 08:59:52: Done.
+  __ cmp(key_reg, Operand(r3));
+  __ b(hs, &miss_force_generic);
+
+  // Load the upper word of the double in the fixed array and test for NaN.
+  __ add(r3, elements_reg,
+         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  uint32_t upper_32_offset = FixedArray::kHeaderSize + Register::kSizeInBytes;
Mads Ager (chromium), 2011/07/12 12:03:26: This doesn't really matter, but I like sizeof(kHol…
danno, 2011/07/13 08:59:52: Done.
+  __ ldr(r4, FieldMemOperand(r3, upper_32_offset));
+  // Detect the upper 32-bit pattern in the double that is the sentinel for
+  // the hole by detecting an overflow when adding 1.
+  ASSERT(0x7FFFFFFF == kHoleNanUpper32);
+  __ add(r5, r4, Operand(1), SetCC);
Rodolph Perfetta, 2011/07/12 14:00:30: 0x7FFFFFFF + 1 will not set the overflow flag, but…
danno, 2011/07/13 08:59:52: Done.
+  __ b(&miss_force_generic, vs);
+
+  // Non-NaN. Allocate a new heap number and copy the double value into it.
Mads Ager (chromium), 2011/07/12 12:03:26: NaN
danno, 2011/07/13 08:59:52: Done.
+  __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex);
+  __ AllocateHeapNumber(heap_number_reg, r5, r6, r7, &slow_allocate_heapnumber);
+
+  // Don't need to reload the upper 32 bits of the double; it's already in r4.
+  __ str(r4, FieldMemOperand(heap_number_reg, HeapNumber::kExponentOffset));
+  __ ldr(r4, FieldMemOperand(r3, FixedArray::kHeaderSize));
+  __ str(r4, FieldMemOperand(heap_number_reg, HeapNumber::kMantissaOffset));
+
+  __ mov(r0, heap_number_reg);
+  __ Ret();
+
+  __ bind(&slow_allocate_heapnumber);
+  Handle<Code> slow_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_Slow();
+  __ Jump(slow_ic, RelocInfo::CODE_TARGET);
+
+  __ bind(&miss_force_generic);
+  Handle<Code> miss_ic =
+      masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric();
+  __ Jump(miss_ic, RelocInfo::CODE_TARGET);
+}
+
+
 void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
                                                        bool is_js_array) {
   // ----------- S t a t e -------------
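
For readers less familiar with the unboxed-double layout, here is a minimal standalone C++ sketch (not V8 code) of what the load stub above does: bounds-check the smi key, read the upper 32 bits of the element, bail out to the generic IC when it matches the hole sentinel (the ASSERT in the stub pins that upper word to 0x7FFFFFFF), and otherwise box the raw 64-bit payload as a heap number. The stub detects the sentinel with an add-with-flags and a vs branch; the sketch uses a plain equality compare, and the names LoadFastDoubleElementSketch, kSketchHoleUpper32, and the vector/optional types are illustrative stand-ins for the elements array and the allocated HeapNumber.

// Standalone sketch only -- not V8 code. Constant and container types are
// illustrative assumptions; the real values come from V8's headers.
#include <cstdint>
#include <cstring>
#include <optional>
#include <vector>

static const uint32_t kSketchHoleUpper32 = 0x7FFFFFFF;  // per the ASSERT above

// Bounds-check, inspect the upper word (the high half of the IEEE double on
// little-endian ARM), refuse the hole, otherwise return the boxed double.
std::optional<double> LoadFastDoubleElementSketch(
    const std::vector<uint64_t>& elements, size_t key) {
  if (key >= elements.size()) return std::nullopt;       // -> miss_force_generic
  uint64_t raw = elements[key];
  uint32_t upper = static_cast<uint32_t>(raw >> 32);
  if (upper == kSketchHoleUpper32) return std::nullopt;  // hole: defer to generic IC
  double boxed;
  std::memcpy(&boxed, &raw, sizeof(boxed));              // exponent word + mantissa word
  return boxed;
}
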
@@ -4231,6 +4295,116 @@ void KeyedStoreStubCompiler::GenerateStoreFastElement(MacroAssembler* masm,
 }
+void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
+    MacroAssembler* masm,
+    bool is_js_array) {
+  // ----------- S t a t e -------------
+  //  -- r0    : value
+  //  -- r1    : key
+  //  -- r2    : receiver
+  //  -- lr    : return address
+  //  -- r3    : scratch
+  //  -- r4    : scratch
+  //  -- r5    : scratch
+  // -----------------------------------
+  Label miss_force_generic, smi_value, is_nan, have_double_value;
+
+  Register value_reg = r0;
+  Register key_reg = r1;
+  Register receiver_reg = r2;
+  Register scratch = r3;
+  Register elements_reg = r4;
+  Register scratch2 = r5;
+  Register scratch3 = r6;
+  Register scratch4 = r7;
+
+  // This stub is meant to be tail-jumped to; the receiver must already
+  // have been verified by the caller to not be a smi.
+  __ JumpIfNotSmi(key_reg, &miss_force_generic);
+
+  __ ldr(elements_reg,
+         FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
+
+  // Check that the key is within bounds.
+  if (is_js_array) {
+    __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
+  } else {
+    __ ldr(scratch,
+           FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
+  }
+  // Compare smis; an unsigned compare catches both negative and out-of-bounds
+  // indexes.
+  __ cmp(key_reg, scratch);
+  __ b(hs, &miss_force_generic);
+
+  // Handle smi values specially.
+  __ JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+  __ CheckMap(value_reg,
+              scratch,
+              masm->isolate()->factory()->heap_number_map(),
+              &miss_force_generic,
+              DONT_DO_SMI_CHECK);
+
+  // Check for NaN: treat any value whose upper (exponent) word is greater,
+  // as a signed integer, than 0x7FF00000 as a NaN.
+  __ mov(scratch, Operand(0x7ff00000));
+  __ ldr(scratch3, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+  __ cmp(scratch3, scratch);
+  __ b(gt, &is_nan);
+
+  __ ldr(scratch2, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+  __ bind(&have_double_value);
+  // Double value to store in the double array is in scratch2 and scratch3.
+  __ add(scratch, elements_reg,
+         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  __ str(scratch2, FieldMemOperand(scratch, FixedDoubleArray::kHeaderSize));
+  uint32_t offset = FixedDoubleArray::kHeaderSize + Register::kSizeInBytes;
+  __ str(scratch3, FieldMemOperand(scratch, offset));
+  __ Ret();
+
+  __ bind(&is_nan);
+  __ mov(scratch2, Operand(kCanonicalNonHoleNanLower32));
+  __ mov(scratch3, Operand(kCanonicalNonHoleNanUpper32));
+  __ jmp(&have_double_value);
+
+  __ bind(&smi_value);
+  __ add(scratch, elements_reg,
+         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  __ add(scratch, scratch,
+         Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
+  // scratch now holds the effective address of the double element.
+
+  FloatingPointHelper::Destination destination;
+  if (CpuFeatures::IsSupported(VFP3)) {
+    destination = FloatingPointHelper::kVFPRegisters;
+  } else {
+    destination = FloatingPointHelper::kCoreRegisters;
+  }
+  __ SmiUntag(value_reg, value_reg);
+  FloatingPointHelper::ConvertIntToDouble(
+      masm, value_reg, destination,
+      d0, scratch2, scratch3,  // These are: double_dst, dst1, dst2.
+      scratch4, s2);  // These are: scratch2, single_scratch.
+  if (destination == FloatingPointHelper::kVFPRegisters) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vstr(d0, scratch, 0);
+  } else {
+    __ str(scratch2, MemOperand(scratch, 0));
+    __ str(scratch3, MemOperand(scratch, Register::kSizeInBytes));
+  }
+  __ Ret();
+
+  // Handle a store cache miss, replacing the IC with the generic stub.
+  __ bind(&miss_force_generic);
+  Handle<Code> ic =
+      masm->isolate()->builtins()->KeyedStoreIC_MissForceGeneric();
+  __ Jump(ic, RelocInfo::CODE_TARGET);
+}
+
+
 #undef __
 } } // namespace v8::internal
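
And a matching standalone sketch (again, not V8 code) of the store path above: bounds-check the smi key, convert a smi directly to a double, and for heap numbers replace any NaN with a canonical non-hole NaN before writing the two 32-bit words. The names StoreFastDoubleElementSketch and kSketchCanonicalNaN are illustrative; the canonical bit pattern is an assumed stand-in for kCanonicalNonHoleNanLower32/kCanonicalNonHoleNanUpper32, and std::isnan stands in for the stub's signed compare of the exponent word against 0x7FF00000.

// Standalone sketch only -- not V8 code; names and the canonical NaN bits
// are illustrative assumptions.
#include <cmath>
#include <cstdint>
#include <cstring>
#include <vector>

static const uint64_t kSketchCanonicalNaN = 0x7FF8000000000000ULL;  // assumed

// value_is_smi models the JumpIfSmi fast path; otherwise the double comes
// from an already-verified heap number.
bool StoreFastDoubleElementSketch(std::vector<uint64_t>& elements, size_t key,
                                  bool value_is_smi, int32_t smi_value,
                                  double heap_number_value) {
  if (key >= elements.size()) return false;  // -> miss_force_generic

  uint64_t raw;
  if (value_is_smi) {
    double d = static_cast<double>(smi_value);  // ConvertIntToDouble
    std::memcpy(&raw, &d, sizeof(raw));
  } else if (std::isnan(heap_number_value)) {
    raw = kSketchCanonicalNaN;  // canonicalize NaNs so they cannot alias the hole
  } else {
    std::memcpy(&raw, &heap_number_value, sizeof(raw));  // store both 32-bit words
  }
  elements[key] = raw;
  return true;
}
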