Index: src/arm/stub-cache-arm.cc |
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc |
index d7b1b55c20a06589f472cb39f0eb022e7ff41490..d7c509169916d12ebe49fbbbe281c4db84c63782 100644 |
--- a/src/arm/stub-cache-arm.cc |
+++ b/src/arm/stub-cache-arm.cc |
@@ -870,9 +870,9 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm, |
} else { |
__ Move(r6, call_data); |
} |
- __ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate()))); |
+ __ mov(ip, Operand(ExternalReference::isolate_address(masm->isolate()))); |
[Review comment — ulan, 2013/07/30 09:45:19]
This `mov` might use `ip` internally: when the immediate operand cannot be
encoded in a single instruction, the ARM macro-assembler may synthesize the
load using `ip` as a scratch register — confirm that `mov` with `ip` as the
destination is safe here (i.e. it does not also use `ip` as a temporary).
 |
// Store JS function, call data, isolate ReturnValue default and ReturnValue. |
- __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit()); |
+ __ stm(ib, sp, r5.bit() | r6.bit() | ip.bit()); |
__ LoadRoot(r5, Heap::kUndefinedValueRootIndex); |
__ str(r5, MemOperand(sp, 4 * kPointerSize)); |
__ str(r5, MemOperand(sp, 5 * kPointerSize)); |
@@ -1830,15 +1830,15 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( |
if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) { |
Label fast_object, not_fast_object; |
- __ CheckFastObjectElements(r3, r7, ¬_fast_object); |
+ __ CheckFastObjectElements(r3, r9, ¬_fast_object); |
__ jmp(&fast_object); |
// In case of fast smi-only, convert to fast object, otherwise bail out. |
__ bind(¬_fast_object); |
- __ CheckFastSmiElements(r3, r7, &call_builtin); |
+ __ CheckFastSmiElements(r3, r9, &call_builtin); |
- __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset)); |
+ __ ldr(r9, FieldMemOperand(r4, HeapObject::kMapOffset)); |
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
- __ cmp(r7, ip); |
+ __ cmp(r9, ip); |
__ b(eq, &call_builtin); |
// edx: receiver |
// r3: map |
@@ -1846,7 +1846,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( |
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, |
FAST_ELEMENTS, |
r3, |
- r7, |
+ r9, |
&try_holey_map); |
__ mov(r2, receiver); |
ElementsTransitionGenerator:: |
@@ -1859,7 +1859,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( |
__ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS, |
FAST_HOLEY_ELEMENTS, |
r3, |
- r7, |
+ r9, |
&call_builtin); |
__ mov(r2, receiver); |
ElementsTransitionGenerator:: |
@@ -1892,7 +1892,6 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( |
__ bind(&attempt_to_grow_elements); |
// r0: array's length + 1. |
- // r4: elements' length. |
if (!FLAG_inline_new) { |
__ b(&call_builtin); |
@@ -1903,8 +1902,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( |
// the new element is non-Smi. For now, delegate to the builtin. |
Label no_fast_elements_check; |
__ JumpIfSmi(r2, &no_fast_elements_check); |
- __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
- __ CheckFastObjectElements(r7, r7, &call_builtin); |
+ __ ldr(r9, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
+ __ CheckFastObjectElements(r9, r9, &call_builtin); |
__ bind(&no_fast_elements_check); |
ExternalReference new_space_allocation_top = |
@@ -1916,8 +1915,8 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( |
// Load top and check if it is the end of elements. |
__ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0)); |
__ add(end_elements, end_elements, Operand(kEndElementsOffset)); |
- __ mov(r7, Operand(new_space_allocation_top)); |
- __ ldr(r3, MemOperand(r7)); |
+ __ mov(r4, Operand(new_space_allocation_top)); |
+ __ ldr(r3, MemOperand(r4)); |
__ cmp(end_elements, r3); |
__ b(ne, &call_builtin); |
@@ -1929,7 +1928,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( |
// We fit and could grow elements. |
// Update new_space_allocation_top. |
- __ str(r3, MemOperand(r7)); |
+ __ str(r3, MemOperand(r4)); |
// Push the argument. |
__ str(r2, MemOperand(end_elements)); |
// Fill the rest with holes. |
@@ -1940,6 +1939,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall( |
// Update elements' and array's sizes. |
__ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
+ __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
__ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta))); |
__ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
@@ -3281,7 +3281,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( |
case EXTERNAL_FLOAT_ELEMENTS: |
// Perform int-to-float conversion and store to memory. |
__ SmiUntag(r4, key); |
- StoreIntAsFloat(masm, r3, r4, r5, r7); |
+ StoreIntAsFloat(masm, r3, r4, r5, r6); |
break; |
case EXTERNAL_DOUBLE_ELEMENTS: |
__ vmov(s2, r5); |
@@ -3337,7 +3337,7 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray( |
// not include -kHeapObjectTag into it. |
__ sub(r5, value, Operand(kHeapObjectTag)); |
__ vldr(d0, r5, HeapNumber::kValueOffset); |
- __ ECMAToInt32(r5, d0, r6, r7, r9, d1); |
+ __ ECMAToInt32(r5, d0, r4, r6, r9, d1); |
switch (elements_kind) { |
case EXTERNAL_BYTE_ELEMENTS: |
@@ -3574,8 +3574,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( |
Register receiver_reg = r2; |
Register elements_reg = r3; |
Register scratch1 = r4; |
- Register scratch2 = r5; |
- Register length_reg = r7; |
+ Register scratch2 = no_reg; // Will be r5. |
+ Register length_reg = r5; |
// This stub is meant to be tail-jumped to, the receiver must already |
// have been verified by the caller to not be a smi. |
@@ -3639,6 +3639,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( |
__ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex); |
__ b(ne, &check_capacity); |
+ scratch2 = length_reg; // Use length_reg as scratch2 here. |
+ |
int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements); |
__ Allocate(size, elements_reg, scratch1, scratch2, &slow, TAG_OBJECT); |
@@ -3662,6 +3664,8 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement( |
__ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize)); |
} |
+ scratch2 = no_reg; // End of scratch2's live range. |
+ |
// Install the new backing store in the JSArray. |
__ str(elements_reg, |
FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); |