Index: src/mips/ic-mips.cc
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
index b6f019f478565c23bb8f9ec2a39ce83bb7eeccd5..43b42dd3260457d9133fd32c65e18c0b616cb161 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips/ic-mips.cc
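The pattern applied throughout the hunks below is to compare a map register against a well-known map identified by a Heap root-list index (e.g. Heap::kFixedArrayMapRootIndex) instead of against a Handle<Map> fetched from the heap or factory up front. A minimal standalone sketch of that idea, using hypothetical stand-in types rather than V8's actual Heap/MacroAssembler API:

    // Standalone sketch: check an object's map against a root-list entry by
    // index instead of holding a pointer/handle to the expected map.
    // All names here are illustrative stand-ins, not V8 internals.
    #include <array>
    #include <cstdio>

    enum RootIndex { kFixedArrayMapRootIndex, kFixedDoubleArrayMapRootIndex, kRootCount };

    struct Map {};

    // Tiny stand-in for the heap's root list: one well-known map per index.
    static std::array<Map, kRootCount> roots;

    struct HeapObject { const Map* map; };

    // "Before" shape: the caller materializes the expected map and the check
    // compares against that embedded pointer.
    static bool CheckMapByHandle(const HeapObject& obj, const Map* expected_map) {
      return obj.map == expected_map;
    }

    // "After" shape: the caller passes only a root index; the expected map is
    // loaded from the root list when the check runs.
    static bool CheckMapByRootIndex(const HeapObject& obj, RootIndex index) {
      return obj.map == &roots[index];
    }

    int main() {
      HeapObject elements{&roots[kFixedArrayMapRootIndex]};
      std::printf("by handle: %d, by root index: %d\n",
                  CheckMapByHandle(elements, &roots[kFixedArrayMapRootIndex]),
                  CheckMapByRootIndex(elements, kFixedArrayMapRootIndex));
      return 0;
    }
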
@@ -758,8 +758,6 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                 Register scratch3,
                                                 Label* unmapped_case,
                                                 Label* slow_case) {
-  Heap* heap = masm->isolate()->heap();
-
   // Check that the receiver is a JSObject. Because of the map check
   // later, we do not need to check for interceptors or whether it
   // requires access checks.
@@ -773,10 +771,9 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
   __ Branch(slow_case, ne, scratch1, Operand(zero_reg));

   // Load the elements into scratch1 and check its map.
-  Handle<Map> arguments_map(heap->non_strict_arguments_elements_map());
   __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
-  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
+  __ CheckMap(scratch1, scratch2, Heap::kNonStrictArgumentsElementsMapRootIndex,
+              slow_case, DONT_DO_SMI_CHECK);

Erik Corry
2012/03/17 02:49:18
Arguments should be all on one line or one per line.
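For reference, a minimal standalone sketch of the two call layouts that style comment allows; the five-argument helper below is hypothetical, only stands in for a call like the CheckMap above, and is not the V8 MacroAssembler API:

    // Hypothetical helper used only to illustrate argument layout; not V8 code.
    #include <cstdio>

    static void FiveArgCall(int a, int b, int c, const char* d, bool e) {
      std::printf("%d %d %d %s %d\n", a, b, c, d, e);
    }

    int main() {
      // Accepted: all arguments on one line.
      FiveArgCall(1, 2, 3, "slow_case", false);
      // Also accepted: one argument per line.
      FiveArgCall(1,
                  2,
                  3,
                  "slow_case",
                  false);
      // The mixed layout in the patch (some arguments on the first line, the
      // rest on a continuation line) is what the comment asks to avoid.
      return 0;
    }
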
   // Check if element is in the range of mapped arguments. If not, jump
   // to the unmapped lookup with the parameter map in scratch1.
   __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
@@ -820,9 +817,8 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
   const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
   Register backing_store = parameter_map;
   __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
-  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
-  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
-              DONT_DO_SMI_CHECK);
+  __ CheckMap(backing_store, scratch, Heap::kFixedArrayMapRootIndex, slow_case,
+              DONT_DO_SMI_CHECK);
   __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
   __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
   __ li(scratch, Operand(kPointerSize >> 1));
@@ -1254,7 +1250,8 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
   __ Branch(&slow, hs, key, Operand(t0));
   __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
   __ Branch(&check_if_double_array, ne, elements_map,
-            Operand(masm->isolate()->factory()->fixed_array_map()));
+            Heap::kFixedArrayMapRootIndex);
+
   // Calculate key + 1 as smi.
   STATIC_ASSERT(kSmiTag == 0);
   __ Addu(t0, key, Operand(Smi::FromInt(1)));
@@ -1262,8 +1259,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
   __ Branch(&fast_object_without_map_check);

   __ bind(&check_if_double_array);
-  __ Branch(&slow, ne, elements_map,
-            Operand(masm->isolate()->factory()->fixed_double_array_map()));
+  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
   // Add 1 to key, and go to common element store code for doubles.
   STATIC_ASSERT(kSmiTag == 0);
   __ Addu(t0, key, Operand(Smi::FromInt(1)));
@@ -1286,7 +1282,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
   Register address = t1;
   __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
   __ Branch(&fast_double_with_map_check, ne, elements_map,
-            Operand(masm->isolate()->factory()->fixed_array_map()));
+            Heap::kFixedArrayMapRootIndex);
   __ bind(&fast_object_without_map_check);
   // Smi stores don't require further checks.
   Label non_smi_value;
@@ -1323,8 +1319,7 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
   __ bind(&fast_double_with_map_check);
   // Check for fast double array case. If this fails, call through to the
   // runtime.
-  __ Branch(&slow, ne, elements_map,
-            Operand(masm->isolate()->factory()->fixed_double_array_map()));
+  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
   __ bind(&fast_double_without_map_check);
   __ StoreNumberToDoubleElements(value,
                                  key,