Index: src/mips64/ic-mips64.cc
diff --git a/src/mips/ic-mips.cc b/src/mips64/ic-mips64.cc
similarity index 82%
copy from src/mips/ic-mips.cc
copy to src/mips64/ic-mips64.cc
index 2ca6b4d3411a78e0e5d806b98f1e2dcc2b860ac0..d8f7173c997d22981904247d7bc04aeeba8bb439 100644
--- a/src/mips/ic-mips.cc
+++ b/src/mips64/ic-mips64.cc
@@ -6,7 +6,7 @@
#include "src/v8.h"
-#if V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_MIPS64
#include "src/code-stubs.h"
#include "src/codegen.h"
@@ -70,8 +70,8 @@ static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
(1 << Map::kHasNamedInterceptor)));
__ Branch(miss, ne, scratch1, Operand(zero_reg));
- __ lw(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ ld(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ ld(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(scratch0, Heap::kHashTableMapRootIndex);
__ Branch(miss, ne, scratch1, Operand(scratch0));
}
@@ -120,14 +120,14 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
const int kElementsStartOffset = NameDictionary::kHeaderSize +
NameDictionary::kElementsStartIndex * kPointerSize;
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
- __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
__ And(at,
scratch1,
- Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
+ Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
__ Branch(miss, ne, at, Operand(zero_reg));
// Get the value at the masked, scaled index and return.
- __ lw(result,
+ __ ld(result,
FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}
@@ -175,15 +175,15 @@ static void GenerateDictionaryStore(MacroAssembler* masm,
const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
const int kTypeAndReadOnlyMask =
(PropertyDetails::TypeField::kMask |
- PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
- __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
- __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
+ PropertyDetails::AttributesField::encode(READ_ONLY));
+ __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+ __ And(at, scratch1, Operand(Smi::FromInt(kTypeAndReadOnlyMask)));
__ Branch(miss, ne, at, Operand(zero_reg));
// Store the value at the masked, scaled index and return.
const int kValueOffset = kElementsStartOffset + kPointerSize;
- __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
- __ sw(value, MemOperand(scratch2));
+ __ Daddu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+ __ sd(value, MemOperand(scratch2));
// Update the write barrier. Make sure not to clobber the value.
__ mov(scratch1, value);
@@ -203,7 +203,7 @@ static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, slow);
// Get the map of the receiver.
- __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check bit field.
__ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(at, scratch,
@@ -252,10 +252,10 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
//
// scratch2 - used to hold the loaded value.
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
if (not_fast_array != NULL) {
// Check that the object is in fast mode (not dictionary).
- __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ ld(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
__ Branch(not_fast_array, ne, scratch1, Operand(at));
} else {
@@ -263,17 +263,17 @@ static void GenerateFastArrayLoad(MacroAssembler* masm,
}
// Check that the key (index) is within bounds.
- __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ ld(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
__ Branch(out_of_range, hs, key, Operand(scratch1));
// Fast case: Do the load.
- __ Addu(scratch1, elements,
+ __ Daddu(scratch1, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
// The key is a smi.
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
- __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
- __ addu(at, at, scratch1);
- __ lw(scratch2, MemOperand(at));
+ __ SmiScale(at, key, kPointerSizeLog2);
+ __ daddu(at, at, scratch1);
+ __ ld(scratch2, MemOperand(at));
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
// In case the loaded value is the_hole we have to consult GetProperty
@@ -300,7 +300,7 @@ static void GenerateKeyNameCheck(MacroAssembler* masm,
__ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
// Is the string an array index, with cached numeric value?
- __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
__ Branch(index_string, eq, at, Operand(zero_reg));
@@ -326,7 +326,7 @@ void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
// Probe the stub cache.
Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, receiver, name, a3, t0, t1, t2);
+ masm, flags, receiver, name, a3, a4, a5, a6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -344,10 +344,10 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
Label miss, slow;
- GenerateNameDictionaryReceiverCheck(masm, a1, a0, a3, t0, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, a1, a0, a3, a4, &miss);
// a0: elements
- GenerateDictionaryLoad(masm, &slow, a0, a2, v0, a3, t0);
+ GenerateDictionaryLoad(masm, &slow, a0, a2, v0, a3, a4);
__ Ret();
// Dictionary load failed, go slow (but don't miss).
@@ -365,10 +365,10 @@ static const Register LoadIC_TempRegister() { return a3; }
void LoadIC::GenerateMiss(MacroAssembler* masm) {
- // The return address is in ra.
+ // The return address is on the stack.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);
__ mov(LoadIC_TempRegister(), ReceiverRegister());
__ Push(LoadIC_TempRegister(), NameRegister());
@@ -408,12 +408,12 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
__ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
// Check that the key is a positive smi.
- __ And(scratch1, key, Operand(0x80000001));
+ __ NonNegativeSmiTst(key, scratch1);
__ Branch(slow_case, ne, scratch1, Operand(zero_reg));
// Load the elements into scratch1 and check its map.
Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
- __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+ __ ld(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
__ CheckMap(scratch1,
scratch2,
arguments_map,
@@ -421,31 +421,31 @@ static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
DONT_DO_SMI_CHECK);
// Check if element is in the range of mapped arguments. If not, jump
// to the unmapped lookup with the parameter map in scratch1.
- __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
- __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
+ __ ld(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+ __ Dsubu(scratch2, scratch2, Operand(Smi::FromInt(2)));
__ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
// Load element index and check whether it is the hole.
const int kOffset =
FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
- __ li(scratch3, Operand(kPointerSize >> 1));
- __ Mul(scratch3, key, scratch3);
- __ Addu(scratch3, scratch3, Operand(kOffset));
+ __ SmiUntag(scratch3, key);
+ __ dsll(scratch3, scratch3, kPointerSizeLog2);
+ __ Daddu(scratch3, scratch3, Operand(kOffset));
- __ Addu(scratch2, scratch1, scratch3);
- __ lw(scratch2, MemOperand(scratch2));
+ __ Daddu(scratch2, scratch1, scratch3);
+ __ ld(scratch2, MemOperand(scratch2));
__ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
__ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
// Load value from context and return it. We can reuse scratch1 because
// we do not jump to the unmapped lookup (which requires the parameter
// map in scratch1).
- __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
- __ li(scratch3, Operand(kPointerSize >> 1));
- __ Mul(scratch3, scratch2, scratch3);
- __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
- __ Addu(scratch2, scratch1, scratch3);
+ __ ld(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+ __ SmiUntag(scratch3, scratch2);
+ __ dsll(scratch3, scratch3, kPointerSizeLog2);
+ __ Daddu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+ __ Daddu(scratch2, scratch1, scratch3);
return MemOperand(scratch2);
}
@@ -461,20 +461,20 @@ static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
// overwritten.
const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
Register backing_store = parameter_map;
- __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+ __ ld(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
__ CheckMap(backing_store,
scratch,
Heap::kFixedArrayMapRootIndex,
slow_case,
DONT_DO_SMI_CHECK);
- __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+ __ ld(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
__ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
- __ li(scratch, Operand(kPointerSize >> 1));
- __ Mul(scratch, key, scratch);
- __ Addu(scratch,
+ __ SmiUntag(scratch, key);
+ __ dsll(scratch, scratch, kPointerSizeLog2);
+ __ Daddu(scratch,
scratch,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ Addu(scratch, backing_store, scratch);
+ __ Daddu(scratch, backing_store, scratch);
return MemOperand(scratch);
}
@@ -489,14 +489,14 @@ void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
Label slow, notin;
MemOperand mapped_location =
GenerateMappedArgumentsLookup(
- masm, receiver, key, a0, a3, t0, &notin, &slow);
+ masm, receiver, key, a0, a3, a4, &notin, &slow);
__ Ret(USE_DELAY_SLOT);
- __ lw(v0, mapped_location);
+ __ ld(v0, mapped_location);
__ bind(&notin);
- // The unmapped lookup expects that the parameter map is in a0.
+ // The unmapped lookup expects that the parameter map is in a2.
MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, key, a0, a3, &slow);
- __ lw(a0, unmapped_location);
+ GenerateUnmappedArgumentsLookup(masm, a0, a0, a3, &slow);
+ __ ld(a0, unmapped_location);
__ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
__ Branch(&slow, eq, a0, Operand(a3));
__ Ret(USE_DELAY_SLOT);
@@ -516,11 +516,11 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
Label slow, notin;
// Store address is returned in register (of MemOperand) mapped_location.
MemOperand mapped_location =
- GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
- __ sw(a0, mapped_location);
- __ mov(t5, a0);
+ GenerateMappedArgumentsLookup(masm, a2, a1, a3, a4, a5, &notin, &slow);
+ __ sd(a0, mapped_location);
+ __ mov(t1, a0);
ASSERT_EQ(mapped_location.offset(), 0);
- __ RecordWrite(a3, mapped_location.rm(), t5,
+ __ RecordWrite(a3, mapped_location.rm(), t1,
kRAHasNotBeenSaved, kDontSaveFPRegs);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
@@ -528,11 +528,11 @@ void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
// The unmapped lookup expects that the parameter map is in a3.
// Store address is returned in register (of MemOperand) unmapped_location.
MemOperand unmapped_location =
- GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
- __ sw(a0, unmapped_location);
- __ mov(t5, a0);
+ GenerateUnmappedArgumentsLookup(masm, a1, a3, a4, &slow);
+ __ sd(a0, unmapped_location);
+ __ mov(t1, a0);
ASSERT_EQ(unmapped_location.offset(), 0);
- __ RecordWrite(a3, unmapped_location.rm(), t5,
+ __ RecordWrite(a3, unmapped_location.rm(), t1,
kRAHasNotBeenSaved, kDontSaveFPRegs);
__ Ret(USE_DELAY_SLOT);
__ mov(v0, a0); // (In delay slot) return the value stored in v0.
@@ -545,7 +545,7 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
// The return address is in ra.
Isolate* isolate = masm->isolate();
- __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
+ __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);
__ Push(ReceiverRegister(), NameRegister());
@@ -596,28 +596,28 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ CheckFastElements(a0, a3, &check_number_dictionary);
GenerateFastArrayLoad(
- masm, receiver, key, a0, a3, t0, v0, NULL, &slow);
- __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, t0, a3);
+ masm, receiver, key, a0, a3, a4, v0, NULL, &slow);
+ __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a4, a3);
__ Ret();
__ bind(&check_number_dictionary);
- __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
- __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
+ __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ld(a3, FieldMemOperand(a4, JSObject::kMapOffset));
// Check whether the elements is a number dictionary.
// a3: elements map
- // t0: elements
+ // a4: elements
__ LoadRoot(at, Heap::kHashTableMapRootIndex);
__ Branch(&slow, ne, a3, Operand(at));
- __ sra(a0, key, kSmiTagSize);
- __ LoadFromNumberDictionary(&slow, t0, key, v0, a0, a3, t1);
+ __ dsra32(a0, key, 0);
+ __ LoadFromNumberDictionary(&slow, a4, key, v0, a0, a3, a5);
__ Ret();
// Slow case, key and receiver still in a2 and a1.
__ bind(&slow);
__ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
1,
- t0,
+ a4,
a3);
GenerateRuntimeGetProperty(masm);
@@ -625,22 +625,24 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
GenerateKeyedLoadReceiverCheck(
- masm, receiver, a0, a3, Map::kHasNamedInterceptor, &slow);
+ masm, receiver, a0, a3, Map::kHasNamedInterceptor, &slow);
// If the receiver is a fast-case object, check the keyed lookup
// cache. Otherwise probe the dictionary.
- __ lw(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
+ __ ld(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHashTableMapRootIndex);
- __ Branch(&probe_dictionary, eq, t0, Operand(at));
+ __ Branch(&probe_dictionary, eq, a4, Operand(at));
// Load the map of the receiver, compute the keyed lookup cache hash
// based on 32 bits of the map pointer and the name hash.
- __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
- __ sra(a3, a0, KeyedLookupCache::kMapHashShift);
- __ lw(t0, FieldMemOperand(key, Name::kHashFieldOffset));
- __ sra(at, t0, Name::kHashShift);
+ __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ dsll32(a3, a0, 0);
+ __ dsrl32(a3, a3, 0);
+ __ dsra(a3, a3, KeyedLookupCache::kMapHashShift);
+ __ lwu(a4, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ dsra(at, a4, Name::kHashShift);
__ xor_(a3, a3, at);
int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
__ And(a3, a3, Operand(mask));
@@ -652,23 +654,23 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
Label hit_on_nth_entry[kEntriesPerBucket];
ExternalReference cache_keys =
ExternalReference::keyed_lookup_cache_keys(isolate);
- __ li(t0, Operand(cache_keys));
- __ sll(at, a3, kPointerSizeLog2 + 1);
- __ addu(t0, t0, at);
+ __ li(a4, Operand(cache_keys));
+ __ dsll(at, a3, kPointerSizeLog2 + 1);
+ __ daddu(a4, a4, at);
for (int i = 0; i < kEntriesPerBucket - 1; i++) {
Label try_next_entry;
- __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
- __ Branch(&try_next_entry, ne, a0, Operand(t1));
- __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
- __ Branch(&hit_on_nth_entry[i], eq, key, Operand(t1));
+ __ ld(a5, MemOperand(a4, kPointerSize * i * 2));
+ __ Branch(&try_next_entry, ne, a0, Operand(a5));
+ __ ld(a5, MemOperand(a4, kPointerSize * (i * 2 + 1)));
+ __ Branch(&hit_on_nth_entry[i], eq, key, Operand(a5));
__ bind(&try_next_entry);
}
- __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
- __ Branch(&slow, ne, a0, Operand(t1));
- __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
- __ Branch(&slow, ne, key, Operand(t1));
+ __ ld(a5, MemOperand(a4, kPointerSize * (kEntriesPerBucket - 1) * 2));
+ __ Branch(&slow, ne, a0, Operand(a5));
+ __ ld(a5, MemOperand(a4, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
+ __ Branch(&slow, ne, key, Operand(a5));
// Get field offset.
// a0 : receiver's map
@@ -679,13 +681,16 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Hit on nth entry.
for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
__ bind(&hit_on_nth_entry[i]);
- __ li(t0, Operand(cache_field_offsets));
- __ sll(at, a3, kPointerSizeLog2);
- __ addu(at, t0, at);
- __ lw(t1, MemOperand(at, kPointerSize * i));
- __ lbu(t2, FieldMemOperand(a0, Map::kInObjectPropertiesOffset));
- __ Subu(t1, t1, t2);
- __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
+ __ li(a4, Operand(cache_field_offsets));
+
+ // TODO(yy) This data structure does NOT follow natural pointer size.
+ __ dsll(at, a3, kPointerSizeLog2 - 1);
+ __ daddu(at, a4, at);
+ __ lwu(a5, MemOperand(at, kPointerSize / 2 * i));
+
+ __ lbu(a6, FieldMemOperand(a0, Map::kInObjectPropertiesOffset));
+ __ Dsubu(a5, a5, a6);
+ __ Branch(&property_array_property, ge, a5, Operand(zero_reg));
if (i != 0) {
__ Branch(&load_in_object_property);
}
@@ -693,28 +698,30 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// Load in-object property.
__ bind(&load_in_object_property);
- __ lbu(t2, FieldMemOperand(a0, Map::kInstanceSizeOffset));
- __ addu(t2, t2, t1); // Index from start of object.
- __ Subu(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag.
- __ sll(at, t2, kPointerSizeLog2);
- __ addu(at, receiver, at);
- __ lw(v0, MemOperand(at));
+ __ lbu(a6, FieldMemOperand(a0, Map::kInstanceSizeOffset));
+ // Index from start of object.
+ __ daddu(a6, a6, a5);
+ // Remove the heap tag.
+ __ Dsubu(receiver, receiver, Operand(kHeapObjectTag));
+ __ dsll(at, a6, kPointerSizeLog2);
+ __ daddu(at, receiver, at);
+ __ ld(v0, MemOperand(at));
__ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
1,
- t0,
+ a4,
a3);
__ Ret();
// Load property array property.
__ bind(&property_array_property);
- __ lw(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
- __ Addu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag);
- __ sll(v0, t1, kPointerSizeLog2);
- __ Addu(v0, v0, receiver);
- __ lw(v0, MemOperand(v0));
+ __ ld(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Daddu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ dsll(v0, a5, kPointerSizeLog2);
+ __ Daddu(v0, v0, a1);
+ __ ld(v0, MemOperand(v0));
__ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
1,
- t0,
+ a4,
a3);
__ Ret();
@@ -723,14 +730,14 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
// exists.
__ bind(&probe_dictionary);
// a3: elements
- __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
// Load the property to v0.
- GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0);
+ GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4);
__ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
1,
- t0,
+ a4,
a3);
__ Ret();
@@ -807,10 +814,10 @@ static void KeyedStoreGenerateGenericHelper(
// Fast case: Do the store, could be either Object or double.
__ bind(fast_object);
- Register scratch_value = t0;
- Register address = t1;
+ Register scratch_value = a4;
+ Register address = a5;
if (check_map == kCheckMap) {
- __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ Branch(fast_double, ne, elements_map,
Operand(masm->isolate()->factory()->fixed_array_map()));
}
@@ -819,10 +826,11 @@ static void KeyedStoreGenerateGenericHelper(
// We have to go to the runtime if the current value is the hole because
// there may be a callback on the element.
Label holecheck_passed1;
- __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
- __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
- __ addu(address, address, at);
- __ lw(scratch_value, MemOperand(address));
+ __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+ __ SmiScale(at, key, kPointerSizeLog2);
+ __ daddu(address, address, at);
+ __ ld(scratch_value, MemOperand(address));
+
__ Branch(&holecheck_passed1, ne, scratch_value,
Operand(masm->isolate()->factory()->the_hole_value()));
__ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
@@ -836,14 +844,15 @@ static void KeyedStoreGenerateGenericHelper(
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
// It's irrelevant whether array is smi-only or not when writing a smi.
- __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch_value);
- __ sw(value, MemOperand(address));
+ __ Daddu(address, elements,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiScale(scratch_value, key, kPointerSizeLog2);
+ __ Daddu(address, address, scratch_value);
+ __ sd(value, MemOperand(address));
__ Ret();
__ bind(&non_smi_value);
@@ -855,13 +864,14 @@ static void KeyedStoreGenerateGenericHelper(
__ bind(&finish_object_store);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
- __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
- __ Addu(address, address, scratch_value);
- __ sw(value, MemOperand(address));
+ __ Daddu(address, elements,
+ Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiScale(scratch_value, key, kPointerSizeLog2);
+ __ Daddu(address, address, scratch_value);
+ __ sd(value, MemOperand(address));
// Update write barrier for the elements array address.
__ mov(scratch_value, value); // Preserve the value which is returned.
__ RecordWrite(elements,
@@ -884,11 +894,11 @@ static void KeyedStoreGenerateGenericHelper(
// HOLECHECK: guards "A[i] double hole?"
// We have to see if the double version of the hole is present. If so
// go to the runtime.
- __ Addu(address, elements,
- Operand(FixedDoubleArray::kHeaderSize + kHoleNanUpper32Offset
+ __ Daddu(address, elements,
+ Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)
- kHeapObjectTag));
- __ sll(at, key, kPointerSizeLog2);
- __ addu(address, address, at);
+ __ SmiScale(at, key, kPointerSizeLog2);
+ __ daddu(address, address, at);
__ lw(scratch_value, MemOperand(address));
__ Branch(&fast_double_without_map_check, ne, scratch_value,
Operand(kHoleNanUpper32));
@@ -900,34 +910,34 @@ static void KeyedStoreGenerateGenericHelper(
key,
elements, // Overwritten.
a3, // Scratch regs...
- t0,
- t1,
+ a4,
+ a5,
&transition_double_elements);
if (increment_length == kIncrementLength) {
// Add 1 to receiver->length.
- __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
- __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
+ __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
}
__ Ret();
__ bind(&transition_smi_elements);
// Transition the array appropriately depending on the value type.
- __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
+ __ ld(a4, FieldMemOperand(value, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
- __ Branch(&non_double_value, ne, t0, Operand(at));
+ __ Branch(&non_double_value, ne, a4, Operand(at));
// Value is a double. Transition FAST_SMI_ELEMENTS ->
// FAST_DOUBLE_ELEMENTS and complete the store.
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS,
receiver_map,
- t0,
+ a4,
slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
FAST_DOUBLE_ELEMENTS);
ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&fast_double_without_map_check);
__ bind(&non_double_value);
@@ -935,13 +945,13 @@ static void KeyedStoreGenerateGenericHelper(
__ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
FAST_ELEMENTS,
receiver_map,
- t0,
+ a4,
slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
slow);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
__ bind(&transition_double_elements);
@@ -951,12 +961,12 @@ static void KeyedStoreGenerateGenericHelper(
__ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
FAST_ELEMENTS,
receiver_map,
- t0,
+ a4,
slow);
ASSERT(receiver_map.is(a3)); // Transition code expects map in a3
mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ jmp(&finish_object_store);
}
@@ -978,33 +988,33 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
Register key = a1;
Register receiver = a2;
Register receiver_map = a3;
- Register elements_map = t2;
- Register elements = t3; // Elements array of the receiver.
- // t0 and t1 are used as general scratch registers.
+ Register elements_map = a6;
+ Register elements = a7; // Elements array of the receiver.
+ // a4 and a5 are used as general scratch registers.
// Check that the key is a smi.
__ JumpIfNotSmi(key, &slow);
// Check that the object isn't a smi.
__ JumpIfSmi(receiver, &slow);
// Get the map of the object.
- __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that the receiver does not require access checks and is not observed.
// The generic stub does not perform map checks or handle observed objects.
- __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
- __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded |
+ __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+ __ And(a4, a4, Operand(1 << Map::kIsAccessCheckNeeded |
1 << Map::kIsObserved));
- __ Branch(&slow, ne, t0, Operand(zero_reg));
+ __ Branch(&slow, ne, a4, Operand(zero_reg));
// Check if the object is a JS array or not.
- __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
- __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
+ __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+ __ Branch(&array, eq, a4, Operand(JS_ARRAY_TYPE));
// Check that the object is some kind of JSObject.
- __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
+ __ Branch(&slow, lt, a4, Operand(FIRST_JS_OBJECT_TYPE));
// Object case: Check key against length in the elements array.
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check array bounds. Both the key and the length of FixedArray are smis.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&fast_object, lo, key, Operand(t0));
+ __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Branch(&fast_object, lo, key, Operand(a4));
// Slow case, handle jump to runtime.
__ bind(&slow);
@@ -1020,12 +1030,12 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
__ bind(&extra);
// Condition code from comparing key and array length is still available.
// Only support writing to array[array.length].
- __ Branch(&slow, ne, key, Operand(t0));
+ __ Branch(&slow, ne, key, Operand(a4));
// Check for room in the elements backing store.
// Both the key and the length of FixedArray are smis.
- __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
- __ Branch(&slow, hs, key, Operand(t0));
- __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+ __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
+ __ Branch(&slow, hs, key, Operand(a4));
+ __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
__ Branch(
&check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);
@@ -1039,11 +1049,11 @@ void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
// array. Check that the array is in fast mode (and writable); if it
// is the length is always a smi.
__ bind(&array);
- __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+ __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
// Check the key against the length in the array.
- __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
- __ Branch(&extra, hs, key, Operand(t0));
+ __ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
+ __ Branch(&extra, hs, key, Operand(a4));
KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
&slow, kCheckMap, kDontIncrementLength,
@@ -1063,7 +1073,7 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
Register receiver = ReceiverRegister();
Register key = NameRegister();
Register scratch1 = a3;
- Register scratch2 = t0;
+ Register scratch2 = a4;
ASSERT(!scratch1.is(receiver) && !scratch1.is(key));
ASSERT(!scratch2.is(receiver) && !scratch2.is(key));
@@ -1071,11 +1081,11 @@ void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
__ JumpIfSmi(receiver, &slow);
// Check that the key is an array index, that is Uint32.
- __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
- __ Branch(&slow, ne, t0, Operand(zero_reg));
+ __ And(a4, key, Operand(kSmiTagMask | kSmiSignMask));
+ __ Branch(&slow, ne, a4, Operand(zero_reg));
// Get the map of the receiver.
- __ lw(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ ld(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
// Check that it has indexed interceptor and access checks
// are not enabled for this object.
@@ -1141,7 +1151,6 @@ void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
// Push receiver, key and value for runtime call.
// We can't use MultiPush as the order of the registers is important.
__ Push(a2, a1, a0);
-
// The slow case calls into the runtime to complete the store without causing
// an IC miss that would otherwise cause a transition to the generic stub.
ExternalReference ref =
@@ -1162,7 +1171,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
// Get the receiver from the stack and probe the stub cache.
Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
masm->isolate()->stub_cache()->GenerateProbe(
- masm, flags, a1, a2, a3, t0, t1, t2);
+ masm, flags, a1, a2, a3, a4, a5, a6);
// Cache miss: Jump to runtime.
GenerateMiss(masm);
@@ -1194,15 +1203,15 @@ void StoreIC::GenerateNormal(MacroAssembler* masm) {
// -----------------------------------
Label miss;
- GenerateNameDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);
+ GenerateNameDictionaryReceiverCheck(masm, a1, a3, a4, a5, &miss);
- GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
+ GenerateDictionaryStore(masm, &miss, a3, a2, a0, a4, a5);
Counters* counters = masm->isolate()->counters();
- __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
+ __ IncrementCounter(counters->store_normal_hit(), 1, a4, a5);
__ Ret();
__ bind(&miss);
- __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
+ __ IncrementCounter(counters->store_normal_miss(), 1, a4, a5);
GenerateMiss(masm);
}
@@ -1325,4 +1334,4 @@ void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_MIPS
+#endif // V8_TARGET_ARCH_MIPS64
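
A note on the smi scaling above: most of this port is mechanical, swapping 32-bit memory and ALU instructions (lw/sw, Addu/Subu, sll) for their 64-bit counterparts (ld/sd, Daddu/Dsubu, dsll) and moving scratch registers from t0-t3 to a4-a7, which are the extra argument registers under the MIPS n64 ABI. The one non-obvious substitution is `__ sll(at, key, kPointerSizeLog2 - kSmiTagSize)` becoming `__ SmiScale(at, key, kPointerSizeLog2)`: 32-bit MIPS keeps the smi payload above a 1-bit tag, while MIPS64 keeps a 32-bit payload in the upper word, so the element byte offset comes from an arithmetic right shift instead of a left shift. The standalone sketch below, using illustrative constants rather than V8's headers, shows why the two computations yield the same offset:

#include <cstdint>
#include <cstdio>

// 32-bit MIPS: smi == value << 1 (1-bit tag), pointers are 4 bytes.
// Mirrors: sll(at, key, kPointerSizeLog2 - kSmiTagSize)
int32_t ElementOffset32(int32_t smi_key) {
  const int kPointerSizeLog2 = 2;
  const int kSmiTagSize = 1;
  return smi_key << (kPointerSizeLog2 - kSmiTagSize);
}

// MIPS64: smi == value << 32 (payload in the upper word), pointers are
// 8 bytes. Mirrors: SmiScale(at, key, kPointerSizeLog2), which boils down
// to an arithmetic right shift by (kSmiShift - kPointerSizeLog2).
int64_t ElementOffset64(int64_t smi_key) {
  const int kPointerSizeLog2 = 3;
  const int kSmiShift = 32;
  return smi_key >> (kSmiShift - kPointerSizeLog2);
}

int main() {
  // Element index 5, tagged under each scheme.
  printf("%d\n", (int)ElementOffset32(5 << 1));                    // 20 == 5 * 4
  printf("%lld\n", (long long)ElementOffset64(INT64_C(5) << 32));  // 40 == 5 * 8
  return 0;
}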