Index: src/a64/stub-cache-a64.cc |
diff --git a/src/arm/stub-cache-arm.cc b/src/a64/stub-cache-a64.cc |
similarity index 69% |
copy from src/arm/stub-cache-arm.cc |
copy to src/a64/stub-cache-a64.cc |
index 694a4ed68f25b81d144555a961f6fba6b5676f26..19f9dd604330173f84e8af65794471f141d014fa 100644 |
--- a/src/arm/stub-cache-arm.cc |
+++ b/src/a64/stub-cache-a64.cc |
@@ -1,4 +1,4 @@ |
-// Copyright 2012 the V8 project authors. All rights reserved. |
+// Copyright 2013 the V8 project authors. All rights reserved. |
// Redistribution and use in source and binary forms, with or without |
// modification, are permitted provided that the following conditions are |
// met: |
@@ -27,7 +27,7 @@ |
#include "v8.h" |
-#if V8_TARGET_ARCH_ARM |
+#if V8_TARGET_ARCH_A64 |
#include "ic-inl.h" |
#include "codegen.h" |
@@ -36,143 +36,132 @@ |
namespace v8 { |
namespace internal { |
+ |
#define __ ACCESS_MASM(masm) |
+void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm, |
+ Label* miss_label, |
+ Register receiver, |
+ Handle<Name> name, |
+ Register scratch0, |
+ Register scratch1) { |
+ ASSERT(!AreAliased(receiver, scratch0, scratch1)); |
+ ASSERT(name->IsUniqueName()); |
+ Counters* counters = masm->isolate()->counters(); |
+ __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); |
+ __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); |
+ |
+ Label done; |
+ |
+ const int kInterceptorOrAccessCheckNeededMask = |
+ (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded); |
+ |
+ // Bail out if the receiver has a named interceptor or requires access checks. |
+ Register map = scratch1; |
+ __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
+ __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset)); |
+ __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask); |
+ __ B(ne, miss_label); |
+ |
+ // Check that receiver is a JSObject. |
+ __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
+ __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE); |
+ __ B(lt, miss_label); |
+ |
+ // Load properties array. |
+ Register properties = scratch0; |
+ __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
+ // Check that the properties array is a dictionary. |
+ __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset)); |
+ __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label); |
+ |
+ NameDictionaryLookupStub::GenerateNegativeLookup(masm, |
+ miss_label, |
+ &done, |
+ receiver, |
+ properties, |
+ name, |
+ scratch1); |
+ __ Bind(&done); |
+ __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); |
+} |
+ |
+ |
+// Probe the primary or secondary table.
+// If the entry is found in the cache, the generated code jumps to the first
+// instruction of the stub in the cache.
+// If there is a miss, the code falls through.
+//
+// The 'receiver', 'name' and 'offset' registers are preserved on a miss.
static void ProbeTable(Isolate* isolate, |
MacroAssembler* masm, |
Code::Flags flags, |
StubCache::Table table, |
Register receiver, |
Register name, |
- // Number of the cache entry, not scaled. |
Register offset, |
Register scratch, |
Register scratch2, |
- Register offset_scratch) { |
+ Register scratch3) { |
+ // Some code below relies on the fact that the Entry struct contains |
+ // 3 pointers (name, code, map). |
+ STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize)); |
+ |
ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); |
ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); |
ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); |
- uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address()); |
- uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address()); |
- uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address()); |
- |
- // Check the relative positions of the address fields. |
- ASSERT(value_off_addr > key_off_addr); |
- ASSERT((value_off_addr - key_off_addr) % 4 == 0); |
- ASSERT((value_off_addr - key_off_addr) < (256 * 4)); |
- ASSERT(map_off_addr > key_off_addr); |
- ASSERT((map_off_addr - key_off_addr) % 4 == 0); |
- ASSERT((map_off_addr - key_off_addr) < (256 * 4)); |
+ uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address()); |
+ uintptr_t value_off_addr = |
+ reinterpret_cast<uintptr_t>(value_offset.address()); |
+ uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address()); |
Label miss; |
- Register base_addr = scratch; |
- scratch = no_reg; |
- // Multiply by 3 because there are 3 fields per entry (name, code, map). |
- __ add(offset_scratch, offset, Operand(offset, LSL, 1)); |
+ ASSERT(!AreAliased(name, offset, scratch, scratch2, scratch3)); |
+ |
+ // Multiply by 3 because there are 3 fields per entry. |
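+  // scratch3 = offset + (offset << 1) = offset * 3.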
+ __ Add(scratch3, offset, Operand(offset, LSL, 1)); |
// Calculate the base address of the entry. |
- __ mov(base_addr, Operand(key_offset)); |
- __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2)); |
+ __ Mov(scratch, Operand(key_offset)); |
+ __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2)); |
// Check that the key in the entry matches the name. |
- __ ldr(ip, MemOperand(base_addr, 0)); |
- __ cmp(name, ip); |
- __ b(ne, &miss); |
+ __ Ldr(scratch2, MemOperand(scratch)); |
+ __ Cmp(name, scratch2); |
+ __ B(ne, &miss); |
// Check the map matches. |
- __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr)); |
- __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
- __ cmp(ip, scratch2); |
- __ b(ne, &miss); |
+ __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr)); |
+ __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
+ __ Cmp(scratch2, scratch3); |
+ __ B(ne, &miss); |
// Get the code entry from the cache. |
- Register code = scratch2; |
- scratch2 = no_reg; |
- __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr)); |
+ __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr)); |
// Check that the flags match what we're looking for. |
- Register flags_reg = base_addr; |
- base_addr = no_reg; |
- __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset)); |
- // It's a nice optimization if this constant is encodable in the bic insn. |
- |
- uint32_t mask = Code::kFlagsNotUsedInLookup; |
- ASSERT(__ ImmediateFitsAddrMode1Instruction(mask)); |
- __ bic(flags_reg, flags_reg, Operand(mask)); |
- __ cmp(flags_reg, Operand(flags)); |
- __ b(ne, &miss); |
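+  // The code flags are held in a 32-bit field, so use the W (32-bit) views
+  // of the registers here.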
+ __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset)); |
+ __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup); |
+ __ Cmp(scratch2.W(), flags); |
+ __ B(ne, &miss); |
#ifdef DEBUG |
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { |
- __ jmp(&miss); |
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { |
- __ jmp(&miss); |
- } |
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { |
+ __ B(&miss); |
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { |
+ __ B(&miss); |
+ } |
#endif |
// Jump to the first instruction in the code stub. |
- __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag)); |
+ __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag); |
+ __ Br(scratch); |
// Miss: fall through. |
- __ bind(&miss); |
-} |
- |
- |
-void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm, |
- Label* miss_label, |
- Register receiver, |
- Handle<Name> name, |
- Register scratch0, |
- Register scratch1) { |
- ASSERT(name->IsUniqueName()); |
- ASSERT(!receiver.is(scratch0)); |
- Counters* counters = masm->isolate()->counters(); |
- __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1); |
- __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); |
- |
- Label done; |
- |
- const int kInterceptorOrAccessCheckNeededMask = |
- (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded); |
- |
- // Bail out if the receiver has a named interceptor or requires access checks. |
- Register map = scratch1; |
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
- __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset)); |
- __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask)); |
- __ b(ne, miss_label); |
- |
- // Check that receiver is a JSObject. |
- __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
- __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); |
- __ b(lt, miss_label); |
- |
- // Load properties array. |
- Register properties = scratch0; |
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
- // Check that the properties array is a dictionary. |
- __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset)); |
- Register tmp = properties; |
- __ LoadRoot(tmp, Heap::kHashTableMapRootIndex); |
- __ cmp(map, tmp); |
- __ b(ne, miss_label); |
- |
- // Restore the temporarily used register. |
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
- |
- |
- NameDictionaryLookupStub::GenerateNegativeLookup(masm, |
- miss_label, |
- &done, |
- receiver, |
- properties, |
- name, |
- scratch1); |
- __ bind(&done); |
- __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); |
+ __ Bind(&miss); |
} |
@@ -187,26 +176,13 @@ void StubCache::GenerateProbe(MacroAssembler* masm, |
Isolate* isolate = masm->isolate(); |
Label miss; |
- // Make sure that code is valid. The multiplying code relies on the |
- // entry size being 12. |
- ASSERT(sizeof(Entry) == 12); |
- |
// Make sure the flags does not name a specific type. |
ASSERT(Code::ExtractTypeFromFlags(flags) == 0); |
// Make sure that there are no register conflicts. |
- ASSERT(!scratch.is(receiver)); |
- ASSERT(!scratch.is(name)); |
- ASSERT(!extra.is(receiver)); |
- ASSERT(!extra.is(name)); |
- ASSERT(!extra.is(scratch)); |
- ASSERT(!extra2.is(receiver)); |
- ASSERT(!extra2.is(name)); |
- ASSERT(!extra2.is(scratch)); |
- ASSERT(!extra2.is(extra)); |
- |
- // Check scratch, extra and extra2 registers are valid. |
- ASSERT(!scratch.is(no_reg)); |
+ ASSERT(!AreAliased(receiver, name, scratch, extra, extra2, extra3)); |
+ |
+  // Make sure the extra, extra2 and extra3 registers are valid.
ASSERT(!extra.is(no_reg)); |
ASSERT(!extra2.is(no_reg)); |
ASSERT(!extra3.is(no_reg)); |
@@ -218,53 +194,31 @@ void StubCache::GenerateProbe(MacroAssembler* masm, |
// Check that the receiver isn't a smi. |
__ JumpIfSmi(receiver, &miss); |
- // Get the map of the receiver and compute the hash. |
- __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); |
- __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
- __ add(scratch, scratch, Operand(ip)); |
- uint32_t mask = kPrimaryTableSize - 1; |
- // We shift out the last two bits because they are not part of the hash and |
- // they are always 01 for maps. |
- __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize)); |
- // Mask down the eor argument to the minimum to keep the immediate |
- // ARM-encodable. |
- __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask)); |
- // Prefer and_ to ubfx here because ubfx takes 2 cycles. |
- __ and_(scratch, scratch, Operand(mask)); |
+  // Compute the hash for the primary table.
+ __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); |
+ __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
+ __ Add(scratch, scratch, extra); |
+ __ Eor(scratch, scratch, flags); |
+ // We shift out the last two bits because they are not part of the hash. |
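+  // Ubfx extracts log2(kPrimaryTableSize) bits starting at kHeapObjectTagSize,
+  // discarding the tag bits and masking to the table size in one instruction.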
+ __ Ubfx(scratch, scratch, kHeapObjectTagSize, |
+ CountTrailingZeros(kPrimaryTableSize, 64)); |
// Probe the primary table. |
- ProbeTable(isolate, |
- masm, |
- flags, |
- kPrimary, |
- receiver, |
- name, |
- scratch, |
- extra, |
- extra2, |
- extra3); |
- |
- // Primary miss: Compute hash for secondary probe. |
- __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize)); |
- uint32_t mask2 = kSecondaryTableSize - 1; |
- __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2)); |
- __ and_(scratch, scratch, Operand(mask2)); |
+ ProbeTable(isolate, masm, flags, kPrimary, receiver, name, |
+ scratch, extra, extra2, extra3); |
+ |
+ // Primary miss: Compute hash for secondary table. |
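+  // scratch still holds the primary hash; adjust it with the name and flags
+  // to derive the secondary hash.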
+ __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize)); |
+ __ Add(scratch, scratch, flags >> kHeapObjectTagSize); |
+ __ And(scratch, scratch, kSecondaryTableSize - 1); |
// Probe the secondary table. |
- ProbeTable(isolate, |
- masm, |
- flags, |
- kSecondary, |
- receiver, |
- name, |
- scratch, |
- extra, |
- extra2, |
- extra3); |
+ ProbeTable(isolate, masm, flags, kSecondary, receiver, name, |
+ scratch, extra, extra2, extra3); |
// Cache miss: Fall-through and let caller handle the miss by |
// entering the runtime system. |
- __ bind(&miss); |
+ __ Bind(&miss); |
__ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, |
extra2, extra3); |
} |
@@ -274,18 +228,17 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm, |
int index, |
Register prototype) { |
// Load the global or builtins object from the current context. |
- __ ldr(prototype, |
- MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
+ __ Ldr(prototype, GlobalObjectMemOperand()); |
// Load the native context from the global or builtins object. |
- __ ldr(prototype, |
+ __ Ldr(prototype, |
FieldMemOperand(prototype, GlobalObject::kNativeContextOffset)); |
// Load the function from the native context. |
- __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index))); |
- // Load the initial map. The global functions all have initial maps. |
- __ ldr(prototype, |
+ __ Ldr(prototype, ContextMemOperand(prototype, index)); |
+ // Load the initial map. The global functions all have initial maps. |
+ __ Ldr(prototype, |
FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset)); |
// Load the prototype from the initial map. |
- __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); |
+ __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); |
} |
@@ -301,18 +254,16 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype( |
// Check we're still in the same context. |
Register scratch = prototype; |
- const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); |
- __ ldr(scratch, MemOperand(cp, offset)); |
- __ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); |
- __ ldr(scratch, MemOperand(scratch, Context::SlotOffset(index))); |
- __ Move(ip, function); |
- __ cmp(ip, scratch); |
- __ b(ne, miss); |
+ __ Ldr(scratch, GlobalObjectMemOperand()); |
+ __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); |
+ __ Ldr(scratch, ContextMemOperand(scratch, index)); |
+ __ Cmp(scratch, Operand(function)); |
+ __ B(ne, miss); |
// Load its initial map. The global functions all have initial maps. |
- __ Move(prototype, Handle<Map>(function->initial_map())); |
+ __ Mov(prototype, Operand(Handle<Map>(function->initial_map()))); |
// Load the prototype from the initial map. |
- __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); |
+ __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); |
} |
@@ -323,14 +274,16 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm, |
int index, |
Representation representation) { |
ASSERT(!FLAG_track_double_fields || !representation.IsDouble()); |
- int offset = index * kPointerSize; |
- if (!inobject) { |
+ USE(representation); |
+ if (inobject) { |
+ int offset = index * kPointerSize; |
+ __ Ldr(dst, FieldMemOperand(src, offset)); |
+ } else { |
// Calculate the offset into the properties array. |
- offset = offset + FixedArray::kHeaderSize; |
- __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); |
- src = dst; |
+ int offset = index * kPointerSize + FixedArray::kHeaderSize; |
+ __ Ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset)); |
+ __ Ldr(dst, FieldMemOperand(dst, offset)); |
} |
- __ ldr(dst, FieldMemOperand(src, offset)); |
} |
@@ -338,69 +291,71 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, |
Register receiver, |
Register scratch, |
Label* miss_label) { |
+ ASSERT(!AreAliased(receiver, scratch)); |
+ |
// Check that the receiver isn't a smi. |
__ JumpIfSmi(receiver, miss_label); |
// Check that the object is a JS array. |
- __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE); |
- __ b(ne, miss_label); |
+ __ JumpIfNotObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE, |
+ miss_label); |
// Load length directly from the JS array. |
- __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
+ __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
__ Ret(); |
} |
// Generate code to check if an object is a string. If the object is a |
// heap object, its map's instance type is left in the scratch1 register. |
-// If this is not needed, scratch1 and scratch2 may be the same register. |
static void GenerateStringCheck(MacroAssembler* masm, |
Register receiver, |
Register scratch1, |
- Register scratch2, |
Label* smi, |
Label* non_string_object) { |
// Check that the receiver isn't a smi. |
__ JumpIfSmi(receiver, smi); |
- // Check that the object is a string. |
- __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
- __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
- __ and_(scratch2, scratch1, Operand(kIsNotStringMask)); |
- // The cast is to resolve the overload for the argument of 0x0. |
- __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag))); |
- __ b(ne, non_string_object); |
+  // Get the object's instance type field.
+ __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
+ // Check if the "not string" bit is set. |
+ __ Tbnz(scratch1, MaskToBit(kNotStringTag), non_string_object); |
} |
// Generate code to load the length from a string object and return the length. |
// If the receiver object is not a string or a wrapped string object the |
// execution continues at the miss label. The register containing the |
-// receiver is potentially clobbered. |
+// receiver is not clobbered if the receiver is not a string. |
void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, |
Register receiver, |
Register scratch1, |
Register scratch2, |
Label* miss) { |
+ // Input registers can't alias because we don't want to clobber the |
+ // receiver register if the object is not a string. |
+ ASSERT(!AreAliased(receiver, scratch1, scratch2)); |
+ |
Label check_wrapper; |
// Check if the object is a string leaving the instance type in the |
// scratch1 register. |
- GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper); |
+ GenerateStringCheck(masm, receiver, scratch1, miss, &check_wrapper); |
// Load length directly from the string. |
- __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset)); |
+ __ Ldr(x0, FieldMemOperand(receiver, String::kLengthOffset)); |
__ Ret(); |
// Check if the object is a JSValue wrapper. |
- __ bind(&check_wrapper); |
- __ cmp(scratch1, Operand(JS_VALUE_TYPE)); |
- __ b(ne, miss); |
+ __ Bind(&check_wrapper); |
+ __ Cmp(scratch1, Operand(JS_VALUE_TYPE)); |
+ __ B(ne, miss); |
// Unwrap the value and check if the wrapped value is a string. |
- __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset)); |
- GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss); |
- __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset)); |
+ __ Ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset)); |
+ GenerateStringCheck(masm, scratch1, scratch2, miss, miss); |
+ __ Ldr(x0, FieldMemOperand(scratch1, String::kLengthOffset)); |
__ Ret(); |
} |
@@ -411,7 +366,11 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm, |
Register scratch2, |
Label* miss_label) { |
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); |
- __ mov(r0, scratch1); |
+  // TryGetFunctionPrototype can't put the result directly in x0 because its
+  // three input registers can't alias and we call this function from
+  // LoadIC::GenerateFunctionPrototype, where the receiver is x0. So we
+  // explicitly move the result into x0.
+ __ Mov(x0, scratch1); |
__ Ret(); |
} |
@@ -426,11 +385,9 @@ void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm, |
Label* miss) { |
Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name); |
ASSERT(cell->value()->IsTheHole()); |
- __ mov(scratch, Operand(cell)); |
- __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset)); |
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
- __ cmp(scratch, ip); |
- __ b(ne, miss); |
+ __ Mov(scratch, Operand(cell)); |
+ __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset)); |
+ __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss); |
} |
@@ -450,10 +407,10 @@ void StoreStubCompiler::GenerateNegativeHolderLookup( |
} |
-// Generate StoreTransition code, value is passed in r0 register. |
-// When leaving generated code after success, the receiver_reg and name_reg |
-// may be clobbered. Upon branch to miss_label, the receiver and name |
-// registers have their original values. |
+// Generate StoreTransition code, value is passed in x0 register. |
+// When leaving generated code after success, the receiver_reg and storage_reg |
+// may be clobbered. Upon branch to miss_label, the receiver and name registers |
+// have their original values. |
void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, |
Handle<JSObject> object, |
LookupResult* lookup, |
@@ -467,9 +424,14 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, |
Register scratch3, |
Label* miss_label, |
Label* slow) { |
- // r0 : value |
Label exit; |
+ ASSERT(!AreAliased(receiver_reg, storage_reg, value_reg, |
+ scratch1, scratch2, scratch3)); |
+ |
+ // We don't need scratch3. |
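+  // (On A64, AllocateHeapNumber needs one scratch register fewer than the
+  // ARM version, which also used scratch3 for the heap number map.)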
+ scratch3 = NoReg; |
+ |
int descriptor = transition->LastAdded(); |
DescriptorArray* descriptors = transition->instance_descriptors(); |
PropertyDetails details = descriptors->GetDetails(descriptor); |
@@ -478,45 +440,42 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, |
if (details.type() == CONSTANT) { |
Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate()); |
- __ Move(scratch1, constant); |
- __ cmp(value_reg, scratch1); |
- __ b(ne, miss_label); |
+ __ LoadObject(scratch1, constant); |
+ __ Cmp(value_reg, scratch1); |
+ __ B(ne, miss_label); |
} else if (FLAG_track_fields && representation.IsSmi()) { |
__ JumpIfNotSmi(value_reg, miss_label); |
} else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) { |
__ JumpIfSmi(value_reg, miss_label); |
} else if (FLAG_track_double_fields && representation.IsDouble()) { |
Label do_store, heap_number; |
- __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex); |
- __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow); |
+ __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2); |
+ // TODO(jbramley): Is fp_scratch the most appropriate FP scratch register? |
+ // It's only used in Fcmp, but it's not really safe to use it like this. |
__ JumpIfNotSmi(value_reg, &heap_number); |
- __ SmiUntag(scratch1, value_reg); |
- __ vmov(s0, scratch1); |
- __ vcvt_f64_s32(d0, s0); |
- __ jmp(&do_store); |
+ __ SmiUntagToDouble(fp_scratch, value_reg); |
+ __ B(&do_store); |
- __ bind(&heap_number); |
+ __ Bind(&heap_number); |
__ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, |
miss_label, DONT_DO_SMI_CHECK); |
- __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
+ __ Ldr(fp_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
- __ bind(&do_store); |
- __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset)); |
+ __ Bind(&do_store); |
+ __ Str(fp_scratch, FieldMemOperand(storage_reg, HeapNumber::kValueOffset)); |
} |
- // Stub never generated for non-global objects that require access |
- // checks. |
+ // Stub never generated for non-global objects that require access checks. |
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); |
// Perform map transition for the receiver if necessary. |
- if (details.type() == FIELD && |
- object->map()->unused_property_fields() == 0) { |
+ if ((details.type() == FIELD) && |
+ (object->map()->unused_property_fields() == 0)) { |
// The properties must be extended before we can store the value. |
// We jump to a runtime call that extends the properties array. |
- __ push(receiver_reg); |
- __ mov(r2, Operand(transition)); |
- __ Push(r2, r0); |
+ __ Mov(scratch1, Operand(transition)); |
+ __ Push(receiver_reg, scratch1, value_reg); |
__ TailCallExternalReference( |
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), |
masm->isolate()), |
@@ -526,8 +485,8 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, |
} |
// Update the map of the object. |
- __ mov(scratch1, Operand(transition)); |
- __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); |
+ __ Mov(scratch1, Operand(transition)); |
+ __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); |
// Update the write barrier for the map field. |
__ RecordWriteField(receiver_reg, |
@@ -540,7 +499,7 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, |
OMIT_SMI_CHECK); |
if (details.type() == CONSTANT) { |
- ASSERT(value_reg.is(r0)); |
+ ASSERT(value_reg.is(x0)); |
__ Ret(); |
return; |
} |
@@ -559,16 +518,18 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, |
if (index < 0) { |
// Set the property straight into the object. |
int offset = object->map()->instance_size() + (index * kPointerSize); |
+ // TODO(jbramley): This construct appears in several places in this |
+ // function. Try to clean it up, perhaps using a result_reg. |
if (FLAG_track_double_fields && representation.IsDouble()) { |
- __ str(storage_reg, FieldMemOperand(receiver_reg, offset)); |
+ __ Str(storage_reg, FieldMemOperand(receiver_reg, offset)); |
} else { |
- __ str(value_reg, FieldMemOperand(receiver_reg, offset)); |
+ __ Str(value_reg, FieldMemOperand(receiver_reg, offset)); |
} |
if (!FLAG_track_fields || !representation.IsSmi()) { |
// Update the write barrier for the array address. |
if (!FLAG_track_double_fields || !representation.IsDouble()) { |
- __ mov(storage_reg, value_reg); |
+ __ Mov(storage_reg, value_reg); |
} |
__ RecordWriteField(receiver_reg, |
offset, |
@@ -583,18 +544,18 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, |
// Write to the properties array. |
int offset = index * kPointerSize + FixedArray::kHeaderSize; |
// Get the properties array |
- __ ldr(scratch1, |
+ __ Ldr(scratch1, |
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); |
if (FLAG_track_double_fields && representation.IsDouble()) { |
- __ str(storage_reg, FieldMemOperand(scratch1, offset)); |
+ __ Str(storage_reg, FieldMemOperand(scratch1, offset)); |
} else { |
- __ str(value_reg, FieldMemOperand(scratch1, offset)); |
+ __ Str(value_reg, FieldMemOperand(scratch1, offset)); |
} |
if (!FLAG_track_fields || !representation.IsSmi()) { |
// Update the write barrier for the array address. |
if (!FLAG_track_double_fields || !representation.IsDouble()) { |
- __ mov(storage_reg, value_reg); |
+ __ Mov(storage_reg, value_reg); |
} |
__ RecordWriteField(scratch1, |
offset, |
@@ -607,17 +568,17 @@ void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm, |
} |
} |
- // Return the value (register r0). |
- ASSERT(value_reg.is(r0)); |
- __ bind(&exit); |
+ __ Bind(&exit); |
+ // Return the value (register x0). |
+ ASSERT(value_reg.is(x0)); |
__ Ret(); |
} |
-// Generate StoreField code, value is passed in r0 register. |
-// When leaving generated code after success, the receiver_reg and name_reg |
-// may be clobbered. Upon branch to miss_label, the receiver and name |
-// registers have their original values. |
+// Generate StoreField code, value is passed in x0 register. |
+// When leaving generated code after success, the receiver_reg and name_reg may |
+// be clobbered. Upon branch to miss_label, the receiver and name registers have |
+// their original values. |
void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, |
Handle<JSObject> object, |
LookupResult* lookup, |
@@ -627,7 +588,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, |
Register scratch1, |
Register scratch2, |
Label* miss_label) { |
- // r0 : value |
+ // x0 : value |
Label exit; |
// Stub never generated for non-global objects that require access |
@@ -650,32 +611,33 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, |
} else if (FLAG_track_double_fields && representation.IsDouble()) { |
// Load the double storage. |
if (index < 0) { |
- int offset = object->map()->instance_size() + (index * kPointerSize); |
- __ ldr(scratch1, FieldMemOperand(receiver_reg, offset)); |
+ int offset = (index * kPointerSize) + object->map()->instance_size(); |
+ __ Ldr(scratch1, FieldMemOperand(receiver_reg, offset)); |
} else { |
- __ ldr(scratch1, |
+ int offset = (index * kPointerSize) + FixedArray::kHeaderSize; |
+ __ Ldr(scratch1, |
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); |
- int offset = index * kPointerSize + FixedArray::kHeaderSize; |
- __ ldr(scratch1, FieldMemOperand(scratch1, offset)); |
+ __ Ldr(scratch1, FieldMemOperand(scratch1, offset)); |
} |
// Store the value into the storage. |
Label do_store, heap_number; |
+ // TODO(jbramley): Is fp_scratch the most appropriate FP scratch register? |
+ // It's only used in Fcmp, but it's not really safe to use it like this. |
__ JumpIfNotSmi(value_reg, &heap_number); |
- __ SmiUntag(scratch2, value_reg); |
- __ vmov(s0, scratch2); |
- __ vcvt_f64_s32(d0, s0); |
- __ jmp(&do_store); |
+ __ SmiUntagToDouble(fp_scratch, value_reg); |
+ __ B(&do_store); |
- __ bind(&heap_number); |
+ __ Bind(&heap_number); |
__ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex, |
miss_label, DONT_DO_SMI_CHECK); |
- __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
+ __ Ldr(fp_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
+ |
+ __ Bind(&do_store); |
+ __ Str(fp_scratch, FieldMemOperand(scratch1, HeapNumber::kValueOffset)); |
- __ bind(&do_store); |
- __ vstr(d0, FieldMemOperand(scratch1, HeapNumber::kValueOffset)); |
- // Return the value (register r0). |
- ASSERT(value_reg.is(r0)); |
+ // Return the value (register x0). |
+ ASSERT(value_reg.is(x0)); |
__ Ret(); |
return; |
} |
@@ -686,7 +648,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, |
if (index < 0) { |
// Set the property straight into the object. |
int offset = object->map()->instance_size() + (index * kPointerSize); |
- __ str(value_reg, FieldMemOperand(receiver_reg, offset)); |
+ __ Str(value_reg, FieldMemOperand(receiver_reg, offset)); |
if (!FLAG_track_fields || !representation.IsSmi()) { |
// Skip updating write barrier if storing a smi. |
@@ -694,7 +656,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, |
// Update the write barrier for the array address. |
// Pass the now unused name_reg as a scratch register. |
- __ mov(name_reg, value_reg); |
+ __ Mov(name_reg, value_reg); |
__ RecordWriteField(receiver_reg, |
offset, |
name_reg, |
@@ -708,9 +670,9 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, |
// Write to the properties array. |
int offset = index * kPointerSize + FixedArray::kHeaderSize; |
// Get the properties array |
- __ ldr(scratch1, |
+ __ Ldr(scratch1, |
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); |
- __ str(value_reg, FieldMemOperand(scratch1, offset)); |
+ __ Str(value_reg, FieldMemOperand(scratch1, offset)); |
if (!FLAG_track_fields || !representation.IsSmi()) { |
// Skip updating write barrier if storing a smi. |
@@ -718,7 +680,7 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, |
// Update the write barrier for the array address. |
// Ok to clobber receiver_reg and name_reg, since we return. |
- __ mov(name_reg, value_reg); |
+ __ Mov(name_reg, value_reg); |
__ RecordWriteField(scratch1, |
offset, |
name_reg, |
@@ -730,9 +692,9 @@ void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm, |
} |
} |
- // Return the value (register r0). |
- ASSERT(value_reg.is(r0)); |
- __ bind(&exit); |
+ __ Bind(&exit); |
+ // Return the value (register x0). |
+ ASSERT(value_reg.is(x0)); |
__ Ret(); |
} |
@@ -741,8 +703,8 @@ void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm, |
Label* label, |
Handle<Name> name) { |
if (!label->is_unused()) { |
- __ bind(label); |
- __ mov(this->name(), Operand(name)); |
+ __ Bind(label); |
+ __ Mov(this->name(), Operand(name)); |
} |
} |
@@ -757,14 +719,13 @@ static void PushInterceptorArguments(MacroAssembler* masm, |
STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2); |
STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3); |
STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4); |
- __ push(name); |
+ |
+ __ Push(name); |
Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor()); |
ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor)); |
Register scratch = name; |
- __ mov(scratch, Operand(interceptor)); |
- __ push(scratch); |
- __ push(receiver); |
- __ push(holder); |
+ __ Mov(scratch, Operand(interceptor)); |
+ __ Push(scratch, receiver, holder); |
} |
@@ -776,6 +737,7 @@ static void CompileCallLoadPropertyWithInterceptor( |
Handle<JSObject> holder_obj, |
IC::UtilityId id) { |
PushInterceptorArguments(masm, receiver, holder, name, holder_obj); |
+ |
__ CallExternalReference( |
ExternalReference(IC_Utility(id), masm->isolate()), |
StubCache::kInterceptorArgsLength); |
@@ -787,38 +749,38 @@ static void GenerateFastApiCall(MacroAssembler* masm, |
const CallOptimization& optimization, |
Handle<Map> receiver_map, |
Register receiver, |
- Register scratch_in, |
+ Register scratch, |
int argc, |
Register* values) { |
- ASSERT(!receiver.is(scratch_in)); |
- __ push(receiver); |
+ ASSERT(!AreAliased(receiver, scratch)); |
+ __ Push(receiver); |
// Write the arguments to stack frame. |
for (int i = 0; i < argc; i++) { |
+ // TODO(jbramley): Push these in as few Push() calls as possible. |
Register arg = values[argc-1-i]; |
- ASSERT(!receiver.is(arg)); |
- ASSERT(!scratch_in.is(arg)); |
- __ push(arg); |
+ ASSERT(!AreAliased(receiver, scratch, arg)); |
+ __ Push(arg); |
} |
+ |
ASSERT(optimization.is_simple_api_call()); |
// Abi for CallApiFunctionStub. |
- Register callee = r0; |
- Register call_data = r4; |
- Register holder = r2; |
- Register api_function_address = r1; |
+ Register callee = x0; |
+ Register call_data = x4; |
+ Register holder = x2; |
+ Register api_function_address = x1; |
// Put holder in place. |
CallOptimization::HolderLookup holder_lookup; |
- Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType( |
- receiver_map, |
- &holder_lookup); |
+ Handle<JSObject> api_holder = |
+ optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup); |
switch (holder_lookup) { |
case CallOptimization::kHolderIsReceiver: |
- __ Move(holder, receiver); |
+ __ Mov(holder, receiver); |
break; |
case CallOptimization::kHolderFound: |
- __ Move(holder, api_holder); |
- break; |
+ __ LoadObject(holder, api_holder); |
+ break; |
case CallOptimization::kHolderNotFound: |
UNREACHABLE(); |
break; |
@@ -830,28 +792,27 @@ static void GenerateFastApiCall(MacroAssembler* masm, |
Handle<Object> call_data_obj(api_call_info->data(), isolate); |
// Put callee in place. |
- __ Move(callee, function); |
+ __ LoadObject(callee, function); |
bool call_data_undefined = false; |
// Put call_data in place. |
if (isolate->heap()->InNewSpace(*call_data_obj)) { |
- __ Move(call_data, api_call_info); |
- __ ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset)); |
+ __ LoadObject(call_data, api_call_info); |
+ __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset)); |
} else if (call_data_obj->IsUndefined()) { |
call_data_undefined = true; |
__ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); |
} else { |
- __ Move(call_data, call_data_obj); |
+ __ LoadObject(call_data, call_data_obj); |
} |
// Put api_function_address in place. |
Address function_address = v8::ToCData<Address>(api_call_info->callback()); |
ApiFunction fun(function_address); |
- ExternalReference::Type type = ExternalReference::DIRECT_API_CALL; |
ExternalReference ref = ExternalReference(&fun, |
- type, |
+ ExternalReference::DIRECT_API_CALL, |
masm->isolate()); |
- __ mov(api_function_address, Operand(ref)); |
+ __ Mov(api_function_address, Operand(ref)); |
// Jump to stub. |
CallApiFunctionStub stub(true, call_data_undefined, argc); |
@@ -880,19 +841,20 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type, |
Handle<Map> receiver_map(IC::TypeToMap(*type, isolate())); |
// Make sure that the type feedback oracle harvests the receiver map. |
// TODO(svenpanne) Remove this hack when all ICs are reworked. |
- __ mov(scratch1, Operand(receiver_map)); |
+ __ Mov(scratch1, Operand(receiver_map)); |
- // Make sure there's no overlap between holder and object registers. |
- ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); |
- ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) |
- && !scratch2.is(scratch1)); |
+ // object_reg and holder_reg registers can alias. |
+ ASSERT(!AreAliased(object_reg, scratch1, scratch2)); |
+ ASSERT(!AreAliased(holder_reg, scratch1, scratch2)); |
// Keep track of the current object in register reg. |
Register reg = object_reg; |
int depth = 0; |
Handle<JSObject> current = Handle<JSObject>::null(); |
- if (type->IsConstant()) current = Handle<JSObject>::cast(type->AsConstant()); |
+ if (type->IsConstant()) { |
+ current = Handle<JSObject>::cast(type->AsConstant()); |
+ } |
Handle<JSObject> prototype = Handle<JSObject>::null(); |
Handle<Map> current_map = receiver_map; |
Handle<Map> holder_map(holder->map()); |
@@ -915,22 +877,22 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type, |
name = factory()->InternalizeString(Handle<String>::cast(name)); |
} |
ASSERT(current.is_null() || |
- current->property_dictionary()->FindEntry(*name) == |
- NameDictionary::kNotFound); |
+ (current->property_dictionary()->FindEntry(*name) == |
+ NameDictionary::kNotFound)); |
GenerateDictionaryNegativeLookup(masm(), miss, reg, name, |
scratch1, scratch2); |
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); |
+ __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); |
reg = holder_reg; // From now on the object will be in holder_reg. |
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); |
+ __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); |
} else { |
Register map_reg = scratch1; |
+ // TODO(jbramley): Skip this load when we don't need the map. |
+ __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); |
+ |
if (depth != 1 || check == CHECK_ALL_MAPS) { |
- // CheckMap implicitly loads the map of |reg| into |map_reg|. |
- __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK); |
- } else { |
- __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); |
+ __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK); |
} |
// Check access rights to the global object. This has to happen after |
@@ -949,10 +911,10 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type, |
if (heap()->InNewSpace(*prototype)) { |
// The prototype is in new space; we cannot store a reference to it |
// in the code. Load it from the map. |
- __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); |
+ __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); |
} else { |
// The prototype is in old space; load it directly. |
- __ mov(reg, Operand(prototype)); |
+ __ Mov(reg, Operand(prototype)); |
} |
} |
@@ -964,6 +926,7 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type, |
// Log the check depth. |
LOG(isolate(), IntEvent("check-maps-depth", depth + 1)); |
if (depth != 0 || check == CHECK_ALL_MAPS) { |
// Check the holder map. |
__ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK); |
@@ -984,10 +947,12 @@ Register StubCompiler::CheckPrototypes(Handle<HeapType> type, |
void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { |
if (!miss->is_unused()) { |
Label success; |
- __ b(&success); |
- __ bind(miss); |
+ __ B(&success); |
+ |
+ __ Bind(miss); |
TailCallBuiltin(masm(), MissBuiltin(kind())); |
- __ bind(&success); |
+ |
+ __ Bind(&success); |
} |
} |
@@ -995,32 +960,35 @@ void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { |
void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) { |
if (!miss->is_unused()) { |
Label success; |
- __ b(&success); |
+ __ B(&success); |
+ |
GenerateRestoreName(masm(), miss, name); |
TailCallBuiltin(masm(), MissBuiltin(kind())); |
- __ bind(&success); |
+ |
+ __ Bind(&success); |
} |
} |
-Register LoadStubCompiler::CallbackHandlerFrontend( |
- Handle<HeapType> type, |
- Register object_reg, |
- Handle<JSObject> holder, |
- Handle<Name> name, |
- Handle<Object> callback) { |
+Register LoadStubCompiler::CallbackHandlerFrontend(Handle<HeapType> type, |
+ Register object_reg, |
+ Handle<JSObject> holder, |
+ Handle<Name> name, |
+ Handle<Object> callback) { |
Label miss; |
Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss); |
+  // TODO(jbramley): HandlerFrontendHeader returns its result in scratch1(), so
+ // we can't use it below, but that isn't very obvious. Is there a better way |
+ // of handling this? |
+ |
if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) { |
- ASSERT(!reg.is(scratch2())); |
- ASSERT(!reg.is(scratch3())); |
- ASSERT(!reg.is(scratch4())); |
+ ASSERT(!AreAliased(reg, scratch2(), scratch3(), scratch4())); |
// Load the properties dictionary. |
Register dictionary = scratch4(); |
- __ ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset)); |
+ __ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset)); |
// Probe the dictionary. |
Label probe_done; |
@@ -1031,7 +999,7 @@ Register LoadStubCompiler::CallbackHandlerFrontend( |
this->name(), |
scratch2(), |
scratch3()); |
- __ bind(&probe_done); |
+ __ Bind(&probe_done); |
// If probing finds an entry in the dictionary, scratch3 contains the |
// pointer into the dictionary. Check that the value is the callback. |
@@ -1039,9 +1007,9 @@ Register LoadStubCompiler::CallbackHandlerFrontend( |
const int kElementsStartOffset = NameDictionary::kHeaderSize + |
NameDictionary::kElementsStartIndex * kPointerSize; |
const int kValueOffset = kElementsStartOffset + kPointerSize; |
- __ ldr(scratch2(), FieldMemOperand(pointer, kValueOffset)); |
- __ cmp(scratch2(), Operand(callback)); |
- __ b(ne, &miss); |
+ __ Ldr(scratch2(), FieldMemOperand(pointer, kValueOffset)); |
+ __ Cmp(scratch2(), Operand(callback)); |
+ __ B(ne, &miss); |
} |
HandlerFrontendFooter(name, &miss); |
@@ -1053,7 +1021,7 @@ void LoadStubCompiler::GenerateLoadField(Register reg, |
Handle<JSObject> holder, |
PropertyIndex field, |
Representation representation) { |
- if (!reg.is(receiver())) __ mov(receiver(), reg); |
+ __ Mov(receiver(), reg); |
if (kind() == Code::LOAD_IC) { |
LoadFieldStub stub(field.is_inobject(holder), |
field.translate(holder), |
@@ -1070,7 +1038,7 @@ void LoadStubCompiler::GenerateLoadField(Register reg, |
void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) { |
// Return the constant value. |
- __ Move(r0, value); |
+ __ LoadObject(x0, value); |
__ Ret(); |
} |
@@ -1079,16 +1047,18 @@ void LoadStubCompiler::GenerateLoadCallback( |
const CallOptimization& call_optimization, |
Handle<Map> receiver_map) { |
GenerateFastApiCall( |
- masm(), call_optimization, receiver_map, |
- receiver(), scratch3(), 0, NULL); |
+ masm(), call_optimization, receiver_map, receiver(), scratch3(), 0, NULL); |
} |
void LoadStubCompiler::GenerateLoadCallback( |
Register reg, |
Handle<ExecutableAccessorInfo> callback) { |
- // Build AccessorInfo::args_ list on the stack and push property name below |
- // the exit frame to make GC aware of them and store pointers to them. |
+ ASSERT(!AreAliased(scratch2(), scratch3(), scratch4(), reg)); |
+ |
+ // Build ExecutableAccessorInfo::args_ list on the stack and push property |
+ // name below the exit frame to make GC aware of them and store pointers to |
+ // them. |
STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0); |
STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1); |
STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2); |
@@ -1096,35 +1066,43 @@ void LoadStubCompiler::GenerateLoadCallback( |
STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4); |
STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5); |
STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6); |
- ASSERT(!scratch2().is(reg)); |
- ASSERT(!scratch3().is(reg)); |
- ASSERT(!scratch4().is(reg)); |
- __ push(receiver()); |
+ |
+ __ Push(receiver()); |
+ |
if (heap()->InNewSpace(callback->data())) { |
- __ Move(scratch3(), callback); |
- __ ldr(scratch3(), FieldMemOperand(scratch3(), |
+ __ Mov(scratch3(), Operand(callback)); |
+ __ Ldr(scratch3(), FieldMemOperand(scratch3(), |
ExecutableAccessorInfo::kDataOffset)); |
} else { |
- __ Move(scratch3(), Handle<Object>(callback->data(), isolate())); |
+ __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate()))); |
} |
- __ push(scratch3()); |
- __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex); |
- __ mov(scratch4(), scratch3()); |
+ // TODO(jbramley): Find another scratch register and combine the pushes |
+ // together. Can we use scratch1() here? |
+ __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex); |
__ Push(scratch3(), scratch4()); |
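+  // The undefined values pushed here fill the return value slots of the
+  // PropertyCallbackArguments list being built (see the STATIC_ASSERTs above).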
- __ mov(scratch4(), |
- Operand(ExternalReference::isolate_address(isolate()))); |
- __ Push(scratch4(), reg); |
- __ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_ |
- __ push(name()); |
+ __ Mov(scratch3(), Operand(ExternalReference::isolate_address(isolate()))); |
+ __ Push(scratch4(), scratch3(), reg, name()); |
+ |
+ Register args_addr = scratch2(); |
+ __ Add(args_addr, __ StackPointer(), kPointerSize); |
- // Abi for CallApiGetter |
- Register getter_address_reg = r2; |
+ // Stack at this point: |
+ // sp[40] callback data |
+ // sp[32] undefined |
+ // sp[24] undefined |
+ // sp[16] isolate |
+ // args_addr -> sp[8] reg |
+ // sp[0] name |
+ // Abi for CallApiGetter. |
+ Register getter_address_reg = x2; |
+ |
+ // Set up the call. |
Address getter_address = v8::ToCData<Address>(callback->getter()); |
ApiFunction fun(getter_address); |
ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL; |
ExternalReference ref = ExternalReference(&fun, type, isolate()); |
- __ mov(getter_address_reg, Operand(ref)); |
+ __ Mov(getter_address_reg, Operand(ref)); |
CallApiGetterStub stub; |
__ TailCallStub(&stub); |
@@ -1137,12 +1115,13 @@ void LoadStubCompiler::GenerateLoadInterceptor( |
Handle<JSObject> interceptor_holder, |
LookupResult* lookup, |
Handle<Name> name) { |
+ ASSERT(!AreAliased(receiver(), this->name(), |
+ scratch1(), scratch2(), scratch3())); |
ASSERT(interceptor_holder->HasNamedInterceptor()); |
ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); |
// So far the most popular follow ups for interceptor loads are FIELD |
- // and CALLBACKS, so inline only them, other cases may be added |
- // later. |
+ // and CALLBACKS, so inline only them, other cases may be added later. |
bool compile_followup_inline = false; |
if (lookup->IsFound() && lookup->IsCacheable()) { |
if (lookup->IsField()) { |
@@ -1167,7 +1146,7 @@ void LoadStubCompiler::GenerateLoadInterceptor( |
// result. The CALLBACKS case needs the receiver to be passed into C++ code, |
// the FIELD case might cause a miss during the prototype check. |
bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder(); |
- bool must_preserve_receiver_reg = !receiver().is(holder_reg) && |
+ bool must_preserve_receiver_reg = !receiver().Is(holder_reg) && |
(lookup->type() == CALLBACKS || must_perfrom_prototype_check); |
// Save necessary data before invoking an interceptor. |
@@ -1189,27 +1168,26 @@ void LoadStubCompiler::GenerateLoadInterceptor( |
// Check if interceptor provided a value for property. If it's |
// the case, return immediately. |
Label interceptor_failed; |
- __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex); |
- __ cmp(r0, scratch1()); |
- __ b(eq, &interceptor_failed); |
+ __ JumpIfRoot(x0, |
+ Heap::kNoInterceptorResultSentinelRootIndex, |
+ &interceptor_failed); |
frame_scope.GenerateLeaveFrame(); |
__ Ret(); |
- __ bind(&interceptor_failed); |
- __ pop(this->name()); |
- __ pop(holder_reg); |
+ __ Bind(&interceptor_failed); |
if (must_preserve_receiver_reg) { |
- __ pop(receiver()); |
+ __ Pop(this->name(), holder_reg, receiver()); |
+ } else { |
+ __ Pop(this->name(), holder_reg); |
} |
// Leave the internal frame. |
} |
- |
GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup); |
} else { // !compile_followup_inline |
// Call the runtime system to load the interceptor. |
// Check that the maps haven't changed. |
- PushInterceptorArguments(masm(), receiver(), holder_reg, |
- this->name(), interceptor_holder); |
+ PushInterceptorArguments( |
+ masm(), receiver(), holder_reg, this->name(), interceptor_holder); |
ExternalReference ref = |
ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad), |
@@ -1222,13 +1200,10 @@ void LoadStubCompiler::GenerateLoadInterceptor( |
void StubCompiler::GenerateBooleanCheck(Register object, Label* miss) { |
Label success; |
// Check that the object is a boolean. |
- __ LoadRoot(ip, Heap::kTrueValueRootIndex); |
- __ cmp(object, ip); |
- __ b(eq, &success); |
- __ LoadRoot(ip, Heap::kFalseValueRootIndex); |
- __ cmp(object, ip); |
- __ b(ne, miss); |
- __ bind(&success); |
+ // TODO(all): Optimize this like LCodeGen::DoDeferredTaggedToI. |
+ __ JumpIfRoot(object, Heap::kTrueValueRootIndex, &success); |
+ __ JumpIfNotRoot(object, Heap::kFalseValueRootIndex, miss); |
+ __ Bind(&success); |
} |
@@ -1237,18 +1212,19 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback( |
Handle<JSObject> holder, |
Handle<Name> name, |
Handle<ExecutableAccessorInfo> callback) { |
+ ASM_LOCATION("StoreStubCompiler::CompileStoreCallback"); |
Register holder_reg = HandlerFrontend( |
IC::CurrentTypeOf(object, isolate()), receiver(), holder, name); |
// Stub never generated for non-global objects that require access checks. |
ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded()); |
- __ push(receiver()); // receiver |
- __ push(holder_reg); |
- __ mov(ip, Operand(callback)); // callback info |
- __ push(ip); |
- __ mov(ip, Operand(name)); |
- __ Push(ip, value()); |
+ // TODO(jbramley): Make Push take more than four arguments and combine these |
+ // two calls. |
+ __ Push(receiver(), holder_reg); |
+ __ Mov(scratch1(), Operand(callback)); |
+ __ Mov(scratch2(), Operand(name)); |
+ __ Push(scratch1(), scratch2(), value()); |
// Do tail-call to the runtime system. |
ExternalReference store_callback_property = |
@@ -1260,24 +1236,6 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback( |
} |
-Handle<Code> StoreStubCompiler::CompileStoreCallback( |
- Handle<JSObject> object, |
- Handle<JSObject> holder, |
- Handle<Name> name, |
- const CallOptimization& call_optimization) { |
- HandlerFrontend(IC::CurrentTypeOf(object, isolate()), |
- receiver(), holder, name); |
- |
- Register values[] = { value() }; |
- GenerateFastApiCall( |
- masm(), call_optimization, handle(object->map()), |
- receiver(), scratch3(), 1, values); |
- |
- // Return the generated code. |
- return GetCode(kind(), Code::FAST, name); |
-} |
- |
- |
#undef __ |
#define __ ACCESS_MASM(masm) |
@@ -1287,24 +1245,26 @@ void StoreStubCompiler::GenerateStoreViaSetter( |
Handle<HeapType> type, |
Handle<JSFunction> setter) { |
// ----------- S t a t e ------------- |
- // -- r0 : value |
- // -- r1 : receiver |
- // -- r2 : name |
+ // -- x0 : value |
+ // -- x1 : receiver |
+ // -- x2 : name |
// -- lr : return address |
// ----------------------------------- |
+ Register value = x0; |
+ Register receiver = x1; |
+ Label miss; |
+ |
{ |
FrameScope scope(masm, StackFrame::INTERNAL); |
- Register receiver = r1; |
- Register value = r0; |
// Save value register, so we can restore it later. |
- __ push(value); |
+ __ Push(value); |
if (!setter.is_null()) { |
// Call the JavaScript setter with receiver and value on the stack. |
if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { |
// Swap in the global receiver. |
- __ ldr(receiver, |
+ __ Ldr(receiver, |
FieldMemOperand( |
receiver, JSGlobalObject::kGlobalReceiverOffset)); |
} |
@@ -1320,10 +1280,10 @@ void StoreStubCompiler::GenerateStoreViaSetter( |
} |
// We have to return the passed value, not the return value of the setter. |
- __ pop(r0); |
+ __ Pop(value); |
// Restore context register. |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
} |
__ Ret(); |
} |
@@ -1338,6 +1298,8 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor( |
Handle<Name> name) { |
Label miss; |
+ ASM_LOCATION("StoreStubCompiler::CompileStoreInterceptor"); |
+ |
// Check that the map of the object hasn't changed. |
__ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss, |
DO_SMI_CHECK); |
@@ -1347,8 +1309,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor( |
__ CheckAccessGlobalProxy(receiver(), scratch1(), &miss); |
} |
- // Stub is never generated for non-global objects that require access |
- // checks. |
+ // Stub is never generated for non-global objects that require access checks. |
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded()); |
__ Push(receiver(), this->name(), value()); |
@@ -1359,7 +1320,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor( |
__ TailCallExternalReference(store_ic_property, 3, 1); |
// Handle store cache miss. |
- __ bind(&miss); |
+ __ Bind(&miss); |
TailCallBuiltin(masm(), MissBuiltin(kind())); |
// Return the generated code. |
@@ -1374,7 +1335,7 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type, |
// Return undefined if maps of the full prototype chain are still the |
// same and no global property with this name contains a value. |
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); |
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex); |
__ Ret(); |
// Return the generated code. |
@@ -1382,30 +1343,36 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type, |
} |
+// TODO(all): The so-called scratch registers are significant in some cases.
+// For example, KeyedStoreStubCompiler::registers()[3] (x3) is actually used
+// for KeyedStoreStubCompiler::transition_map(). We should verify which
+// registers are actually scratch registers, and which are important. For now,
+// we use the same assignments as ARM to remain on the safe side.
+ |
Register* LoadStubCompiler::registers() { |
// receiver, name, scratch1, scratch2, scratch3, scratch4. |
- static Register registers[] = { r0, r2, r3, r1, r4, r5 }; |
+ static Register registers[] = { x0, x2, x3, x1, x4, x5 }; |
return registers; |
} |
Register* KeyedLoadStubCompiler::registers() { |
- // receiver, name, scratch1, scratch2, scratch3, scratch4. |
- static Register registers[] = { r1, r0, r2, r3, r4, r5 }; |
+ // receiver, name/key, scratch1, scratch2, scratch3, scratch4. |
+ static Register registers[] = { x1, x0, x2, x3, x4, x5 }; |
return registers; |
} |
Register* StoreStubCompiler::registers() { |
// receiver, name, value, scratch1, scratch2, scratch3. |
- static Register registers[] = { r1, r2, r0, r3, r4, r5 }; |
+ static Register registers[] = { x1, x2, x0, x3, x4, x5 }; |
return registers; |
} |
Register* KeyedStoreStubCompiler::registers() { |
// receiver, name, value, scratch1, scratch2, scratch3. |
- static Register registers[] = { r2, r1, r0, r3, r4, r5 }; |
+ static Register registers[] = { x2, x1, x0, x3, x4, x5 }; |
return registers; |
} |
@@ -1413,16 +1380,10 @@ Register* KeyedStoreStubCompiler::registers() { |
#undef __ |
#define __ ACCESS_MASM(masm) |
- |
void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, |
Handle<HeapType> type, |
Register receiver, |
Handle<JSFunction> getter) { |
- // ----------- S t a t e ------------- |
- // -- r0 : receiver |
- // -- r2 : name |
- // -- lr : return address |
- // ----------------------------------- |
{ |
FrameScope scope(masm, StackFrame::INTERNAL); |
@@ -1430,11 +1391,11 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, |
// Call the JavaScript getter with the receiver on the stack. |
if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { |
// Swap in the global receiver. |
- __ ldr(receiver, |
+ __ Ldr(receiver, |
FieldMemOperand( |
receiver, JSGlobalObject::kGlobalReceiverOffset)); |
} |
- __ push(receiver); |
+ __ Push(receiver); |
ParameterCount actual(0); |
ParameterCount expected(getter); |
__ InvokeFunction(getter, expected, actual, |
@@ -1446,7 +1407,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm, |
} |
// Restore context register. |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
} |
__ Ret(); |
} |
@@ -1466,19 +1427,17 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal( |
HandlerFrontendHeader(type, receiver(), global, name, &miss); |
// Get the value from the cell. |
- __ mov(r3, Operand(cell)); |
- __ ldr(r4, FieldMemOperand(r3, Cell::kValueOffset)); |
+ __ Mov(x3, Operand(cell)); |
+ __ Ldr(x4, FieldMemOperand(x3, Cell::kValueOffset)); |
// Check for deleted property if property can actually be deleted. |
if (!is_dont_delete) { |
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
- __ cmp(r4, ip); |
- __ b(eq, &miss); |
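+    // JumpIfRoot folds the root load, the compare and the branch into one
+    // macro, so the explicit ip scratch register used on ARM is not needed.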
+ __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &miss); |
} |
Counters* counters = isolate()->counters(); |
- __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3); |
- __ mov(r0, r4); |
+ __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3); |
+ __ Mov(x0, x4); |
__ Ret(); |
HandlerFrontendFooter(name, &miss); |
@@ -1498,8 +1457,7 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC( |
if (check == PROPERTY && |
(kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) { |
- __ cmp(this->name(), Operand(name)); |
- __ b(ne, &miss); |
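+    // CompareAndBranch fuses the compare and the conditional branch into a
+    // single macro call (it can emit cbz/cbnz when comparing against zero).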
+ __ CompareAndBranch(this->name(), Operand(name), ne, &miss); |
} |
Label number_case; |
@@ -1507,32 +1465,33 @@ Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC( |
__ JumpIfSmi(receiver(), smi_target); |
Register map_reg = scratch1(); |
- |
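+  // Load the receiver map once up front; the loop below compares it against
+  // each handled map in turn.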
+ __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); |
int receiver_count = types->length(); |
int number_of_handled_maps = 0; |
- __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); |
for (int current = 0; current < receiver_count; ++current) { |
Handle<HeapType> type = types->at(current); |
Handle<Map> map = IC::TypeToMap(*type, isolate()); |
if (!map->is_deprecated()) { |
number_of_handled_maps++; |
- __ mov(ip, Operand(map)); |
- __ cmp(map_reg, ip); |
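+      // Unlike ARM, the A64 Jump macro cannot be made conditional, so branch
+      // around the tail jump explicitly when the map does not match.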
+ Label try_next; |
+ __ Cmp(map_reg, Operand(map)); |
+ __ B(ne, &try_next); |
if (type->Is(HeapType::Number())) { |
ASSERT(!number_case.is_unused()); |
- __ bind(&number_case); |
+ __ Bind(&number_case); |
} |
- __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq); |
+ __ Jump(handlers->at(current), RelocInfo::CODE_TARGET); |
+ __ Bind(&try_next); |
} |
} |
ASSERT(number_of_handled_maps != 0); |
- __ bind(&miss); |
+ __ Bind(&miss); |
TailCallBuiltin(masm(), MissBuiltin(kind())); |
// Return the generated code. |
InlineCacheState state = |
- number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC; |
+ (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC; |
return GetICCode(kind(), type, name, state); |
} |
@@ -1542,80 +1501,84 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic( |
CodeHandleList* handler_stubs, |
MapHandleList* transitioned_maps) { |
Label miss; |
+ |
+ ASM_LOCATION("KeyedStoreStubCompiler::CompileStorePolymorphic"); |
+ |
__ JumpIfSmi(receiver(), &miss); |
int receiver_count = receiver_maps->length(); |
- __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); |
- for (int i = 0; i < receiver_count; ++i) { |
- __ mov(ip, Operand(receiver_maps->at(i))); |
- __ cmp(scratch1(), ip); |
- if (transitioned_maps->at(i).is_null()) { |
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq); |
- } else { |
- Label next_map; |
- __ b(ne, &next_map); |
- __ mov(transition_map(), Operand(transitioned_maps->at(i))); |
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al); |
- __ bind(&next_map); |
+ __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); |
+ for (int i = 0; i < receiver_count; i++) { |
+ __ Cmp(scratch1(), Operand(receiver_maps->at(i))); |
+ |
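+    // As in CompilePolymorphicIC, branch around the tail jump when the map
+    // does not match, since Jump cannot take a condition on A64.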
+ Label skip; |
+ __ B(&skip, ne); |
+ if (!transitioned_maps->at(i).is_null()) { |
+ // This argument is used by the handler stub. For example, see |
+ // ElementsTransitionGenerator::GenerateMapChangeElementsTransition. |
+ __ Mov(transition_map(), Operand(transitioned_maps->at(i))); |
} |
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); |
+ __ Bind(&skip); |
} |
- __ bind(&miss); |
+ __ Bind(&miss); |
TailCallBuiltin(masm(), MissBuiltin(kind())); |
- // Return the generated code. |
return GetICCode( |
kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC); |
} |
+Handle<Code> StoreStubCompiler::CompileStoreCallback( |
+ Handle<JSObject> object, |
+ Handle<JSObject> holder, |
+ Handle<Name> name, |
+ const CallOptimization& call_optimization) { |
+ HandlerFrontend(IC::CurrentTypeOf(object, isolate()), |
+ receiver(), holder, name); |
+ |
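+  // The value being stored is the single argument passed to the API call.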
+ Register values[] = { value() }; |
+ GenerateFastApiCall(masm(), call_optimization, handle(object->map()), |
+ receiver(), scratch3(), 1, values); |
+ |
+ // Return the generated code. |
+ return GetCode(kind(), Code::FAST, name); |
+} |
+ |
+ |
#undef __ |
#define __ ACCESS_MASM(masm) |
- |
void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( |
MacroAssembler* masm) { |
// ---------- S t a t e -------------- |
// -- lr : return address |
- // -- r0 : key |
- // -- r1 : receiver |
+ // -- x0 : key |
+ // -- x1 : receiver |
// ----------------------------------- |
Label slow, miss; |
- Register key = r0; |
- Register receiver = r1; |
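+  // Note that 'result' deliberately aliases 'key': x0 is the return register,
+  // and the dictionary lookup consumes the key before writing the result.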
+ Register result = x0; |
+ Register key = x0; |
+ Register receiver = x1; |
- __ UntagAndJumpIfNotSmi(r2, key, &miss); |
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
- __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5); |
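+  // The key is not untagged here: the A64 version of LoadFromNumberDictionary
+  // takes the tagged smi key directly (and an extra scratch register).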
+ __ JumpIfNotSmi(key, &miss); |
+ __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
+ __ LoadFromNumberDictionary(&slow, x4, key, result, x2, x3, x5, x6); |
__ Ret(); |
- __ bind(&slow); |
+ __ Bind(&slow); |
__ IncrementCounter( |
- masm->isolate()->counters()->keyed_load_external_array_slow(), |
- 1, r2, r3); |
- |
- // ---------- S t a t e -------------- |
- // -- lr : return address |
- // -- r0 : key |
- // -- r1 : receiver |
- // ----------------------------------- |
+ masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x2, x3); |
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); |
// Miss case, call the runtime. |
- __ bind(&miss); |
- |
- // ---------- S t a t e -------------- |
- // -- lr : return address |
- // -- r0 : key |
- // -- r1 : receiver |
- // ----------------------------------- |
+ __ Bind(&miss); |
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss); |
} |
-#undef __ |
- |
} } // namespace v8::internal |
-#endif // V8_TARGET_ARCH_ARM |
+#endif // V8_TARGET_ARCH_A64 |