| Index: src/a64/stub-cache-a64.cc
|
| diff --git a/src/arm/stub-cache-arm.cc b/src/a64/stub-cache-a64.cc
|
| similarity index 60%
|
| copy from src/arm/stub-cache-arm.cc
|
| copy to src/a64/stub-cache-a64.cc
|
| index 3595b5233f1572c938d6b2e81659375c6f8cfeb0..3a0fd8f060c06aead1b5e406f7f98b63e1b0d693 100644
|
| --- a/src/arm/stub-cache-arm.cc
|
| +++ b/src/a64/stub-cache-a64.cc
|
| @@ -1,4 +1,4 @@
|
| -// Copyright 2012 the V8 project authors. All rights reserved.
|
| +// Copyright 2013 the V8 project authors. All rights reserved.
|
| // Redistribution and use in source and binary forms, with or without
|
| // modification, are permitted provided that the following conditions are
|
| // met:
|
| @@ -27,7 +27,7 @@
|
|
|
| #include "v8.h"
|
|
|
| -#if defined(V8_TARGET_ARCH_ARM)
|
| +#if defined(V8_TARGET_ARCH_A64)
|
|
|
| #include "ic-inl.h"
|
| #include "codegen.h"
|
| @@ -36,147 +36,170 @@
|
| namespace v8 {
|
| namespace internal {
|
|
|
| +
|
| #define __ ACCESS_MASM(masm)
|
|
|
|
|
| +// Helper function used to check that the dictionary doesn't contain
|
| +// the property. This function may return false negatives, so miss_label
|
| +// must always call a backup property check that is complete.
|
| +// This function is safe to call if the receiver has fast properties.
|
| +// Name must be unique and receiver must be a heap object.
|
| +static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
|
| + Label* miss_label,
|
| + Register receiver,
|
| + Handle<Name> name,
|
| + Register scratch0,
|
| + Register scratch1) {
|
| + ASSERT(!AreAliased(scratch0, scratch1));
|
| + ASSERT(name->IsUniqueName());
|
| + Counters* counters = masm->isolate()->counters();
|
| + __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
|
| + __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
|
| +
|
| + Label done;
|
| +
|
| + const int kInterceptorOrAccessCheckNeededMask =
|
| + (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
|
| +
|
| + // Bail out if the receiver has a named interceptor or requires access checks.
|
| + Register map = scratch1;
|
| + __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
| + __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
|
| + __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask);
|
| + __ B(ne, miss_label);
|
| +
|
| + // Check that receiver is a JSObject.
|
| + __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
|
| + __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
|
| + __ B(lt, miss_label);
|
| +
|
| + // Load properties array.
|
| + Register properties = scratch0;
|
| + __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
|
| + // Check that the properties array is a dictionary.
|
| + __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
|
| + __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
|
| +
|
| + NameDictionaryLookupStub::GenerateNegativeLookup(masm,
|
| + miss_label,
|
| + &done,
|
| + receiver,
|
| + properties,
|
| + name,
|
| + scratch1);
|
| + __ Bind(&done);
|
| + __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
|
| +}
|
| +
|
| +
|
| +// Probe primary or secondary table.
|
| +// If the entry is found in the cache, the generated code jumps to the first
|
| +// instruction of the stub in the cache.
|
| +// If there is a miss the code falls through.
|
| +//
|
| +// 'receiver', 'name' and 'offset' registers are preserved on miss.
|
| static void ProbeTable(Isolate* isolate,
|
| MacroAssembler* masm,
|
| Code::Flags flags,
|
| StubCache::Table table,
|
| Register receiver,
|
| Register name,
|
| - // Number of the cache entry, not scaled.
|
| Register offset,
|
| Register scratch,
|
| Register scratch2,
|
| - Register offset_scratch) {
|
| + Register scratch3) {
|
| + // Some code below relies on the fact that the Entry struct contains
|
| + // 3 pointers (name, code, map).
|
| + STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
|
| +
|
| ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
|
| ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
|
| ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
|
|
|
| - uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
|
| - uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
|
| - uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
|
| -
|
| - // Check the relative positions of the address fields.
|
| - ASSERT(value_off_addr > key_off_addr);
|
| - ASSERT((value_off_addr - key_off_addr) % 4 == 0);
|
| - ASSERT((value_off_addr - key_off_addr) < (256 * 4));
|
| - ASSERT(map_off_addr > key_off_addr);
|
| - ASSERT((map_off_addr - key_off_addr) % 4 == 0);
|
| - ASSERT((map_off_addr - key_off_addr) < (256 * 4));
|
| + uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
|
| + uintptr_t value_off_addr =
|
| + reinterpret_cast<uintptr_t>(value_offset.address());
|
| + uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
|
|
|
| Label miss;
|
| - Register base_addr = scratch;
|
| - scratch = no_reg;
|
|
|
| - // Multiply by 3 because there are 3 fields per entry (name, code, map).
|
| - __ add(offset_scratch, offset, Operand(offset, LSL, 1));
|
| + ASSERT(!AreAliased(name, offset, scratch, scratch2, scratch3));
|
| +
|
| + // Multiply by 3 because there are 3 fields per entry.
|
| + __ Add(scratch3, offset, Operand(offset, LSL, 1));
|
|
|
| // Calculate the base address of the entry.
|
| - __ mov(base_addr, Operand(key_offset));
|
| - __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
|
| + __ Mov(scratch, Operand(key_offset));
|
| + __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
|
|
|
| // Check that the key in the entry matches the name.
|
| - __ ldr(ip, MemOperand(base_addr, 0));
|
| - __ cmp(name, ip);
|
| - __ b(ne, &miss);
|
| + __ Ldr(scratch2, MemOperand(scratch));
|
| + __ Cmp(name, scratch2);
|
| + __ B(ne, &miss);
|
|
|
| // Check the map matches.
|
| - __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
|
| - __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
| - __ cmp(ip, scratch2);
|
| - __ b(ne, &miss);
|
| + __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
|
| + __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
| + __ Cmp(scratch2, scratch3);
|
| + __ B(ne, &miss);
|
|
|
| // Get the code entry from the cache.
|
| - Register code = scratch2;
|
| - scratch2 = no_reg;
|
| - __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
|
| + __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
|
|
|
| // Check that the flags match what we're looking for.
|
| - Register flags_reg = base_addr;
|
| - base_addr = no_reg;
|
| - __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
|
| - // It's a nice optimization if this constant is encodable in the bic insn.
|
| -
|
| - uint32_t mask = Code::kFlagsNotUsedInLookup;
|
| - ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
|
| - __ bic(flags_reg, flags_reg, Operand(mask));
|
| - __ cmp(flags_reg, Operand(flags));
|
| - __ b(ne, &miss);
|
| + __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
|
| + __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
|
| + __ Cmp(scratch2.W(), flags);
|
| + __ B(ne, &miss);
|
|
|
| #ifdef DEBUG
|
| - if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
|
| - __ jmp(&miss);
|
| - } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
|
| - __ jmp(&miss);
|
| - }
|
| + if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
|
| + __ B(&miss);
|
| + } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
|
| + __ B(&miss);
|
| + }
|
| #endif
|
|
|
| // Jump to the first instruction in the code stub.
|
| - __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
|
| + __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
|
| + __ Br(scratch);
|
|
|
| // Miss: fall through.
|
| - __ bind(&miss);
|
| + __ Bind(&miss);
|
| }
|
|
|
|
|
| -// Helper function used to check that the dictionary doesn't contain
|
| -// the property. This function may return false negatives, so miss_label
|
| -// must always call a backup property check that is complete.
|
| -// This function is safe to call if the receiver has fast properties.
|
| -// Name must be unique and receiver must be a heap object.
|
| -static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
|
| - Label* miss_label,
|
| - Register receiver,
|
| - Handle<Name> name,
|
| - Register scratch0,
|
| - Register scratch1) {
|
| - ASSERT(name->IsUniqueName());
|
| - Counters* counters = masm->isolate()->counters();
|
| - __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
|
| - __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
|
| -
|
| - Label done;
|
| -
|
| - const int kInterceptorOrAccessCheckNeededMask =
|
| - (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
|
| -
|
| - // Bail out if the receiver has a named interceptor or requires access checks.
|
| - Register map = scratch1;
|
| - __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
| - __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
|
| - __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
|
| - __ b(ne, miss_label);
|
| -
|
| - // Check that receiver is a JSObject.
|
| - __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
|
| - __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
|
| - __ b(lt, miss_label);
|
| -
|
| - // Load properties array.
|
| - Register properties = scratch0;
|
| - __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
|
| - // Check that the properties array is a dictionary.
|
| - __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
|
| - Register tmp = properties;
|
| - __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
|
| - __ cmp(map, tmp);
|
| - __ b(ne, miss_label);
|
| +// Check if key is a smi or can be converted into a smi.
|
| +// If not, jump to 'fail'; otherwise fall through.
|
| +static void GenerateSmiKeyCheck(MacroAssembler* masm,
|
| + Register key,
|
| + Register scratch0,
|
| + FPRegister double_scratch0,
|
| + FPRegister double_scratch1,
|
| + Label* fail) {
|
| + Label key_ok;
|
| + __ JumpIfSmi(key, &key_ok);
|
|
|
| - // Restore the temporarily used register.
|
| - __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
|
| + // The key is not a smi. Check for a smi inside a heap number.
|
| + __ CheckMap(key,
|
| + scratch0,
|
| + masm->isolate()->factory()->heap_number_map(),
|
| + fail,
|
| + DONT_DO_SMI_CHECK);
|
|
|
| + __ Ldr(scratch0, FieldMemOperand(key, HeapNumber::kValueOffset));
|
| + __ Fmov(double_scratch0, scratch0);
|
| + __ TryConvertDoubleToInt32(scratch0.W(),
|
| + double_scratch0,
|
| + double_scratch1,
|
| + NULL,
|
| + fail);
|
| + // The double value has been converted to a 32-bit signed integer.
|
| + // We just need to tag it.
|
| + __ SmiTag(key, scratch0);
|
|
|
| - NameDictionaryLookupStub::GenerateNegativeLookup(masm,
|
| - miss_label,
|
| - &done,
|
| - receiver,
|
| - properties,
|
| - name,
|
| - scratch1);
|
| - __ bind(&done);
|
| - __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
|
| + __ Bind(&key_ok);
|
| }
|
|
|
|
|
| @@ -191,26 +214,13 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
|
| Isolate* isolate = masm->isolate();
|
| Label miss;
|
|
|
| - // Make sure that code is valid. The multiplying code relies on the
|
| - // entry size being 12.
|
| - ASSERT(sizeof(Entry) == 12);
|
| -
|
| // Make sure the flags does not name a specific type.
|
| ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
|
|
|
| // Make sure that there are no register conflicts.
|
| - ASSERT(!scratch.is(receiver));
|
| - ASSERT(!scratch.is(name));
|
| - ASSERT(!extra.is(receiver));
|
| - ASSERT(!extra.is(name));
|
| - ASSERT(!extra.is(scratch));
|
| - ASSERT(!extra2.is(receiver));
|
| - ASSERT(!extra2.is(name));
|
| - ASSERT(!extra2.is(scratch));
|
| - ASSERT(!extra2.is(extra));
|
| -
|
| - // Check scratch, extra and extra2 registers are valid.
|
| - ASSERT(!scratch.is(no_reg));
|
| + ASSERT(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
|
| +
|
| + // Make sure extra and extra2 registers are valid.
|
| ASSERT(!extra.is(no_reg));
|
| ASSERT(!extra2.is(no_reg));
|
| ASSERT(!extra3.is(no_reg));
|
| @@ -222,53 +232,31 @@ void StubCache::GenerateProbe(MacroAssembler* masm,
|
| // Check that the receiver isn't a smi.
|
| __ JumpIfSmi(receiver, &miss);
|
|
|
| - // Get the map of the receiver and compute the hash.
|
| - __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
|
| - __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
| - __ add(scratch, scratch, Operand(ip));
|
| - uint32_t mask = kPrimaryTableSize - 1;
|
| - // We shift out the last two bits because they are not part of the hash and
|
| - // they are always 01 for maps.
|
| - __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
|
| - // Mask down the eor argument to the minimum to keep the immediate
|
| - // ARM-encodable.
|
| - __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
|
| - // Prefer and_ to ubfx here because ubfx takes 2 cycles.
|
| - __ and_(scratch, scratch, Operand(mask));
|
| + // Compute the hash for primary table.
|
| + __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
|
| + __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
| + __ Add(scratch, scratch, extra);
|
| + __ Eor(scratch, scratch, flags);
|
| + // We shift out the last two bits because they are not part of the hash.
|
| + __ Ubfx(scratch, scratch, kHeapObjectTagSize,
|
| + CountTrailingZeros(kPrimaryTableSize, 64));
|
|
|
| // Probe the primary table.
|
| - ProbeTable(isolate,
|
| - masm,
|
| - flags,
|
| - kPrimary,
|
| - receiver,
|
| - name,
|
| - scratch,
|
| - extra,
|
| - extra2,
|
| - extra3);
|
| -
|
| - // Primary miss: Compute hash for secondary probe.
|
| - __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
|
| - uint32_t mask2 = kSecondaryTableSize - 1;
|
| - __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
|
| - __ and_(scratch, scratch, Operand(mask2));
|
| + ProbeTable(isolate, masm, flags, kPrimary, receiver, name,
|
| + scratch, extra, extra2, extra3);
|
| +
|
| + // Primary miss: Compute hash for secondary table.
|
| + __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
|
| + __ Add(scratch, scratch, flags >> kHeapObjectTagSize);
|
| + __ And(scratch, scratch, kSecondaryTableSize - 1);
|
|
|
| // Probe the secondary table.
|
| - ProbeTable(isolate,
|
| - masm,
|
| - flags,
|
| - kSecondary,
|
| - receiver,
|
| - name,
|
| - scratch,
|
| - extra,
|
| - extra2,
|
| - extra3);
|
| + ProbeTable(isolate, masm, flags, kSecondary, receiver, name,
|
| + scratch, extra, extra2, extra3);
|
|
|
| // Cache miss: Fall-through and let caller handle the miss by
|
| // entering the runtime system.
|
| - __ bind(&miss);
|
| + __ Bind(&miss);
|
| __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
|
| extra2, extra3);
|
| }
|
| @@ -278,18 +266,17 @@ void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
|
| int index,
|
| Register prototype) {
|
| // Load the global or builtins object from the current context.
|
| - __ ldr(prototype,
|
| - MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
|
| + __ Ldr(prototype, GlobalObjectMemOperand());
|
| // Load the native context from the global or builtins object.
|
| - __ ldr(prototype,
|
| + __ Ldr(prototype,
|
| FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
|
| // Load the function from the native context.
|
| - __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
|
| - // Load the initial map. The global functions all have initial maps.
|
| - __ ldr(prototype,
|
| + __ Ldr(prototype, ContextMemOperand(prototype, index));
|
| + // Load the initial map. The global functions all have initial maps.
|
| + __ Ldr(prototype,
|
| FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
|
| // Load the prototype from the initial map.
|
| - __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
|
| + __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
|
| }
|
|
|
|
|
| @@ -300,18 +287,16 @@ void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
|
| Label* miss) {
|
| Isolate* isolate = masm->isolate();
|
| // Check we're still in the same context.
|
| - __ ldr(prototype,
|
| - MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
|
| - __ Move(ip, isolate->global_object());
|
| - __ cmp(prototype, ip);
|
| - __ b(ne, miss);
|
| + __ Ldr(prototype, GlobalObjectMemOperand());
|
| + __ Cmp(prototype, Operand(isolate->global_object()));
|
| + __ B(ne, miss);
|
| // Get the global function with the given index.
|
| Handle<JSFunction> function(
|
| JSFunction::cast(isolate->native_context()->get(index)));
|
| // Load its initial map. The global functions all have initial maps.
|
| - __ Move(prototype, Handle<Map>(function->initial_map()));
|
| + __ Mov(prototype, Operand(Handle<Map>(function->initial_map())));
|
| // Load the prototype from the initial map.
|
| - __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
|
| + __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
|
| }
|
|
|
|
|
| @@ -322,14 +307,16 @@ void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
|
| int index,
|
| Representation representation) {
|
| ASSERT(!FLAG_track_double_fields || !representation.IsDouble());
|
| - int offset = index * kPointerSize;
|
| - if (!inobject) {
|
| + USE(representation);
|
| + if (inobject) {
|
| + int offset = index * kPointerSize;
|
| + __ Ldr(dst, FieldMemOperand(src, offset));
|
| + } else {
|
| // Calculate the offset into the properties array.
|
| - offset = offset + FixedArray::kHeaderSize;
|
| - __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
|
| - src = dst;
|
| + int offset = index * kPointerSize + FixedArray::kHeaderSize;
|
| + __ Ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
|
| + __ Ldr(dst, FieldMemOperand(dst, offset));
|
| }
|
| - __ ldr(dst, FieldMemOperand(src, offset));
|
| }
|
|
|
|
|
| @@ -337,72 +324,74 @@ void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
|
| Register receiver,
|
| Register scratch,
|
| Label* miss_label) {
|
| + ASSERT(!AreAliased(receiver, scratch));
|
| +
|
| // Check that the receiver isn't a smi.
|
| __ JumpIfSmi(receiver, miss_label);
|
|
|
| // Check that the object is a JS array.
|
| - __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
|
| - __ b(ne, miss_label);
|
| + __ JumpIfNotObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE,
|
| + miss_label);
|
|
|
| // Load length directly from the JS array.
|
| - __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| + __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| __ Ret();
|
| }
|
|
|
|
|
| // Generate code to check if an object is a string. If the object is a
|
| // heap object, its map's instance type is left in the scratch1 register.
|
| -// If this is not needed, scratch1 and scratch2 may be the same register.
|
| static void GenerateStringCheck(MacroAssembler* masm,
|
| Register receiver,
|
| Register scratch1,
|
| - Register scratch2,
|
| Label* smi,
|
| Label* non_string_object) {
|
| // Check that the receiver isn't a smi.
|
| __ JumpIfSmi(receiver, smi);
|
|
|
| - // Check that the object is a string.
|
| - __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
| - __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
|
| - __ and_(scratch2, scratch1, Operand(kIsNotStringMask));
|
| - // The cast is to resolve the overload for the argument of 0x0.
|
| - __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag)));
|
| - __ b(ne, non_string_object);
|
| + // Get the object's instance type field.
|
| + __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
| + __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
|
| + // Check if the "not string" bit is set.
|
| + __ Tbnz(scratch1, MaskToBit(kNotStringTag), non_string_object);
|
| }
|
|
|
|
|
| // Generate code to load the length from a string object and return the length.
|
| // If the receiver object is not a string or a wrapped string object the
|
| // execution continues at the miss label. The register containing the
|
| -// receiver is potentially clobbered.
|
| +// receiver is not clobbered if the receiver is not a string.
|
| void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
|
| Register receiver,
|
| Register scratch1,
|
| Register scratch2,
|
| Label* miss,
|
| bool support_wrappers) {
|
| + // Input registers can't alias because we don't want to clobber the
|
| + // receiver register if the object is not a string.
|
| + ASSERT(!AreAliased(receiver, scratch1, scratch2));
|
| +
|
| Label check_wrapper;
|
|
|
| // Check if the object is a string leaving the instance type in the
|
| // scratch1 register.
|
| - GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
|
| + GenerateStringCheck(masm, receiver, scratch1, miss,
|
| support_wrappers ? &check_wrapper : miss);
|
|
|
| // Load length directly from the string.
|
| - __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
|
| + __ Ldr(x0, FieldMemOperand(receiver, String::kLengthOffset));
|
| __ Ret();
|
|
|
| if (support_wrappers) {
|
| // Check if the object is a JSValue wrapper.
|
| - __ bind(&check_wrapper);
|
| - __ cmp(scratch1, Operand(JS_VALUE_TYPE));
|
| - __ b(ne, miss);
|
| + __ Bind(&check_wrapper);
|
| + __ Cmp(scratch1, Operand(JS_VALUE_TYPE));
|
| + __ B(ne, miss);
|
|
|
| // Unwrap the value and check if the wrapped value is a string.
|
| - __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
|
| - GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
|
| - __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
|
| + __ Ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
|
| + GenerateStringCheck(masm, scratch1, scratch2, miss, miss);
|
| + __ Ldr(x0, FieldMemOperand(scratch1, String::kLengthOffset));
|
| __ Ret();
|
| }
|
| }
|
| @@ -414,7 +403,11 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
|
| Register scratch2,
|
| Label* miss_label) {
|
| __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
|
| - __ mov(r0, scratch1);
|
| + // TryGetFunctionPrototype can't put the result directly in x0 because the
|
| + // 3 input registers can't alias and we call this function from
|
| + // LoadIC::GenerateFunctionPrototype, where receiver is x0. So we explicitly
|
| + // move the result into x0.
|
| + __ Mov(x0, scratch1);
|
| __ Ret();
|
| }
|
|
|
| @@ -426,23 +419,23 @@ static void GenerateCheckPropertyCell(MacroAssembler* masm,
|
| Handle<GlobalObject> global,
|
| Handle<Name> name,
|
| Register scratch,
|
| + Register the_hole,
|
| Label* miss) {
|
| Handle<JSGlobalPropertyCell> cell =
|
| GlobalObject::EnsurePropertyCell(global, name);
|
| ASSERT(cell->value()->IsTheHole());
|
| - __ mov(scratch, Operand(cell));
|
| - __ ldr(scratch,
|
| + __ Mov(scratch, Operand(cell));
|
| + __ Ldr(scratch,
|
| FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
|
| - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
|
| - __ cmp(scratch, ip);
|
| - __ b(ne, miss);
|
| + __ Cmp(scratch, the_hole);
|
| + __ B(ne, miss);
|
| }
|
|
|
|
|
| -// Generate StoreTransition code, value is passed in r0 register.
|
| -// When leaving generated code after success, the receiver_reg and name_reg
|
| -// may be clobbered. Upon branch to miss_label, the receiver and name
|
| -// registers have their original values.
|
| +// Generate StoreTransition code, value is passed in x0 register.
|
| +// When leaving generated code after success, the receiver_reg and name_reg may
|
| +// be clobbered. Upon branch to miss_label, the receiver and name registers have
|
| +// their original values.
|
| void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
|
| Handle<JSObject> object,
|
| LookupResult* lookup,
|
| @@ -457,9 +450,14 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
|
| Label* miss_label,
|
| Label* miss_restore_name,
|
| Label* slow) {
|
| - // r0 : value
|
| Label exit;
|
|
|
| + ASSERT(!AreAliased(receiver_reg, name_reg, value_reg,
|
| + scratch1, scratch2, scratch3));
|
| +
|
| + // We don't need scratch3.
|
| + scratch3 = NoReg;
|
| +
|
| // Check that the map of the object hasn't changed.
|
| __ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
|
| DO_SMI_CHECK);
|
| @@ -499,11 +497,13 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
|
| // holder.
|
| if (lookup->holder() == *object) {
|
| if (holder->IsJSGlobalObject()) {
|
| + __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
|
| GenerateCheckPropertyCell(
|
| masm,
|
| Handle<GlobalObject>(GlobalObject::cast(holder)),
|
| name,
|
| scratch1,
|
| + scratch2, // The hole.
|
| miss_restore_name);
|
| } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
|
| GenerateDictionaryNegativeLookup(
|
| @@ -512,6 +512,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
|
| }
|
| }
|
|
|
| + // We've possibly already clobbered name_reg at this point, so use it for
|
| + // storage_reg.
|
| Register storage_reg = name_reg;
|
|
|
| if (FLAG_track_fields && representation.IsSmi()) {
|
| @@ -520,35 +522,32 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
|
| __ JumpIfSmi(value_reg, miss_restore_name);
|
| } else if (FLAG_track_double_fields && representation.IsDouble()) {
|
| Label do_store, heap_number;
|
| - __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
|
| - __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
|
| + __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2);
|
|
|
| + // TODO(jbramley): Is fp_scratch the most appropriate FP scratch register?
|
| + // It's only used in Fcmp, but it's not really safe to use it like this.
|
| __ JumpIfNotSmi(value_reg, &heap_number);
|
| - __ SmiUntag(scratch1, value_reg);
|
| - __ vmov(s0, scratch1);
|
| - __ vcvt_f64_s32(d0, s0);
|
| - __ jmp(&do_store);
|
| + __ SmiUntagToDouble(fp_scratch, value_reg);
|
| + __ B(&do_store);
|
|
|
| - __ bind(&heap_number);
|
| + __ Bind(&heap_number);
|
| __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
|
| miss_restore_name, DONT_DO_SMI_CHECK);
|
| - __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
|
| + __ Ldr(fp_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
|
|
|
| - __ bind(&do_store);
|
| - __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
|
| + __ Bind(&do_store);
|
| + __ Str(fp_scratch, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
|
| }
|
|
|
| - // Stub never generated for non-global objects that require access
|
| - // checks.
|
| + // Stub never generated for non-global objects that require access checks.
|
| ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
|
|
|
| // Perform map transition for the receiver if necessary.
|
| if (object->map()->unused_property_fields() == 0) {
|
| // The properties must be extended before we can store the value.
|
| // We jump to a runtime call that extends the properties array.
|
| - __ push(receiver_reg);
|
| - __ mov(r2, Operand(transition));
|
| - __ Push(r2, r0);
|
| + __ Mov(scratch1, Operand(transition));
|
| + __ Push(receiver_reg, scratch1, value_reg);
|
| __ TailCallExternalReference(
|
| ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
|
| masm->isolate()),
|
| @@ -558,8 +557,8 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
|
| }
|
|
|
| // Update the map of the object.
|
| - __ mov(scratch1, Operand(transition));
|
| - __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
|
| + __ Mov(scratch1, Operand(transition));
|
| + __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
|
|
|
| // Update the write barrier for the map field and pass the now unused
|
| // name_reg as scratch register.
|
| @@ -571,7 +570,6 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
|
| kDontSaveFPRegs,
|
| OMIT_REMEMBERED_SET,
|
| OMIT_SMI_CHECK);
|
| -
|
| int index = transition->instance_descriptors()->GetFieldIndex(
|
| transition->LastAdded());
|
|
|
| @@ -586,10 +584,12 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
|
| if (index < 0) {
|
| // Set the property straight into the object.
|
| int offset = object->map()->instance_size() + (index * kPointerSize);
|
| + // TODO(jbramley): This construct appears in several places in this
|
| + // function. Try to clean it up, perhaps using a result_reg.
|
| if (FLAG_track_double_fields && representation.IsDouble()) {
|
| - __ str(storage_reg, FieldMemOperand(receiver_reg, offset));
|
| + __ Str(storage_reg, FieldMemOperand(receiver_reg, offset));
|
| } else {
|
| - __ str(value_reg, FieldMemOperand(receiver_reg, offset));
|
| + __ Str(value_reg, FieldMemOperand(receiver_reg, offset));
|
| }
|
|
|
| if (!FLAG_track_fields || !representation.IsSmi()) {
|
| @@ -599,7 +599,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
|
| // Update the write barrier for the array address.
|
| // Pass the now unused name_reg as a scratch register.
|
| if (!FLAG_track_double_fields || !representation.IsDouble()) {
|
| - __ mov(name_reg, value_reg);
|
| + __ Mov(name_reg, value_reg);
|
| } else {
|
| ASSERT(storage_reg.is(name_reg));
|
| }
|
| @@ -616,12 +616,12 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
|
| // Write to the properties array.
|
| int offset = index * kPointerSize + FixedArray::kHeaderSize;
|
| // Get the properties array
|
| - __ ldr(scratch1,
|
| + __ Ldr(scratch1,
|
| FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
|
| if (FLAG_track_double_fields && representation.IsDouble()) {
|
| - __ str(storage_reg, FieldMemOperand(scratch1, offset));
|
| + __ Str(storage_reg, FieldMemOperand(scratch1, offset));
|
| } else {
|
| - __ str(value_reg, FieldMemOperand(scratch1, offset));
|
| + __ Str(value_reg, FieldMemOperand(scratch1, offset));
|
| }
|
|
|
| if (!FLAG_track_fields || !representation.IsSmi()) {
|
| @@ -631,7 +631,7 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
|
| // Update the write barrier for the array address.
|
| // Ok to clobber receiver_reg and name_reg, since we return.
|
| if (!FLAG_track_double_fields || !representation.IsDouble()) {
|
| - __ mov(name_reg, value_reg);
|
| + __ Mov(name_reg, value_reg);
|
| } else {
|
| ASSERT(storage_reg.is(name_reg));
|
| }
|
| @@ -646,17 +646,17 @@ void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
|
| }
|
| }
|
|
|
| - // Return the value (register r0).
|
| - ASSERT(value_reg.is(r0));
|
| - __ bind(&exit);
|
| + __ Bind(&exit);
|
| + // Return the value (register x0).
|
| + ASSERT(value_reg.is(x0));
|
| __ Ret();
|
| }
|
|
|
|
|
| -// Generate StoreField code, value is passed in r0 register.
|
| -// When leaving generated code after success, the receiver_reg and name_reg
|
| -// may be clobbered. Upon branch to miss_label, the receiver and name
|
| -// registers have their original values.
|
| +// Generate StoreField code, value is passed in x0 register.
|
| +// When leaving generated code after success, the receiver_reg and name_reg may
|
| +// be clobbered. Upon branch to miss_label, the receiver and name registers have
|
| +// their original values.
|
| void StubCompiler::GenerateStoreField(MacroAssembler* masm,
|
| Handle<JSObject> object,
|
| LookupResult* lookup,
|
| @@ -666,7 +666,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
|
| Register scratch1,
|
| Register scratch2,
|
| Label* miss_label) {
|
| - // r0 : value
|
| + // x0 : value
|
| Label exit;
|
|
|
| // Check that the map of the object hasn't changed.
|
| @@ -698,32 +698,33 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
|
| } else if (FLAG_track_double_fields && representation.IsDouble()) {
|
| // Load the double storage.
|
| if (index < 0) {
|
| - int offset = object->map()->instance_size() + (index * kPointerSize);
|
| - __ ldr(scratch1, FieldMemOperand(receiver_reg, offset));
|
| + int offset = (index * kPointerSize) + object->map()->instance_size();
|
| + __ Ldr(scratch1, FieldMemOperand(receiver_reg, offset));
|
| } else {
|
| - __ ldr(scratch1,
|
| + int offset = (index * kPointerSize) + FixedArray::kHeaderSize;
|
| + __ Ldr(scratch1,
|
| FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
|
| - int offset = index * kPointerSize + FixedArray::kHeaderSize;
|
| - __ ldr(scratch1, FieldMemOperand(scratch1, offset));
|
| + __ Ldr(scratch1, FieldMemOperand(scratch1, offset));
|
| }
|
|
|
| // Store the value into the storage.
|
| Label do_store, heap_number;
|
| + // TODO(jbramley): Is fp_scratch the most appropriate FP scratch register?
|
| + // It's only used in Fcmp, but it's not really safe to use it like this.
|
| __ JumpIfNotSmi(value_reg, &heap_number);
|
| - __ SmiUntag(scratch2, value_reg);
|
| - __ vmov(s0, scratch2);
|
| - __ vcvt_f64_s32(d0, s0);
|
| - __ jmp(&do_store);
|
| + __ SmiUntagToDouble(fp_scratch, value_reg);
|
| + __ B(&do_store);
|
|
|
| - __ bind(&heap_number);
|
| + __ Bind(&heap_number);
|
| __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
|
| miss_label, DONT_DO_SMI_CHECK);
|
| - __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
|
| + __ Ldr(fp_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
|
| +
|
| + __ Bind(&do_store);
|
| + __ Str(fp_scratch, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
|
|
|
| - __ bind(&do_store);
|
| - __ vstr(d0, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
|
| - // Return the value (register r0).
|
| - ASSERT(value_reg.is(r0));
|
| + // Return the value (register x0).
|
| + ASSERT(value_reg.is(x0));
|
| __ Ret();
|
| return;
|
| }
|
| @@ -734,7 +735,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
|
| if (index < 0) {
|
| // Set the property straight into the object.
|
| int offset = object->map()->instance_size() + (index * kPointerSize);
|
| - __ str(value_reg, FieldMemOperand(receiver_reg, offset));
|
| + __ Str(value_reg, FieldMemOperand(receiver_reg, offset));
|
|
|
| if (!FLAG_track_fields || !representation.IsSmi()) {
|
| // Skip updating write barrier if storing a smi.
|
| @@ -742,7 +743,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
|
|
|
| // Update the write barrier for the array address.
|
| // Pass the now unused name_reg as a scratch register.
|
| - __ mov(name_reg, value_reg);
|
| + __ Mov(name_reg, value_reg);
|
| __ RecordWriteField(receiver_reg,
|
| offset,
|
| name_reg,
|
| @@ -756,9 +757,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
|
| // Write to the properties array.
|
| int offset = index * kPointerSize + FixedArray::kHeaderSize;
|
| // Get the properties array
|
| - __ ldr(scratch1,
|
| + __ Ldr(scratch1,
|
| FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
|
| - __ str(value_reg, FieldMemOperand(scratch1, offset));
|
| + __ Str(value_reg, FieldMemOperand(scratch1, offset));
|
|
|
| if (!FLAG_track_fields || !representation.IsSmi()) {
|
| // Skip updating write barrier if storing a smi.
|
| @@ -766,7 +767,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
|
|
|
| // Update the write barrier for the array address.
|
| // Ok to clobber receiver_reg and name_reg, since we return.
|
| - __ mov(name_reg, value_reg);
|
| + __ Mov(name_reg, value_reg);
|
| __ RecordWriteField(scratch1,
|
| offset,
|
| name_reg,
|
| @@ -778,9 +779,9 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
|
| }
|
| }
|
|
|
| - // Return the value (register r0).
|
| - ASSERT(value_reg.is(r0));
|
| - __ bind(&exit);
|
| + __ Bind(&exit);
|
| + // Return the value (register x0).
|
| + ASSERT(value_reg.is(x0));
|
| __ Ret();
|
| }
|
|
|
| @@ -789,39 +790,70 @@ void BaseStoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
|
| Label* label,
|
| Handle<Name> name) {
|
| if (!label->is_unused()) {
|
| - __ bind(label);
|
| - __ mov(this->name(), Operand(name));
|
| + __ Bind(label);
|
| + __ Mov(this->name(), Operand(name));
|
| }
|
| }
|
|
|
|
|
| +// Calls GenerateCheckPropertyCell for each global object in the prototype chain
|
| +// from object to (but not including) holder.
|
| +static void GenerateCheckPropertyCells(MacroAssembler* masm,
|
| + Handle<JSObject> object,
|
| + Handle<JSObject> holder,
|
| + Handle<Name> name,
|
| + Register scratch1,
|
| + Register scratch2,
|
| + Label* miss) {
|
| + bool the_hole_is_loaded = false;
|
| + Handle<JSObject> current = object;
|
| + while (!current.is_identical_to(holder)) {
|
| + if (current->IsGlobalObject()) {
|
| + if (!the_hole_is_loaded) {
|
| + __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
|
| + the_hole_is_loaded = true;
|
| + }
|
| + GenerateCheckPropertyCell(masm,
|
| + Handle<GlobalObject>::cast(current),
|
| + name,
|
| + scratch1,
|
| + scratch2,
|
| + miss);
|
| + }
|
| + current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
|
| + }
|
| +}
|
| +
|
| +
|
| +// The function to called must be passed in x1.
|
| static void GenerateCallFunction(MacroAssembler* masm,
|
| Handle<Object> object,
|
| const ParameterCount& arguments,
|
| Label* miss,
|
| - Code::ExtraICState extra_ic_state) {
|
| - // ----------- S t a t e -------------
|
| - // -- r0: receiver
|
| - // -- r1: function to call
|
| - // -----------------------------------
|
| + Code::ExtraICState extra_ic_state,
|
| + Register function,
|
| + Register receiver,
|
| + Register scratch) {
|
| + ASSERT(!AreAliased(function, receiver, scratch));
|
| + ASSERT(function.Is(x1));
|
|
|
| // Check that the function really is a function.
|
| - __ JumpIfSmi(r1, miss);
|
| - __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
|
| - __ b(ne, miss);
|
| + __ JumpIfSmi(function, miss);
|
| + __ JumpIfNotObjectType(function, scratch, scratch, JS_FUNCTION_TYPE, miss);
|
|
|
| - // Patch the receiver on the stack with the global proxy if
|
| - // necessary.
|
| + // Patch the receiver on the stack with the global proxy if necessary.
|
| if (object->IsGlobalObject()) {
|
| - __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
|
| - __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize));
|
| + __ Ldr(scratch,
|
| + FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset));
|
| + __ Poke(scratch, arguments.immediate() * kPointerSize);
|
| }
|
|
|
| // Invoke the function.
|
| CallKind call_kind = CallICBase::Contextual::decode(extra_ic_state)
|
| ? CALL_AS_FUNCTION
|
| : CALL_AS_METHOD;
|
| - __ InvokeFunction(r1, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
|
| + __ InvokeFunction(
|
| + function, arguments, JUMP_FUNCTION, NullCallWrapper(), call_kind);
|
| }
|
|
|
|
|
| @@ -830,18 +862,16 @@ static void PushInterceptorArguments(MacroAssembler* masm,
|
| Register holder,
|
| Register name,
|
| Handle<JSObject> holder_obj) {
|
| - __ push(name);
|
| + __ Push(name);
|
| Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
|
| ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
|
| Register scratch = name;
|
| - __ mov(scratch, Operand(interceptor));
|
| - __ push(scratch);
|
| - __ push(receiver);
|
| - __ push(holder);
|
| - __ ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
|
| - __ push(scratch);
|
| - __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
|
| - __ push(scratch);
|
| + __ Mov(scratch, Operand(interceptor));
|
| + __ Push(scratch, receiver, holder);
|
| + __ Ldr(scratch, FieldMemOperand(scratch, InterceptorInfo::kDataOffset));
|
| + __ Push(scratch);
|
| + __ Mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate())));
|
| + __ Push(scratch);
|
| }
|
|
|
|
|
| @@ -856,8 +886,10 @@ static void CompileCallLoadPropertyWithInterceptor(
|
| ExternalReference ref =
|
| ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly),
|
| masm->isolate());
|
| - __ mov(r0, Operand(6));
|
| - __ mov(r1, Operand(ref));
|
| + // Put the number of on-stack arguments for runtime call in x0.
|
| +  // These arguments have been pushed by PushInterceptorArguments.
|
| + __ Mov(x0, 6);
|
| + __ Mov(x1, Operand(ref));
|
|
|
| CEntryStub stub(1);
|
| __ CallStub(&stub);
|
| @@ -872,10 +904,8 @@ static const int kFastApiCallArguments = FunctionCallbackArguments::kArgsLength;
|
| // These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
|
| static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
|
| Register scratch) {
|
| - __ mov(scratch, Operand(Smi::FromInt(0)));
|
| - for (int i = 0; i < kFastApiCallArguments; i++) {
|
| - __ push(scratch);
|
| - }
|
| + ASSERT(Smi::FromInt(0) == 0);
|
| + __ PushMultipleTimes(kFastApiCallArguments, xzr);
|
| }
|
|
|
|
|
| @@ -890,63 +920,85 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
|
| int argc) {
|
| // ----------- S t a t e -------------
|
| // -- sp[0] : holder (set by CheckPrototypes)
|
| - // -- sp[4] : callee JS function
|
| - // -- sp[8] : call data
|
| - // -- sp[12] : isolate
|
| - // -- sp[16] : ReturnValue default value
|
| - // -- sp[20] : ReturnValue
|
| - // -- sp[24] : last JS argument
|
| + // -- sp[8] : callee JS function
|
| + // -- sp[16] : call data
|
| + // -- sp[24] : isolate
|
| + // -- sp[32] : ReturnValue default value
|
| + // -- sp[40] : ReturnValue
|
| + // -- sp[48] : last JS argument
|
| // -- ...
|
| - // -- sp[(argc + 5) * 4] : first JS argument
|
| - // -- sp[(argc + 6) * 4] : receiver
|
| + // -- sp[(argc + 5) * 8] : first JS argument
|
| + // -- sp[(argc + 6) * 8] : receiver
|
| // -----------------------------------
|
| // Get the function and setup the context.
|
| Handle<JSFunction> function = optimization.constant_function();
|
| - __ LoadHeapObject(r5, function);
|
| - __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
|
| + Register function_reg = x5;
|
| + __ LoadHeapObject(function_reg, function);
|
| + __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
|
|
|
| // Pass the additional arguments.
|
| Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
|
| Handle<Object> call_data(api_call_info->data(), masm->isolate());
|
| + Register call_data_reg = x6;
|
| if (masm->isolate()->heap()->InNewSpace(*call_data)) {
|
| - __ Move(r0, api_call_info);
|
| - __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
|
| + __ Mov(x0, Operand(api_call_info));
|
| + __ Ldr(call_data_reg, FieldMemOperand(x0, CallHandlerInfo::kDataOffset));
|
| } else {
|
| - __ Move(r6, call_data);
|
| + __ Mov(call_data_reg, Operand(call_data));
|
| }
|
| - __ mov(r7, Operand(ExternalReference::isolate_address(masm->isolate())));
|
| - // Store JS function, call data, isolate ReturnValue default and ReturnValue.
|
| - __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit());
|
| - __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
|
| - __ str(r5, MemOperand(sp, 4 * kPointerSize));
|
| - __ str(r5, MemOperand(sp, 5 * kPointerSize));
|
|
|
| - // Prepare arguments.
|
| - __ add(r2, sp, Operand(5 * kPointerSize));
|
| + Register isolate_reg = x7;
|
| + __ Mov(isolate_reg,
|
| + Operand(ExternalReference::isolate_address(masm->isolate())));
|
| +
|
| + Register undefined_reg = x8;
|
| + __ LoadRoot(undefined_reg, Heap::kUndefinedValueRootIndex);
|
|
|
| - // Allocate the v8::Arguments structure in the arguments' space since
|
| - // it's not controlled by GC.
|
| - const int kApiStackSpace = 4;
|
| + // Store JS function, call data, isolate, ReturnValue default and ReturnValue.
|
| + // TODO(jbramley): Try to combine these accesses using stp.
|
| + __ Poke(function_reg, 1 * kXRegSizeInBytes);
|
| + __ Poke(call_data_reg, 2 * kXRegSizeInBytes);
|
| + __ Poke(isolate_reg, 3 * kXRegSizeInBytes);
|
| + __ Poke(undefined_reg, 4 * kXRegSizeInBytes);
|
| + __ Poke(undefined_reg, 5 * kXRegSizeInBytes);
|
| +
|
| + Register implicit_args = x2;
|
| + __ Add(implicit_args, masm->StackPointer(), 5 * kXRegSizeInBytes);
|
|
|
| FrameScope frame_scope(masm, StackFrame::MANUAL);
|
| - __ EnterExitFrame(false, kApiStackSpace);
|
| + // Allocate the v8::Arguments structure inside the ExitFrame since it's not
|
| + // controlled by GC.
|
| + const int kApiArgsStackSpace = 4;
|
| + __ EnterExitFrame(
|
| + false,
|
| + x3,
|
| + kApiArgsStackSpace + MacroAssembler::kCallApiFunctionSpillSpace);
|
| +
|
| + // Arguments structure is after the return address.
|
| + Register args = x0;
|
| + __ Add(args, masm->StackPointer(), kPointerSize);
|
|
|
| - // r0 = v8::Arguments&
|
| - // Arguments is after the return address.
|
| - __ add(r0, sp, Operand(1 * kPointerSize));
|
| // v8::Arguments::implicit_args_
|
| - __ str(r2, MemOperand(r0, 0 * kPointerSize));
|
| + __ Str(implicit_args, MemOperand(args, 0 * kPointerSize));
|
| // v8::Arguments::values_
|
| - __ add(ip, r2, Operand(argc * kPointerSize));
|
| - __ str(ip, MemOperand(r0, 1 * kPointerSize));
|
| + __ Add(x3, implicit_args, argc * kPointerSize);
|
| + __ Str(x3, MemOperand(args, 1 * kPointerSize));
|
| // v8::Arguments::length_ = argc
|
| - __ mov(ip, Operand(argc));
|
| - __ str(ip, MemOperand(r0, 2 * kPointerSize));
|
| + __ Mov(x3, argc);
|
| + __ Str(x3, MemOperand(args, 2 * kPointerSize));
|
| // v8::Arguments::is_construct_call = 0
|
| - __ mov(ip, Operand::Zero());
|
| - __ str(ip, MemOperand(r0, 3 * kPointerSize));
|
| -
|
| + __ Mov(x3, 0);
|
| + __ Str(x3, MemOperand(args, 3 * kPointerSize));
|
| +
|
| + // After the call to the API function we need to free memory used for:
|
| + // - JS arguments
|
| + // - the receiver
|
| + // - the space allocated by ReserveSpaceForFastApiCall.
|
| + //
|
| +  // The memory allocated for the v8::Arguments structure will be freed when we
|
| + // leave the ExitFrame.
|
| const int kStackUnwindSpace = argc + kFastApiCallArguments + 1;
|
| +
|
| Address function_address = v8::ToCData<Address>(api_call_info->callback());
|
| bool returns_handle =
|
| !CallbackTable::ReturnsVoid(masm->isolate(), function_address);
|
| @@ -955,12 +1007,14 @@ static void GenerateFastApiDirectCall(MacroAssembler* masm,
|
| returns_handle ?
|
| ExternalReference::DIRECT_API_CALL :
|
| ExternalReference::DIRECT_API_CALL_NEW;
|
| - ExternalReference ref = ExternalReference(&fun,
|
| - type,
|
| - masm->isolate());
|
| + ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
|
| AllowExternalCallThatCantCauseGC scope(masm);
|
| + // CallApiFunctionAndReturn can spill registers inside the exit frame,
|
| + // after the return address and the v8::Arguments structure.
|
| + const int spill_offset = 1 + kApiArgsStackSpace;
|
| __ CallApiFunctionAndReturn(ref,
|
| kStackUnwindSpace,
|
| + spill_offset,
|
| returns_handle,
|
| kFastApiCallArguments + 1);
|
| }
|
| @@ -992,6 +1046,7 @@ class CallInterceptorCompiler BASE_EMBEDDED {
|
|
|
| // Check that the receiver isn't a smi.
|
| __ JumpIfSmi(receiver, miss);
|
| +
|
| CallOptimization optimization(lookup);
|
| if (optimization.is_constant_call()) {
|
| CompileCacheable(masm, object, receiver, scratch1, scratch2, scratch3,
|
| @@ -1016,10 +1071,12 @@ class CallInterceptorCompiler BASE_EMBEDDED {
|
| Label* miss_label) {
|
| ASSERT(optimization.is_constant_call());
|
| ASSERT(!lookup->holder()->IsGlobalObject());
|
| +
|
| Counters* counters = masm->isolate()->counters();
|
| int depth1 = kInvalidProtoDepth;
|
| int depth2 = kInvalidProtoDepth;
|
| bool can_do_fast_api_call = false;
|
| +
|
| if (optimization.is_simple_api_call() &&
|
| !lookup->holder()->IsGlobalObject()) {
|
| depth1 = optimization.GetPrototypeDepthOfExpectedType(
|
| @@ -1087,15 +1144,15 @@ class CallInterceptorCompiler BASE_EMBEDDED {
|
| JUMP_FUNCTION, NullCallWrapper(), call_kind);
|
| }
|
|
|
| - // Deferred code for fast API call case---clean preallocated space.
|
| + // Deferred code for fast API call case, clean preallocated space.
|
| if (can_do_fast_api_call) {
|
| - __ bind(&miss_cleanup);
|
| + __ Bind(&miss_cleanup);
|
| FreeSpaceForFastApiCall(masm);
|
| - __ b(miss_label);
|
| + __ B(miss_label);
|
| }
|
|
|
| // Invoke a regular function.
|
| - __ bind(®ular_invoke);
|
| + __ Bind(®ular_invoke);
|
| if (can_do_fast_api_call) {
|
| FreeSpaceForFastApiCall(masm);
|
| }
|
| @@ -1117,18 +1174,17 @@ class CallInterceptorCompiler BASE_EMBEDDED {
|
|
|
| // Call a runtime function to load the interceptor property.
|
| FrameScope scope(masm, StackFrame::INTERNAL);
|
| - // Save the name_ register across the call.
|
| - __ push(name_);
|
| + // The name_ register must be preserved across the call.
|
| + __ Push(name_);
|
| PushInterceptorArguments(masm, receiver, holder, name_, interceptor_holder);
|
| __ CallExternalReference(
|
| ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForCall),
|
| masm->isolate()),
|
| 6);
|
| - // Restore the name_ register.
|
| - __ pop(name_);
|
| - // Leave the internal frame.
|
| + __ Pop(name_);
|
| }
|
|
|
| +
|
| void LoadWithInterceptor(MacroAssembler* masm,
|
| Register receiver,
|
| Register holder,
|
| @@ -1143,13 +1199,13 @@ class CallInterceptorCompiler BASE_EMBEDDED {
|
| holder,
|
| name_,
|
| holder_obj);
|
| - __ pop(name_); // Restore the name.
|
| - __ pop(receiver); // Restore the holder.
|
| + __ Pop(name_, receiver);
|
| }
|
| +
|
| // If interceptor returns no-result sentinel, call the constant function.
|
| - __ LoadRoot(scratch, Heap::kNoInterceptorResultSentinelRootIndex);
|
| - __ cmp(r0, scratch);
|
| - __ b(ne, interceptor_succeeded);
|
| + __ JumpIfNotRoot(x0,
|
| + Heap::kNoInterceptorResultSentinelRootIndex,
|
| + interceptor_succeeded);
|
| }
|
|
|
| StubCompiler* stub_compiler_;
|
| @@ -1158,44 +1214,6 @@ class CallInterceptorCompiler BASE_EMBEDDED {
|
| Code::ExtraICState extra_ic_state_;
|
| };
|
|
|
| -
|
| -// Calls GenerateCheckPropertyCell for each global object in the prototype chain
|
| -// from object to (but not including) holder.
|
| -static void GenerateCheckPropertyCells(MacroAssembler* masm,
|
| - Handle<JSObject> object,
|
| - Handle<JSObject> holder,
|
| - Handle<Name> name,
|
| - Register scratch,
|
| - Label* miss) {
|
| - Handle<JSObject> current = object;
|
| - while (!current.is_identical_to(holder)) {
|
| - if (current->IsGlobalObject()) {
|
| - GenerateCheckPropertyCell(masm,
|
| - Handle<GlobalObject>::cast(current),
|
| - name,
|
| - scratch,
|
| - miss);
|
| - }
|
| - current = Handle<JSObject>(JSObject::cast(current->GetPrototype()));
|
| - }
|
| -}
|
| -
|
| -
|
| -// Convert and store int passed in register ival to IEEE 754 single precision
|
| -// floating point value at memory location (dst + 4 * wordoffset)
|
| -// If VFP3 is available use it for conversion.
|
| -static void StoreIntAsFloat(MacroAssembler* masm,
|
| - Register dst,
|
| - Register wordoffset,
|
| - Register ival,
|
| - Register scratch1) {
|
| - __ vmov(s0, ival);
|
| - __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
|
| - __ vcvt_f32_s32(s0, s0);
|
| - __ vstr(s0, scratch1, 0);
|
| -}
|
| -
|
| -
|
| void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
|
| __ Jump(code, RelocInfo::CODE_TARGET);
|
| }
|
| @@ -1216,17 +1234,17 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
|
| Label* miss,
|
| PrototypeCheckType check) {
|
| Handle<JSObject> first = object;
|
| - // Make sure there's no overlap between holder and object registers.
|
| - ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
|
| - ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
|
| - && !scratch2.is(scratch1));
|
| +
|
| + // object_reg and holder_reg registers can alias.
|
| + ASSERT(!AreAliased(object_reg, scratch1, scratch2));
|
| + ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
|
|
|
| // Keep track of the current object in register reg.
|
| Register reg = object_reg;
|
| int depth = 0;
|
|
|
| if (save_at_depth == depth) {
|
| - __ str(reg, MemOperand(sp));
|
| + __ Poke(reg, 0);
|
| }
|
|
|
| // Check the maps in the prototype chain.
|
| @@ -1253,17 +1271,17 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
|
| GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
|
| scratch1, scratch2);
|
|
|
| - __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
|
| + __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
|
| reg = holder_reg; // From now on the object will be in holder_reg.
|
| - __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
|
| + __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
|
| } else {
|
| Register map_reg = scratch1;
|
| - if (!current.is_identical_to(first) || check == CHECK_ALL_MAPS) {
|
| + // TODO(jbramley): Skip this load when we don't need the map.
|
| + __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
|
| +
|
| + if (!current.is_identical_to(first) || (check == CHECK_ALL_MAPS)) {
|
| Handle<Map> current_map(current->map());
|
| - // CheckMap implicitly loads the map of |reg| into |map_reg|.
|
| - __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
|
| - } else {
|
| - __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
|
| + __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
|
| }
|
|
|
| // Check access rights to the global object. This has to happen after
|
| @@ -1277,15 +1295,15 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
|
| if (heap()->InNewSpace(*prototype)) {
|
| // The prototype is in new space; we cannot store a reference to it
|
| // in the code. Load it from the map.
|
| - __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
|
| + __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
|
| } else {
|
| // The prototype is in old space; load it directly.
|
| - __ mov(reg, Operand(prototype));
|
| + __ Mov(reg, Operand(prototype));
|
| }
|
| }
|
|
|
| if (save_at_depth == depth) {
|
| - __ str(reg, MemOperand(sp));
|
| + __ Poke(reg, 0);
|
| }
|
|
|
| // Go to the next object in the prototype chain.
|
| @@ -1295,9 +1313,10 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
|
| // Log the check depth.
|
| LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
|
|
|
| + // Check the holder map.
|
| if (!holder.is_identical_to(first) || check == CHECK_ALL_MAPS) {
|
| // Check the holder map.
|
| - __ CheckMap(reg, scratch1, Handle<Map>(holder->map()), miss,
|
| + __ CheckMap(reg, scratch1, Handle<Map>(current->map()), miss,
|
| DONT_DO_SMI_CHECK);
|
| }
|
|
|
| @@ -1310,7 +1329,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
|
| // If we've skipped any global objects, it's not enough to verify that
|
| // their maps haven't changed. We also need to check that the property
|
| // cell for the property is still empty.
|
| - GenerateCheckPropertyCells(masm(), object, holder, name, scratch1, miss);
|
| + GenerateCheckPropertyCells(masm(), object, holder, name,
|
| + scratch1, scratch2, miss);
|
|
|
| // Return the register containing the holder.
|
| return reg;
|
| @@ -1320,8 +1340,8 @@ Register StubCompiler::CheckPrototypes(Handle<JSObject> object,
|
| void BaseLoadStubCompiler::HandlerFrontendFooter(Label* success,
|
| Label* miss) {
|
| if (!miss->is_unused()) {
|
| - __ b(success);
|
| - __ bind(miss);
|
| + __ B(success);
|
| + __ Bind(miss);
|
| TailCallBuiltin(masm(), MissBuiltin(kind()));
|
| }
|
| }
|
| @@ -1338,14 +1358,16 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
|
|
|
| Register reg = HandlerFrontendHeader(object, object_reg, holder, name, &miss);
|
|
|
| +  // TODO(jbramley): HandlerFrontendHeader returns its result in scratch1(), so
|
| + // we can't use it below, but that isn't very obvious. Is there a better way
|
| + // of handling this?
|
| +
|
| if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
|
| - ASSERT(!reg.is(scratch2()));
|
| - ASSERT(!reg.is(scratch3()));
|
| - ASSERT(!reg.is(scratch4()));
|
| + ASSERT(!AreAliased(reg, scratch2(), scratch3(), scratch4()));
|
|
|
| // Load the properties dictionary.
|
| Register dictionary = scratch4();
|
| - __ ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
|
| + __ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
|
|
|
| // Probe the dictionary.
|
| Label probe_done;
|
| @@ -1356,7 +1378,7 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
|
| this->name(),
|
| scratch2(),
|
| scratch3());
|
| - __ bind(&probe_done);
|
| + __ Bind(&probe_done);
|
|
|
| // If probing finds an entry in the dictionary, scratch3 contains the
|
| // pointer into the dictionary. Check that the value is the callback.
|
| @@ -1364,9 +1386,9 @@ Register BaseLoadStubCompiler::CallbackHandlerFrontend(
|
| const int kElementsStartOffset = NameDictionary::kHeaderSize +
|
| NameDictionary::kElementsStartIndex * kPointerSize;
|
| const int kValueOffset = kElementsStartOffset + kPointerSize;
|
| - __ ldr(scratch2(), FieldMemOperand(pointer, kValueOffset));
|
| - __ cmp(scratch2(), Operand(callback));
|
| - __ b(ne, &miss);
|
| + __ Ldr(scratch2(), FieldMemOperand(pointer, kValueOffset));
|
| + __ Cmp(scratch2(), Operand(callback));
|
| + __ B(ne, &miss);
|
| }
|
|
|
| HandlerFrontendFooter(success, &miss);
|
| @@ -1387,7 +1409,8 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
|
| // If the last object in the prototype chain is a global object,
|
| // check that the global property cell is empty.
|
| if (!global.is_null()) {
|
| - GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
|
| + GenerateCheckPropertyCell(masm(), global, name,
|
| + scratch1(), scratch2(), &miss);
|
| }
|
|
|
| HandlerFrontendFooter(success, &miss);
|
| @@ -1398,7 +1421,7 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
|
| Handle<JSObject> holder,
|
| PropertyIndex field,
|
| Representation representation) {
|
| - if (!reg.is(receiver())) __ mov(receiver(), reg);
|
| + __ Mov(receiver(), reg);
|
| if (kind() == Code::LOAD_IC) {
|
| LoadFieldStub stub(field.is_inobject(holder),
|
| field.translate(holder),
|
| @@ -1415,7 +1438,7 @@ void BaseLoadStubCompiler::GenerateLoadField(Register reg,
|
|
|
| void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
|
| // Return the constant value.
|
| - __ LoadHeapObject(r0, value);
|
| + __ LoadHeapObject(x0, value);
|
| __ Ret();
|
| }
|
|
|
| @@ -1423,48 +1446,73 @@ void BaseLoadStubCompiler::GenerateLoadConstant(Handle<JSFunction> value) {
|
| void BaseLoadStubCompiler::GenerateLoadCallback(
|
| Register reg,
|
| Handle<ExecutableAccessorInfo> callback) {
|
| - // Build AccessorInfo::args_ list on the stack and push property name below
|
| - // the exit frame to make GC aware of them and store pointers to them.
|
| - __ push(receiver());
|
| - __ mov(scratch2(), sp); // scratch2 = AccessorInfo::args_
|
| + // Build ExecutableAccessorInfo::args_ list on the stack and push property
|
| + // name below the exit frame to make GC aware of them and store pointers to
|
| + // them.
|
| + __ Push(receiver());
|
| + Register args_addr = scratch2();
|
| + __ Mov(args_addr, __ StackPointer());
|
| +
|
| if (heap()->InNewSpace(callback->data())) {
|
| - __ Move(scratch3(), callback);
|
| - __ ldr(scratch3(), FieldMemOperand(scratch3(),
|
| + __ Mov(scratch3(), Operand(callback));
|
| + __ Ldr(scratch3(), FieldMemOperand(scratch3(),
|
| ExecutableAccessorInfo::kDataOffset));
|
| } else {
|
| - __ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
|
| + __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
|
| }
|
| + // TODO(jbramley): Find another scratch register and combine the pushes
|
| + // together. Can we use scratch1() and scratch2() here?
|
| __ Push(reg, scratch3());
|
| __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
|
| - __ mov(scratch4(), scratch3());
|
| - __ Push(scratch3(), scratch4());
|
| - __ mov(scratch4(),
|
| - Operand(ExternalReference::isolate_address(isolate())));
|
| - __ Push(scratch4(), name());
|
| - __ mov(r0, sp); // r0 = Handle<Name>
|
| -
|
| - const int kApiStackSpace = 1;
|
| - FrameScope frame_scope(masm(), StackFrame::MANUAL);
|
| - __ EnterExitFrame(false, kApiStackSpace);
|
| + __ Mov(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
|
| + __ Push(scratch3(), scratch3(), scratch4(), name());
|
|
|
| - // Create AccessorInfo instance on the stack above the exit frame with
|
| - // scratch2 (internal::Object** args_) as the data.
|
| - __ str(scratch2(), MemOperand(sp, 1 * kPointerSize));
|
| - __ add(r1, sp, Operand(1 * kPointerSize)); // r1 = AccessorInfo&
|
| + // Pass the Handle<Name> of the property name to the runtime.
|
| + __ Mov(x0, __ StackPointer());
|
|
|
| + FrameScope frame_scope(masm(), StackFrame::MANUAL);
|
| + const int kApiStackSpace = 1;
|
| + __ EnterExitFrame(false, scratch4(),
|
| + kApiStackSpace + MacroAssembler::kCallApiFunctionSpillSpace);
|
| +
|
| + // Create ExecutableAccessorInfo instance on the stack above the exit frame
|
| + // before the return address. ExecutableAccessorInfo has only one field: the
|
| + // address of args_.
|
| + __ Poke(args_addr, 1 * kPointerSize);
|
| +
|
| + // Get the address of ExecutableAccessorInfo instance and pass it to the
|
| + // runtime.
|
| + __ Add(x1, __ StackPointer(), 1 * kPointerSize);
|
| +
|
| + // CallApiFunctionAndReturn can spill registers inside the exit frame, after
|
| + // the return address and the ExecutableAccessorInfo instance.
|
| + const int spill_offset = 1 + kApiStackSpace;
|
| +
|
| + // After the call to the API function we need to free memory used for:
|
| + // - the holder
|
| + // - the callback data
|
| + // - the isolate
|
| + // - the property name
|
| + // - the receiver.
|
| + //
|
| +  // The memory allocated inside the ExitFrame will be freed when we leave
|
| + // the ExitFrame in CallApiFunctionAndReturn.
|
| const int kStackUnwindSpace = kFastApiCallArguments + 1;
|
| +
|
| + // Do the API call.
|
| Address getter_address = v8::ToCData<Address>(callback->getter());
|
| - bool returns_handle =
|
| - !CallbackTable::ReturnsVoid(isolate(), getter_address);
|
| + bool returns_handle = !CallbackTable::ReturnsVoid(isolate(), getter_address);
|
| ApiFunction fun(getter_address);
|
| ExternalReference::Type type =
|
| returns_handle ?
|
| ExternalReference::DIRECT_GETTER_CALL :
|
| ExternalReference::DIRECT_GETTER_CALL_NEW;
|
| -
|
| ExternalReference ref = ExternalReference(&fun, type, isolate());
|
| + // TODO(jbramley): I don't know where '5' comes from, but this goes away at
|
| + // some point.
|
| __ CallApiFunctionAndReturn(ref,
|
| kStackUnwindSpace,
|
| + spill_offset,
|
| returns_handle,
|
| 5);
|
| }
|
| @@ -1476,12 +1524,13 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
|
| Handle<JSObject> interceptor_holder,
|
| LookupResult* lookup,
|
| Handle<Name> name) {
|
| + ASSERT(!AreAliased(receiver(), this->name(),
|
| + scratch1(), scratch2(), scratch3()));
|
| ASSERT(interceptor_holder->HasNamedInterceptor());
|
| ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
|
|
|
| // So far the most popular follow ups for interceptor loads are FIELD
|
| - // and CALLBACKS, so inline only them, other cases may be added
|
| - // later.
|
| + // and CALLBACKS, so inline only them, other cases may be added later.
|
| bool compile_followup_inline = false;
|
| if (lookup->IsFound() && lookup->IsCacheable()) {
|
| if (lookup->IsField()) {
|
| @@ -1506,7 +1555,7 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
|
| // result. The CALLBACKS case needs the receiver to be passed into C++ code,
|
| // the FIELD case might cause a miss during the prototype check.
|
| bool must_perfrom_prototype_check = *interceptor_holder != lookup->holder();
|
| - bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
|
| + bool must_preserve_receiver_reg = !receiver().Is(holder_reg) &&
|
| (lookup->type() == CALLBACKS || must_perfrom_prototype_check);
|
|
|
| // Save necessary data before invoking an interceptor.
|
| @@ -1529,27 +1578,26 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
|
| // Check if interceptor provided a value for property. If it's
|
| // the case, return immediately.
|
| Label interceptor_failed;
|
| - __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
|
| - __ cmp(r0, scratch1());
|
| - __ b(eq, &interceptor_failed);
|
| + __ JumpIfRoot(x0,
|
| + Heap::kNoInterceptorResultSentinelRootIndex,
|
| + &interceptor_failed);
|
| frame_scope.GenerateLeaveFrame();
|
| __ Ret();
|
|
|
| - __ bind(&interceptor_failed);
|
| - __ pop(this->name());
|
| - __ pop(holder_reg);
|
| + __ Bind(&interceptor_failed);
|
| if (must_preserve_receiver_reg) {
|
| - __ pop(receiver());
|
| + __ Pop(this->name(), holder_reg, receiver());
|
| + } else {
|
| + __ Pop(this->name(), holder_reg);
|
| }
|
| // Leave the internal frame.
|
| }
|
| -
|
| GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
|
| } else { // !compile_followup_inline
|
| // Call the runtime system to load the interceptor.
|
| // Check that the maps haven't changed.
|
| - PushInterceptorArguments(masm(), receiver(), holder_reg,
|
| - this->name(), interceptor_holder);
|
| + PushInterceptorArguments(
|
| + masm(), receiver(), holder_reg, this->name(), interceptor_holder);
|
|
|
| ExternalReference ref =
|
| ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorForLoad),
|
| @@ -1560,38 +1608,43 @@ void BaseLoadStubCompiler::GenerateLoadInterceptor(
|
|
|
|
|
| void CallStubCompiler::GenerateNameCheck(Handle<Name> name, Label* miss) {
|
| + Register name_reg = x2;
|
| +
|
| if (kind_ == Code::KEYED_CALL_IC) {
|
| - __ cmp(r2, Operand(name));
|
| - __ b(ne, miss);
|
| + __ Cmp(name_reg, Operand(name));
|
| + __ B(ne, miss);
|
| }
|
| }
|
|
|
|
|
| +// The receiver is loaded from the stack and left in x0 register.
|
| void CallStubCompiler::GenerateGlobalReceiverCheck(Handle<JSObject> object,
|
| Handle<JSObject> holder,
|
| Handle<Name> name,
|
| Label* miss) {
|
| ASSERT(holder->IsGlobalObject());
|
|
|
| - // Get the number of arguments.
|
| const int argc = arguments().immediate();
|
|
|
| // Get the receiver from the stack.
|
| - __ ldr(r0, MemOperand(sp, argc * kPointerSize));
|
| + Register receiver = x0;
|
| + __ Peek(receiver, argc * kPointerSize);
|
|
|
| // Check that the maps haven't changed.
|
| - __ JumpIfSmi(r0, miss);
|
| - CheckPrototypes(object, r0, holder, r3, r1, r4, name, miss);
|
| + __ JumpIfSmi(receiver, miss);
|
| + CheckPrototypes(object, receiver, holder, x3, x1, x4, name, miss);
|
| }
|
|
|
|
|
| +// Load the function object into x1 register.
|
| void CallStubCompiler::GenerateLoadFunctionFromCell(
|
| Handle<JSGlobalPropertyCell> cell,
|
| Handle<JSFunction> function,
|
| Label* miss) {
|
| // Get the value from the cell.
|
| - __ mov(r3, Operand(cell));
|
| - __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
|
| + __ Mov(x3, Operand(cell));
|
| + Register function_reg = x1;
|
| + __ Ldr(function_reg, FieldMemOperand(x3, JSGlobalPropertyCell::kValueOffset));
|
|
|
| // Check that the cell contains the same function.
|
| if (heap()->InNewSpace(*function)) {
|
| @@ -1600,18 +1653,18 @@ void CallStubCompiler::GenerateLoadFunctionFromCell(
|
| // the nice side effect that multiple closures based on the same
|
| // function can all use this call IC. Before we load through the
|
| // function, we have to verify that it still is a function.
|
| - __ JumpIfSmi(r1, miss);
|
| - __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
|
| - __ b(ne, miss);
|
| + __ JumpIfSmi(function_reg, miss);
|
| + __ JumpIfNotObjectType(function_reg, x3, x3, JS_FUNCTION_TYPE, miss);
|
|
|
| // Check the shared function info. Make sure it hasn't changed.
|
| - __ Move(r3, Handle<SharedFunctionInfo>(function->shared()));
|
| - __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
|
| - __ cmp(r4, r3);
|
| + __ Mov(x3, Operand(Handle<SharedFunctionInfo>(function->shared())));
|
| + __ Ldr(x4,
|
| + FieldMemOperand(function_reg, JSFunction::kSharedFunctionInfoOffset));
|
| + __ Cmp(x4, x3);
|
| } else {
|
| - __ cmp(r1, Operand(function));
|
| + __ Cmp(function_reg, Operand(function));
|
| }
|
| - __ b(ne, miss);
|
| + __ B(ne, miss);
|
| }
|
|
|
|
|
| @@ -1629,29 +1682,34 @@ Handle<Code> CallStubCompiler::CompileCallField(Handle<JSObject> object,
|
| PropertyIndex index,
|
| Handle<Name> name) {
|
| // ----------- S t a t e -------------
|
| - // -- r2 : name
|
| + // -- x2 : name
|
| // -- lr : return address
|
| // -----------------------------------
|
| Label miss;
|
| + const int argc = arguments().immediate();
|
|
|
| GenerateNameCheck(name, &miss);
|
|
|
| - const int argc = arguments().immediate();
|
| -
|
| - // Get the receiver of the function from the stack into r0.
|
| - __ ldr(r0, MemOperand(sp, argc * kPointerSize));
|
| + // Get the receiver of the function from the stack.
|
| + Register receiver = x0;
|
| + __ Peek(receiver, argc * kXRegSizeInBytes);
|
| // Check that the receiver isn't a smi.
|
| - __ JumpIfSmi(r0, &miss);
|
| + __ JumpIfSmi(receiver, &miss);
|
|
|
| // Do the right check and compute the holder register.
|
| - Register reg = CheckPrototypes(object, r0, holder, r1, r3, r4, name, &miss);
|
| - GenerateFastPropertyLoad(masm(), r1, reg, index.is_inobject(holder),
|
| - index.translate(holder), Representation::Tagged());
|
| + Register holder_reg = CheckPrototypes(
|
| + object, receiver, holder, x1, x3, x4, name, &miss);
|
| + Register function = x1;
|
| + GenerateFastPropertyLoad(masm(), function, holder_reg,
|
| + index.is_inobject(holder),
|
| + index.translate(holder),
|
| + Representation::Tagged());
|
|
|
| - GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
|
| + GenerateCallFunction(
|
| + masm(), object, arguments(), &miss, extra_state_, function, receiver, x3);
|
|
|
| // Handle call cache miss.
|
| - __ bind(&miss);
|
| + __ Bind(&miss);
|
| GenerateMissBranch();
|
|
|
| // Return the generated code.
|
| @@ -1666,34 +1724,36 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
|
| Handle<JSFunction> function,
|
| Handle<String> name) {
|
| // ----------- S t a t e -------------
|
| - // -- r2 : name
|
| + // -- x2 : name (Must be preserved on miss.)
|
| // -- lr : return address
|
| - // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
|
| + // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based)
|
| // -- ...
|
| - // -- sp[argc * 4] : receiver
|
| + // -- sp[argc * 8] : receiver
|
| // -----------------------------------
|
|
|
| // If object is not an array, bail out to regular call.
|
| if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
|
|
|
| Label miss;
|
| + Register result = x0;
|
| + const int argc = arguments().immediate();
|
| +
|
| GenerateNameCheck(name, &miss);
|
|
|
| - Register receiver = r1;
|
| // Get the receiver from the stack
|
| - const int argc = arguments().immediate();
|
| - __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
|
| + Register receiver = x1;
|
| + __ Peek(receiver, argc * kPointerSize);
|
|
|
| // Check that the receiver isn't a smi.
|
| __ JumpIfSmi(receiver, &miss);
|
|
|
| // Check that the maps haven't changed.
|
| - CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, r3, r0, r4,
|
| + CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, x3, x0, x4,
|
| name, &miss);
|
|
|
| if (argc == 0) {
|
| // Nothing to do, just return the length.
|
| - __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| + __ Ldr(result, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| __ Drop(argc + 1);
|
| __ Ret();
|
| } else {
|
| @@ -1702,138 +1762,147 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
|
| if (argc == 1) { // Otherwise fall through to call the builtin.
|
| Label attempt_to_grow_elements, with_write_barrier, check_double;
|
|
|
| - Register elements = r6;
|
| - Register end_elements = r5;
|
| + // Note that even though we assign the array length to x0 and the value
|
| + // to push in x4, they are not always live. Both x0 and x4 can be locally
|
| + // reused as scratch registers.
|
| + Register length = x0;
|
| + Register value = x4;
|
| + Register elements = x6;
|
| + Register end_elements = x5;
|
| // Get the elements array of the object.
|
| - __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
|
| + __ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
|
|
|
| // Check that the elements are in fast mode and writable.
|
| __ CheckMap(elements,
|
| - r0,
|
| + x0,
|
| Heap::kFixedArrayMapRootIndex,
|
| &check_double,
|
| DONT_DO_SMI_CHECK);
|
|
|
| - // Get the array's length into r0 and calculate new length.
|
| - __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| - __ add(r0, r0, Operand(Smi::FromInt(argc)));
|
| -
|
| - // Get the elements' length.
|
| - __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
|
| + // Get the array's length and calculate new length.
|
| + __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| + STATIC_ASSERT(kSmiTag == 0);
|
| + __ Add(length, length, Operand(Smi::FromInt(argc)));
|
|
|
| // Check if we could survive without allocation.
|
| - __ cmp(r0, r4);
|
| - __ b(gt, &attempt_to_grow_elements);
|
| + __ Ldr(x4, FieldMemOperand(elements, FixedArray::kLengthOffset));
|
| + __ Cmp(length, x4);
|
| + __ B(gt, &attempt_to_grow_elements);
|
|
|
| // Check if value is a smi.
|
| - __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
|
| - __ JumpIfNotSmi(r4, &with_write_barrier);
|
| + __ Peek(value, (argc - 1) * kPointerSize);
|
| + __ JumpIfNotSmi(value, &with_write_barrier);
|
|
|
| // Save new length.
|
| - __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| + __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
|
|
| // Store the value.
|
| // We may need a register containing the address end_elements below,
|
| // so write back the value in end_elements.
|
| - __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
|
| + __ Add(end_elements, elements,
|
| + Operand::UntagSmiAndScale(length, kPointerSizeLog2));
|
| const int kEndElementsOffset =
|
| FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
|
| - __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
|
| + __ Str(value, MemOperand(end_elements, kEndElementsOffset, PreIndex));
|
|
|
| // Check for a smi.
|
| __ Drop(argc + 1);
|
| __ Ret();
|
|
|
| - __ bind(&check_double);
|
| -
|
| + __ Bind(&check_double);
|
| // Check that the elements are in fast mode and writable.
|
| __ CheckMap(elements,
|
| - r0,
|
| + x0,
|
| Heap::kFixedDoubleArrayMapRootIndex,
|
| &call_builtin,
|
| DONT_DO_SMI_CHECK);
|
|
|
| - // Get the array's length into r0 and calculate new length.
|
| - __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| - __ add(r0, r0, Operand(Smi::FromInt(argc)));
|
| -
|
| - // Get the elements' length.
|
| - __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
|
| + // Get the array's length and calculate new length.
|
| + Register old_length = x5;
|
| + __ Ldr(old_length, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| + STATIC_ASSERT(kSmiTag == 0);
|
| + __ Add(length, old_length, Operand(Smi::FromInt(argc)));
|
|
|
| // Check if we could survive without allocation.
|
| - __ cmp(r0, r4);
|
| - __ b(gt, &call_builtin);
|
| + __ Ldr(x4, FieldMemOperand(elements, FixedArray::kLengthOffset));
|
| + __ Cmp(length, x4);
|
| + __ B(gt, &call_builtin);
|
|
|
| - __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
|
| - __ StoreNumberToDoubleElements(r4, r0, elements, r5,
|
| - &call_builtin, argc * kDoubleSize);
|
| + __ Peek(value, (argc - 1) * kPointerSize);
|
| + __ StoreNumberToDoubleElements(
|
| + value, old_length, elements, x3, d0, d1,
|
| + &call_builtin);
|
|
|
| // Save new length.
|
| - __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| + __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
|
|
| // Check for a smi.
|
| __ Drop(argc + 1);
|
| __ Ret();
|
|
|
| - __ bind(&with_write_barrier);
|
|
|
| - __ ldr(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
| + __ Bind(&with_write_barrier);
|
| + Register map = x3;
|
| + __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
|
|
| if (FLAG_smi_only_arrays && !FLAG_trace_elements_transitions) {
|
| Label fast_object, not_fast_object;
|
| - __ CheckFastObjectElements(r3, r7, ¬_fast_object);
|
| - __ jmp(&fast_object);
|
| + __ CheckFastObjectElements(map, x7, ¬_fast_object);
|
| + __ B(&fast_object);
|
| +
|
| // In case of fast smi-only, convert to fast object, otherwise bail out.
|
| - __ bind(¬_fast_object);
|
| - __ CheckFastSmiElements(r3, r7, &call_builtin);
|
| -
|
| - __ ldr(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
|
| - __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
|
| - __ cmp(r7, ip);
|
| - __ b(eq, &call_builtin);
|
| - // edx: receiver
|
| - // r3: map
|
| + __ Bind(¬_fast_object);
|
| + __ CheckFastSmiElements(map, x7, &call_builtin);
|
| +
|
| + __ Ldr(x7, FieldMemOperand(x4, HeapObject::kMapOffset));
|
| + __ JumpIfRoot(x7, Heap::kHeapNumberMapRootIndex, &call_builtin);
|
| +
|
| Label try_holey_map;
|
| __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
|
| FAST_ELEMENTS,
|
| - r3,
|
| - r7,
|
| + map,
|
| + x7,
|
| &try_holey_map);
|
| - __ mov(r2, receiver);
|
| + // GenerateMapChangeElementsTransition expects the receiver to be in x2.
|
| + // Since from this point we cannot jump on 'miss' it is ok to clobber
|
| + // x2 (which initially contained the called function name).
|
| + __ Mov(x2, receiver);
|
| ElementsTransitionGenerator::
|
| GenerateMapChangeElementsTransition(masm(),
|
| DONT_TRACK_ALLOCATION_SITE,
|
| NULL);
|
| - __ jmp(&fast_object);
|
| + __ B(&fast_object);
|
|
|
| - __ bind(&try_holey_map);
|
| + __ Bind(&try_holey_map);
|
| __ LoadTransitionedArrayMapConditional(FAST_HOLEY_SMI_ELEMENTS,
|
| FAST_HOLEY_ELEMENTS,
|
| - r3,
|
| - r7,
|
| + map,
|
| + x7,
|
| &call_builtin);
|
| - __ mov(r2, receiver);
|
| + // The previous comment about x2 usage also applies here.
|
| + __ Mov(x2, receiver);
|
| ElementsTransitionGenerator::
|
| GenerateMapChangeElementsTransition(masm(),
|
| DONT_TRACK_ALLOCATION_SITE,
|
| NULL);
|
| - __ bind(&fast_object);
|
| + __ Bind(&fast_object);
|
| } else {
|
| - __ CheckFastObjectElements(r3, r3, &call_builtin);
|
| + __ CheckFastObjectElements(map, x3, &call_builtin);
|
| }
|
|
|
| // Save new length.
|
| - __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| + __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
|
|
| // Store the value.
|
| // We may need a register containing the address end_elements below,
|
| // so write back the value in end_elements.
|
| - __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
|
| - __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
|
| + __ Add(end_elements, elements,
|
| + Operand::UntagSmiAndScale(length, kPointerSizeLog2));
|
| + __ Str(x4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
|
|
|
| __ RecordWrite(elements,
|
| end_elements,
|
| - r4,
|
| + x4,
|
| kLRHasNotBeenSaved,
|
| kDontSaveFPRegs,
|
| EMIT_REMEMBERED_SET,
|
| @@ -1841,22 +1910,23 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
|
| __ Drop(argc + 1);
|
| __ Ret();
|
|
|
| - __ bind(&attempt_to_grow_elements);
|
| - // r0: array's length + 1.
|
| - // r4: elements' length.
|
| +
|
| + __ Bind(&attempt_to_grow_elements);
|
| + // When we jump here, x4 must hold the length of elements.
|
| + Register elements_length = x4;
|
|
|
| if (!FLAG_inline_new) {
|
| - __ b(&call_builtin);
|
| + __ B(&call_builtin);
|
| }
|
|
|
| - __ ldr(r2, MemOperand(sp, (argc - 1) * kPointerSize));
|
| + __ Peek(x2, (argc - 1) * kPointerSize);
|
| // Growing elements that are SMI-only requires special handling in case
|
| // the new element is non-Smi. For now, delegate to the builtin.
|
| Label no_fast_elements_check;
|
| - __ JumpIfSmi(r2, &no_fast_elements_check);
|
| - __ ldr(r7, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
| - __ CheckFastObjectElements(r7, r7, &call_builtin);
|
| - __ bind(&no_fast_elements_check);
|
| + __ JumpIfSmi(x2, &no_fast_elements_check);
|
| + __ Ldr(x7, FieldMemOperand(receiver, HeapObject::kMapOffset));
|
| + __ CheckFastObjectElements(x7, x7, &call_builtin);
|
| + __ Bind(&no_fast_elements_check);
|
|
|
| ExternalReference new_space_allocation_top =
|
| ExternalReference::new_space_allocation_top_address(isolate());
|
| @@ -1865,46 +1935,50 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
|
|
|
| const int kAllocationDelta = 4;
|
| // Load top and check if it is the end of elements.
|
| - __ add(end_elements, elements, Operand::PointerOffsetFromSmiKey(r0));
|
| - __ add(end_elements, end_elements, Operand(kEndElementsOffset));
|
| - __ mov(r7, Operand(new_space_allocation_top));
|
| - __ ldr(r3, MemOperand(r7));
|
| - __ cmp(end_elements, r3);
|
| - __ b(ne, &call_builtin);
|
| -
|
| - __ mov(r9, Operand(new_space_allocation_limit));
|
| - __ ldr(r9, MemOperand(r9));
|
| - __ add(r3, r3, Operand(kAllocationDelta * kPointerSize));
|
| - __ cmp(r3, r9);
|
| - __ b(hi, &call_builtin);
|
| + __ Add(end_elements, elements,
|
| + Operand::UntagSmiAndScale(length, kPointerSizeLog2));
|
| + __ Add(end_elements, end_elements, kEndElementsOffset);
|
| + __ Mov(x7, Operand(new_space_allocation_top));
|
| + __ Ldr(x3, MemOperand(x7));
|
| + __ Cmp(end_elements, x3);
|
| + __ B(ne, &call_builtin);
|
| +
|
| + __ Mov(x10, Operand(new_space_allocation_limit));
|
| + __ Ldr(x10, MemOperand(x10));
|
| + __ Add(x3, x3, kAllocationDelta * kPointerSize);
|
| + __ Cmp(x3, x10);
|
| + __ B(hi, &call_builtin);
|
|
|
| // We fit and could grow elements.
|
| // Update new_space_allocation_top.
|
| - __ str(r3, MemOperand(r7));
|
| + __ Str(x3, MemOperand(x7));
|
| // Push the argument.
|
| - __ str(r2, MemOperand(end_elements));
|
| + __ Str(x2, MemOperand(end_elements));
|
| // Fill the rest with holes.
|
| - __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
|
| + __ LoadRoot(x3, Heap::kTheHoleValueRootIndex);
|
| for (int i = 1; i < kAllocationDelta; i++) {
|
| - __ str(r3, MemOperand(end_elements, i * kPointerSize));
|
| + __ Str(x3, MemOperand(end_elements, i * kPointerSize));
|
| }
|
|
|
| // Update elements' and array's sizes.
|
| - __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| - __ add(r4, r4, Operand(Smi::FromInt(kAllocationDelta)));
|
| - __ str(r4, FieldMemOperand(elements, FixedArray::kLengthOffset));
|
| + __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| + __ Add(elements_length,
|
| + elements_length,
|
| + Operand(Smi::FromInt(kAllocationDelta)));
|
| + __ Str(elements_length,
|
| + FieldMemOperand(elements, FixedArray::kLengthOffset));
|
|
|
| // Elements are in new space, so write barrier is not required.
|
| __ Drop(argc + 1);
|
| __ Ret();
|
| }
|
| - __ bind(&call_builtin);
|
| + __ Bind(&call_builtin);
|
| __ TailCallExternalReference(
|
| ExternalReference(Builtins::c_ArrayPush, isolate()), argc + 1, 1);
|
| }
|
|
|
| // Handle call cache miss.
|
| - __ bind(&miss);
|
| + __ Bind(&miss);
|
| GenerateMissBranch();
|
|
|
| // Return the generated code.
|
| @@ -1919,74 +1993,76 @@ Handle<Code> CallStubCompiler::CompileArrayPopCall(
|
| Handle<JSFunction> function,
|
| Handle<String> name) {
|
| // ----------- S t a t e -------------
|
| - // -- r2 : name
|
| + // -- x2 : name
|
| // -- lr : return address
|
| - // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
|
| + // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based)
|
| // -- ...
|
| - // -- sp[argc * 4] : receiver
|
| + // -- sp[argc * 8] : receiver
|
| // -----------------------------------
|
|
|
| // If object is not an array, bail out to regular call.
|
| if (!object->IsJSArray() || !cell.is_null()) return Handle<Code>::null();
|
|
|
| + const int argc = arguments().immediate();
|
| + Register result = x0;
|
| Label miss, return_undefined, call_builtin;
|
| - Register receiver = r1;
|
| - Register elements = r3;
|
| +
|
| GenerateNameCheck(name, &miss);
|
|
|
| // Get the receiver from the stack
|
| - const int argc = arguments().immediate();
|
| - __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
|
| + Register receiver = x1;
|
| + __ Peek(receiver, argc * kPointerSize);
|
| // Check that the receiver isn't a smi.
|
| __ JumpIfSmi(receiver, &miss);
|
|
|
| // Check that the maps haven't changed.
|
| - CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, elements,
|
| - r4, r0, name, &miss);
|
| + CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder,
|
| + x3, x4, x0, name, &miss);
|
|
|
| // Get the elements array of the object.
|
| - __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
|
| + Register elements = x3;
|
| + __ Ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
|
|
|
| // Check that the elements are in fast mode and writable.
|
| __ CheckMap(elements,
|
| - r0,
|
| + x0,
|
| Heap::kFixedArrayMapRootIndex,
|
| &call_builtin,
|
| DONT_DO_SMI_CHECK);
|
|
|
| - // Get the array's length into r4 and calculate new length.
|
| - __ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| - __ sub(r4, r4, Operand(Smi::FromInt(1)), SetCC);
|
| - __ b(lt, &return_undefined);
|
| + // Get the array's length and calculate new length.
|
| + Register length = x4;
|
| + __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| + __ Subs(length, length, Operand(Smi::FromInt(1)));
|
| + __ B(lt, &return_undefined);
|
|
|
| // Get the last element.
|
| - __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
|
| - // We can't address the last element in one operation. Compute the more
|
| - // expensive shift first, and use an offset later on.
|
| - __ add(elements, elements, Operand::PointerOffsetFromSmiKey(r4));
|
| - __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
|
| - __ cmp(r0, r6);
|
| - __ b(eq, &call_builtin);
|
| + __ Add(elements, elements,
|
| + Operand::UntagSmiAndScale(length, kPointerSizeLog2));
|
| + __ Ldr(result, FieldMemOperand(elements, FixedArray::kHeaderSize));
|
| + __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &call_builtin);
|
|
|
| // Set the array's length.
|
| - __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| + __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
|
|
| // Fill with the hole.
|
| - __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize));
|
| + Register hole_value = x6;
|
| + __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
|
| + __ Str(hole_value, FieldMemOperand(elements, FixedArray::kHeaderSize));
|
| __ Drop(argc + 1);
|
| __ Ret();
|
|
|
| - __ bind(&return_undefined);
|
| - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
|
| + __ Bind(&return_undefined);
|
| + __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
|
| __ Drop(argc + 1);
|
| __ Ret();
|
|
|
| - __ bind(&call_builtin);
|
| + __ Bind(&call_builtin);
|
| __ TailCallExternalReference(
|
| ExternalReference(Builtins::c_ArrayPop, isolate()), argc + 1, 1);
|
|
|
| // Handle call cache miss.
|
| - __ bind(&miss);
|
| + __ Bind(&miss);
|
| GenerateMissBranch();
|
|
|
| // Return the generated code.
|
| @@ -2001,11 +2077,11 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
|
| Handle<JSFunction> function,
|
| Handle<String> name) {
|
| // ----------- S t a t e -------------
|
| - // -- r2 : function name
|
| + // -- x2 : function name
|
| // -- lr : return address
|
| - // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
|
| + // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based)
|
| // -- ...
|
| - // -- sp[argc * 4] : receiver
|
| + // -- sp[argc * 8] : receiver
|
| // -----------------------------------
|
|
|
| // If object is not a string, bail out to regular call.
|
| @@ -2025,21 +2101,23 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
|
| GenerateNameCheck(name, &name_miss);
|
|
|
| // Check that the maps starting from the prototype haven't changed.
|
| + Register prototype = x0;
|
| GenerateDirectLoadGlobalFunctionPrototype(masm(),
|
| Context::STRING_FUNCTION_INDEX,
|
| - r0,
|
| + prototype,
|
| &miss);
|
| ASSERT(!object.is_identical_to(holder));
|
| CheckPrototypes(
|
| Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
|
| - r0, holder, r1, r3, r4, name, &miss);
|
| + prototype, holder, x1, x3, x4, name, &miss);
|
|
|
| - Register receiver = r1;
|
| - Register index = r4;
|
| - Register result = r0;
|
| - __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
|
| + Register result = x0;
|
| + Register receiver = x1;
|
| + Register index = x4;
|
| +
|
| + __ Peek(receiver, argc * kPointerSize);
|
| if (argc > 0) {
|
| - __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
|
| + __ Peek(index, (argc - 1) * kPointerSize);
|
| } else {
|
| __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
|
| }
|
| @@ -2059,16 +2137,16 @@ Handle<Code> CallStubCompiler::CompileStringCharCodeAtCall(
|
| generator.GenerateSlow(masm(), call_helper);
|
|
|
| if (index_out_of_range.is_linked()) {
|
| - __ bind(&index_out_of_range);
|
| - __ LoadRoot(r0, Heap::kNanValueRootIndex);
|
| + __ Bind(&index_out_of_range);
|
| + __ LoadRoot(result, Heap::kNanValueRootIndex);
|
| __ Drop(argc + 1);
|
| __ Ret();
|
| }
|
|
|
| - __ bind(&miss);
|
| - // Restore function name in r2.
|
| - __ Move(r2, name);
|
| - __ bind(&name_miss);
|
| + __ Bind(&miss);
|
| + // Restore function name in x2.
|
| + __ Mov(x2, Operand(name));
|
| + __ Bind(&name_miss);
|
| GenerateMissBranch();
|
|
|
| // Return the generated code.
|
| @@ -2083,11 +2161,11 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
|
| Handle<JSFunction> function,
|
| Handle<String> name) {
|
| // ----------- S t a t e -------------
|
| - // -- r2 : function name
|
| + // -- x2 : function name
|
| // -- lr : return address
|
| - // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
|
| + // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based)
|
| // -- ...
|
| - // -- sp[argc * 4] : receiver
|
| + // -- sp[argc * 8] : receiver
|
| // -----------------------------------
|
|
|
| // If object is not a string, bail out to regular call.
|
| @@ -2098,6 +2176,7 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
|
| Label name_miss;
|
| Label index_out_of_range;
|
| Label* index_out_of_range_label = &index_out_of_range;
|
| +
|
| if (kind_ == Code::CALL_IC &&
|
| (CallICBase::StringStubState::decode(extra_state_) ==
|
| DEFAULT_STRING_STUB)) {
|
| @@ -2106,22 +2185,24 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
|
| GenerateNameCheck(name, &name_miss);
|
|
|
| // Check that the maps starting from the prototype haven't changed.
|
| + Register prototype = x0;
|
| GenerateDirectLoadGlobalFunctionPrototype(masm(),
|
| Context::STRING_FUNCTION_INDEX,
|
| - r0,
|
| + prototype,
|
| &miss);
|
| ASSERT(!object.is_identical_to(holder));
|
| CheckPrototypes(
|
| Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
|
| - r0, holder, r1, r3, r4, name, &miss);
|
| + prototype, holder, x1, x3, x4, name, &miss);
|
| +
|
| + Register receiver = x0;
|
| + Register index = x4;
|
| + Register scratch = x3;
|
| + Register result = x0;
|
|
|
| - Register receiver = r0;
|
| - Register index = r4;
|
| - Register scratch = r3;
|
| - Register result = r0;
|
| - __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
|
| + __ Peek(receiver, argc * kPointerSize);
|
| if (argc > 0) {
|
| - __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
|
| + __ Peek(index, (argc - 1) * kPointerSize);
|
| } else {
|
| __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
|
| }
|
| @@ -2142,16 +2223,16 @@ Handle<Code> CallStubCompiler::CompileStringCharAtCall(
|
| generator.GenerateSlow(masm(), call_helper);
|
|
|
| if (index_out_of_range.is_linked()) {
|
| - __ bind(&index_out_of_range);
|
| - __ LoadRoot(r0, Heap::kempty_stringRootIndex);
|
| + __ Bind(&index_out_of_range);
|
| + __ LoadRoot(result, Heap::kempty_stringRootIndex);
|
| __ Drop(argc + 1);
|
| __ Ret();
|
| }
|
|
|
| - __ bind(&miss);
|
| - // Restore function name in r2.
|
| - __ Move(r2, name);
|
| - __ bind(&name_miss);
|
| + __ Bind(&miss);
|
| + // Restore function name in x2.
|
| + __ Mov(x2, Operand(name));
|
| + __ Bind(&name_miss);
|
| GenerateMissBranch();
|
|
|
| // Return the generated code.
|
| @@ -2166,13 +2247,12 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
|
| Handle<JSFunction> function,
|
| Handle<String> name) {
|
| // ----------- S t a t e -------------
|
| - // -- r2 : function name
|
| + // -- x2 : function name
|
| // -- lr : return address
|
| - // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
|
| + // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based)
|
| // -- ...
|
| - // -- sp[argc * 4] : receiver
|
| + // -- sp[argc * 8] : receiver
|
| // -----------------------------------
|
| -
|
| const int argc = arguments().immediate();
|
|
|
| // If the object is not a JSObject or we got an unexpected number of
|
| @@ -2183,12 +2263,12 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
|
| GenerateNameCheck(name, &miss);
|
|
|
| if (cell.is_null()) {
|
| - __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
|
| -
|
| - __ JumpIfSmi(r1, &miss);
|
| + Register receiver = x1;
|
| + __ Peek(receiver, kPointerSize);
|
| + __ JumpIfSmi(receiver, &miss);
|
|
|
| - CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
|
| - name, &miss);
|
| + CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder,
|
| + x0, x3, x4, name, &miss);
|
| } else {
|
| ASSERT(cell->value() == *function);
|
| GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
|
| @@ -2197,17 +2277,18 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
|
| }
|
|
|
| // Load the char code argument.
|
| - Register code = r1;
|
| - __ ldr(code, MemOperand(sp, 0 * kPointerSize));
|
| + Register code = x1;
|
| + __ Peek(code, 0);
|
|
|
| // Check the code is a smi.
|
| Label slow;
|
| __ JumpIfNotSmi(code, &slow);
|
|
|
| - // Convert the smi code to uint16.
|
| - __ and_(code, code, Operand(Smi::FromInt(0xffff)));
|
| + // Make sure the smi code is a uint16.
|
| + __ And(code, code, Operand(Smi::FromInt(0xffff)));
|
|
|
| - StringCharFromCodeGenerator generator(code, r0);
|
| + Register result = x0;
|
| + StringCharFromCodeGenerator generator(code, result);
|
| generator.GenerateFast(masm());
|
| __ Drop(argc + 1);
|
| __ Ret();
|
| @@ -2217,13 +2298,12 @@ Handle<Code> CallStubCompiler::CompileStringFromCharCodeCall(
|
|
|
| // Tail call the full function. We do not have to patch the receiver
|
| // because the function makes no use of it.
|
| - __ bind(&slow);
|
| + __ Bind(&slow);
|
| ParameterCount expected(function);
|
| __ InvokeFunction(function, expected, arguments(),
|
| JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
|
|
|
| - __ bind(&miss);
|
| - // r2: function name.
|
| + __ Bind(&miss);
|
| GenerateMissBranch();
|
|
|
| // Return the generated code.
|
| @@ -2238,100 +2318,107 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
|
| Handle<JSFunction> function,
|
| Handle<String> name) {
|
| // ----------- S t a t e -------------
|
| - // -- r2 : function name
|
| + // -- x2 : function name (must be preserved on miss)
|
| // -- lr : return address
|
| - // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
|
| + // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based)
|
| // -- ...
|
| - // -- sp[argc * 4] : receiver
|
| + // -- sp[argc * 8] : receiver
|
| // -----------------------------------
|
| -
|
| + Label miss;
|
| + Label return_result;
|
| + Register result = x0;
|
| const int argc = arguments().immediate();
|
| +
|
| // If the object is not a JSObject or we got an unexpected number of
|
| // arguments, bail out to the regular call.
|
| if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
|
|
|
| - Label miss, slow;
|
| GenerateNameCheck(name, &miss);
|
|
|
| if (cell.is_null()) {
|
| - __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
|
| - __ JumpIfSmi(r1, &miss);
|
| - CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
|
| - name, &miss);
|
| + Register receiver = x1;
|
| + __ Peek(receiver, kPointerSize);
|
| + __ JumpIfSmi(receiver, &miss);
|
| + CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder,
|
| + x0, x3, x4, name, &miss);
|
| } else {
|
| ASSERT(cell->value() == *function);
|
| - GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
|
| - &miss);
|
| + GenerateGlobalReceiverCheck(
|
| + Handle<JSObject>::cast(object), holder, name, &miss);
|
| GenerateLoadFunctionFromCell(cell, function, &miss);
|
| }
|
|
|
| - // Load the (only) argument into r0.
|
| - __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
|
| + // Load the (only) argument.
|
| + Register arg = x0;
|
| + __ Peek(arg, 0);
|
|
|
| // If the argument is a smi, just return.
|
| - __ SmiTst(r0);
|
| - __ Drop(argc + 1, eq);
|
| - __ Ret(eq);
|
| -
|
| - __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
|
| -
|
| - Label smi_check, just_return;
|
| -
|
| - // Load the HeapNumber value.
|
| - // We will need access to the value in the core registers, so we load it
|
| - // with ldrd and move it to the fpu. It also spares a sub instruction for
|
| - // updating the HeapNumber value address, as vldr expects a multiple
|
| - // of 4 offset.
|
| - __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset));
|
| - __ vmov(d1, r4, r5);
|
| -
|
| - // Check for NaN, Infinities and -0.
|
| - // They are invariant through a Math.Floor call, so just
|
| - // return the original argument.
|
| - __ Sbfx(r3, r5, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
|
| - __ cmp(r3, Operand(-1));
|
| - __ b(eq, &just_return);
|
| - __ eor(r3, r5, Operand(0x80000000u));
|
| - __ orr(r3, r3, r4, SetCC);
|
| - __ b(eq, &just_return);
|
| - // Test for values that can be exactly represented as a
|
| - // signed 32-bit integer.
|
| - __ TryDoubleToInt32Exact(r0, d1, d2);
|
| - // If exact, check smi
|
| - __ b(eq, &smi_check);
|
| - __ cmp(r5, Operand(0));
|
| -
|
| - // If input is in ]+0, +inf[, the cmp has cleared overflow and negative
|
| - // (V=0 and N=0), the two following instructions won't execute and
|
| - // we fall through smi_check to check if the result can fit into a smi.
|
| -
|
| - // If input is in ]-inf, -0[, sub one and, go to slow if we have
|
| - // an overflow. Else we fall through smi check.
|
| - // Hint: if x is a negative, non integer number,
|
| - // floor(x) <=> round_to_zero(x) - 1.
|
| - __ sub(r0, r0, Operand(1), SetCC, mi);
|
| - __ b(vs, &slow);
|
| -
|
| - __ bind(&smi_check);
|
| - // Check if the result can fit into an smi. If we had an overflow,
|
| - // the result is either 0x80000000 or 0x7FFFFFFF and won't fit into an smi.
|
| - // If result doesn't fit into an smi, branch to slow.
|
| - __ SmiTag(r0, SetCC);
|
| - __ b(vs, &slow);
|
| -
|
| - __ bind(&just_return);
|
| + __ JumpIfSmi(arg, &return_result);
|
| +
|
| + // Load the HeapNumber.
|
| + Label slow;
|
| + __ CheckMap(arg, x1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
|
| +
|
| + FPRegister double_value = d0;
|
| + __ Ldr(double_value, FieldMemOperand(arg, HeapNumber::kValueOffset));
|
| +
|
| + // Try to do the conversion and check for overflow.
|
| + Label zero_or_overflow;
|
| + Register int_value = x3;
|
| + __ Fcvtms(int_value, double_value);
|
| + __ Cmp(int_value, Smi::kMaxValue);
|
| + __ Ccmp(int_value, Smi::kMinValue, NFlag, le);
|
| + // If the second comparison is skipped, we will have N=1 and V=0, this will
|
| + // force the following "lt" condition to be true.
|
| + __ B(lt, &zero_or_overflow);
|
| +
|
| + Label smi_result;
|
| + __ Cbnz(int_value, &smi_result);
|
| +
|
| + __ Bind(&zero_or_overflow);
|
| + Register value = x1;
|
| + __ Fmov(value, double_value);
|
| +
|
| + // Extract the exponent.
|
| + // TODO(all): The constants in the HeapNumber class assume that the double
|
| + // is stored in two 32-bit registers. They should assume offset within a
|
| + // 64-bit register on 64-bit systems. However if we want to change that we
|
| + // have to make some changes in x64 back-end.
|
| + static const int exponent_shift =
|
| + CountTrailingZeros(Double::kExponentMask, 64);
|
| + static const int exponent_width = CountSetBits(Double::kExponentMask, 64);
|
| + Register exponent = x3;
|
| + __ Ubfx(exponent, value, exponent_shift, exponent_width);
|
| +
|
| + // Check for NaN, Infinity, and -Infinity. They are invariant through
|
| + // a Math.Floor call, so just return the original argument.
|
| + __ Cmp(exponent, Double::kExponentMask >> exponent_shift);
|
| + __ B(&return_result, eq);
|
| +
|
| + // If the exponent is null, the number was 0 or -0. Otherwise the result
|
| + // can't fit in a smi and we go to the slow path.
|
| + __ Cbnz(exponent, &slow);
|
| +
|
| + // Check for -0.
|
| + // If our HeapNumber is negative it was -0, so we just return it.
|
| + __ TestAndBranchIfAnySet(value, Double::kSignMask, &return_result);
|
| +
|
| + __ Bind(&smi_result);
|
| + // Tag and return the result.
|
| + __ SmiTag(result, int_value);
|
| +
|
| + __ Bind(&return_result);
|
| __ Drop(argc + 1);
|
| __ Ret();
|
|
|
| - __ bind(&slow);
|
| + __ Bind(&slow);
|
| // Tail call the full function. We do not have to patch the receiver
|
| // because the function makes no use of it.
|
| ParameterCount expected(function);
|
| __ InvokeFunction(function, expected, arguments(),
|
| JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
|
|
|
| - __ bind(&miss);
|
| - // r2: function name.
|
| + __ Bind(&miss);
|
| GenerateMissBranch();
|
|
|
| // Return the generated code.
|
| @@ -2346,25 +2433,29 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
|
| Handle<JSFunction> function,
|
| Handle<String> name) {
|
| // ----------- S t a t e -------------
|
| - // -- r2 : function name
|
| + // -- x2 : function name
|
| // -- lr : return address
|
| - // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based)
|
| + // -- sp[(argc - n - 1) * 8] : arg[n] (zero-based)
|
| // -- ...
|
| - // -- sp[argc * 4] : receiver
|
| + // -- sp[argc * 8] : receiver
|
| // -----------------------------------
|
|
|
| const int argc = arguments().immediate();
|
| +
|
| // If the object is not a JSObject or we got an unexpected number of
|
| // arguments, bail out to the regular call.
|
| if (!object->IsJSObject() || argc != 1) return Handle<Code>::null();
|
|
|
| - Label miss;
|
| + Register result = x0;
|
| + Label miss, slow;
|
| GenerateNameCheck(name, &miss);
|
| +
|
| if (cell.is_null()) {
|
| - __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
|
| - __ JumpIfSmi(r1, &miss);
|
| - CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
|
| - name, &miss);
|
| + Register receiver = x1;
|
| + __ Peek(receiver, kPointerSize);
|
| + __ JumpIfSmi(receiver, &miss);
|
| + CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder,
|
| + x0, x3, x4, name, &miss);
|
| } else {
|
| ASSERT(cell->value() == *function);
|
| GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name,
|
| @@ -2372,64 +2463,48 @@ Handle<Code> CallStubCompiler::CompileMathAbsCall(
|
| GenerateLoadFunctionFromCell(cell, function, &miss);
|
| }
|
|
|
| - // Load the (only) argument into r0.
|
| - __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
|
| + // Load the (only) argument.
|
| + Register arg = x0;
|
| + __ Peek(arg, 0);
|
|
|
| // Check if the argument is a smi.
|
| Label not_smi;
|
| - __ JumpIfNotSmi(r0, ¬_smi);
|
| -
|
| - // Do bitwise not or do nothing depending on the sign of the
|
| - // argument.
|
| - __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1));
|
| -
|
| - // Add 1 or do nothing depending on the sign of the argument.
|
| - __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC);
|
| -
|
| - // If the result is still negative, go to the slow case.
|
| - // This only happens for the most negative smi.
|
| - Label slow;
|
| - __ b(mi, &slow);
|
| + __ JumpIfNotSmi(arg, ¬_smi);
|
|
|
| + __ SmiAbs(arg, x1, &slow);
|
| // Smi case done.
|
| __ Drop(argc + 1);
|
| __ Ret();
|
|
|
| - // Check if the argument is a heap number and load its exponent and
|
| - // sign.
|
| - __ bind(¬_smi);
|
| - __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
|
| - __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
|
| + // Check if the argument is a heap number and load its value.
|
| + __ Bind(¬_smi);
|
| + __ CheckMap(
|
| + arg, x1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK);
|
| + Register value = x1;
|
| + __ Ldr(value, FieldMemOperand(arg, HeapNumber::kValueOffset));
|
|
|
| - // Check the sign of the argument. If the argument is positive,
|
| - // just return it.
|
| + // Check the sign of the argument. If the argument is positive, return it.
|
| Label negative_sign;
|
| - __ tst(r1, Operand(HeapNumber::kSignMask));
|
| - __ b(ne, &negative_sign);
|
| + __ TestAndBranchIfAnySet(value, Double::kSignMask, &negative_sign);
|
| __ Drop(argc + 1);
|
| __ Ret();
|
|
|
| - // If the argument is negative, clear the sign, and return a new
|
| - // number.
|
| - __ bind(&negative_sign);
|
| - __ eor(r1, r1, Operand(HeapNumber::kSignMask));
|
| - __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
|
| - __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
|
| - __ AllocateHeapNumber(r0, r4, r5, r6, &slow);
|
| - __ str(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset));
|
| - __ str(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
|
| + __ Bind(&negative_sign);
|
| + FPRegister double_value = d0;
|
| + __ Fmov(double_value, value);
|
| + __ Fabs(double_value, double_value);
|
| + __ AllocateHeapNumberWithValue(result, double_value, &slow, x1, x3);
|
| __ Drop(argc + 1);
|
| __ Ret();
|
|
|
| // Tail call the full function. We do not have to patch the receiver
|
| // because the function makes no use of it.
|
| - __ bind(&slow);
|
| + __ Bind(&slow);
|
| ParameterCount expected(function);
|
| __ InvokeFunction(function, expected, arguments(),
|
| JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD);
|
|
|
| - __ bind(&miss);
|
| - // r2: function name.
|
| + __ Bind(&miss);
|
| GenerateMissBranch();
|
|
|
| // Return the generated code.
|
| @@ -2459,28 +2534,30 @@ Handle<Code> CallStubCompiler::CompileFastApiCall(
|
| Label miss, miss_before_stack_reserved;
|
| GenerateNameCheck(name, &miss_before_stack_reserved);
|
|
|
| - // Get the receiver from the stack.
|
| const int argc = arguments().immediate();
|
| - __ ldr(r1, MemOperand(sp, argc * kPointerSize));
|
| +
|
| + // Get the receiver from the stack.
|
| + Register receiver = x1;
|
| + __ Peek(receiver, argc * kPointerSize);
|
|
|
| // Check that the receiver isn't a smi.
|
| - __ JumpIfSmi(r1, &miss_before_stack_reserved);
|
| + __ JumpIfSmi(receiver, &miss_before_stack_reserved);
|
|
|
| - __ IncrementCounter(counters->call_const(), 1, r0, r3);
|
| - __ IncrementCounter(counters->call_const_fast_api(), 1, r0, r3);
|
| + __ IncrementCounter(counters->call_const(), 1, x0, x3);
|
| + __ IncrementCounter(counters->call_const_fast_api(), 1, x0, x3);
|
|
|
| - ReserveSpaceForFastApiCall(masm(), r0);
|
| + ReserveSpaceForFastApiCall(masm(), x0);
|
|
|
| // Check that the maps haven't changed and find a Holder as a side effect.
|
| - CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, name,
|
| - depth, &miss);
|
| + CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder, x0, x3, x4,
|
| + name, depth, &miss);
|
|
|
| GenerateFastApiDirectCall(masm(), optimization, argc);
|
|
|
| - __ bind(&miss);
|
| + __ Bind(&miss);
|
| FreeSpaceForFastApiCall(masm());
|
|
|
| - __ bind(&miss_before_stack_reserved);
|
| + __ Bind(&miss_before_stack_reserved);
|
| GenerateMissBranch();
|
|
|
| // Return the generated code.
|
| @@ -2494,103 +2571,104 @@ void CallStubCompiler::CompileHandlerFrontend(Handle<Object> object,
|
| CheckType check,
|
| Label* success) {
|
| // ----------- S t a t e -------------
|
| - // -- r2 : name
|
| + // -- x2 : name
|
| // -- lr : return address
|
| // -----------------------------------
|
| Label miss;
|
| GenerateNameCheck(name, &miss);
|
|
|
| - // Get the receiver from the stack
|
| + // Get the receiver from the stack.
|
| const int argc = arguments().immediate();
|
| - __ ldr(r1, MemOperand(sp, argc * kPointerSize));
|
| + Register receiver = x1;
|
| + __ Peek(receiver, argc * kPointerSize);
|
|
|
| // Check that the receiver isn't a smi.
|
| if (check != NUMBER_CHECK) {
|
| - __ JumpIfSmi(r1, &miss);
|
| + __ JumpIfSmi(receiver, &miss);
|
| }
|
|
|
| // Make sure that it's okay not to patch the on stack receiver
|
| // unless we're doing a receiver map check.
|
| ASSERT(!object->IsGlobalObject() || check == RECEIVER_MAP_CHECK);
|
| +
|
| switch (check) {
|
| - case RECEIVER_MAP_CHECK:
|
| - __ IncrementCounter(isolate()->counters()->call_const(), 1, r0, r3);
|
| + case RECEIVER_MAP_CHECK: {
|
| + __ IncrementCounter(isolate()->counters()->call_const(), 1, x0, x3);
|
|
|
| // Check that the maps haven't changed.
|
| - CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4,
|
| - name, &miss);
|
| + CheckPrototypes(Handle<JSObject>::cast(object), receiver, holder,
|
| + x0, x3, x4, name, &miss);
|
|
|
| - // Patch the receiver on the stack with the global proxy if
|
| - // necessary.
|
| + // Patch the receiver on the stack with the global proxy if necessary.
|
| if (object->IsGlobalObject()) {
|
| - __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset));
|
| - __ str(r3, MemOperand(sp, argc * kPointerSize));
|
| + __ Ldr(x3,
|
| + FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset));
|
| + __ Poke(x3, argc * kPointerSize);
|
| }
|
| break;
|
| -
|
| - case STRING_CHECK:
|
| + }
|
| + case STRING_CHECK: {
|
| // Check that the object is a string.
|
| - __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE);
|
| - __ b(ge, &miss);
|
| + __ JumpIfObjectType(receiver, x3, x3, FIRST_NONSTRING_TYPE, &miss, ge);
|
| // Check that the maps starting from the prototype haven't changed.
|
| + Register prototype = x0;
|
| GenerateDirectLoadGlobalFunctionPrototype(
|
| - masm(), Context::STRING_FUNCTION_INDEX, r0, &miss);
|
| + masm(), Context::STRING_FUNCTION_INDEX, prototype, &miss);
|
| CheckPrototypes(
|
| Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
|
| - r0, holder, r3, r1, r4, name, &miss);
|
| + prototype, holder, x3, x1, x4, name, &miss);
|
| break;
|
| -
|
| - case SYMBOL_CHECK:
|
| + }
|
| + case SYMBOL_CHECK: {
|
| // Check that the object is a symbol.
|
| - __ CompareObjectType(r1, r1, r3, SYMBOL_TYPE);
|
| - __ b(ne, &miss);
|
| + __ JumpIfNotObjectType(receiver, x3, x3, SYMBOL_TYPE, &miss);
|
| // Check that the maps starting from the prototype haven't changed.
|
| + Register prototype = x0;
|
| GenerateDirectLoadGlobalFunctionPrototype(
|
| - masm(), Context::SYMBOL_FUNCTION_INDEX, r0, &miss);
|
| + masm(), Context::SYMBOL_FUNCTION_INDEX, prototype, &miss);
|
| CheckPrototypes(
|
| Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
|
| - r0, holder, r3, r1, r4, name, &miss);
|
| + prototype, holder, x3, x1, x4, name, &miss);
|
| break;
|
| -
|
| + }
|
| case NUMBER_CHECK: {
|
| Label fast;
|
| // Check that the object is a smi or a heap number.
|
| - __ JumpIfSmi(r1, &fast);
|
| - __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE);
|
| - __ b(ne, &miss);
|
| - __ bind(&fast);
|
| + __ JumpIfSmi(receiver, &fast);
|
| + __ JumpIfNotObjectType(receiver, x0, x0, HEAP_NUMBER_TYPE, &miss);
|
| +
|
| + __ Bind(&fast);
|
| // Check that the maps starting from the prototype haven't changed.
|
| + Register prototype = x0;
|
| GenerateDirectLoadGlobalFunctionPrototype(
|
| - masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss);
|
| + masm(), Context::NUMBER_FUNCTION_INDEX, prototype, &miss);
|
| CheckPrototypes(
|
| Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
|
| - r0, holder, r3, r1, r4, name, &miss);
|
| + prototype, holder, x3, x1, x4, name, &miss);
|
| break;
|
| }
|
| case BOOLEAN_CHECK: {
|
| Label fast;
|
| // Check that the object is a boolean.
|
| - __ LoadRoot(ip, Heap::kTrueValueRootIndex);
|
| - __ cmp(r1, ip);
|
| - __ b(eq, &fast);
|
| - __ LoadRoot(ip, Heap::kFalseValueRootIndex);
|
| - __ cmp(r1, ip);
|
| - __ b(ne, &miss);
|
| - __ bind(&fast);
|
| + __ JumpIfRoot(receiver, Heap::kTrueValueRootIndex, &fast);
|
| + __ JumpIfNotRoot(receiver, Heap::kFalseValueRootIndex, &miss);
|
| +
|
| + __ Bind(&fast);
|
| // Check that the maps starting from the prototype haven't changed.
|
| + Register prototype = x0;
|
| GenerateDirectLoadGlobalFunctionPrototype(
|
| - masm(), Context::BOOLEAN_FUNCTION_INDEX, r0, &miss);
|
| + masm(), Context::BOOLEAN_FUNCTION_INDEX, prototype, &miss);
|
| CheckPrototypes(
|
| Handle<JSObject>(JSObject::cast(object->GetPrototype(isolate()))),
|
| - r0, holder, r3, r1, r4, name, &miss);
|
| + prototype, holder, x3, x1, x4, name, &miss);
|
| break;
|
| }
|
| }
|
|
|
| - __ b(success);
|
| + __ B(success);
|
|
|
| // Handle call cache miss.
|
| - __ bind(&miss);
|
| + __ Bind(&miss);
|
| GenerateMissBranch();
|
| }
|
|
|
| @@ -2622,7 +2700,7 @@ Handle<Code> CallStubCompiler::CompileCallConstant(
|
| Label success;
|
|
|
| CompileHandlerFrontend(object, holder, name, check, &success);
|
| - __ bind(&success);
|
| + __ Bind(&success);
|
| CompileHandlerBackend(function);
|
|
|
| // Return the generated code.
|
| @@ -2634,33 +2712,39 @@ Handle<Code> CallStubCompiler::CompileCallInterceptor(Handle<JSObject> object,
|
| Handle<JSObject> holder,
|
| Handle<Name> name) {
|
| // ----------- S t a t e -------------
|
| - // -- r2 : name
|
| + // -- x2 : name
|
| // -- lr : return address
|
| // -----------------------------------
|
| Label miss;
|
| + Register name_reg = x2;
|
| +
|
| GenerateNameCheck(name, &miss);
|
|
|
| - // Get the number of arguments.
|
| const int argc = arguments().immediate();
|
| LookupResult lookup(isolate());
|
| LookupPostInterceptor(holder, name, &lookup);
|
|
|
| // Get the receiver from the stack.
|
| - __ ldr(r1, MemOperand(sp, argc * kPointerSize));
|
| + Register receiver = x5;
|
| + __ Peek(receiver, argc * kPointerSize);
|
|
|
| - CallInterceptorCompiler compiler(this, arguments(), r2, extra_state_);
|
| - compiler.Compile(masm(), object, holder, name, &lookup, r1, r3, r4, r0,
|
| - &miss);
|
| + CallInterceptorCompiler compiler(this, arguments(), name_reg, extra_state_);
|
| + compiler.Compile(
|
| + masm(), object, holder, name, &lookup, receiver, x3, x4, x0, &miss);
|
| +
|
| + // Move returned value, the function to call, to x1 (this is required by
|
| + // GenerateCallFunction).
|
| + Register function = x1;
|
| + __ Mov(function, x0);
|
|
|
| - // Move returned value, the function to call, to r1.
|
| - __ mov(r1, r0);
|
| // Restore receiver.
|
| - __ ldr(r0, MemOperand(sp, argc * kPointerSize));
|
| + __ Peek(receiver, argc * kPointerSize);
|
|
|
| - GenerateCallFunction(masm(), object, arguments(), &miss, extra_state_);
|
| + GenerateCallFunction(
|
| + masm(), object, arguments(), &miss, extra_state_, function, receiver, x3);
|
|
|
| // Handle call cache miss.
|
| - __ bind(&miss);
|
| + __ Bind(&miss);
|
| GenerateMissBranch();
|
|
|
| // Return the generated code.
|
| @@ -2675,7 +2759,7 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
|
| Handle<JSFunction> function,
|
| Handle<Name> name) {
|
| // ----------- S t a t e -------------
|
| - // -- r2 : name
|
| + // -- x2 : name
|
| // -- lr : return address
|
| // -----------------------------------
|
| if (HasCustomCallGenerator(function)) {
|
| @@ -2690,22 +2774,26 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
|
|
|
| // Get the number of arguments.
|
| const int argc = arguments().immediate();
|
| +
|
| GenerateGlobalReceiverCheck(object, holder, name, &miss);
|
| GenerateLoadFunctionFromCell(cell, function, &miss);
|
| + // After these two calls the receiver is left in x0 and the function in x1.
|
| + Register receiver_reg = x0;
|
| + Register function_reg = x1;
|
|
|
| - // Patch the receiver on the stack with the global proxy if
|
| - // necessary.
|
| + // Patch the receiver on the stack with the global proxy if necessary.
|
| if (object->IsGlobalObject()) {
|
| - __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
|
| - __ str(r3, MemOperand(sp, argc * kPointerSize));
|
| + __ Ldr(x3,
|
| + FieldMemOperand(receiver_reg, GlobalObject::kGlobalReceiverOffset));
|
| + __ Poke(x3, argc * kPointerSize);
|
| }
|
|
|
| - // Set up the context (function already in r1).
|
| - __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
|
| + // Set up the context.
|
| + __ Ldr(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
|
|
|
| // Jump to the cached code (tail call).
|
| Counters* counters = isolate()->counters();
|
| - __ IncrementCounter(counters->call_global_inline(), 1, r3, r4);
|
| + __ IncrementCounter(counters->call_global_inline(), 1, x3, x4);
|
| ParameterCount expected(function->shared()->formal_parameter_count());
|
| CallKind call_kind = CallICBase::Contextual::decode(extra_state_)
|
| ? CALL_AS_FUNCTION
|
| @@ -2713,13 +2801,13 @@ Handle<Code> CallStubCompiler::CompileCallGlobal(
|
| // We call indirectly through the code field in the function to
|
| // allow recompilation to take effect without changing any of the
|
| // call sites.
|
| - __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
|
| - __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION,
|
| - NullCallWrapper(), call_kind);
|
| + __ Ldr(x3, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
|
| + __ InvokeCode(
|
| + x3, expected, arguments(), JUMP_FUNCTION, NullCallWrapper(), call_kind);
|
|
|
| // Handle call cache miss.
|
| - __ bind(&miss);
|
| - __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3);
|
| + __ Bind(&miss);
|
| + __ IncrementCounter(counters->call_global_inline_miss(), 1, x1, x3);
|
| GenerateMissBranch();
|
|
|
| // Return the generated code.
|
| @@ -2733,6 +2821,9 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
|
| Handle<JSObject> holder,
|
| Handle<ExecutableAccessorInfo> callback) {
|
| Label miss;
|
| +
|
| + ASM_LOCATION("StoreStubCompiler::CompileStoreCallback");
|
| +
|
| // Check that the maps haven't changed.
|
| __ JumpIfSmi(receiver(), &miss);
|
| CheckPrototypes(object, receiver(), holder,
|
| @@ -2741,9 +2832,8 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
|
| // Stub never generated for non-global objects that require access checks.
|
| ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
|
|
|
| - __ push(receiver()); // receiver
|
| - __ mov(ip, Operand(callback)); // callback info
|
| - __ Push(ip, this->name(), value());
|
| + __ Mov(scratch1(), Operand(callback));
|
| + __ Push(receiver(), scratch1(), this->name(), value());
|
|
|
| // Do tail-call to the runtime system.
|
| ExternalReference store_callback_property =
|
| @@ -2751,7 +2841,7 @@ Handle<Code> StoreStubCompiler::CompileStoreCallback(
|
| __ TailCallExternalReference(store_callback_property, 4, 1);
|
|
|
| // Handle store cache miss.
|
| - __ bind(&miss);
|
| + __ Bind(&miss);
|
| TailCallBuiltin(masm(), MissBuiltin(kind()));
|
|
|
| // Return the generated code.
|
| @@ -2767,20 +2857,24 @@ void StoreStubCompiler::GenerateStoreViaSetter(
|
| MacroAssembler* masm,
|
| Handle<JSFunction> setter) {
|
| // ----------- S t a t e -------------
|
| - // -- r0 : value
|
| - // -- r1 : receiver
|
| - // -- r2 : name
|
| + // -- x0 : value
|
| + // -- x1 : receiver
|
| + // -- x2 : name
|
| // -- lr : return address
|
| // -----------------------------------
|
| + Register value_reg = x0;
|
| + Register receiver_reg = x1;
|
| + Label miss;
|
| +
|
| {
|
| FrameScope scope(masm, StackFrame::INTERNAL);
|
|
|
| // Save value register, so we can restore it later.
|
| - __ push(r0);
|
| + __ Push(value_reg);
|
|
|
| if (!setter.is_null()) {
|
| // Call the JavaScript setter with receiver and value on the stack.
|
| - __ Push(r1, r0);
|
| + __ Push(receiver_reg, value_reg);
|
| ParameterCount actual(1);
|
| ParameterCount expected(setter);
|
| __ InvokeFunction(setter, expected, actual,
|
| @@ -2792,10 +2886,10 @@ void StoreStubCompiler::GenerateStoreViaSetter(
|
| }
|
|
|
| // We have to return the passed value, not the return value of the setter.
|
| - __ pop(r0);
|
| + __ Pop(value_reg);
|
|
|
| // Restore context register.
|
| - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| + __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| }
|
| __ Ret();
|
| }
|
| @@ -2810,6 +2904,8 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
|
| Handle<Name> name) {
|
| Label miss;
|
|
|
| + ASM_LOCATION("StoreStubCompiler::CompileStoreInterceptor");
|
| +
|
| // Check that the map of the object hasn't changed.
|
| __ CheckMap(receiver(), scratch1(), Handle<Map>(object->map()), &miss,
|
| DO_SMI_CHECK);
|
| @@ -2819,14 +2915,11 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
|
| __ CheckAccessGlobalProxy(receiver(), scratch1(), &miss);
|
| }
|
|
|
| - // Stub is never generated for non-global objects that require access
|
| - // checks.
|
| + // Stub is never generated for non-global objects that require access checks.
|
| ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
|
|
|
| - __ Push(receiver(), this->name(), value());
|
| -
|
| - __ mov(scratch1(), Operand(Smi::FromInt(strict_mode())));
|
| - __ push(scratch1()); // strict mode
|
| + __ Mov(scratch1(), Operand(Smi::FromInt(strict_mode())));
|
| + __ Push(receiver(), this->name(), value(), scratch1());
|
|
|
| // Do tail-call to the runtime system.
|
| ExternalReference store_ic_property =
|
| @@ -2834,7 +2927,7 @@ Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
|
| __ TailCallExternalReference(store_ic_property, 4, 1);
|
|
|
| // Handle store cache miss.
|
| - __ bind(&miss);
|
| + __ Bind(&miss);
|
| TailCallBuiltin(masm(), MissBuiltin(kind()));
|
|
|
| // Return the generated code.
|
| @@ -2848,36 +2941,36 @@ Handle<Code> StoreStubCompiler::CompileStoreGlobal(
|
| Handle<Name> name) {
|
| Label miss;
|
|
|
| + ASM_LOCATION("StoreStubCompiler::CompileStoreGlobal");
|
| +
|
| // Check that the map of the global has not changed.
|
| - __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
|
| - __ cmp(scratch1(), Operand(Handle<Map>(object->map())));
|
| - __ b(ne, &miss);
|
| + __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
|
| + __ Cmp(scratch1(), Operand(Handle<Map>(object->map())));
|
| + __ B(ne, &miss);
|
|
|
| // Check that the value in the cell is not the hole. If it is, this
|
| // cell could have been deleted and reintroducing the global needs
|
| // to update the property details in the property dictionary of the
|
| // global object. We bail out to the runtime system to do that.
|
| - __ mov(scratch1(), Operand(cell));
|
| - __ LoadRoot(scratch2(), Heap::kTheHoleValueRootIndex);
|
| - __ ldr(scratch3(),
|
| - FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset));
|
| - __ cmp(scratch3(), scratch2());
|
| - __ b(eq, &miss);
|
| + __ Mov(scratch1(), Operand(cell));
|
| + __ Ldr(scratch2(), FieldMemOperand(scratch1(),
|
| + JSGlobalPropertyCell::kValueOffset));
|
| + __ JumpIfRoot(scratch2(), Heap::kTheHoleValueRootIndex, &miss);
|
|
|
| // Store the value in the cell.
|
| - __ str(value(),
|
| - FieldMemOperand(scratch1(), JSGlobalPropertyCell::kValueOffset));
|
| + __ Str(value(), FieldMemOperand(scratch1(),
|
| + JSGlobalPropertyCell::kValueOffset));
|
| // Cells are always rescanned, so no write barrier here.
|
|
|
| Counters* counters = isolate()->counters();
|
| - __ IncrementCounter(
|
| - counters->named_store_global_inline(), 1, scratch1(), scratch2());
|
| + __ IncrementCounter(counters->named_store_global_inline(), 1,
|
| + scratch1(), scratch2());
|
| __ Ret();
|
|
|
| // Handle store cache miss.
|
| - __ bind(&miss);
|
| - __ IncrementCounter(
|
| - counters->named_store_global_inline_miss(), 1, scratch1(), scratch2());
|
| + __ Bind(&miss);
|
| + __ IncrementCounter(counters->named_store_global_inline_miss(), 1,
|
| + scratch1(), scratch2());
|
| TailCallBuiltin(masm(), MissBuiltin(kind()));
|
|
|
| // Return the generated code.
|
| @@ -2891,13 +2984,12 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
|
| Handle<Name> name,
|
| Handle<GlobalObject> global) {
|
| Label success;
|
| -
|
| NonexistentHandlerFrontend(object, last, name, &success, global);
|
|
|
| - __ bind(&success);
|
| + __ Bind(&success);
|
| // Return undefined if maps of the full prototype chain are still the
|
| // same and no global property with this name contains a value.
|
| - __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
|
| + __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
|
| __ Ret();
|
|
|
| // Return the generated code.
|
| @@ -2905,30 +2997,35 @@ Handle<Code> LoadStubCompiler::CompileLoadNonexistent(
|
| }
|
|
|
|
|
| +// TODO(all): The so-called scratch registers are significant in some cases. For
|
| +// example, KeyedStoreStubCompiler::registers()[3] (x3) is actually used for
|
| +// KeyedStoreCompiler::transition_map(). We should verify which registers are
|
| +// actually scratch registers, and which are important. For now, we use the same
|
| +// assignments as ARM to remain on the safe side.
|
| +
|
| Register* LoadStubCompiler::registers() {
|
| // receiver, name, scratch1, scratch2, scratch3, scratch4.
|
| - static Register registers[] = { r0, r2, r3, r1, r4, r5 };
|
| + static Register registers[] = { x0, x2, x3, x1, x4, x5 };
|
| return registers;
|
| }
|
|
|
| -
|
| Register* KeyedLoadStubCompiler::registers() {
|
| - // receiver, name, scratch1, scratch2, scratch3, scratch4.
|
| - static Register registers[] = { r1, r0, r2, r3, r4, r5 };
|
| + // receiver, name/key, scratch1, scratch2, scratch3, scratch4.
|
| + static Register registers[] = { x1, x0, x2, x3, x4, x5 };
|
| return registers;
|
| }
|
|
|
|
|
| Register* StoreStubCompiler::registers() {
|
| // receiver, name, value, scratch1, scratch2, scratch3.
|
| - static Register registers[] = { r1, r2, r0, r3, r4, r5 };
|
| + static Register registers[] = { x1, x2, x0, x3, x4, x5 };
|
| return registers;
|
| }
|
|
|
|
|
| Register* KeyedStoreStubCompiler::registers() {
|
| // receiver, name, value, scratch1, scratch2, scratch3.
|
| - static Register registers[] = { r2, r1, r0, r3, r4, r5 };
|
| + static Register registers[] = { x2, x1, x0, x3, x4, x5 };
|
| return registers;
|
| }
|
|
|
| @@ -2936,28 +3033,27 @@ Register* KeyedStoreStubCompiler::registers() {
|
| void KeyedLoadStubCompiler::GenerateNameCheck(Handle<Name> name,
|
| Register name_reg,
|
| Label* miss) {
|
| - __ cmp(name_reg, Operand(name));
|
| - __ b(ne, miss);
|
| + __ Cmp(name_reg, Operand(name));
|
| + __ B(ne, miss);
|
| }
|
|
|
|
|
| void KeyedStoreStubCompiler::GenerateNameCheck(Handle<Name> name,
|
| Register name_reg,
|
| Label* miss) {
|
| - __ cmp(name_reg, Operand(name));
|
| - __ b(ne, miss);
|
| + __ Cmp(name_reg, Operand(name));
|
| + __ B(ne, miss);
|
| }
|
|
|
|
|
| #undef __
|
| #define __ ACCESS_MASM(masm)
|
|
|
| -
|
| void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
|
| Handle<JSFunction> getter) {
|
| // ----------- S t a t e -------------
|
| - // -- r0 : receiver
|
| - // -- r2 : name
|
| + // -- x0 : receiver
|
| + // -- x2 : name
|
| // -- lr : return address
|
| // -----------------------------------
|
| {
|
| @@ -2965,7 +3061,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
|
|
|
| if (!getter.is_null()) {
|
| // Call the JavaScript getter with the receiver on the stack.
|
| - __ push(r0);
|
| + __ Push(x0);
|
| ParameterCount actual(0);
|
| ParameterCount expected(getter);
|
| __ InvokeFunction(getter, expected, actual,
|
| @@ -2977,7 +3073,7 @@ void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
|
| }
|
|
|
| // Restore context register.
|
| - __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| + __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
|
| }
|
| __ Ret();
|
| }
|
| @@ -3001,22 +3097,20 @@ Handle<Code> LoadStubCompiler::CompileLoadGlobal(
|
| object, receiver(), Handle<JSObject>::cast(global), name, &miss);
|
|
|
| // Get the value from the cell.
|
| - __ mov(r3, Operand(cell));
|
| - __ ldr(r4, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset));
|
| + __ Mov(x3, Operand(cell));
|
| + __ Ldr(x4, FieldMemOperand(x3, JSGlobalPropertyCell::kValueOffset));
|
|
|
| // Check for deleted property if property can actually be deleted.
|
| if (!is_dont_delete) {
|
| - __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
|
| - __ cmp(r4, ip);
|
| - __ b(eq, &miss);
|
| + __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &miss);
|
| }
|
|
|
| HandlerFrontendFooter(&success, &miss);
|
| __ bind(&success);
|
|
|
| Counters* counters = isolate()->counters();
|
| - __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
|
| - __ mov(r0, r4);
|
| + __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
|
| + __ Mov(x0, x4);
|
| __ Ret();
|
|
|
| // Return the generated code.
|
| @@ -3037,28 +3131,30 @@ Handle<Code> BaseLoadStubCompiler::CompilePolymorphicIC(
|
| }
|
|
|
| __ JumpIfSmi(receiver(), &miss);
|
| - Register map_reg = scratch1();
|
|
|
| + Register map_reg = scratch1();
|
| + __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
|
| int receiver_count = receiver_maps->length();
|
| int number_of_handled_maps = 0;
|
| - __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
|
| for (int current = 0; current < receiver_count; ++current) {
|
| Handle<Map> map = receiver_maps->at(current);
|
| if (!map->is_deprecated()) {
|
| number_of_handled_maps++;
|
| - __ mov(ip, Operand(receiver_maps->at(current)));
|
| - __ cmp(map_reg, ip);
|
| - __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
|
| + Label try_next;
|
| + __ Cmp(map_reg, Operand(receiver_maps->at(current)));
|
| + __ B(ne, &try_next);
|
| + __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
|
| + __ Bind(&try_next);
|
| }
|
| }
|
| ASSERT(number_of_handled_maps != 0);
|
|
|
| - __ bind(&miss);
|
| + __ Bind(&miss);
|
| TailCallBuiltin(masm(), MissBuiltin(kind()));
|
|
|
| // Return the generated code.
|
| InlineCacheState state =
|
| - number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
|
| + (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
|
| return GetICCode(kind(), type, name, state);
|
| }
|
|
|
| @@ -3068,28 +3164,30 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
|
| CodeHandleList* handler_stubs,
|
| MapHandleList* transitioned_maps) {
|
| Label miss;
|
| +
|
| + ASM_LOCATION("KeyedStoreStubCompiler::CompileStorePolymorphic");
|
| +
|
| __ JumpIfSmi(receiver(), &miss);
|
|
|
| int receiver_count = receiver_maps->length();
|
| - __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
|
| - for (int i = 0; i < receiver_count; ++i) {
|
| - __ mov(ip, Operand(receiver_maps->at(i)));
|
| - __ cmp(scratch1(), ip);
|
| - if (transitioned_maps->at(i).is_null()) {
|
| - __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
|
| - } else {
|
| - Label next_map;
|
| - __ b(ne, &next_map);
|
| - __ mov(transition_map(), Operand(transitioned_maps->at(i)));
|
| - __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
|
| - __ bind(&next_map);
|
| + __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
|
| + for (int i = 0; i < receiver_count; i++) {
|
| + __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
|
| +
|
| + Label skip;
|
| + __ B(&skip, ne);
|
| + if (!transitioned_maps->at(i).is_null()) {
|
| + // This argument is used by the handler stub. For example, see
|
| + // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
|
| + __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
|
| }
|
| + __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
|
| + __ Bind(&skip);
|
| }
|
|
|
| - __ bind(&miss);
|
| + __ Bind(&miss);
|
| TailCallBuiltin(masm(), MissBuiltin(kind()));
|
|
|
| - // Return the generated code.
|
| return GetICCode(
|
| kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
|
| }
|
| @@ -3098,146 +3196,87 @@ Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
|
| #undef __
|
| #define __ ACCESS_MASM(masm)
|
|
|
| -
|
| void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
|
| MacroAssembler* masm) {
|
| // ---------- S t a t e --------------
|
| // -- lr : return address
|
| - // -- r0 : key
|
| - // -- r1 : receiver
|
| + // -- x0 : key
|
| + // -- x1 : receiver
|
| // -----------------------------------
|
| Label slow, miss_force_generic;
|
|
|
| - Register key = r0;
|
| - Register receiver = r1;
|
| + Register result = x0;
|
| + Register key = x0;
|
| + Register receiver = x1;
|
|
|
| - __ UntagAndJumpIfNotSmi(r2, key, &miss_force_generic);
|
| - __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
| - __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
|
| + __ JumpIfNotSmi(key, &miss_force_generic);
|
| + __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
| + __ LoadFromNumberDictionary(&slow, x4, key, result, x2, x3, x5, x6);
|
| __ Ret();
|
|
|
| - __ bind(&slow);
|
| + __ Bind(&slow);
|
| __ IncrementCounter(
|
| - masm->isolate()->counters()->keyed_load_external_array_slow(),
|
| - 1, r2, r3);
|
| -
|
| - // ---------- S t a t e --------------
|
| - // -- lr : return address
|
| - // -- r0 : key
|
| - // -- r1 : receiver
|
| - // -----------------------------------
|
| + masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x2, x3);
|
| TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
|
|
|
| // Miss case, call the runtime.
|
| - __ bind(&miss_force_generic);
|
| -
|
| - // ---------- S t a t e --------------
|
| - // -- lr : return address
|
| - // -- r0 : key
|
| - // -- r1 : receiver
|
| - // -----------------------------------
|
| + __ Bind(&miss_force_generic);
|
| TailCallBuiltin(masm, Builtins::kKeyedLoadIC_MissForceGeneric);
|
| }
|
|
|
|
|
| -static void GenerateSmiKeyCheck(MacroAssembler* masm,
|
| - Register key,
|
| - Register scratch0,
|
| - DwVfpRegister double_scratch0,
|
| - DwVfpRegister double_scratch1,
|
| - Label* fail) {
|
| - Label key_ok;
|
| - // Check for smi or a smi inside a heap number. We convert the heap
|
| - // number and check if the conversion is exact and fits into the smi
|
| - // range.
|
| - __ JumpIfSmi(key, &key_ok);
|
| - __ CheckMap(key,
|
| - scratch0,
|
| - Heap::kHeapNumberMapRootIndex,
|
| - fail,
|
| - DONT_DO_SMI_CHECK);
|
| - __ sub(ip, key, Operand(kHeapObjectTag));
|
| - __ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
|
| - __ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1);
|
| - __ b(ne, fail);
|
| - __ TrySmiTag(key, scratch0, fail);
|
| - __ bind(&key_ok);
|
| -}
|
| -
|
| -
|
| -void KeyedStoreStubCompiler::GenerateStoreExternalArray(
|
| +static void GenerateStoreSmiToExternalArray(
|
| MacroAssembler* masm,
|
| - ElementsKind elements_kind) {
|
| - // ---------- S t a t e --------------
|
| - // -- r0 : value
|
| - // -- r1 : key
|
| - // -- r2 : receiver
|
| - // -- lr : return address
|
| - // -----------------------------------
|
| - Label slow, check_heap_number, miss_force_generic;
|
| -
|
| - // Register usage.
|
| - Register value = r0;
|
| - Register key = r1;
|
| - Register receiver = r2;
|
| - // r3 mostly holds the elements array or the destination external array.
|
| -
|
| - // This stub is meant to be tail-jumped to, the receiver must already
|
| - // have been verified by the caller to not be a smi.
|
| -
|
| - // Check that the key is a smi or a heap number convertible to a smi.
|
| - GenerateSmiKeyCheck(masm, key, r4, d1, d2, &miss_force_generic);
|
| -
|
| - __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
| -
|
| - // Check that the index is in range
|
| - __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
|
| - __ cmp(key, ip);
|
| - // Unsigned comparison catches both negative and too-large values.
|
| - __ b(hs, &miss_force_generic);
|
| + ElementsKind elements_kind,
|
| + Register value,
|
| + Register key_raw, // Untagged 'key'.
|
| + Register elements_ext, // elements[ExternalArray::kExternalPointerOffset]
|
| + Register scratch,
|
| + FPRegister double_scratch) {
|
| + // Convert the smi in value (x0) to the specified element kind, and store it
|
| + // in the external array. No input registers are clobbered by this helper,
|
| + // other than the scratch registers.
|
|
|
| - // Handle both smis and HeapNumbers in the fast path. Go to the
|
| - // runtime for all other kinds of values.
|
| - // r3: external array.
|
| - if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) {
|
| - // Double to pixel conversion is only implemented in the runtime for now.
|
| - __ UntagAndJumpIfNotSmi(r5, value, &slow);
|
| - } else {
|
| - __ UntagAndJumpIfNotSmi(r5, value, &check_heap_number);
|
| - }
|
| - __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
|
| + ASSERT(!AreAliased(value, key_raw, elements_ext, scratch, double_scratch));
|
|
|
| - // r3: base pointer of external storage.
|
| - // r5: value (integer).
|
| switch (elements_kind) {
|
| case EXTERNAL_PIXEL_ELEMENTS:
|
| + __ SmiUntag(scratch, value);
|
| // Clamp the value to [0..255].
|
| - __ Usat(r5, 8, Operand(r5));
|
| - __ strb(r5, MemOperand(r3, key, LSR, 1));
|
| + __ Cmp(scratch, Operand(scratch, UXTB));
|
| + // If scratch < scratch & 0xff, it must be < 0, so saturate to 0.
|
| + __ CzeroX(scratch, lt);
|
| + // If scratch > scratch & 0xff, it must be > 255, so saturate to 255.
|
| + // This actually generates ~0, but it doesn't matter if we use strb.
|
| + __ Csinv(scratch, scratch, xzr, le);
|
| + __ Strb(scratch.W(), MemOperand(elements_ext, key_raw));
|
| break;
|
| case EXTERNAL_BYTE_ELEMENTS:
|
| case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
|
| - __ strb(r5, MemOperand(r3, key, LSR, 1));
|
| + __ SmiUntag(scratch, value);
|
| + __ Strb(scratch.W(), MemOperand(elements_ext, key_raw));
|
| break;
|
| case EXTERNAL_SHORT_ELEMENTS:
|
| case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
|
| - __ strh(r5, MemOperand(r3, key, LSL, 0));
|
| + __ SmiUntag(scratch, value);
|
| + __ Strh(scratch.W(),
|
| + MemOperand(elements_ext, key_raw, LSL, kHalfWordSizeInBytesLog2));
|
| break;
|
| case EXTERNAL_INT_ELEMENTS:
|
| case EXTERNAL_UNSIGNED_INT_ELEMENTS:
|
| - __ str(r5, MemOperand(r3, key, LSL, 1));
|
| + __ SmiUntag(scratch, value);
|
| + __ Str(scratch.W(),
|
| + MemOperand(elements_ext, key_raw, LSL, kWordSizeInBytesLog2));
|
| break;
|
| case EXTERNAL_FLOAT_ELEMENTS:
|
| - // Perform int-to-float conversion and store to memory.
|
| - __ SmiUntag(r4, key);
|
| - StoreIntAsFloat(masm, r3, r4, r5, r7);
|
| + __ SmiUntagToFloat(double_scratch.S(), value);
|
| + __ Str(double_scratch.S(),
|
| + MemOperand(elements_ext, key_raw, LSL, kSRegSizeInBytesLog2));
|
| break;
|
| case EXTERNAL_DOUBLE_ELEMENTS:
|
| - __ vmov(s2, r5);
|
| - __ vcvt_f64_s32(d0, s2);
|
| - __ add(r3, r3, Operand(key, LSL, 2));
|
| - // r3: effective address of the double element
|
| - __ vstr(d0, r3, 0);
|
| + __ SmiUntagToDouble(double_scratch, value);
|
| + __ Str(double_scratch,
|
| + MemOperand(elements_ext, key_raw, LSL, kDRegSizeInBytesLog2));
|
| break;
|
| case FAST_ELEMENTS:
|
| case FAST_SMI_ELEMENTS:
|
| @@ -3250,401 +3289,418 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
|
| UNREACHABLE();
|
| break;
|
| }
|
| +}
|
|
|
| - // Entry registers are intact, r0 holds the value which is the return value.
|
| - __ Ret();
|
|
|
| - if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) {
|
| - // r3: external array.
|
| - __ bind(&check_heap_number);
|
| - __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
|
| - __ b(ne, &slow);
|
| -
|
| - __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
|
| -
|
| - // r3: base pointer of external storage.
|
| -
|
| - // The WebGL specification leaves the behavior of storing NaN and
|
| - // +/-Infinity into integer arrays basically undefined. For more
|
| - // reproducible behavior, convert these to zero.
|
| -
|
| - if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
|
| - // vldr requires offset to be a multiple of 4 so we can not
|
| - // include -kHeapObjectTag into it.
|
| - __ sub(r5, r0, Operand(kHeapObjectTag));
|
| - __ vldr(d0, r5, HeapNumber::kValueOffset);
|
| - __ add(r5, r3, Operand(key, LSL, 1));
|
| - __ vcvt_f32_f64(s0, d0);
|
| - __ vstr(s0, r5, 0);
|
| - } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
|
| - __ sub(r5, r0, Operand(kHeapObjectTag));
|
| - __ vldr(d0, r5, HeapNumber::kValueOffset);
|
| - __ add(r5, r3, Operand(key, LSL, 2));
|
| - __ vstr(d0, r5, 0);
|
| - } else {
|
| - // Hoisted load. vldr requires offset to be a multiple of 4 so we can
|
| - // not include -kHeapObjectTag into it.
|
| - __ sub(r5, value, Operand(kHeapObjectTag));
|
| - __ vldr(d0, r5, HeapNumber::kValueOffset);
|
| - __ ECMAToInt32(r5, d0, r6, r7, r9, d1);
|
| -
|
| - switch (elements_kind) {
|
| - case EXTERNAL_BYTE_ELEMENTS:
|
| - case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
|
| - __ strb(r5, MemOperand(r3, key, LSR, 1));
|
| - break;
|
| - case EXTERNAL_SHORT_ELEMENTS:
|
| - case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
|
| - __ strh(r5, MemOperand(r3, key, LSL, 0));
|
| - break;
|
| - case EXTERNAL_INT_ELEMENTS:
|
| - case EXTERNAL_UNSIGNED_INT_ELEMENTS:
|
| - __ str(r5, MemOperand(r3, key, LSL, 1));
|
| - break;
|
| - case EXTERNAL_PIXEL_ELEMENTS:
|
| - case EXTERNAL_FLOAT_ELEMENTS:
|
| - case EXTERNAL_DOUBLE_ELEMENTS:
|
| - case FAST_ELEMENTS:
|
| - case FAST_SMI_ELEMENTS:
|
| - case FAST_DOUBLE_ELEMENTS:
|
| - case FAST_HOLEY_ELEMENTS:
|
| - case FAST_HOLEY_SMI_ELEMENTS:
|
| - case FAST_HOLEY_DOUBLE_ELEMENTS:
|
| - case DICTIONARY_ELEMENTS:
|
| - case NON_STRICT_ARGUMENTS_ELEMENTS:
|
| - UNREACHABLE();
|
| - break;
|
| - }
|
| - }
|
| +static void GenerateStoreHeapNumberToExternalArray(
|
| + MacroAssembler* masm,
|
| + ElementsKind elements_kind,
|
| + Register value,
|
| + Register key_raw, // Untagged 'key'.
|
| + Register elements_ext, // elements[ExternalArray::kExternalPointerOffset]
|
| + Register scratch,
|
| + FPRegister double_scratch1,
|
| + FPRegister double_scratch2) {
|
| + // Convert the heap number in value (x0) to the specified element kind, and
|
| + // store it in the external array. No input registers are clobbered by this
|
| + // helper, other than the scratch registers.
|
| +
|
| + ASSERT(!AreAliased(value, key_raw, elements_ext, scratch,
|
| + double_scratch1, double_scratch2));
|
| +
|
| + FPRegister value_d = double_scratch1;
|
| + __ Ldr(value_d, FieldMemOperand(value, HeapNumber::kValueOffset));
|
| +
|
| + // Convert the (double) input to an integral type.
|
| + switch (elements_kind) {
|
| + case EXTERNAL_FLOAT_ELEMENTS:
|
| + __ Fcvt(s16, value_d);
|
| + __ Str(s16, MemOperand(elements_ext, key_raw, LSL, 2));
|
| + break;
|
| + case EXTERNAL_DOUBLE_ELEMENTS:
|
| + __ Str(value_d, MemOperand(elements_ext, key_raw, LSL, 3));
|
| + break;
|
| + case EXTERNAL_PIXEL_ELEMENTS:
|
| + // This conversion follows the WebIDL "[Clamp]" rules:
|
| + // - Inputs lower than 0 (including -infinity) produce 0.
|
| + // - Inputs higher than 255 (including +infinity) produce 255.
|
| + // Also, it seems that PIXEL types use round-to-nearest rather than
|
| + // round-towards-zero.
|
| +
|
| + // Squash +infinity before the conversion, since Fcvtnu will normally
|
| + // convert it to 0.
|
| + __ Fmov(double_scratch2, 255);
|
| + __ Fmin(double_scratch2, double_scratch2, value_d);
|
| +
|
| + // Convert double to unsigned integer. Values less than zero become zero.
|
| + // Values greater than 255 have already been clamped to 255.
|
| + __ Fcvtnu(scratch.W(), double_scratch2);
|
| +
|
| + __ Strb(scratch.W(), MemOperand(elements_ext, key_raw));
|
| + break;
|
| + case EXTERNAL_BYTE_ELEMENTS:
|
| + case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
|
| + __ ECMA262ToInt32(scratch, value_d, x11, x12, MacroAssembler::INT32_IN_W);
|
| + __ Strb(scratch.W(), MemOperand(elements_ext, key_raw));
|
| + break;
|
| + case EXTERNAL_SHORT_ELEMENTS:
|
| + case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
|
| + __ ECMA262ToInt32(scratch, value_d, x11, x12, MacroAssembler::INT32_IN_W);
|
| + __ Strh(scratch.W(),
|
| + MemOperand(elements_ext, key_raw, LSL, kHalfWordSizeInBytesLog2));
|
| + break;
|
| + case EXTERNAL_INT_ELEMENTS:
|
| + case EXTERNAL_UNSIGNED_INT_ELEMENTS:
|
| + __ ECMA262ToInt32(scratch, value_d, x11, x12, MacroAssembler::INT32_IN_W);
|
| + __ Str(scratch.W(),
|
| + MemOperand(elements_ext, key_raw, LSL, kWordSizeInBytesLog2));
|
| + break;
|
| + case FAST_ELEMENTS:
|
| + case FAST_SMI_ELEMENTS:
|
| + case FAST_DOUBLE_ELEMENTS:
|
| + case FAST_HOLEY_ELEMENTS:
|
| + case FAST_HOLEY_SMI_ELEMENTS:
|
| + case FAST_HOLEY_DOUBLE_ELEMENTS:
|
| + case DICTIONARY_ELEMENTS:
|
| + case NON_STRICT_ARGUMENTS_ELEMENTS:
|
| + UNREACHABLE();
|
| + break;
|
| + }
|
| +}
|
|
|
| - // Entry registers are intact, r0 holds the value which is the return
|
| - // value.
|
| - __ Ret();
|
| +
|
| +void KeyedStoreStubCompiler::GenerateStoreExternalArray(
|
| + MacroAssembler* masm,
|
| + ElementsKind elements_kind) {
|
| + // ---------- S t a t e --------------
|
| + // -- lr : return address
|
| + // -- x0 : value
|
| + // -- x1 : key
|
| + // -- x2 : receiver
|
| + // -----------------------------------
|
| + Label slow, check_heap_number, miss_force_generic;
|
| +
|
| + // Register usage.
|
| + Register value = x0;
|
| + Register key = x1;
|
| + Register receiver = x2;
|
| +
|
| + // This stub is meant to be tail-jumped to, the receiver must already
|
| + // have been verified by the caller to not be a smi.
|
| + if (__ emit_debug_code()) {
|
| + Label ok;
|
| + __ JumpIfNotSmi(receiver, &ok);
|
| + __ Abort("KeyedStoreStubCompiler::GenerateStoreExternalArray: "
|
| + "receiver is a SMI\n");
|
| + __ Bind(&ok);
|
| }
|
|
|
| - // Slow case, key and receiver still in r0 and r1.
|
| - __ bind(&slow);
|
| - __ IncrementCounter(
|
| - masm->isolate()->counters()->keyed_load_external_array_slow(),
|
| - 1, r2, r3);
|
| + // Check that the key is a smi or a heap number convertible to a smi.
|
| + GenerateSmiKeyCheck(masm, key, x10, d16, d17, &miss_force_generic);
|
| +
|
| + Register elements = x3;
|
| + __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
| +
|
| + Register key_raw = x4;
|
| + __ SmiUntag(key_raw, key);
|
| +
|
| + // Check that the key is within bounds. An unsigned comparison catches both
|
| + // negative and out-of-bound indexes.
|
| + __ Ldrsw(x10,
|
| + UntagSmiFieldMemOperand(elements, ExternalArray::kLengthOffset));
|
| + __ Cmp(key_raw.W(), w10);
|
| + __ B(&miss_force_generic, hs);
|
| +
|
| + // Get the externally-stored elements.
|
| + Register elements_ext = x5;
|
| + __ Ldr(elements_ext,
|
| + FieldMemOperand(elements, ExternalArray::kExternalPointerOffset));
|
| +
|
| + // x0: value
|
| + // x1: key
|
| + // x2: receiver
|
| + // x3: elements
|
| + // x4: key_raw Untagged 'key'.
|
| + // x5: elements_ext From elements[ExternalArray::kExternalPointerOffset].
|
| +
|
| + // Handle both smis and HeapNumbers in the fast path. Go to the
|
| + // runtime for all other kinds of values.
|
| + __ JumpIfNotSmi(value, &check_heap_number);
|
| +
|
| + GenerateStoreSmiToExternalArray(
|
| + masm, elements_kind, value, key_raw, elements_ext, x10, d16);
|
| + // Entry registers are intact and x0 holds 'value', which is the return value.
|
| + __ Ret();
|
|
|
| + __ Bind(&check_heap_number);
|
| + // Convert the double at 'value' to the specified element kind.
|
| + //
|
| + // x0: value
|
| + // x1: key
|
| + // x2: receiver
|
| + // x3: elements
|
| + // x4: key_raw Untagged 'key'.
|
| + // x5: elements_ext From elements[ExternalArray::kExternalPointerOffset].
|
| + __ JumpIfNotObjectType(value, x10, x11, HEAP_NUMBER_TYPE, &slow);
|
| +
|
| + GenerateStoreHeapNumberToExternalArray(
|
| + masm, elements_kind, value, key_raw, elements_ext, x10, d16, d17);
|
| + // Entry registers are intact and x0 holds 'value', which is the return value.
|
| + __ Ret();
|
| +
|
| + __ Bind(&slow);
|
| // ---------- S t a t e --------------
|
| // -- lr : return address
|
| - // -- r0 : key
|
| - // -- r1 : receiver
|
| + // -- x0 : value
|
| + // -- x1 : key
|
| + // -- x2 : receiver
|
| // -----------------------------------
|
| + __ IncrementCounter(
|
| + masm->isolate()->counters()->keyed_load_external_array_slow(),
|
| + 1, x10, x11);
|
| +
|
| TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
|
|
|
| // Miss case, call the runtime.
|
| - __ bind(&miss_force_generic);
|
| -
|
| + __ Bind(&miss_force_generic);
|
| // ---------- S t a t e --------------
|
| // -- lr : return address
|
| - // -- r0 : key
|
| - // -- r1 : receiver
|
| + // -- x0 : value
|
| + // -- x1 : key
|
| + // -- x2 : receiver
|
| // -----------------------------------
|
| +
|
| TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
|
| }
|
|
|
|
|
| -void KeyedStoreStubCompiler::GenerateStoreFastElement(
|
| +static void GenerateStoreFastSmiOrDoubleElement(
|
| MacroAssembler* masm,
|
| bool is_js_array,
|
| ElementsKind elements_kind,
|
| - KeyedAccessStoreMode store_mode) {
|
| - // ----------- S t a t e -------------
|
| - // -- r0 : value
|
| - // -- r1 : key
|
| - // -- r2 : receiver
|
| - // -- lr : return address
|
| - // -- r3 : scratch
|
| - // -- r4 : scratch (elements)
|
| - // -----------------------------------
|
| + KeyedAccessStoreMode store_mode,
|
| + bool store_double) {
|
| Label miss_force_generic, transition_elements_kind, grow, slow;
|
| Label finish_store, check_capacity;
|
|
|
| - Register value_reg = r0;
|
| - Register key_reg = r1;
|
| - Register receiver_reg = r2;
|
| - Register scratch = r4;
|
| - Register elements_reg = r3;
|
| - Register length_reg = r5;
|
| - Register scratch2 = r6;
|
| + Register value = x0;
|
| + Register key = x1;
|
| + Register receiver = x2;
|
|
|
| // This stub is meant to be tail-jumped to, the receiver must already
|
| // have been verified by the caller to not be a smi.
|
| + if (__ emit_debug_code()) {
|
| + Label ok;
|
| + __ JumpIfNotSmi(receiver, &ok);
|
| + __ Abort("GenerateStoreFastSmiOrDoubleElement: receiver is a SMI\n");
|
| + __ Bind(&ok);
|
| + }
|
|
|
| // Check that the key is a smi or a heap number convertible to a smi.
|
| - GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);
|
| + GenerateSmiKeyCheck(masm, key, x10, d16, d17, &miss_force_generic);
|
|
|
| - if (IsFastSmiElementsKind(elements_kind)) {
|
| - __ JumpIfNotSmi(value_reg, &transition_elements_kind);
|
| + if (!store_double && IsFastSmiElementsKind(elements_kind)) {
|
| + __ JumpIfNotSmi(value, &transition_elements_kind);
|
| }
|
|
|
| + Register elements = x3;
|
| + __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
| +
|
| // Check that the key is within bounds.
|
| - __ ldr(elements_reg,
|
| - FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
|
| + Register length = x4;
|
| if (is_js_array) {
|
| - __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
|
| + __ Ldr(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| } else {
|
| - __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
|
| + __ Ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
|
| }
|
| - // Compare smis.
|
| - __ cmp(key_reg, scratch);
|
| + // Compare smis. An unsigned comparison catches both negative and out-of-bound
|
| + // indexes.
|
| + __ Cmp(key, length);
|
| if (is_js_array && IsGrowStoreMode(store_mode)) {
|
| - __ b(hs, &grow);
|
| - } else {
|
| - __ b(hs, &miss_force_generic);
|
| + // We can handle the case where the array needs to grow by a single element
|
| + // without falling back to run-time.
|
| + __ B(&grow, eq);
|
| }
|
| + // Fall back to the run-time if the key is out of bounds.
|
| + __ B(&miss_force_generic, hs);
|
|
|
| - // Make sure elements is a fast element array, not 'cow'.
|
| - __ CheckMap(elements_reg,
|
| - scratch,
|
| - Heap::kFixedArrayMapRootIndex,
|
| - &miss_force_generic,
|
| - DONT_DO_SMI_CHECK);
|
| -
|
| - __ bind(&finish_store);
|
| - if (IsFastSmiElementsKind(elements_kind)) {
|
| - __ add(scratch,
|
| - elements_reg,
|
| - Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| - __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
|
| - __ str(value_reg, MemOperand(scratch));
|
| + if (store_double) {
|
| + __ Bind(&finish_store);
|
| + __ StoreNumberToDoubleElements(value, key, elements, x10, d16, d17,
|
| + &transition_elements_kind);
|
| } else {
|
| - ASSERT(IsFastObjectElementsKind(elements_kind));
|
| - __ add(scratch,
|
| - elements_reg,
|
| - Operand(FixedArray::kHeaderSize - kHeapObjectTag));
|
| - __ add(scratch, scratch, Operand::PointerOffsetFromSmiKey(key_reg));
|
| - __ str(value_reg, MemOperand(scratch));
|
| - __ mov(receiver_reg, value_reg);
|
| - __ RecordWrite(elements_reg, // Object.
|
| - scratch, // Address.
|
| - receiver_reg, // Value.
|
| - kLRHasNotBeenSaved,
|
| - kDontSaveFPRegs);
|
| + // Make sure elements is a fast element array, not 'cow'.
|
| + // TODO(jbramley): Why is this only done when storing a smi?
|
| + __ CheckMap(elements, x10,
|
| + Heap::kFixedArrayMapRootIndex,
|
| + &miss_force_generic,
|
| + DONT_DO_SMI_CHECK);
|
| +
|
| + __ Bind(&finish_store);
|
| +
|
| + STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
|
| + __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
|
| + __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
|
| + __ Str(value, MemOperand(x10));
|
| + if (!IsFastSmiElementsKind(elements_kind)) {
|
| + ASSERT(IsFastObjectElementsKind(elements_kind));
|
| + __ Mov(receiver, value);
|
| + __ RecordWrite(elements, // Object.
|
| + x10, // Address.
|
| + receiver, // Value.
|
| + kLRHasNotBeenSaved,
|
| + kDontSaveFPRegs,
|
| + EMIT_REMEMBERED_SET,
|
| + INLINE_SMI_CHECK,
|
| + EXPECT_PREGENERATED);
|
| + }
|
| + // Value (x0) is preserved.
|
| }
|
| - // value_reg (r0) is preserved.
|
| - // Done.
|
| __ Ret();
|
|
|
| - __ bind(&miss_force_generic);
|
| - TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
|
| + __ Bind(&miss_force_generic);
|
| + KeyedStoreStubCompiler::TailCallBuiltin(
|
| + masm, Builtins::kKeyedStoreIC_MissForceGeneric);
|
|
|
| - __ bind(&transition_elements_kind);
|
| - TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
|
| + __ Bind(&transition_elements_kind);
|
| + KeyedStoreStubCompiler::TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
|
|
|
| if (is_js_array && IsGrowStoreMode(store_mode)) {
|
| - // Grow the array by a single element if possible.
|
| - __ bind(&grow);
|
| -
|
| - // Make sure the array is only growing by a single element, anything else
|
| - // must be handled by the runtime. Flags already set by previous compare.
|
| - __ b(ne, &miss_force_generic);
|
| -
|
| - // Check for the empty array, and preallocate a small backing store if
|
| - // possible.
|
| - __ ldr(length_reg,
|
| - FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
|
| - __ ldr(elements_reg,
|
| - FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
|
| - __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
|
| - __ b(ne, &check_capacity);
|
| + // Grow a JSArray by a single element.
|
| + __ Bind(&grow);
|
| +
|
| + // x1: key
|
| + // x2: receiver
|
| + // x3: elements From receiver[JSObject::kElementsOffset].
|
| + // x4: length From receiver[JSArray::kLengthOffset].
|
| +
|
| + if (__ emit_debug_code()) {
|
| + // Check that 'elements' and 'length' are pre-loaded.
|
| + __ Ldr(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
| + __ Cmp(x10, x3);
|
| + __ Ldr(x11, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| + __ Ccmp(x11, x4, NoFlag, eq);
|
| +
|
| + // Check that the key is equal to length, so we need to extend the array
|
| + // by one element.
|
| + __ Ccmp(x1, x4, NoFlag, eq);
|
| +
|
| + __ Check(eq, "GenerateStoreFastSmiOrDoubleElement [grow]: "
|
| + "Preconditions were not met.");
|
| + }
|
| +
|
| + __ JumpIfNotRoot(elements, Heap::kEmptyFixedArrayRootIndex,
|
| + &check_capacity);
|
|
|
| + // The array is currently empty, so allocate a new backing store.
|
| int size = FixedArray::SizeFor(JSArray::kPreallocatedArrayElements);
|
| - __ Allocate(size, elements_reg, scratch, scratch2, &slow, TAG_OBJECT);
|
| -
|
| - __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
|
| - __ str(scratch, FieldMemOperand(elements_reg, JSObject::kMapOffset));
|
| - __ mov(scratch, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
|
| - __ str(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
|
| - __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
|
| - for (int i = 1; i < JSArray::kPreallocatedArrayElements; ++i) {
|
| - __ str(scratch, FieldMemOperand(elements_reg, FixedArray::SizeFor(i)));
|
| + __ Allocate(size, elements, x10, x11, &slow, TAG_OBJECT);
|
| + Heap::RootListIndex root_index = store_double
|
| + ? Heap::kFixedDoubleArrayMapRootIndex
|
| + : Heap::kFixedArrayMapRootIndex;
|
| + __ LoadRoot(x12, root_index);
|
| + __ Str(x12, FieldMemOperand(elements, JSObject::kMapOffset));
|
| + __ Mov(x13, Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
|
| + __ Str(x13, FieldMemOperand(elements, FixedArray::kLengthOffset));
|
| +
|
| + // Store the element at index zero, and fill the rest with the hole value.
|
| + if (store_double) {
|
| + __ StoreNumberToDoubleElements(value,
|
| + key,
|
| + elements,
|
| + x10,
|
| + d16,
|
| + d17,
|
| + &transition_elements_kind);
|
| + __ Fmov(d16, rawbits_to_double(kHoleNanInt64));
|
| + for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
|
| + __ Str(d16, FieldMemOperand(elements,
|
| + FixedDoubleArray::OffsetOfElementAt(i)));
|
| + }
|
| + } else {
|
| + __ Str(value, FieldMemOperand(elements, FixedArray::SizeFor(0)));
|
| + __ LoadRoot(x10, Heap::kTheHoleValueRootIndex);
|
| + for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
|
| + __ Str(x10, FieldMemOperand(elements,
|
| + FixedArray::OffsetOfElementAt(i)));
|
| + }
|
| }
|
|
|
| - // Store the element at index zero.
|
| - __ str(value_reg, FieldMemOperand(elements_reg, FixedArray::SizeFor(0)));
|
| -
|
| // Install the new backing store in the JSArray.
|
| - __ str(elements_reg,
|
| - FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
|
| - __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
|
| - scratch, kLRHasNotBeenSaved, kDontSaveFPRegs,
|
| - EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
|
| + __ Str(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
|
| + __ RecordWriteField(receiver, JSObject::kElementsOffset, elements,
|
| + x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
|
| + EMIT_REMEMBERED_SET, OMIT_SMI_CHECK,
|
| + EXPECT_PREGENERATED);
|
|
|
| // Increment the length of the array.
|
| - __ mov(length_reg, Operand(Smi::FromInt(1)));
|
| - __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
|
| + __ Mov(length, Operand(Smi::FromInt(1)));
|
| + __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| __ Ret();
|
|
|
| - __ bind(&check_capacity);
|
| - // Check for cow elements, in general they are not handled by this stub
|
| - __ CheckMap(elements_reg,
|
| - scratch,
|
| - Heap::kFixedCOWArrayMapRootIndex,
|
| - &miss_force_generic,
|
| - DONT_DO_SMI_CHECK);
|
| + __ Bind(&check_capacity);
|
| +
|
| + if (!store_double) {
|
| + // Check for COW elements; in general, they are not handled by this stub.
|
| + // TODO(jbramley): Why is this only done when storing a smi?
|
| + __ CheckMap(elements, x10,
|
| + Heap::kFixedCOWArrayMapRootIndex,
|
| + &miss_force_generic,
|
| + DONT_DO_SMI_CHECK);
|
| + }
|
|
|
| - __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
|
| - __ cmp(length_reg, scratch);
|
| - __ b(hs, &slow);
|
| + // See if there are any free preallocated slots. If not, defer to the
|
| + // runtime to extend the backing store.
|
| + __ Ldr(x10, FieldMemOperand(elements, FixedArray::kLengthOffset));
|
| + __ Cmp(length, x10);
|
| + __ B(&slow, hs);
|
|
|
| // Grow the array and finish the store.
|
| - __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
|
| - __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
|
| - __ jmp(&finish_store);
|
| + __ Add(length, length, Operand(Smi::FromInt(1)));
|
| + __ Str(length, FieldMemOperand(receiver, JSArray::kLengthOffset));
|
| + __ B(&finish_store);
|
|
|
| - __ bind(&slow);
|
| - TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
|
| + __ Bind(&slow);
|
| + KeyedStoreStubCompiler::TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
|
| }
|
| }
|
|
|
|
|
| -void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
|
| +void KeyedStoreStubCompiler::GenerateStoreFastElement(
|
| MacroAssembler* masm,
|
| bool is_js_array,
|
| + ElementsKind elements_kind,
|
| KeyedAccessStoreMode store_mode) {
|
| +
|
| // ----------- S t a t e -------------
|
| - // -- r0 : value
|
| - // -- r1 : key
|
| - // -- r2 : receiver
|
| // -- lr : return address
|
| - // -- r3 : scratch (elements backing store)
|
| - // -- r4 : scratch
|
| - // -- r5 : scratch
|
| + // -- x0 : value
|
| + // -- x1 : key
|
| + // -- x2 : receiver
|
| // -----------------------------------
|
| - Label miss_force_generic, transition_elements_kind, grow, slow;
|
| - Label finish_store, check_capacity;
|
| -
|
| - Register value_reg = r0;
|
| - Register key_reg = r1;
|
| - Register receiver_reg = r2;
|
| - Register elements_reg = r3;
|
| - Register scratch1 = r4;
|
| - Register scratch2 = r5;
|
| - Register length_reg = r7;
|
| -
|
| - // This stub is meant to be tail-jumped to, the receiver must already
|
| - // have been verified by the caller to not be a smi.
|
|
|
| - // Check that the key is a smi or a heap number convertible to a smi.
|
| - GenerateSmiKeyCheck(masm, key_reg, r4, d1, d2, &miss_force_generic);
|
| -
|
| - __ ldr(elements_reg,
|
| - FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
|
| -
|
| - // Check that the key is within bounds.
|
| - if (is_js_array) {
|
| - __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
|
| - } else {
|
| - __ ldr(scratch1,
|
| - FieldMemOperand(elements_reg, FixedArray::kLengthOffset));
|
| - }
|
| - // Compare smis, unsigned compare catches both negative and out-of-bound
|
| - // indexes.
|
| - __ cmp(key_reg, scratch1);
|
| - if (IsGrowStoreMode(store_mode)) {
|
| - __ b(hs, &grow);
|
| - } else {
|
| - __ b(hs, &miss_force_generic);
|
| - }
|
| -
|
| - __ bind(&finish_store);
|
| - __ StoreNumberToDoubleElements(value_reg, key_reg, elements_reg,
|
| - scratch1, &transition_elements_kind);
|
| - __ Ret();
|
| -
|
| - // Handle store cache miss, replacing the ic with the generic stub.
|
| - __ bind(&miss_force_generic);
|
| - TailCallBuiltin(masm, Builtins::kKeyedStoreIC_MissForceGeneric);
|
| -
|
| - __ bind(&transition_elements_kind);
|
| - TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Miss);
|
| -
|
| - if (is_js_array && IsGrowStoreMode(store_mode)) {
|
| - // Grow the array by a single element if possible.
|
| - __ bind(&grow);
|
| -
|
| - // Make sure the array is only growing by a single element, anything else
|
| - // must be handled by the runtime. Flags already set by previous compare.
|
| - __ b(ne, &miss_force_generic);
|
| -
|
| - // Transition on values that can't be stored in a FixedDoubleArray.
|
| - Label value_is_smi;
|
| - __ JumpIfSmi(value_reg, &value_is_smi);
|
| - __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
|
| - __ CompareRoot(scratch1, Heap::kHeapNumberMapRootIndex);
|
| - __ b(ne, &transition_elements_kind);
|
| - __ bind(&value_is_smi);
|
| -
|
| - // Check for the empty array, and preallocate a small backing store if
|
| - // possible.
|
| - __ ldr(length_reg,
|
| - FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
|
| - __ ldr(elements_reg,
|
| - FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
|
| - __ CompareRoot(elements_reg, Heap::kEmptyFixedArrayRootIndex);
|
| - __ b(ne, &check_capacity);
|
| -
|
| - int size = FixedDoubleArray::SizeFor(JSArray::kPreallocatedArrayElements);
|
| - __ Allocate(size, elements_reg, scratch1, scratch2, &slow, TAG_OBJECT);
|
| -
|
| - // Initialize the new FixedDoubleArray.
|
| - __ LoadRoot(scratch1, Heap::kFixedDoubleArrayMapRootIndex);
|
| - __ str(scratch1, FieldMemOperand(elements_reg, JSObject::kMapOffset));
|
| - __ mov(scratch1,
|
| - Operand(Smi::FromInt(JSArray::kPreallocatedArrayElements)));
|
| - __ str(scratch1,
|
| - FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
|
| -
|
| - __ mov(scratch1, elements_reg);
|
| - __ StoreNumberToDoubleElements(value_reg, key_reg, scratch1,
|
| - scratch2, &transition_elements_kind);
|
| -
|
| - __ mov(scratch1, Operand(kHoleNanLower32));
|
| - __ mov(scratch2, Operand(kHoleNanUpper32));
|
| - for (int i = 1; i < JSArray::kPreallocatedArrayElements; i++) {
|
| - int offset = FixedDoubleArray::OffsetOfElementAt(i);
|
| - __ str(scratch1, FieldMemOperand(elements_reg, offset));
|
| - __ str(scratch2, FieldMemOperand(elements_reg, offset + kPointerSize));
|
| - }
|
| + GenerateStoreFastSmiOrDoubleElement(masm, is_js_array, elements_kind,
|
| + store_mode, false);
|
| +}
|
|
|
| - // Install the new backing store in the JSArray.
|
| - __ str(elements_reg,
|
| - FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
|
| - __ RecordWriteField(receiver_reg, JSObject::kElementsOffset, elements_reg,
|
| - scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs,
|
| - EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
|
|
|
| - // Increment the length of the array.
|
| - __ mov(length_reg, Operand(Smi::FromInt(1)));
|
| - __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
|
| - __ ldr(elements_reg,
|
| - FieldMemOperand(receiver_reg, JSObject::kElementsOffset));
|
| - __ Ret();
|
| -
|
| - __ bind(&check_capacity);
|
| - // Make sure that the backing store can hold additional elements.
|
| - __ ldr(scratch1,
|
| - FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset));
|
| - __ cmp(length_reg, scratch1);
|
| - __ b(hs, &slow);
|
| +void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
|
| + MacroAssembler* masm,
|
| + bool is_js_array,
|
| + KeyedAccessStoreMode store_mode) {
|
|
|
| - // Grow the array and finish the store.
|
| - __ add(length_reg, length_reg, Operand(Smi::FromInt(1)));
|
| - __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset));
|
| - __ jmp(&finish_store);
|
| + // ----------- S t a t e -------------
|
| + // -- lr : return address
|
| + // -- x0 : value
|
| + // -- x1 : key
|
| + // -- x2 : receiver
|
| + // -----------------------------------
|
|
|
| - __ bind(&slow);
|
| - TailCallBuiltin(masm, Builtins::kKeyedStoreIC_Slow);
|
| - }
|
| + GenerateStoreFastSmiOrDoubleElement(masm, is_js_array, FAST_DOUBLE_ELEMENTS,
|
| + store_mode, true);
|
| }
|
|
|
|
|
| -#undef __
|
| -
|
| } } // namespace v8::internal
|
|
|
| -#endif // V8_TARGET_ARCH_ARM
|
| +#endif // V8_TARGET_ARCH_A64
|
|
|