Index: src/ppc/stub-cache-ppc.cc |
diff --git a/src/arm/stub-cache-arm.cc b/src/ppc/stub-cache-ppc.cc |
similarity index 76% |
copy from src/arm/stub-cache-arm.cc |
copy to src/ppc/stub-cache-ppc.cc |
index b3b567436913147e535158ccc52a3db1f187d7a1..9d56bbb8b75b7e2a909df2d67ac82226a021cfe7 100644 |
--- a/src/arm/stub-cache-arm.cc |
+++ b/src/ppc/stub-cache-ppc.cc |
@@ -1,10 +1,13 @@ |
// Copyright 2012 the V8 project authors. All rights reserved. |
+// |
+// Copyright IBM Corp. 2012, 2013. All rights reserved. |
+// |
// Use of this source code is governed by a BSD-style license that can be |
// found in the LICENSE file. |
#include "src/v8.h" |
-#if V8_TARGET_ARCH_ARM |
+#if V8_TARGET_ARCH_PPC |
#include "src/codegen.h" |
#include "src/ic-inl.h" |
@@ -16,24 +19,20 @@ namespace internal { |
#define __ ACCESS_MASM(masm) |
-static void ProbeTable(Isolate* isolate, |
- MacroAssembler* masm, |
- Code::Flags flags, |
- StubCache::Table table, |
- Register receiver, |
- Register name, |
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm, |
+ Code::Flags flags, StubCache::Table table, |
+ Register receiver, Register name, |
// Number of the cache entry, not scaled. |
- Register offset, |
- Register scratch, |
- Register scratch2, |
+ Register offset, Register scratch, Register scratch2, |
Register offset_scratch) { |
ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); |
ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); |
ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); |
- uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address()); |
- uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address()); |
- uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address()); |
+ uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address()); |
+ uintptr_t value_off_addr = |
+ reinterpret_cast<uintptr_t>(value_offset.address()); |
+ uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address()); |
// Check the relative positions of the address fields. |
DCHECK(value_off_addr > key_off_addr); |
@@ -48,50 +47,54 @@ static void ProbeTable(Isolate* isolate, |
scratch = no_reg; |
// Multiply by 3 because there are 3 fields per entry (name, code, map). |
- __ add(offset_scratch, offset, Operand(offset, LSL, 1)); |
+ __ ShiftLeftImm(offset_scratch, offset, Operand(1)); |
+ __ add(offset_scratch, offset, offset_scratch); |
// Calculate the base address of the entry. |
__ mov(base_addr, Operand(key_offset)); |
- __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2)); |
+ __ ShiftLeftImm(scratch2, offset_scratch, Operand(kPointerSizeLog2)); |
+ __ add(base_addr, base_addr, scratch2); |
// Check that the key in the entry matches the name. |
- __ ldr(ip, MemOperand(base_addr, 0)); |
+ __ LoadP(ip, MemOperand(base_addr, 0)); |
__ cmp(name, ip); |
- __ b(ne, &miss); |
+ __ bne(&miss); |
// Check the map matches. |
- __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr)); |
- __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
+ __ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr)); |
+ __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
__ cmp(ip, scratch2); |
- __ b(ne, &miss); |
+ __ bne(&miss); |
// Get the code entry from the cache. |
Register code = scratch2; |
scratch2 = no_reg; |
- __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr)); |
+ __ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr)); |
// Check that the flags match what we're looking for. |
Register flags_reg = base_addr; |
base_addr = no_reg; |
- __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset)); |
- // It's a nice optimization if this constant is encodable in the bic insn. |
+ __ lwz(flags_reg, FieldMemOperand(code, Code::kFlagsOffset)); |
- uint32_t mask = Code::kFlagsNotUsedInLookup; |
- DCHECK(__ ImmediateFitsAddrMode1Instruction(mask)); |
- __ bic(flags_reg, flags_reg, Operand(mask)); |
- __ cmp(flags_reg, Operand(flags)); |
- __ b(ne, &miss); |
+ DCHECK(!r0.is(flags_reg)); |
+ __ li(r0, Operand(Code::kFlagsNotUsedInLookup)); |
+ __ andc(flags_reg, flags_reg, r0); |
+ __ mov(r0, Operand(flags)); |
+ __ cmpl(flags_reg, r0); |
+ __ bne(&miss); |
#ifdef DEBUG |
- if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { |
- __ jmp(&miss); |
- } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { |
- __ jmp(&miss); |
- } |
+ if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { |
+ __ b(&miss); |
+ } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { |
+ __ b(&miss); |
+ } |
#endif |
// Jump to the first instruction in the code stub. |
- __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag)); |
+ __ addi(r0, code, Operand(Code::kHeaderSize - kHeapObjectTag)); |
+ __ mtctr(r0); |
+ __ bctr(); |
// Miss: fall through. |
__ bind(&miss); |
@@ -114,56 +117,53 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup( |
// Bail out if the receiver has a named interceptor or requires access checks. |
Register map = scratch1; |
- __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
- __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset)); |
- __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask)); |
- __ b(ne, miss_label); |
+ __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
+ __ lbz(scratch0, FieldMemOperand(map, Map::kBitFieldOffset)); |
+ __ andi(r0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask)); |
+ __ bne(miss_label, cr0); |
// Check that receiver is a JSObject. |
- __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
- __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); |
- __ b(lt, miss_label); |
+ __ lbz(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
+ __ cmpi(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); |
+ __ blt(miss_label); |
// Load properties array. |
Register properties = scratch0; |
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
+ __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
// Check that the properties array is a dictionary. |
- __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset)); |
+ __ LoadP(map, FieldMemOperand(properties, HeapObject::kMapOffset)); |
Register tmp = properties; |
__ LoadRoot(tmp, Heap::kHashTableMapRootIndex); |
__ cmp(map, tmp); |
- __ b(ne, miss_label); |
+ __ bne(miss_label); |
// Restore the temporarily used register. |
- __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
+ __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
- NameDictionaryLookupStub::GenerateNegativeLookup(masm, |
- miss_label, |
- &done, |
- receiver, |
- properties, |
- name, |
- scratch1); |
+ NameDictionaryLookupStub::GenerateNegativeLookup( |
+ masm, miss_label, &done, receiver, properties, name, scratch1); |
__ bind(&done); |
__ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1); |
} |
-void StubCache::GenerateProbe(MacroAssembler* masm, |
- Code::Flags flags, |
- Register receiver, |
- Register name, |
- Register scratch, |
- Register extra, |
- Register extra2, |
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags, |
+ Register receiver, Register name, |
+ Register scratch, Register extra, Register extra2, |
Register extra3) { |
Isolate* isolate = masm->isolate(); |
Label miss; |
+#if V8_TARGET_ARCH_PPC64 |
+ // Make sure that code is valid. The multiplying code relies on the |
+ // entry size being 24. |
+ DCHECK(sizeof(Entry) == 24); |
+#else |
// Make sure that code is valid. The multiplying code relies on the |
// entry size being 12. |
DCHECK(sizeof(Entry) == 12); |
+#endif |
// Make sure the flags does not name a specific type. |
DCHECK(Code::ExtractTypeFromFlags(flags) == 0); |
@@ -186,61 +186,50 @@ void StubCache::GenerateProbe(MacroAssembler* masm, |
DCHECK(!extra3.is(no_reg)); |
Counters* counters = masm->isolate()->counters(); |
- __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, |
- extra2, extra3); |
+ __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2, |
+ extra3); |
// Check that the receiver isn't a smi. |
__ JumpIfSmi(receiver, &miss); |
// Get the map of the receiver and compute the hash. |
- __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); |
- __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
- __ add(scratch, scratch, Operand(ip)); |
+ __ lwz(scratch, FieldMemOperand(name, Name::kHashFieldOffset)); |
+ __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
+ __ add(scratch, scratch, ip); |
+#if V8_TARGET_ARCH_PPC64 |
+ // Use only the low 32 bits of the map pointer. |
+ __ rldicl(scratch, scratch, 0, 32); |
+#endif |
uint32_t mask = kPrimaryTableSize - 1; |
// We shift out the last two bits because they are not part of the hash and |
// they are always 01 for maps. |
- __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift)); |
+ __ ShiftRightImm(scratch, scratch, Operand(kCacheIndexShift)); |
// Mask down the eor argument to the minimum to keep the immediate |
- // ARM-encodable. |
- __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask)); |
+ // encodable. |
+ __ xori(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask)); |
// Prefer and_ to ubfx here because ubfx takes 2 cycles. |
- __ and_(scratch, scratch, Operand(mask)); |
+ __ andi(scratch, scratch, Operand(mask)); |
// Probe the primary table. |
- ProbeTable(isolate, |
- masm, |
- flags, |
- kPrimary, |
- receiver, |
- name, |
- scratch, |
- extra, |
- extra2, |
- extra3); |
+ ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch, extra, |
+ extra2, extra3); |
// Primary miss: Compute hash for secondary probe. |
- __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift)); |
+ __ ShiftRightImm(extra, name, Operand(kCacheIndexShift)); |
+ __ sub(scratch, scratch, extra); |
uint32_t mask2 = kSecondaryTableSize - 1; |
- __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2)); |
- __ and_(scratch, scratch, Operand(mask2)); |
+ __ addi(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2)); |
+ __ andi(scratch, scratch, Operand(mask2)); |
// Probe the secondary table. |
- ProbeTable(isolate, |
- masm, |
- flags, |
- kSecondary, |
- receiver, |
- name, |
- scratch, |
- extra, |
- extra2, |
- extra3); |
+ ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch, extra, |
+ extra2, extra3); |
// Cache miss: Fall-through and let caller handle the miss by |
// entering the runtime system. |
__ bind(&miss); |
- __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, |
- extra2, extra3); |
+ __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2, |
+ extra3); |
} |
@@ -254,17 +243,18 @@ void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype( |
// Check we're still in the same context. |
Register scratch = prototype; |
const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX); |
- __ ldr(scratch, MemOperand(cp, offset)); |
- __ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); |
- __ ldr(scratch, MemOperand(scratch, Context::SlotOffset(index))); |
+ __ LoadP(scratch, MemOperand(cp, offset)); |
+ __ LoadP(scratch, |
+ FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); |
+ __ LoadP(scratch, MemOperand(scratch, Context::SlotOffset(index))); |
__ Move(ip, function); |
__ cmp(ip, scratch); |
- __ b(ne, miss); |
+ __ bne(miss); |
// Load its initial map. The global functions all have initial maps. |
__ Move(prototype, Handle<Map>(function->initial_map())); |
// Load the prototype from the initial map. |
- __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); |
+ __ LoadP(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset)); |
} |
@@ -272,7 +262,7 @@ void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype( |
MacroAssembler* masm, Register receiver, Register scratch1, |
Register scratch2, Label* miss_label) { |
__ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label); |
- __ mov(r0, scratch1); |
+ __ mr(r3, scratch1); |
__ Ret(); |
} |
@@ -286,10 +276,10 @@ void PropertyHandlerCompiler::GenerateCheckPropertyCell( |
Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name); |
DCHECK(cell->value()->IsTheHole()); |
__ mov(scratch, Operand(cell)); |
- __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset)); |
+ __ LoadP(scratch, FieldMemOperand(scratch, Cell::kValueOffset)); |
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
__ cmp(scratch, ip); |
- __ b(ne, miss); |
+ __ bne(miss); |
} |
@@ -338,10 +328,10 @@ void PropertyHandlerCompiler::GenerateFastApiCall( |
DCHECK(optimization.is_simple_api_call()); |
// Abi for CallApiFunctionStub. |
- Register callee = r0; |
- Register call_data = r4; |
- Register holder = r2; |
- Register api_function_address = r1; |
+ Register callee = r3; |
+ Register call_data = r7; |
+ Register holder = r5; |
+ Register api_function_address = r4; |
// Put holder in place. |
CallOptimization::HolderLookup holder_lookup; |
@@ -371,7 +361,8 @@ void PropertyHandlerCompiler::GenerateFastApiCall( |
// Put call_data in place. |
if (isolate->heap()->InNewSpace(*call_data_obj)) { |
__ Move(call_data, api_call_info); |
- __ ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset)); |
+ __ LoadP(call_data, |
+ FieldMemOperand(call_data, CallHandlerInfo::kDataOffset)); |
} else if (call_data_obj->IsUndefined()) { |
call_data_undefined = true; |
__ LoadRoot(call_data, Heap::kUndefinedValueRootIndex); |
@@ -411,7 +402,7 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label, |
} |
-// Generate StoreTransition code, value is passed in r0 register. |
+// Generate StoreTransition code, value is passed in r3 register. |
// When leaving generated code after success, the receiver_reg and name_reg |
// may be clobbered. Upon branch to miss_label, the receiver and name |
// registers have their original values. |
@@ -419,7 +410,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( |
Handle<Map> transition, Handle<Name> name, Register receiver_reg, |
Register storage_reg, Register value_reg, Register scratch1, |
Register scratch2, Register scratch3, Label* miss_label, Label* slow) { |
- // r0 : value |
+ // r3 : value |
Label exit; |
int descriptor = transition->LastAdded(); |
@@ -432,7 +423,7 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( |
Handle<Object> constant(descriptors->GetValue(descriptor), isolate()); |
__ Move(scratch1, constant); |
__ cmp(value_reg, scratch1); |
- __ b(ne, miss_label); |
+ __ bne(miss_label); |
} else if (representation.IsSmi()) { |
__ JumpIfNotSmi(value_reg, miss_label); |
} else if (representation.IsHeapObject()) { |
@@ -440,16 +431,16 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( |
HeapType* field_type = descriptors->GetFieldType(descriptor); |
HeapType::Iterator<Map> it = field_type->Classes(); |
if (!it.Done()) { |
- __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset)); |
+ __ LoadP(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset)); |
Label do_store; |
while (true) { |
__ CompareMap(scratch1, it.Current(), &do_store); |
it.Advance(); |
if (it.Done()) { |
- __ b(ne, miss_label); |
+ __ bne(miss_label); |
break; |
} |
- __ b(eq, &do_store); |
+ __ beq(&do_store); |
} |
__ bind(&do_store); |
} |
@@ -461,17 +452,16 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( |
__ JumpIfNotSmi(value_reg, &heap_number); |
__ SmiUntag(scratch1, value_reg); |
- __ vmov(s0, scratch1); |
- __ vcvt_f64_s32(d0, s0); |
+ __ ConvertIntToDouble(scratch1, d0); |
__ jmp(&do_store); |
__ bind(&heap_number); |
- __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, |
- miss_label, DONT_DO_SMI_CHECK); |
- __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
+ __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label, |
+ DONT_DO_SMI_CHECK); |
+ __ lfd(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); |
__ bind(&do_store); |
- __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset)); |
+ __ stfd(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset)); |
} |
// Stub never generated for objects that require access checks. |
@@ -483,8 +473,8 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( |
// The properties must be extended before we can store the value. |
// We jump to a runtime call that extends the properties array. |
__ push(receiver_reg); |
- __ mov(r2, Operand(transition)); |
- __ Push(r2, r0); |
+ __ mov(r5, Operand(transition)); |
+ __ Push(r5, r3); |
__ TailCallExternalReference( |
ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage), |
isolate()), |
@@ -494,20 +484,16 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( |
// Update the map of the object. |
__ mov(scratch1, Operand(transition)); |
- __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset)); |
+ __ StoreP(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset), |
+ r0); |
// Update the write barrier for the map field. |
- __ RecordWriteField(receiver_reg, |
- HeapObject::kMapOffset, |
- scratch1, |
- scratch2, |
- kLRHasNotBeenSaved, |
- kDontSaveFPRegs, |
- OMIT_REMEMBERED_SET, |
+ __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2, |
+ kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET, |
OMIT_SMI_CHECK); |
if (details.type() == CONSTANT) { |
- DCHECK(value_reg.is(r0)); |
+ DCHECK(value_reg.is(r3)); |
__ Ret(); |
return; |
} |
@@ -521,61 +507,51 @@ void NamedStoreHandlerCompiler::GenerateStoreTransition( |
index -= transition->inobject_properties(); |
// TODO(verwaest): Share this code as a code stub. |
- SmiCheck smi_check = representation.IsTagged() |
- ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; |
+ SmiCheck smi_check = |
+ representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK; |
if (index < 0) { |
// Set the property straight into the object. |
int offset = transition->instance_size() + (index * kPointerSize); |
if (representation.IsDouble()) { |
- __ str(storage_reg, FieldMemOperand(receiver_reg, offset)); |
+ __ StoreP(storage_reg, FieldMemOperand(receiver_reg, offset), r0); |
} else { |
- __ str(value_reg, FieldMemOperand(receiver_reg, offset)); |
+ __ StoreP(value_reg, FieldMemOperand(receiver_reg, offset), r0); |
} |
if (!representation.IsSmi()) { |
// Update the write barrier for the array address. |
if (!representation.IsDouble()) { |
- __ mov(storage_reg, value_reg); |
+ __ mr(storage_reg, value_reg); |
} |
- __ RecordWriteField(receiver_reg, |
- offset, |
- storage_reg, |
- scratch1, |
- kLRHasNotBeenSaved, |
- kDontSaveFPRegs, |
- EMIT_REMEMBERED_SET, |
- smi_check); |
+ __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1, |
+ kLRHasNotBeenSaved, kDontSaveFPRegs, |
+ EMIT_REMEMBERED_SET, smi_check); |
} |
} else { |
// Write to the properties array. |
int offset = index * kPointerSize + FixedArray::kHeaderSize; |
// Get the properties array |
- __ ldr(scratch1, |
- FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); |
+ __ LoadP(scratch1, |
+ FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset)); |
if (representation.IsDouble()) { |
- __ str(storage_reg, FieldMemOperand(scratch1, offset)); |
+ __ StoreP(storage_reg, FieldMemOperand(scratch1, offset), r0); |
} else { |
- __ str(value_reg, FieldMemOperand(scratch1, offset)); |
+ __ StoreP(value_reg, FieldMemOperand(scratch1, offset), r0); |
} |
if (!representation.IsSmi()) { |
// Update the write barrier for the array address. |
if (!representation.IsDouble()) { |
- __ mov(storage_reg, value_reg); |
+ __ mr(storage_reg, value_reg); |
} |
- __ RecordWriteField(scratch1, |
- offset, |
- storage_reg, |
- receiver_reg, |
- kLRHasNotBeenSaved, |
- kDontSaveFPRegs, |
- EMIT_REMEMBERED_SET, |
- smi_check); |
+ __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg, |
+ kLRHasNotBeenSaved, kDontSaveFPRegs, |
+ EMIT_REMEMBERED_SET, smi_check); |
} |
} |
- // Return the value (register r0). |
- DCHECK(value_reg.is(r0)); |
+ // Return the value (register r3). |
+ DCHECK(value_reg.is(r3)); |
__ bind(&exit); |
__ Ret(); |
} |
@@ -587,16 +563,16 @@ void NamedStoreHandlerCompiler::GenerateStoreField(LookupResult* lookup, |
DCHECK(lookup->representation().IsHeapObject()); |
__ JumpIfSmi(value_reg, miss_label); |
HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes(); |
- __ ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset)); |
+ __ LoadP(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset)); |
Label do_store; |
while (true) { |
__ CompareMap(scratch1(), it.Current(), &do_store); |
it.Advance(); |
if (it.Done()) { |
- __ b(ne, miss_label); |
+ __ bne(miss_label); |
break; |
} |
- __ b(eq, &do_store); |
+ __ beq(&do_store); |
} |
__ bind(&do_store); |
@@ -614,8 +590,8 @@ Register PropertyHandlerCompiler::CheckPrototypes( |
// Make sure there's no overlap between holder and object registers. |
DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg)); |
- DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) |
- && !scratch2.is(scratch1)); |
+ DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) && |
+ !scratch2.is(scratch1)); |
// Keep track of the current object in register reg. |
Register reg = object_reg; |
@@ -648,21 +624,21 @@ Register PropertyHandlerCompiler::CheckPrototypes( |
} |
DCHECK(current.is_null() || |
current->property_dictionary()->FindEntry(name) == |
- NameDictionary::kNotFound); |
+ NameDictionary::kNotFound); |
- GenerateDictionaryNegativeLookup(masm(), miss, reg, name, |
- scratch1, scratch2); |
+ GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1, |
+ scratch2); |
- __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); |
+ __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset)); |
reg = holder_reg; // From now on the object will be in holder_reg. |
- __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); |
+ __ LoadP(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset)); |
} else { |
Register map_reg = scratch1; |
if (depth != 1 || check == CHECK_ALL_MAPS) { |
// CheckMap implicitly loads the map of |reg| into |map_reg|. |
__ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK); |
} else { |
- __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); |
+ __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset)); |
} |
// Check access rights to the global object. This has to happen after |
@@ -674,9 +650,8 @@ Register PropertyHandlerCompiler::CheckPrototypes( |
if (current_map->IsJSGlobalProxyMap()) { |
__ CheckAccessGlobalProxy(reg, scratch2, miss); |
} else if (current_map->IsJSGlobalObjectMap()) { |
- GenerateCheckPropertyCell( |
- masm(), Handle<JSGlobalObject>::cast(current), name, |
- scratch2, miss); |
+ GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current), |
+ name, scratch2, miss); |
} |
reg = holder_reg; // From now on the object will be in holder_reg. |
@@ -688,7 +663,7 @@ Register PropertyHandlerCompiler::CheckPrototypes( |
bool load_prototype_from_map = |
heap()->InNewSpace(*prototype) || depth == 1; |
if (load_prototype_from_map) { |
- __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); |
+ __ LoadP(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset)); |
} else { |
__ mov(reg, Operand(prototype)); |
} |
@@ -743,7 +718,7 @@ void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) { |
void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) { |
// Return the constant value. |
- __ Move(r0, value); |
+ __ Move(r3, value); |
__ Ret(); |
} |
@@ -765,23 +740,21 @@ void NamedLoadHandlerCompiler::GenerateLoadCallback( |
__ push(receiver()); |
if (heap()->InNewSpace(callback->data())) { |
__ Move(scratch3(), callback); |
- __ ldr(scratch3(), FieldMemOperand(scratch3(), |
- ExecutableAccessorInfo::kDataOffset)); |
+ __ LoadP(scratch3(), |
+ FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset)); |
} else { |
__ Move(scratch3(), Handle<Object>(callback->data(), isolate())); |
} |
__ push(scratch3()); |
__ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex); |
- __ mov(scratch4(), scratch3()); |
+ __ mr(scratch4(), scratch3()); |
__ Push(scratch3(), scratch4()); |
- __ mov(scratch4(), |
- Operand(ExternalReference::isolate_address(isolate()))); |
+ __ mov(scratch4(), Operand(ExternalReference::isolate_address(isolate()))); |
__ Push(scratch4(), reg); |
- __ mov(scratch2(), sp); // scratch2 = PropertyAccessorInfo::args_ |
__ push(name()); |
// Abi for CallApiGetter |
- Register getter_address_reg = r2; |
+ Register getter_address_reg = r5; |
Address getter_address = v8::ToCData<Address>(callback->getter()); |
ApiFunction fun(getter_address); |
@@ -835,8 +808,8 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup( |
// the case, return immediately. |
Label interceptor_failed; |
__ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex); |
- __ cmp(r0, scratch1()); |
- __ b(eq, &interceptor_failed); |
+ __ cmp(r3, scratch1()); |
+ __ beq(&interceptor_failed); |
frame_scope.GenerateLeaveFrame(); |
__ Ret(); |
@@ -872,9 +845,8 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback( |
Handle<ExecutableAccessorInfo> callback) { |
Register holder_reg = Frontend(receiver(), name); |
- __ push(receiver()); // receiver |
- __ push(holder_reg); |
- __ mov(ip, Operand(callback)); // callback info |
+ __ Push(receiver(), holder_reg); // receiver |
+ __ mov(ip, Operand(callback)); // callback info |
__ push(ip); |
__ mov(ip, Operand(name)); |
__ Push(ip, value()); |
@@ -909,14 +881,14 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter( |
// Call the JavaScript setter with receiver and value on the stack. |
if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { |
// Swap in the global receiver. |
- __ ldr(receiver, |
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); |
+ __ LoadP(receiver, |
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); |
} |
__ Push(receiver, value()); |
ParameterCount actual(1); |
ParameterCount expected(setter); |
- __ InvokeFunction(setter, expected, actual, |
- CALL_FUNCTION, NullCallWrapper()); |
+ __ InvokeFunction(setter, expected, actual, CALL_FUNCTION, |
+ NullCallWrapper()); |
} else { |
// If we generate a global code snippet for deoptimization only, remember |
// the place to continue after deoptimization. |
@@ -924,10 +896,10 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter( |
} |
// We have to return the passed value, not the return value of the setter. |
- __ pop(r0); |
+ __ pop(r3); |
// Restore context register. |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
} |
__ Ret(); |
} |
@@ -955,7 +927,7 @@ Register* PropertyAccessCompiler::load_calling_convention() { |
// receiver, name, scratch1, scratch2, scratch3, scratch4. |
Register receiver = LoadIC::ReceiverRegister(); |
Register name = LoadIC::NameRegister(); |
- static Register registers[] = { receiver, name, r3, r0, r4, r5 }; |
+ static Register registers[] = {receiver, name, r6, r3, r7, r8}; |
return registers; |
} |
@@ -964,8 +936,8 @@ Register* PropertyAccessCompiler::store_calling_convention() { |
// receiver, name, scratch1, scratch2, scratch3. |
Register receiver = StoreIC::ReceiverRegister(); |
Register name = StoreIC::NameRegister(); |
- DCHECK(r3.is(KeyedStoreIC::MapRegister())); |
- static Register registers[] = { receiver, name, r3, r4, r5 }; |
+ DCHECK(r6.is(KeyedStoreIC::MapRegister())); |
+ static Register registers[] = {receiver, name, r6, r7, r8}; |
return registers; |
} |
@@ -981,8 +953,8 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( |
MacroAssembler* masm, Handle<HeapType> type, Register receiver, |
Handle<JSFunction> getter) { |
// ----------- S t a t e ------------- |
- // -- r0 : receiver |
- // -- r2 : name |
+ // -- r3 : receiver |
+ // -- r5 : name |
// -- lr : return address |
// ----------------------------------- |
{ |
@@ -992,14 +964,14 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( |
// Call the JavaScript getter with the receiver on the stack. |
if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) { |
// Swap in the global receiver. |
- __ ldr(receiver, |
- FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); |
+ __ LoadP(receiver, |
+ FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset)); |
} |
__ push(receiver); |
ParameterCount actual(0); |
ParameterCount expected(getter); |
- __ InvokeFunction(getter, expected, actual, |
- CALL_FUNCTION, NullCallWrapper()); |
+ __ InvokeFunction(getter, expected, actual, CALL_FUNCTION, |
+ NullCallWrapper()); |
} else { |
// If we generate a global code snippet for deoptimization only, remember |
// the place to continue after deoptimization. |
@@ -1007,7 +979,7 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter( |
} |
// Restore context register. |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
} |
__ Ret(); |
} |
@@ -1025,17 +997,17 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal( |
// Get the value from the cell. |
Register result = StoreIC::ValueRegister(); |
__ mov(result, Operand(cell)); |
- __ ldr(result, FieldMemOperand(result, Cell::kValueOffset)); |
+ __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset)); |
// Check for deleted property if property can actually be deleted. |
- if (is_configurable) { |
+  if (is_configurable) { |
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
__ cmp(result, ip); |
- __ b(eq, &miss); |
+ __ beq(&miss); |
} |
Counters* counters = isolate()->counters(); |
- __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3); |
+ __ IncrementCounter(counters->named_load_global_stub(), 1, r4, r6); |
__ Ret(); |
FrontendFooter(name, &miss); |
@@ -1059,8 +1031,8 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, |
if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) { |
__ JumpIfNotUniqueName(this->name(), &miss); |
} else { |
- __ cmp(this->name(), Operand(name)); |
- __ b(ne, &miss); |
+ __ Cmpi(this->name(), Operand(name), r0); |
+ __ bne(&miss); |
} |
} |
@@ -1075,7 +1047,7 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types, |
int receiver_count = types->length(); |
int number_of_handled_maps = 0; |
- __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); |
+ __ LoadP(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset)); |
for (int current = 0; current < receiver_count; ++current) { |
Handle<HeapType> type = types->at(current); |
Handle<Map> map = IC::TypeToMap(*type, isolate()); |
@@ -1109,15 +1081,18 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic( |
__ JumpIfSmi(receiver(), &miss); |
int receiver_count = receiver_maps->length(); |
- __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); |
+ __ LoadP(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset)); |
for (int i = 0; i < receiver_count; ++i) { |
__ mov(ip, Operand(receiver_maps->at(i))); |
__ cmp(scratch1(), ip); |
if (transitioned_maps->at(i).is_null()) { |
- __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq); |
+ Label skip; |
+ __ bne(&skip); |
+ __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); |
+ __ bind(&skip); |
} else { |
Label next_map; |
- __ b(ne, &next_map); |
+ __ bne(&next_map); |
__ mov(transition_map(), Operand(transitioned_maps->at(i))); |
__ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al); |
__ bind(&next_map); |
@@ -1143,18 +1118,17 @@ void ElementHandlerCompiler::GenerateLoadDictionaryElement( |
Register key = LoadIC::NameRegister(); |
Register receiver = LoadIC::ReceiverRegister(); |
- DCHECK(receiver.is(r1)); |
- DCHECK(key.is(r2)); |
+ DCHECK(receiver.is(r4)); |
+ DCHECK(key.is(r5)); |
- __ UntagAndJumpIfNotSmi(r6, key, &miss); |
- __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
- __ LoadFromNumberDictionary(&slow, r4, key, r0, r6, r3, r5); |
+ __ UntagAndJumpIfNotSmi(r9, key, &miss); |
+ __ LoadP(r7, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
+ __ LoadFromNumberDictionary(&slow, r7, key, r3, r9, r6, r8); |
__ Ret(); |
__ bind(&slow); |
__ IncrementCounter( |
- masm->isolate()->counters()->keyed_load_external_array_slow(), |
- 1, r2, r3); |
+ masm->isolate()->counters()->keyed_load_external_array_slow(), 1, r5, r6); |
TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow); |
@@ -1166,7 +1140,7 @@ void ElementHandlerCompiler::GenerateLoadDictionaryElement( |
#undef __ |
+} |
+} // namespace v8::internal |
-} } // namespace v8::internal |
- |
-#endif // V8_TARGET_ARCH_ARM |
+#endif // V8_TARGET_ARCH_PPC |