Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(24)

Unified Diff: src/ppc/full-codegen-ppc.cc

Issue 571173003: PowerPC specific sub-directories (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Address comments Created 6 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « src/ppc/frames-ppc.cc ('k') | src/ppc/interface-descriptors-ppc.cc » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: src/ppc/full-codegen-ppc.cc
diff --git a/src/arm/full-codegen-arm.cc b/src/ppc/full-codegen-ppc.cc
similarity index 66%
copy from src/arm/full-codegen-arm.cc
copy to src/ppc/full-codegen-ppc.cc
index 1710b609dc68740a60cb1984cdbb9223415e99f5..b6004b924fb0e405b1b4a527336468c0fae3a904 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/ppc/full-codegen-ppc.cc
@@ -1,10 +1,10 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/v8.h"
-#if V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_PPC
#include "src/code-factory.h"
#include "src/code-stubs.h"
@@ -17,21 +17,21 @@
#include "src/parser.h"
#include "src/scopes.h"
-#include "src/arm/code-stubs-arm.h"
-#include "src/arm/macro-assembler-arm.h"
+#include "src/ppc/code-stubs-ppc.h"
+#include "src/ppc/macro-assembler-ppc.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
-
// A patch site is a location in the code which it is possible to patch. This
// class has a number of methods to emit the code which is patchable and the
// method EmitPatchInfo to record a marker back to the patchable code. This
-// marker is a cmp rx, #yyy instruction, and x * 0x00000fff + yyy (raw 12 bit
+// marker is a cmpi rx, #yyy instruction, and x * 0x0000ffff + yyy (raw 16 bit
// immediate value is used) is the delta from the pc to the first instruction of
// the patchable code.
+// See PatchInlinedSmiCode in ic-ppc.cc for the code that patches it.
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -40,38 +40,35 @@ class JumpPatchSite BASE_EMBEDDED {
#endif
}
- ~JumpPatchSite() {
- DCHECK(patch_site_.is_bound() == info_emitted_);
- }
+ ~JumpPatchSite() { DCHECK(patch_site_.is_bound() == info_emitted_); }
// When initially emitting this ensure that a jump is always generated to skip
// the inlined smi code.
void EmitJumpIfNotSmi(Register reg, Label* target) {
DCHECK(!patch_site_.is_bound() && !info_emitted_);
- Assembler::BlockConstPoolScope block_const_pool(masm_);
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
__ bind(&patch_site_);
- __ cmp(reg, Operand(reg));
- __ b(eq, target); // Always taken before patched.
+ __ cmp(reg, reg, cr0);
+ __ beq(target, cr0); // Always taken before patched.
}
// When initially emitting this ensure that a jump is never generated to skip
// the inlined smi code.
void EmitJumpIfSmi(Register reg, Label* target) {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
DCHECK(!patch_site_.is_bound() && !info_emitted_);
- Assembler::BlockConstPoolScope block_const_pool(masm_);
__ bind(&patch_site_);
- __ cmp(reg, Operand(reg));
- __ b(ne, target); // Never taken before patched.
+ __ cmp(reg, reg, cr0);
+ __ bne(target, cr0); // Never taken before patched.
}
void EmitPatchInfo() {
- // Block literal pool emission whilst recording patch site information.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
if (patch_site_.is_bound()) {
int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
Register reg;
- reg.set_code(delta_to_patch_site / kOff12Mask);
- __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask);
+ // I believe this is using reg as the high bits of the offset
+ reg.set_code(delta_to_patch_site / kOff16Mask);
+ __ cmpi(reg, Operand(delta_to_patch_site % kOff16Mask));
#ifdef DEBUG
info_emitted_ = true;
#endif
@@ -95,15 +92,15 @@ class JumpPatchSite BASE_EMBEDDED {
// function.
//
// The live registers are:
-// o r1: the JS function object being called (i.e., ourselves)
+// o r4: the JS function object being called (i.e., ourselves)
// o cp: our context
-// o pp: our caller's constant pool pointer (if FLAG_enable_ool_constant_pool)
-// o fp: our caller's frame pointer
+// o fp: our caller's frame pointer (aka r31)
// o sp: stack pointer
// o lr: return address
+// o ip: our own function entry (required by the prologue)
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-arm.h for its layout.
+// frames-ppc.h for its layout.
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
handler_table_ =
@@ -129,14 +126,14 @@ void FullCodeGenerator::Generate() {
if (info->strict_mode() == SLOPPY && !info->is_native()) {
Label ok;
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
- __ ldr(r2, MemOperand(sp, receiver_offset));
- __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
- __ b(ne, &ok);
+ __ LoadP(r5, MemOperand(sp, receiver_offset), r0);
+ __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ bne(&ok);
- __ ldr(r2, GlobalObjectOperand());
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
+ __ LoadP(r5, GlobalObjectOperand());
+ __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
- __ str(r2, MemOperand(sp, receiver_offset));
+ __ StoreP(r5, MemOperand(sp, receiver_offset), r0);
__ bind(&ok);
}
@@ -145,44 +142,52 @@ void FullCodeGenerator::Generate() {
// MANUAL indicates that the scope shouldn't actually generate code to set up
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
+ int prologue_offset = masm_->pc_offset();
- info->set_prologue_offset(masm_->pc_offset());
- __ Prologue(info->IsCodePreAgingActive());
+ if (prologue_offset) {
+ // Prologue logic requires its starting address in ip and the
+ // corresponding offset from the function entry.
+ prologue_offset += Instruction::kInstrSize;
+ __ addi(ip, ip, Operand(prologue_offset));
+ }
+ info->set_prologue_offset(prologue_offset);
+ __ Prologue(info->IsCodePreAgingActive(), prologue_offset);
info->AddNoFrameRange(0, masm_->pc_offset());
- { Comment cmnt(masm_, "[ Allocate locals");
+ {
+ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
DCHECK(!info->function()->is_generator() || locals_count == 0);
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
- __ sub(r9, sp, Operand(locals_count * kPointerSize));
- __ LoadRoot(r2, Heap::kRealStackLimitRootIndex);
- __ cmp(r9, Operand(r2));
- __ b(hs, &ok);
+ __ Add(ip, sp, -(locals_count * kPointerSize), r0);
+ __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+ __ cmpl(ip, r5);
+ __ bc_short(ge, &ok);
__ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
__ bind(&ok);
}
- __ LoadRoot(r9, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
if (locals_count >= kMaxPushes) {
int loop_iterations = locals_count / kMaxPushes;
- __ mov(r2, Operand(loop_iterations));
+ __ mov(r5, Operand(loop_iterations));
+ __ mtctr(r5);
Label loop_header;
__ bind(&loop_header);
// Do pushes.
for (int i = 0; i < kMaxPushes; i++) {
- __ push(r9);
+ __ push(ip);
}
// Continue loop if not done.
- __ sub(r2, r2, Operand(1), SetCC);
- __ b(&loop_header, ne);
+ __ bdnz(&loop_header);
}
int remaining = locals_count % kMaxPushes;
// Emit the remaining pushes.
- for (int i = 0; i < remaining; i++) {
- __ push(r9);
+ for (int i = 0; i < remaining; i++) {
+ __ push(ip);
}
}
}
@@ -192,11 +197,11 @@ void FullCodeGenerator::Generate() {
// Possibly allocate a local context.
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (heap_slots > 0) {
- // Argument to NewContext is the function, which is still in r1.
+ // Argument to NewContext is the function, which is still in r4.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
- __ push(r1);
+ __ push(r4);
__ Push(info->scope()->GetScopeInfo());
__ CallRuntime(Runtime::kNewGlobalContext, 2);
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
@@ -205,34 +210,34 @@ void FullCodeGenerator::Generate() {
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
} else {
- __ push(r1);
+ __ push(r4);
__ CallRuntime(Runtime::kNewFunctionContext, 1);
}
function_in_register = false;
- // Context is returned in r0. It replaces the context passed to us.
+ // Context is returned in r3. It replaces the context passed to us.
// It's saved in the stack and kept live in cp.
- __ mov(cp, r0);
- __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ mr(cp, r3);
+ __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
for (int i = 0; i < num_parameters; i++) {
Variable* var = scope()->parameter(i);
if (var->IsContextSlot()) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
- (num_parameters - 1 - i) * kPointerSize;
+ (num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
- __ ldr(r0, MemOperand(fp, parameter_offset));
+ __ LoadP(r3, MemOperand(fp, parameter_offset), r0);
// Store it in the context.
MemOperand target = ContextOperand(cp, var->index());
- __ str(r0, target);
+ __ StoreP(r3, target, r0);
// Update the write barrier.
if (need_write_barrier) {
- __ RecordWriteContextSlot(
- cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
+ kLRHasBeenSaved, kDontSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
- __ JumpIfInNewSpace(cp, r0, &done);
+ __ JumpIfInNewSpace(cp, r3, &done);
__ Abort(kExpectedNewSpaceObject);
__ bind(&done);
}
@@ -246,17 +251,16 @@ void FullCodeGenerator::Generate() {
Comment cmnt(masm_, "[ Allocate arguments object");
if (!function_in_register) {
// Load this again, if it's used by the local context below.
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
} else {
- __ mov(r3, r1);
+ __ mr(r6, r4);
}
// Receiver is just before the parameters on the caller's stack.
int num_parameters = info->scope()->num_parameters();
int offset = num_parameters * kPointerSize;
- __ add(r2, fp,
- Operand(StandardFrameConstants::kCallerSPOffset + offset));
- __ mov(r1, Operand(Smi::FromInt(num_parameters)));
- __ Push(r3, r2, r1);
+ __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
+ __ LoadSmiLiteral(r4, Smi::FromInt(num_parameters));
+ __ Push(r6, r5, r4);
// Arguments to ArgumentsAccessStub:
// function, receiver address, parameter count.
@@ -273,7 +277,7 @@ void FullCodeGenerator::Generate() {
ArgumentsAccessStub stub(isolate(), type);
__ CallStub(&stub);
- SetVar(arguments, r0, r1, r2);
+ SetVar(arguments, r3, r4, r5);
}
if (FLAG_trace) {
@@ -288,7 +292,8 @@ void FullCodeGenerator::Generate() {
} else {
PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
- { Comment cmnt(masm_, "[ Declarations");
+ {
+ Comment cmnt(masm_, "[ Declarations");
// For named function expressions, declare the function name as a
// constant.
if (scope()->is_function_scope() && scope()->function() != NULL) {
@@ -301,20 +306,19 @@ void FullCodeGenerator::Generate() {
VisitDeclarations(scope()->declarations());
}
- { Comment cmnt(masm_, "[ Stack check");
+ {
+ Comment cmnt(masm_, "[ Stack check");
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmp(sp, Operand(ip));
- __ b(hs, &ok);
- Handle<Code> stack_check = isolate()->builtins()->StackCheck();
- PredictableCodeSizeScope predictable(masm_,
- masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
- __ Call(stack_check, RelocInfo::CODE_TARGET);
+ __ cmpl(sp, ip);
+ __ bc_short(ge, &ok);
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
- { Comment cmnt(masm_, "[ Body");
+ {
+ Comment cmnt(masm_, "[ Body");
DCHECK(loop_depth() == 0);
VisitStatements(function()->body());
DCHECK(loop_depth() == 0);
@@ -323,83 +327,61 @@ void FullCodeGenerator::Generate() {
// Always emit a 'return undefined' in case control fell off the end of
// the body.
- { Comment cmnt(masm_, "[ return <undefined>;");
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ {
+ Comment cmnt(masm_, "[ return <undefined>;");
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
}
EmitReturnSequence();
-
- // Force emit the constant pool, so it doesn't get emitted in the middle
- // of the back edge table.
- masm()->CheckConstPool(true, false);
}
void FullCodeGenerator::ClearAccumulator() {
- __ mov(r0, Operand(Smi::FromInt(0)));
+ __ LoadSmiLiteral(r3, Smi::FromInt(0));
}
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ mov(r2, Operand(profiling_counter_));
- __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset));
- __ sub(r3, r3, Operand(Smi::FromInt(delta)), SetCC);
- __ str(r3, FieldMemOperand(r2, Cell::kValueOffset));
+ __ mov(r5, Operand(profiling_counter_));
+ __ LoadP(r6, FieldMemOperand(r5, Cell::kValueOffset));
+ __ SubSmiLiteral(r6, r6, Smi::FromInt(delta), r0);
+ __ StoreP(r6, FieldMemOperand(r5, Cell::kValueOffset), r0);
}
-#ifdef CAN_USE_ARMV7_INSTRUCTIONS
-static const int kProfileCounterResetSequenceLength = 5 * Assembler::kInstrSize;
-#else
-static const int kProfileCounterResetSequenceLength = 7 * Assembler::kInstrSize;
-#endif
-
-
void FullCodeGenerator::EmitProfilingCounterReset() {
- Assembler::BlockConstPoolScope block_const_pool(masm_);
- PredictableCodeSizeScope predictable_code_size_scope(
- masm_, kProfileCounterResetSequenceLength);
- Label start;
- __ bind(&start);
int reset_value = FLAG_interrupt_budget;
if (info_->is_debug()) {
// Detect debug break requests as soon as possible.
reset_value = FLAG_interrupt_budget >> 4;
}
- __ mov(r2, Operand(profiling_counter_));
- // The mov instruction above can be either 1 to 3 (for ARMv7) or 1 to 5
- // instructions (for ARMv6) depending upon whether it is an extended constant
- // pool - insert nop to compensate.
- int expected_instr_count =
- (kProfileCounterResetSequenceLength / Assembler::kInstrSize) - 2;
- DCHECK(masm_->InstructionsGeneratedSince(&start) <= expected_instr_count);
- while (masm_->InstructionsGeneratedSince(&start) != expected_instr_count) {
- __ nop();
- }
- __ mov(r3, Operand(Smi::FromInt(reset_value)));
- __ str(r3, FieldMemOperand(r2, Cell::kValueOffset));
+ __ mov(r5, Operand(profiling_counter_));
+ __ LoadSmiLiteral(r6, Smi::FromInt(reset_value));
+ __ StoreP(r6, FieldMemOperand(r5, Cell::kValueOffset), r0);
}
void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Back edge bookkeeping");
- // Block literal pools whilst emitting back edge code.
- Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;
DCHECK(back_edge_target->is_bound());
- int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
- int weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
+ int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target) +
+ kCodeSizeMultiplier / 2;
+ int weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
- __ b(pl, &ok);
- __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
-
- // Record a mapping of this PC offset to the OSR id. This is used to find
- // the AST id from the unoptimized code in order to use it as a key into
- // the deoptimization input data found in the optimized code.
- RecordBackEdge(stmt->OsrEntryId());
-
+ {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ // BackEdgeTable::PatchAt manipulates this sequence.
+ __ cmpi(r6, Operand::Zero());
+ __ bc_short(ge, &ok);
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+
+ // Record a mapping of this PC offset to the OSR id. This is used to find
+ // the AST id from the unoptimized code in order to use it as a key into
+ // the deoptimization input data found in the optimized code.
+ RecordBackEdge(stmt->OsrEntryId());
+ }
EmitProfilingCounterReset();
__ bind(&ok);
@@ -419,8 +401,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ bind(&return_label_);
if (FLAG_trace) {
// Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r0.
- __ push(r0);
+ // Runtime::TraceExit returns its parameter in r3
+ __ push(r3);
__ CallRuntime(Runtime::kTraceExit, 1);
}
// Pretend that the exit is a backwards jump to the entry.
@@ -428,17 +410,16 @@ void FullCodeGenerator::EmitReturnSequence() {
if (info_->ShouldSelfOptimize()) {
weight = FLAG_interrupt_budget / FLAG_self_opt_count;
} else {
- int distance = masm_->pc_offset();
- weight = Min(kMaxBackEdgeWeight,
- Max(1, distance / kCodeSizeMultiplier));
+ int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
+ weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
}
EmitProfilingCounterDecrement(weight);
Label ok;
- __ b(pl, &ok);
- __ push(r0);
- __ Call(isolate()->builtins()->InterruptCheck(),
- RelocInfo::CODE_TARGET);
- __ pop(r0);
+ __ cmpi(r6, Operand::Zero());
+ __ bge(&ok);
+ __ push(r3);
+ __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+ __ pop(r3);
EmitProfilingCounterReset();
__ bind(&ok);
@@ -449,18 +430,24 @@ void FullCodeGenerator::EmitReturnSequence() {
#endif
// Make sure that the constant pool is not emitted inside of the return
// sequence.
- { Assembler::BlockConstPoolScope block_const_pool(masm_);
+ {
+ Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
- // TODO(svenpanne) The code below is sometimes 4 words, sometimes 5!
- PredictableCodeSizeScope predictable(masm_, -1);
__ RecordJSReturn();
- int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT);
- { ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
- __ add(sp, sp, Operand(sp_delta));
- __ Jump(lr);
- info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+ int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+#if V8_TARGET_ARCH_PPC64
+ // With 64-bit we may need nop() instructions to ensure we have
+ // enough space to SetDebugBreakAtReturn()
+ if (is_int16(sp_delta)) {
+#if !V8_OOL_CONSTANT_POOL
+ masm_->nop();
+#endif
+ masm_->nop();
}
+#endif
+ __ blr();
+ info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
}
#ifdef DEBUG
@@ -500,8 +487,7 @@ void FullCodeGenerator::TestContext::Plug(Variable* var) const {
}
-void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
-}
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {}
void FullCodeGenerator::AccumulatorValueContext::Plug(
@@ -518,9 +504,7 @@ void FullCodeGenerator::StackValueContext::Plug(
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
false_label_);
if (index == Heap::kUndefinedValueRootIndex ||
index == Heap::kNullValueRootIndex ||
@@ -535,8 +519,7 @@ void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
}
-void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
-}
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {}
void FullCodeGenerator::AccumulatorValueContext::Plug(
@@ -553,9 +536,7 @@ void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
false_label_);
DCHECK(!lit->IsUndetectableObject()); // There are no undetectable literals.
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
@@ -590,8 +571,7 @@ void FullCodeGenerator::EffectContext::DropAndPlug(int count,
void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
- int count,
- Register reg) const {
+ int count, Register reg) const {
DCHECK(count > 0);
__ Drop(count);
__ Move(result_register(), reg);
@@ -602,7 +582,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
DCHECK(count > 0);
if (count > 1) __ Drop(count - 1);
- __ str(reg, MemOperand(sp, 0));
+ __ StoreP(reg, MemOperand(sp, 0));
}
@@ -625,12 +605,11 @@ void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
void FullCodeGenerator::AccumulatorValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
+ Label* materialize_true, Label* materialize_false) const {
Label done;
__ bind(materialize_true);
__ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- __ jmp(&done);
+ __ b(&done);
__ bind(materialize_false);
__ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
__ bind(&done);
@@ -638,12 +617,11 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(
void FullCodeGenerator::StackValueContext::Plug(
- Label* materialize_true,
- Label* materialize_false) const {
+ Label* materialize_true, Label* materialize_false) const {
Label done;
__ bind(materialize_true);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ jmp(&done);
+ __ b(&done);
__ bind(materialize_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ bind(&done);
@@ -658,8 +636,7 @@ void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
}
-void FullCodeGenerator::EffectContext::Plug(bool flag) const {
-}
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {}
void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
@@ -678,9 +655,7 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
void FullCodeGenerator::TestContext::Plug(bool flag) const {
- codegen()->PrepareForBailoutBeforeSplit(condition(),
- true,
- true_label_,
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
false_label_);
if (flag) {
if (true_label_ != fall_through_) __ b(true_label_);
@@ -690,27 +665,23 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const {
}
-void FullCodeGenerator::DoTest(Expression* condition,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
+void FullCodeGenerator::DoTest(Expression* condition, Label* if_true,
+ Label* if_false, Label* fall_through) {
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
- __ tst(result_register(), result_register());
+ __ cmpi(result_register(), Operand::Zero());
Split(ne, if_true, if_false, fall_through);
}
-void FullCodeGenerator::Split(Condition cond,
- Label* if_true,
- Label* if_false,
- Label* fall_through) {
+void FullCodeGenerator::Split(Condition cond, Label* if_true, Label* if_false,
+ Label* fall_through, CRegister cr) {
if (if_false == fall_through) {
- __ b(cond, if_true);
+ __ b(cond, if_true, cr);
} else if (if_true == fall_through) {
- __ b(NegateCondition(cond), if_false);
+ __ b(NegateCondition(cond), if_false, cr);
} else {
- __ b(cond, if_true);
+ __ b(cond, if_true, cr);
__ b(if_false);
}
}
@@ -745,29 +716,23 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
void FullCodeGenerator::GetVar(Register dest, Variable* var) {
// Use destination as scratch.
MemOperand location = VarOperand(var, dest);
- __ ldr(dest, location);
+ __ LoadP(dest, location, r0);
}
-void FullCodeGenerator::SetVar(Variable* var,
- Register src,
- Register scratch0,
+void FullCodeGenerator::SetVar(Variable* var, Register src, Register scratch0,
Register scratch1) {
DCHECK(var->IsContextSlot() || var->IsStackAllocated());
DCHECK(!scratch0.is(src));
DCHECK(!scratch0.is(scratch1));
DCHECK(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
- __ str(src, location);
+ __ StoreP(src, location, r0);
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
- __ RecordWriteContextSlot(scratch0,
- location.offset(),
- src,
- scratch1,
- kLRHasBeenSaved,
- kDontSaveFPRegs);
+ __ RecordWriteContextSlot(scratch0, location.offset(), src, scratch1,
+ kLRHasBeenSaved, kDontSaveFPRegs);
}
}
@@ -786,7 +751,7 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
PrepareForBailout(expr, TOS_REG);
if (should_normalize) {
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
+ __ cmp(r3, ip);
Split(eq, if_true, if_false, NULL);
__ bind(&skip);
}
@@ -799,10 +764,10 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
if (generate_debug_code_) {
// Check that we're not inside a with or catch context.
- __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
- __ CompareRoot(r1, Heap::kWithContextMapRootIndex);
+ __ LoadP(r4, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ CompareRoot(r4, Heap::kWithContextMapRootIndex);
__ Check(ne, kDeclarationInWithContext);
- __ CompareRoot(r1, Heap::kCatchContextMapRootIndex);
+ __ CompareRoot(r4, Heap::kCatchContextMapRootIndex);
__ Check(ne, kDeclarationInCatchContext);
}
}
@@ -831,7 +796,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
if (hole_init) {
Comment cmnt(masm_, "[ VariableDeclaration");
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, StackOperand(variable));
+ __ StoreP(ip, StackOperand(variable));
}
break;
@@ -840,7 +805,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ str(ip, ContextOperand(cp, variable->index()));
+ __ StoreP(ip, ContextOperand(cp, variable->index()), r0);
// No write barrier since the_hole_value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
@@ -848,22 +813,22 @@ void FullCodeGenerator::VisitVariableDeclaration(
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
- __ mov(r2, Operand(variable->name()));
+ __ mov(r5, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
DCHECK(IsDeclaredVariableMode(mode));
PropertyAttributes attr =
IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
- __ mov(r1, Operand(Smi::FromInt(attr)));
+ __ LoadSmiLiteral(r4, Smi::FromInt(attr));
// Push initial value, if any.
// Note: For variables we must not push an initial value (such as
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (hole_init) {
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
- __ Push(cp, r2, r1, r0);
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ Push(cp, r5, r4, r3);
} else {
- __ mov(r0, Operand(Smi::FromInt(0))); // Indicates no initial value.
- __ Push(cp, r2, r1, r0);
+ __ LoadSmiLiteral(r3, Smi::FromInt(0)); // Indicates no initial value.
+ __ Push(cp, r5, r4, r3);
}
__ CallRuntime(Runtime::kDeclareLookupSlot, 4);
break;
@@ -891,7 +856,7 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case Variable::LOCAL: {
Comment cmnt(masm_, "[ FunctionDeclaration");
VisitForAccumulatorValue(declaration->fun());
- __ str(result_register(), StackOperand(variable));
+ __ StoreP(result_register(), StackOperand(variable));
break;
}
@@ -899,26 +864,21 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
- __ str(result_register(), ContextOperand(cp, variable->index()));
+ __ StoreP(result_register(), ContextOperand(cp, variable->index()), r0);
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(cp,
- offset,
- result_register(),
- r2,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteContextSlot(cp, offset, result_register(), r5,
+ kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
break;
}
case Variable::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
- __ mov(r2, Operand(variable->name()));
- __ mov(r1, Operand(Smi::FromInt(NONE)));
- __ Push(cp, r2, r1);
+ __ mov(r5, Operand(variable->name()));
+ __ LoadSmiLiteral(r4, Smi::FromInt(NONE));
+ __ Push(cp, r5, r4);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
__ CallRuntime(Runtime::kDeclareLookupSlot, 4);
@@ -937,21 +897,16 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
EmitDebugCheckDeclarationContext(variable);
// Load instance object.
- __ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope()));
- __ ldr(r1, ContextOperand(r1, variable->interface()->Index()));
- __ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX));
+ __ LoadContext(r4, scope_->ContextChainLength(scope_->GlobalScope()));
+ __ LoadP(r4, ContextOperand(r4, variable->interface()->Index()));
+ __ LoadP(r4, ContextOperand(r4, Context::EXTENSION_INDEX));
// Assign it.
- __ str(r1, ContextOperand(cp, variable->index()));
+ __ StoreP(r4, ContextOperand(cp, variable->index()), r0);
// We know that we have written a module, which is not a smi.
- __ RecordWriteContextSlot(cp,
- Context::SlotOffset(variable->index()),
- r1,
- r3,
- kLRHasBeenSaved,
- kDontSaveFPRegs,
- EMIT_REMEMBERED_SET,
- OMIT_SMI_CHECK);
+ __ RecordWriteContextSlot(cp, Context::SlotOffset(variable->index()), r4, r6,
+ kLRHasBeenSaved, kDontSaveFPRegs,
+ EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
// Traverse into body.
@@ -990,9 +945,9 @@ void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
// The context is the first argument.
- __ mov(r1, Operand(pairs));
- __ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
- __ Push(cp, r1, r0);
+ __ mov(r4, Operand(pairs));
+ __ LoadSmiLiteral(r3, Smi::FromInt(DeclareGlobalsFlags()));
+ __ Push(cp, r4, r3);
__ CallRuntime(Runtime::kDeclareGlobals, 3);
// Return value is ignored.
}
@@ -1038,16 +993,16 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
VisitForAccumulatorValue(clause->label());
// Perform the comparison as if via '==='.
- __ ldr(r1, MemOperand(sp, 0)); // Switch value.
+ __ LoadP(r4, MemOperand(sp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
- __ orr(r2, r1, r0);
- patch_site.EmitJumpIfNotSmi(r2, &slow_case);
+ __ orx(r5, r4, r3);
+ patch_site.EmitJumpIfNotSmi(r5, &slow_case);
- __ cmp(r1, r0);
- __ b(ne, &next_test);
+ __ cmp(r4, r3);
+ __ bne(&next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target());
__ bind(&slow_case);
@@ -1064,14 +1019,14 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
__ b(&skip);
PrepareForBailout(clause, TOS_REG);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
- __ b(ne, &next_test);
+ __ cmp(r3, ip);
+ __ bne(&next_test);
__ Drop(1);
- __ jmp(clause->body_target());
+ __ b(clause->body_target());
__ bind(&skip);
- __ cmp(r0, Operand::Zero());
- __ b(ne, &next_test);
+ __ cmpi(r3, Operand::Zero());
+ __ bne(&next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target());
}
@@ -1113,32 +1068,32 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// over the loop. See ECMA-262 version 5, section 12.6.4.
VisitForAccumulatorValue(stmt->enumerable());
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, &exit);
- Register null_value = r5;
+ __ cmp(r3, ip);
+ __ beq(&exit);
+ Register null_value = r7;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
- __ cmp(r0, null_value);
- __ b(eq, &exit);
+ __ cmp(r3, null_value);
+ __ beq(&exit);
PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
// Convert the object to a JS object.
Label convert, done_convert;
- __ JumpIfSmi(r0, &convert);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
- __ b(ge, &done_convert);
+ __ JumpIfSmi(r3, &convert);
+ __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+ __ bge(&done_convert);
__ bind(&convert);
- __ push(r0);
+ __ push(r3);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
- __ push(r0);
+ __ push(r3);
// Check for proxies.
Label call_runtime;
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- __ b(le, &call_runtime);
+ __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
+ __ ble(&call_runtime);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -1149,12 +1104,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
Label use_cache;
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
__ b(&use_cache);
// Get the set of properties to enumerate.
__ bind(&call_runtime);
- __ push(r0); // Duplicate the enumerable object on the stack.
+ __ push(r3); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
@@ -1162,100 +1117,105 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
Label fixed_array;
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r2, ip);
- __ b(ne, &fixed_array);
+ __ cmp(r5, ip);
+ __ bne(&fixed_array);
- // We got a map in register r0. Get the enumeration cache from it.
+ // We got a map in register r3. Get the enumeration cache from it.
Label no_descriptors;
__ bind(&use_cache);
- __ EnumLength(r1, r0);
- __ cmp(r1, Operand(Smi::FromInt(0)));
- __ b(eq, &no_descriptors);
+ __ EnumLength(r4, r3);
+ __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
+ __ beq(&no_descriptors);
- __ LoadInstanceDescriptors(r0, r2);
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheOffset));
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ LoadInstanceDescriptors(r3, r5);
+ __ LoadP(r5, FieldMemOperand(r5, DescriptorArray::kEnumCacheOffset));
+ __ LoadP(r5,
+ FieldMemOperand(r5, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
- __ push(r0); // Map.
- __ mov(r0, Operand(Smi::FromInt(0)));
+ __ push(r3); // Map.
+ __ LoadSmiLiteral(r3, Smi::FromInt(0));
// Push enumeration cache, enumeration cache length (as smi) and zero.
- __ Push(r2, r1, r0);
- __ jmp(&loop);
+ __ Push(r5, r4, r3);
+ __ b(&loop);
__ bind(&no_descriptors);
__ Drop(1);
- __ jmp(&exit);
+ __ b(&exit);
- // We got a fixed array in register r0. Iterate through that.
+ // We got a fixed array in register r3. Iterate through that.
Label non_proxy;
__ bind(&fixed_array);
- __ Move(r1, FeedbackVector());
- __ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ __ Move(r4, FeedbackVector());
+ __ mov(r5, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
int vector_index = FeedbackVector()->GetIndex(slot);
- __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(vector_index)));
+ __ StoreP(
+ r5, FieldMemOperand(r4, FixedArray::OffsetOfElementAt(vector_index)), r0);
- __ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check
- __ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
+ __ LoadSmiLiteral(r4, Smi::FromInt(1)); // Smi indicates slow check
+ __ LoadP(r5, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
- __ CompareObjectType(r2, r3, r3, LAST_JS_PROXY_TYPE);
- __ b(gt, &non_proxy);
- __ mov(r1, Operand(Smi::FromInt(0))); // Zero indicates proxy
+ __ CompareObjectType(r5, r6, r6, LAST_JS_PROXY_TYPE);
+ __ bgt(&non_proxy);
+ __ LoadSmiLiteral(r4, Smi::FromInt(0)); // Zero indicates proxy
__ bind(&non_proxy);
- __ Push(r1, r0); // Smi and array
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset));
- __ mov(r0, Operand(Smi::FromInt(0)));
- __ Push(r1, r0); // Fixed array length (as smi) and initial index.
+ __ Push(r4, r3); // Smi and array
+ __ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
+ __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ __ Push(r4, r3); // Fixed array length (as smi) and initial index.
// Generate code for doing the condition check.
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
__ bind(&loop);
- // Load the current count to r0, load the length to r1.
- __ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
- __ cmp(r0, r1); // Compare to the array length.
- __ b(hs, loop_statement.break_label());
-
- // Get the current entry of the array into register r3.
- __ ldr(r2, MemOperand(sp, 2 * kPointerSize));
- __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ ldr(r3, MemOperand::PointerAddressFromSmiKey(r2, r0));
+ // Load the current count to r3, load the length to r4.
+ __ LoadP(r3, MemOperand(sp, 0 * kPointerSize));
+ __ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
+ __ cmpl(r3, r4); // Compare to the array length.
+ __ bge(loop_statement.break_label());
+
+ // Get the current entry of the array into register r6.
+ __ LoadP(r5, MemOperand(sp, 2 * kPointerSize));
+ __ addi(r5, r5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiToPtrArrayOffset(r6, r3);
+ __ LoadPX(r6, MemOperand(r6, r5));
// Get the expected map from the stack or a smi in the
- // permanent slow case into register r2.
- __ ldr(r2, MemOperand(sp, 3 * kPointerSize));
+ // permanent slow case into register r5.
+ __ LoadP(r5, MemOperand(sp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
// If not, we may have to filter the key.
Label update_each;
- __ ldr(r1, MemOperand(sp, 4 * kPointerSize));
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset));
- __ cmp(r4, Operand(r2));
- __ b(eq, &update_each);
+ __ LoadP(r4, MemOperand(sp, 4 * kPointerSize));
+ __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+ __ cmp(r7, r5);
+ __ beq(&update_each);
// For proxies, no filtering is done.
// TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
- __ cmp(r2, Operand(Smi::FromInt(0)));
- __ b(eq, &update_each);
+ __ CmpSmiLiteral(r5, Smi::FromInt(0), r0);
+ __ beq(&update_each);
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
- __ push(r1); // Enumerable.
- __ push(r3); // Current entry.
+ __ Push(r4, r6); // Enumerable and current entry.
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
- __ mov(r3, Operand(r0), SetCC);
- __ b(eq, loop_statement.continue_label());
+ __ mr(r6, r3);
+ __ cmpi(r6, Operand::Zero());
+ __ beq(loop_statement.continue_label());
// Update the 'each' property or variable from the possibly filtered
- // entry in register r3.
+ // entry in register r6.
__ bind(&update_each);
- __ mov(result_register(), r3);
+ __ mr(result_register(), r6);
// Perform the assignment as if via '='.
- { EffectContext context(this);
+ {
+ EffectContext context(this);
EmitAssignment(stmt->each());
}
@@ -1265,9 +1225,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for the going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
- __ pop(r0);
- __ add(r0, r0, Operand(Smi::FromInt(1)));
- __ push(r0);
+ __ pop(r3);
+ __ AddSmiLiteral(r3, r3, Smi::FromInt(1), r0);
+ __ push(r3);
EmitBackEdgeBookkeeping(stmt, &loop);
__ b(&loop);
@@ -1301,10 +1261,8 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
// if (result.done) break;
Label result_not_done;
- VisitForControl(stmt->result_done(),
- loop_statement.break_label(),
- &result_not_done,
- &result_not_done);
+ VisitForControl(stmt->result_done(), loop_statement.break_label(),
+ &result_not_done, &result_not_done);
__ bind(&result_not_done);
// each = result.value
@@ -1316,7 +1274,7 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
// Check stack before looping.
PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
- __ jmp(loop_statement.continue_label());
+ __ b(loop_statement.continue_label());
// Exit and decrement the loop depth.
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
@@ -1333,22 +1291,19 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
// flag, we need to use the runtime function so that the new function
// we are creating here gets a chance to have its code optimized and
// doesn't just get a copy of the existing unoptimized code.
- if (!FLAG_always_opt &&
- !FLAG_prepare_always_opt &&
- !pretenure &&
- scope()->is_function_scope() &&
- info->num_literals() == 0) {
+ if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
+ scope()->is_function_scope() && info->num_literals() == 0) {
FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
- __ mov(r2, Operand(info));
+ __ mov(r5, Operand(info));
__ CallStub(&stub);
} else {
- __ mov(r0, Operand(info));
- __ LoadRoot(r1, pretenure ? Heap::kTrueValueRootIndex
- : Heap::kFalseValueRootIndex);
- __ Push(cp, r0, r1);
+ __ mov(r3, Operand(info));
+ __ LoadRoot(
+ r4, pretenure ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+ __ Push(cp, r3, r4);
__ CallRuntime(Runtime::kNewClosure, 3);
}
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -1361,8 +1316,8 @@ void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
Comment cnmt(masm_, "[ SuperReference ");
- __ ldr(LoadDescriptor::ReceiverRegister(),
- MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(LoadDescriptor::ReceiverRegister(),
+ MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
__ Move(LoadDescriptor::NameRegister(), home_object_symbol);
@@ -1375,9 +1330,9 @@ void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
}
- __ cmp(r0, Operand(isolate()->factory()->undefined_value()));
+ __ Cmpi(r3, Operand(isolate()->factory()->undefined_value()), r0);
Label done;
- __ b(ne, &done);
+ __ bne(&done);
__ CallRuntime(Runtime::kThrowNonMethodError, 0);
__ bind(&done);
}
@@ -1387,20 +1342,20 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofState typeof_state,
Label* slow) {
Register current = cp;
- Register next = r1;
- Register temp = r2;
+ Register next = r4;
+ Register temp = r5;
Scope* s = scope();
while (s != NULL) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
+ __ LoadP(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+ __ cmpi(temp, Operand::Zero());
+ __ bne(slow);
}
// Load next context in chain.
- __ ldr(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+ __ LoadP(next, ContextOperand(current, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
current = next;
}
@@ -1417,30 +1372,29 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
}
__ bind(&loop);
// Terminate at native context.
- __ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+ __ LoadP(temp, FieldMemOperand(next, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
__ cmp(temp, ip);
- __ b(eq, &fast);
+ __ beq(&fast);
// Check that extension is NULL.
- __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
+ __ LoadP(temp, ContextOperand(next, Context::EXTENSION_INDEX));
+ __ cmpi(temp, Operand::Zero());
+ __ bne(slow);
// Load next context in chain.
- __ ldr(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+ __ LoadP(next, ContextOperand(next, Context::PREVIOUS_INDEX));
__ b(&loop);
__ bind(&fast);
}
- __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
__ mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
}
- ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
- ? NOT_CONTEXTUAL
- : CONTEXTUAL;
+ ContextualMode mode =
+ (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL : CONTEXTUAL;
CallLoadIC(mode);
}
@@ -1449,26 +1403,26 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Label* slow) {
DCHECK(var->IsContextSlot());
Register context = cp;
- Register next = r3;
- Register temp = r4;
+ Register next = r6;
+ Register temp = r7;
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
if (s->calls_sloppy_eval()) {
// Check that extension is NULL.
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
+ __ LoadP(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+ __ cmpi(temp, Operand::Zero());
+ __ bne(slow);
}
- __ ldr(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+ __ LoadP(next, ContextOperand(context, Context::PREVIOUS_INDEX));
// Walk the rest of the chain without clobbering cp.
context = next;
}
}
// Check that last extension is NULL.
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX));
- __ tst(temp, temp);
- __ b(ne, slow);
+ __ LoadP(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+ __ cmpi(temp, Operand::Zero());
+ __ bne(slow);
// This function is used only for loads, not stores, so it's safe to
// return an cp-based operand (the write barrier cannot be allowed to
@@ -1479,8 +1433,7 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
TypeofState typeof_state,
- Label* slow,
- Label* done) {
+ Label* slow, Label* done) {
// Generate fast-case code for variables that might be shadowed by
// eval-introduced variables. Eval is used a lot without
// introducing variables. In those cases, we do not want to
@@ -1489,23 +1442,23 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
Variable* var = proxy->var();
if (var->mode() == DYNAMIC_GLOBAL) {
EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
- __ jmp(done);
+ __ b(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
- __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow));
+ __ LoadP(r3, ContextSlotOperandCheckExtensions(local, slow));
if (local->mode() == LET || local->mode() == CONST ||
local->mode() == CONST_LEGACY) {
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ bne(done);
if (local->mode() == CONST_LEGACY) {
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
} else { // LET || CONST
- __ b(ne, done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
+ __ mov(r3, Operand(var->name()));
+ __ push(r3);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
}
}
- __ jmp(done);
+ __ b(done);
}
}
@@ -1520,14 +1473,14 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
switch (var->location()) {
case Variable::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
- __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
__ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
}
CallLoadIC(CONTEXTUAL);
- context()->Plug(r0);
+ context()->Plug(r3);
break;
}
@@ -1568,28 +1521,28 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
DCHECK(proxy->position() != RelocInfo::kNoPosition);
skip_init_check = var->mode() != CONST_LEGACY &&
- var->initializer_position() < proxy->position();
+ var->initializer_position() < proxy->position();
}
if (!skip_init_check) {
+ Label done;
// Let and const need a read barrier.
- GetVar(r0, var);
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex);
+ GetVar(r3, var);
+ __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ bne(&done);
if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
- Label done;
- __ b(ne, &done);
- __ mov(r0, Operand(var->name()));
- __ push(r0);
+ __ mov(r3, Operand(var->name()));
+ __ push(r3);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
- __ bind(&done);
} else {
// Uninitalized const bindings outside of harmony mode are unholed.
DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
}
- context()->Plug(r0);
+ __ bind(&done);
+ context()->Plug(r3);
break;
}
}
@@ -1604,11 +1557,11 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
// by eval-introduced variables.
EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- __ mov(r1, Operand(var->name()));
- __ Push(cp, r1); // Context and name.
+ __ mov(r4, Operand(var->name()));
+ __ Push(cp, r4); // Context and name.
__ CallRuntime(Runtime::kLoadLookupSlot, 2);
__ bind(&done);
- context()->Plug(r0);
+ context()->Plug(r3);
}
}
}
@@ -1618,56 +1571,56 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
Label materialized;
// Registers will be used as follows:
- // r5 = materialized value (RegExp literal)
- // r4 = JS function, literals array
- // r3 = literal index
- // r2 = RegExp pattern
- // r1 = RegExp flags
- // r0 = RegExp literal clone
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset));
+ // r8 = materialized value (RegExp literal)
+ // r7 = JS function, literals array
+ // r6 = literal index
+ // r5 = RegExp pattern
+ // r4 = RegExp flags
+ // r3 = RegExp literal clone
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
int literal_offset =
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
- __ ldr(r5, FieldMemOperand(r4, literal_offset));
+ __ LoadP(r8, FieldMemOperand(r7, literal_offset), r0);
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
- __ cmp(r5, ip);
- __ b(ne, &materialized);
+ __ cmp(r8, ip);
+ __ bne(&materialized);
// Create regexp literal using runtime function.
- // Result will be in r0.
- __ mov(r3, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r2, Operand(expr->pattern()));
- __ mov(r1, Operand(expr->flags()));
- __ Push(r4, r3, r2, r1);
+ // Result will be in r3.
+ __ LoadSmiLiteral(r6, Smi::FromInt(expr->literal_index()));
+ __ mov(r5, Operand(expr->pattern()));
+ __ mov(r4, Operand(expr->flags()));
+ __ Push(r7, r6, r5, r4);
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
- __ mov(r5, r0);
+ __ mr(r8, r3);
__ bind(&materialized);
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
Label allocated, runtime_allocate;
- __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
- __ jmp(&allocated);
+ __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
+ __ b(&allocated);
__ bind(&runtime_allocate);
- __ mov(r0, Operand(Smi::FromInt(size)));
- __ Push(r5, r0);
+ __ LoadSmiLiteral(r3, Smi::FromInt(size));
+ __ Push(r8, r3);
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ pop(r5);
+ __ pop(r8);
__ bind(&allocated);
// After this, registers are used as follows:
- // r0: Newly allocated regexp.
- // r5: Materialized regexp.
- // r2: temp.
- __ CopyFields(r0, r5, d0, size / kPointerSize);
- context()->Plug(r0);
+ // r3: Newly allocated regexp.
+ // r8: Materialized regexp.
+ // r5: temp.
+ __ CopyFields(r3, r8, r5.bit(), size / kPointerSize);
+ context()->Plug(r3);
}
void FullCodeGenerator::EmitAccessor(Expression* expression) {
if (expression == NULL) {
- __ LoadRoot(r1, Heap::kNullValueRootIndex);
- __ push(r1);
+ __ LoadRoot(r4, Heap::kNullValueRootIndex);
+ __ push(r4);
} else {
VisitForStackValue(expression);
}
@@ -1679,22 +1632,20 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
expr->BuildConstantProperties(isolate());
Handle<FixedArray> constant_properties = expr->constant_properties();
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(constant_properties));
- int flags = expr->fast_elements()
- ? ObjectLiteral::kFastElements
- : ObjectLiteral::kNoFlags;
- flags |= expr->has_function()
- ? ObjectLiteral::kHasFunction
- : ObjectLiteral::kNoFlags;
- __ mov(r0, Operand(Smi::FromInt(flags)));
+ __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(r6, FieldMemOperand(r6, JSFunction::kLiteralsOffset));
+ __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
+ __ mov(r4, Operand(constant_properties));
+ int flags = expr->fast_elements() ? ObjectLiteral::kFastElements
+ : ObjectLiteral::kNoFlags;
+ flags |= expr->has_function() ? ObjectLiteral::kHasFunction
+ : ObjectLiteral::kNoFlags;
+ __ LoadSmiLiteral(r3, Smi::FromInt(flags));
int properties_count = constant_properties->length() / 2;
if (expr->may_store_doubles() || expr->depth() > 1 ||
masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
- __ Push(r3, r2, r1, r0);
+ __ Push(r6, r5, r4, r3);
__ CallRuntime(Runtime::kCreateObjectLiteral, 4);
} else {
FastCloneShallowObjectStub stub(isolate(), properties_count);
@@ -1703,7 +1654,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
// If result_saved is true the result is on top of the stack. If
- // result_saved is false the result is in r0.
+ // result_saved is false the result is in r3.
bool result_saved = false;
// Mark all computed expressions that are bound to a key that
@@ -1719,7 +1670,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Literal* key = property->key();
Expression* value = property->value();
if (!result_saved) {
- __ push(r0); // Save result on stack
+ __ push(r3); // Save result on stack
result_saved = true;
}
switch (property->kind()) {
@@ -1727,16 +1678,16 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
UNREACHABLE();
case ObjectLiteral::Property::MATERIALIZED_LITERAL:
DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
- // Fall through.
+ // Fall through.
case ObjectLiteral::Property::COMPUTED:
// It is safe to use [[Put]] here because the boilerplate already
// contains computed properties with an uninitialized value.
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- DCHECK(StoreDescriptor::ValueRegister().is(r0));
+ DCHECK(StoreDescriptor::ValueRegister().is(r3));
__ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
- __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+ __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
CallStoreIC(key->LiteralFeedbackId());
PrepareForBailoutForId(key->id(), NO_REGISTERS);
} else {
@@ -1745,13 +1696,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
}
// Duplicate receiver on stack.
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
+ __ LoadP(r3, MemOperand(sp));
+ __ push(r3);
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
- __ mov(r0, Operand(Smi::FromInt(SLOPPY))); // PropertyAttributes
- __ push(r0);
+ __ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY)); // PropertyAttributes
+ __ push(r3);
__ CallRuntime(Runtime::kSetProperty, 4);
} else {
__ Drop(3);
@@ -1759,8 +1710,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::PROTOTYPE:
// Duplicate receiver on stack.
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
+ __ LoadP(r3, MemOperand(sp));
+ __ push(r3);
VisitForStackValue(value);
if (property->emit_store()) {
__ CallRuntime(Runtime::kInternalSetPrototype, 2);
@@ -1768,7 +1719,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
__ Drop(2);
}
break;
-
case ObjectLiteral::Property::GETTER:
accessor_table.lookup(key)->second->getter = value;
break;
@@ -1781,29 +1731,28 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// Emit code to define accessors, using only a single call to the runtime for
// each pair of corresponding getters and setters.
for (AccessorTable::Iterator it = accessor_table.begin();
- it != accessor_table.end();
- ++it) {
- __ ldr(r0, MemOperand(sp)); // Duplicate receiver.
- __ push(r0);
+ it != accessor_table.end(); ++it) {
+ __ LoadP(r3, MemOperand(sp)); // Duplicate receiver.
+ __ push(r3);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
- __ mov(r0, Operand(Smi::FromInt(NONE)));
- __ push(r0);
+ __ LoadSmiLiteral(r3, Smi::FromInt(NONE));
+ __ push(r3);
__ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
}
if (expr->has_function()) {
DCHECK(result_saved);
- __ ldr(r0, MemOperand(sp));
- __ push(r0);
+ __ LoadP(r3, MemOperand(sp));
+ __ push(r3);
__ CallRuntime(Runtime::kToFastProperties, 1);
}
if (result_saved) {
context()->PlugTOS();
} else {
- context()->Plug(r0);
+ context()->Plug(r3);
}
}
@@ -1812,9 +1761,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
expr->BuildConstantElements(isolate());
- int flags = expr->depth() == 1
- ? ArrayLiteral::kShallowElements
- : ArrayLiteral::kNoFlags;
+ int flags = expr->depth() == 1 ? ArrayLiteral::kShallowElements
+ : ArrayLiteral::kNoFlags;
ZoneList<Expression*>* subexprs = expr->values();
int length = subexprs->length();
@@ -1833,13 +1781,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
- __ mov(r1, Operand(constant_elements));
+ __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(r6, FieldMemOperand(r6, JSFunction::kLiteralsOffset));
+ __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
+ __ mov(r4, Operand(constant_elements));
if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
- __ mov(r0, Operand(Smi::FromInt(flags)));
- __ Push(r3, r2, r1, r0);
+ __ LoadSmiLiteral(r3, Smi::FromInt(flags));
+ __ Push(r6, r5, r4, r3);
__ CallRuntime(Runtime::kCreateArrayLiteral, 4);
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
@@ -1857,7 +1805,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- __ push(r0);
+ __ push(r3);
__ Push(Smi::FromInt(expr->literal_index()));
result_saved = true;
}
@@ -1865,15 +1813,15 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (IsFastObjectElementsKind(constant_elements_kind)) {
int offset = FixedArray::kHeaderSize + (i * kPointerSize);
- __ ldr(r6, MemOperand(sp, kPointerSize)); // Copy of array literal.
- __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset));
- __ str(result_register(), FieldMemOperand(r1, offset));
+ __ LoadP(r8, MemOperand(sp, kPointerSize)); // Copy of array literal.
+ __ LoadP(r4, FieldMemOperand(r8, JSObject::kElementsOffset));
+ __ StoreP(result_register(), FieldMemOperand(r4, offset), r0);
// Update the write barrier for the array store.
- __ RecordWriteField(r1, offset, result_register(), r2,
- kLRHasBeenSaved, kDontSaveFPRegs,
- EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+ __ RecordWriteField(r4, offset, result_register(), r5, kLRHasBeenSaved,
+ kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ INLINE_SMI_CHECK);
} else {
- __ mov(r3, Operand(Smi::FromInt(i)));
+ __ LoadSmiLiteral(r6, Smi::FromInt(i));
StoreArrayLiteralElementStub stub(isolate());
__ CallStub(&stub);
}
@@ -1885,7 +1833,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ pop(); // literal index
context()->PlugTOS();
} else {
- context()->Plug(r0);
+ context()->Plug(r3);
}
}
@@ -1907,7 +1855,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
if (expr->is_compound()) {
// We need the receiver both on the stack and in the register.
VisitForStackValue(property->obj());
- __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
} else {
VisitForStackValue(property->obj());
}
@@ -1917,34 +1865,32 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitLoadHomeObject(property->obj()->AsSuperReference());
__ Push(result_register());
if (expr->is_compound()) {
- const Register scratch = r1;
- __ ldr(scratch, MemOperand(sp, kPointerSize));
- __ Push(scratch);
- __ Push(result_register());
+ const Register scratch = r4;
+ __ LoadP(scratch, MemOperand(sp, kPointerSize));
+ __ Push(scratch, result_register());
}
break;
- case KEYED_SUPER_PROPERTY:
+ case KEYED_SUPER_PROPERTY: {
+ const Register scratch = r4;
VisitForStackValue(property->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(property->obj()->AsSuperReference());
- __ Push(result_register());
+ __ Move(scratch, result_register());
VisitForAccumulatorValue(property->key());
- __ Push(result_register());
+ __ Push(scratch, result_register());
if (expr->is_compound()) {
- const Register scratch = r1;
- __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch);
- __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch);
- __ Push(result_register());
+ const Register scratch1 = r5;
+ __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
+ __ Push(scratch1, scratch, result_register());
}
break;
+ }
case KEYED_PROPERTY:
if (expr->is_compound()) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ ldr(LoadDescriptor::ReceiverRegister(),
- MemOperand(sp, 1 * kPointerSize));
- __ ldr(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+ __ LoadP(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ LoadP(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
} else {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
@@ -1955,7 +1901,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
// For compound assignments we need another deoptimization point after the
// variable/property load.
if (expr->is_compound()) {
- { AccumulatorValueContext context(this);
+ {
+ AccumulatorValueContext context(this);
switch (assign_type) {
case VARIABLE:
EmitVariableLoad(expr->target()->AsVariableProxy());
@@ -1981,19 +1928,16 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- __ push(r0); // Left operand goes on the stack.
+ __ push(r3); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
- ? OVERWRITE_RIGHT
- : NO_OVERWRITE;
+ ? OVERWRITE_RIGHT
+ : NO_OVERWRITE;
SetSourcePosition(expr->position() + 1);
AccumulatorValueContext context(this);
if (ShouldInlineSmiCase(op)) {
- EmitInlineSmiBinaryOp(expr->binary_operation(),
- op,
- mode,
- expr->target(),
+ EmitInlineSmiBinaryOp(expr->binary_operation(), op, mode, expr->target(),
expr->value());
} else {
EmitBinaryOp(expr->binary_operation(), op, mode);
@@ -2014,18 +1958,18 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
+ context()->Plug(r3);
break;
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyStore(property);
- context()->Plug(r0);
+ context()->Plug(r3);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyStore(property);
- context()->Plug(r0);
+ context()->Plug(r3);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
@@ -2045,30 +1989,31 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(false);
__ push(result_register());
- // Fall through.
+ // Fall through.
case Yield::kInitial: {
Label suspend, continuation, post_runtime, resume;
- __ jmp(&suspend);
+ __ b(&suspend);
__ bind(&continuation);
- __ jmp(&resume);
+ __ b(&resume);
__ bind(&suspend);
VisitForAccumulatorValue(expr->generator_object());
DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ mov(r1, Operand(Smi::FromInt(continuation.pos())));
- __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
- __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
- __ mov(r1, cp);
- __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
+ __ LoadSmiLiteral(r4, Smi::FromInt(continuation.pos()));
+ __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
+ r0);
+ __ StoreP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset), r0);
+ __ mr(r4, cp);
+ __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
kLRHasBeenSaved, kDontSaveFPRegs);
- __ add(r1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
- __ cmp(sp, r1);
- __ b(eq, &post_runtime);
- __ push(r0); // generator object
+ __ addi(r4, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+ __ cmp(sp, r4);
+ __ beq(&post_runtime);
+ __ push(r3); // generator object
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&post_runtime);
__ pop(result_register());
EmitReturnSequence();
@@ -2080,9 +2025,10 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
case Yield::kFinal: {
VisitForAccumulatorValue(expr->generator_object());
- __ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
- __ str(r1, FieldMemOperand(result_register(),
- JSGeneratorObject::kContinuationOffset));
+ __ LoadSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
+ __ StoreP(r4, FieldMemOperand(result_register(),
+ JSGeneratorObject::kContinuationOffset),
+ r0);
// Pop value from top-of-stack slot, box result into result register.
EmitCreateIteratorResult(true);
EmitUnwindBeforeReturn();
@@ -2098,79 +2044,79 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// [sp + 0 * kPointerSize] g
Label l_catch, l_try, l_suspend, l_continuation, l_resume;
- Label l_next, l_call, l_loop;
+ Label l_next, l_call;
Register load_receiver = LoadDescriptor::ReceiverRegister();
Register load_name = LoadDescriptor::NameRegister();
// Initial send value is undefined.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ b(&l_next);
// catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
__ bind(&l_catch);
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
__ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw"
- __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(load_name, r3, r0); // "throw", iter, except
- __ jmp(&l_call);
+ __ LoadP(r6, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(load_name, r6, r3); // "throw", iter, except
+ __ b(&l_call);
// try { received = %yield result }
// Shuffle the received result above a try handler and yield it without
// re-boxing.
__ bind(&l_try);
- __ pop(r0); // result
+ __ pop(r3); // result
__ PushTryHandler(StackHandler::CATCH, expr->index());
const int handler_size = StackHandlerConstants::kSize;
- __ push(r0); // result
- __ jmp(&l_suspend);
+ __ push(r3); // result
+ __ b(&l_suspend);
__ bind(&l_continuation);
- __ jmp(&l_resume);
+ __ b(&l_resume);
__ bind(&l_suspend);
const int generator_object_depth = kPointerSize + handler_size;
- __ ldr(r0, MemOperand(sp, generator_object_depth));
- __ push(r0); // g
+ __ LoadP(r3, MemOperand(sp, generator_object_depth));
+ __ push(r3); // g
DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
- __ mov(r1, Operand(Smi::FromInt(l_continuation.pos())));
- __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
- __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
- __ mov(r1, cp);
- __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
+ __ LoadSmiLiteral(r4, Smi::FromInt(l_continuation.pos()));
+ __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
+ r0);
+ __ StoreP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset), r0);
+ __ mr(r4, cp);
+ __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
kLRHasBeenSaved, kDontSaveFPRegs);
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ pop(r0); // result
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ pop(r3); // result
EmitReturnSequence();
- __ bind(&l_resume); // received in r0
+ __ bind(&l_resume); // received in r3
__ PopTryHandler();
// receiver = iter; f = 'next'; arg = received;
__ bind(&l_next);
__ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next"
- __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter
- __ Push(load_name, r3, r0); // "next", iter, received
+ __ LoadP(r6, MemOperand(sp, 1 * kPointerSize)); // iter
+ __ Push(load_name, r6, r3); // "next", iter, received
// result = receiver[f](arg);
__ bind(&l_call);
- __ ldr(load_receiver, MemOperand(sp, kPointerSize));
- __ ldr(load_name, MemOperand(sp, 2 * kPointerSize));
+ __ LoadP(load_receiver, MemOperand(sp, kPointerSize));
+ __ LoadP(load_name, MemOperand(sp, 2 * kPointerSize));
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
}
Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
CallIC(ic, TypeFeedbackId::None());
- __ mov(r1, r0);
- __ str(r1, MemOperand(sp, 2 * kPointerSize));
+ __ mr(r4, r3);
+ __ StoreP(r4, MemOperand(sp, 2 * kPointerSize));
CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
__ CallStub(&stub);
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ Drop(1); // The function is still on the stack; drop it.
// if (!result.done) goto l_try;
- __ bind(&l_loop);
- __ Move(load_receiver, r0);
+ __ Move(load_receiver, r3);
__ push(load_receiver); // save result
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done"
@@ -2178,11 +2124,11 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
}
-    CallLoadIC(NOT_CONTEXTUAL);                           // r0=result.done
+    CallLoadIC(NOT_CONTEXTUAL);  // r3=result.done
Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(bool_ic);
- __ cmp(r0, Operand(0));
- __ b(eq, &l_try);
+ __ cmpi(r3, Operand::Zero());
+ __ beq(&l_try);
// result.value
__ pop(load_receiver); // result
@@ -2191,111 +2137,121 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
}
- CallLoadIC(NOT_CONTEXTUAL); // r0=result.value
- context()->DropAndPlug(2, r0); // drop iter and g
+ CallLoadIC(NOT_CONTEXTUAL); // r3=result.value
+ context()->DropAndPlug(2, r3); // drop iter and g
break;
}
}
}
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
- Expression *value,
+void FullCodeGenerator::EmitGeneratorResume(
+ Expression* generator, Expression* value,
JSGeneratorObject::ResumeMode resume_mode) {
- // The value stays in r0, and is ultimately read by the resumed generator, as
+ // The value stays in r3, and is ultimately read by the resumed generator, as
// if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
// is read to throw the value when the resumed generator is already closed.
- // r1 will hold the generator object until the activation has been resumed.
+ // r4 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
- __ pop(r1);
+ __ pop(r4);
// Check generator state.
Label wrong_state, closed_state, done;
- __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
+ __ LoadP(r6, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
- __ cmp(r3, Operand(Smi::FromInt(0)));
- __ b(eq, &closed_state);
- __ b(lt, &wrong_state);
+ __ CmpSmiLiteral(r6, Smi::FromInt(0), r0);
+ __ beq(&closed_state);
+ __ blt(&wrong_state);
// Load suspended function and context.
- __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
- __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
+ __ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
+ __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
// Load receiver and store as the first argument.
- __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset));
- __ push(r2);
+ __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
+ __ push(r5);
// Push holes for the rest of the arguments to the generator function.
- __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r3,
- FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
- Label push_argument_holes, push_frame;
- __ bind(&push_argument_holes);
- __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC);
- __ b(mi, &push_frame);
- __ push(r2);
- __ jmp(&push_argument_holes);
+ __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadWordArith(
+ r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
+ Label argument_loop, push_frame;
+#if V8_TARGET_ARCH_PPC64
+ __ cmpi(r6, Operand::Zero());
+ __ beq(&push_frame);
+#else
+ __ SmiUntag(r6, SetRC);
+ __ beq(&push_frame, cr0);
+#endif
+ __ mtctr(r6);
+ __ bind(&argument_loop);
+ __ push(r5);
+ __ bdnz(&argument_loop);
// Enter a new JavaScript frame, and initialize its slots as they were when
// the generator was suspended.
Label resume_frame;
__ bind(&push_frame);
- __ bl(&resume_frame);
- __ jmp(&done);
+ __ b(&resume_frame, SetLK);
+ __ b(&done);
__ bind(&resume_frame);
// lr = return address.
// fp = caller's frame pointer.
- // pp = caller's constant pool (if FLAG_enable_ool_constant_pool),
// cp = callee's context,
- // r4 = callee's JS function.
- __ PushFixedFrame(r4);
+ // r7 = callee's JS function.
+ __ PushFixedFrame(r7);
// Adjust FP to point to saved FP.
- __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
// Load the operand stack size.
- __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset));
- __ ldr(r3, FieldMemOperand(r3, FixedArray::kLengthOffset));
- __ SmiUntag(r3);
+ __ LoadP(r6, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset));
+ __ LoadP(r6, FieldMemOperand(r6, FixedArray::kLengthOffset));
+ __ SmiUntag(r6, SetRC);
// If we are sending a value and there is no operand stack, we can jump back
// in directly.
+ Label call_resume;
if (resume_mode == JSGeneratorObject::NEXT) {
Label slow_resume;
- __ cmp(r3, Operand(0));
- __ b(ne, &slow_resume);
- __ ldr(r3, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
-
- { ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
- if (FLAG_enable_ool_constant_pool) {
- // Load the new code object's constant pool pointer.
- __ ldr(pp,
- MemOperand(r3, Code::kConstantPoolOffset - Code::kHeaderSize));
- }
-
- __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(r2);
- __ add(r3, r3, r2);
- __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
- __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
- __ Jump(r3);
+ __ bne(&slow_resume, cr0);
+ __ LoadP(ip, FieldMemOperand(r7, JSFunction::kCodeEntryOffset));
+#if V8_OOL_CONSTANT_POOL
+ {
+ ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+ // Load the new code object's constant pool pointer.
+ __ LoadP(kConstantPoolRegister,
+ MemOperand(ip, Code::kConstantPoolOffset - Code::kHeaderSize));
+#endif
+ __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(r5);
+ __ add(ip, ip, r5);
+ __ LoadSmiLiteral(r5,
+ Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+ __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset),
+ r0);
+ __ Jump(ip);
+ __ bind(&slow_resume);
+#if V8_OOL_CONSTANT_POOL
}
- __ bind(&slow_resume);
+#endif
+ } else {
+ __ beq(&call_resume, cr0);
}
// Otherwise, we push holes for the operand stack and call the runtime to fix
// up the stack and the handlers.
- Label push_operand_holes, call_resume;
- __ bind(&push_operand_holes);
- __ sub(r3, r3, Operand(1), SetCC);
- __ b(mi, &call_resume);
- __ push(r2);
- __ b(&push_operand_holes);
+ Label operand_loop;
+ __ mtctr(r6);
+ __ bind(&operand_loop);
+ __ push(r5);
+ __ bdnz(&operand_loop);
+
__ bind(&call_resume);
- DCHECK(!result_register().is(r1));
- __ Push(r1, result_register());
+ DCHECK(!result_register().is(r4));
+ __ Push(r4, result_register());
__ Push(Smi::FromInt(resume_mode));
__ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
// Not reached: the runtime call returns elsewhere.
@@ -2305,20 +2261,20 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
__ bind(&closed_state);
if (resume_mode == JSGeneratorObject::NEXT) {
// Return completed iterator result when generator is closed.
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ push(r2);
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ push(r5);
// Pop value from top-of-stack slot; box result into result register.
EmitCreateIteratorResult(true);
} else {
// Throw the provided value.
- __ push(r0);
+ __ push(r3);
__ CallRuntime(Runtime::kThrow, 1);
}
- __ jmp(&done);
+ __ b(&done);
// Throw error if we attempt to operate on a running generator.
__ bind(&wrong_state);
- __ push(r1);
+ __ push(r4);
__ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
__ bind(&done);
@@ -2334,34 +2290,36 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
instance_size);
- __ Allocate(instance_size, r0, r2, r3, &gc_required, TAG_OBJECT);
- __ jmp(&allocated);
+ __ Allocate(instance_size, r3, r5, r6, &gc_required, TAG_OBJECT);
+ __ b(&allocated);
__ bind(&gc_required);
__ Push(Smi::FromInt(instance_size));
__ CallRuntime(Runtime::kAllocateInNewSpace, 1);
- __ ldr(context_register(),
- MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ LoadP(context_register(),
+ MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&allocated);
- __ ldr(r1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
- __ ldr(r1, ContextOperand(r1, Context::ITERATOR_RESULT_MAP_INDEX));
- __ pop(r2);
- __ mov(r3, Operand(isolate()->factory()->ToBoolean(done)));
- __ mov(r4, Operand(isolate()->factory()->empty_fixed_array()));
- __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
- __ str(r2,
- FieldMemOperand(r0, JSGeneratorObject::kResultValuePropertyOffset));
- __ str(r3,
- FieldMemOperand(r0, JSGeneratorObject::kResultDonePropertyOffset));
+ __ LoadP(r4, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ LoadP(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
+ __ LoadP(r4, ContextOperand(r4, Context::ITERATOR_RESULT_MAP_INDEX));
+ __ pop(r5);
+ __ mov(r6, Operand(isolate()->factory()->ToBoolean(done)));
+ __ mov(r7, Operand(isolate()->factory()->empty_fixed_array()));
+ __ StoreP(r4, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+ __ StoreP(r5,
+ FieldMemOperand(r3, JSGeneratorObject::kResultValuePropertyOffset),
+ r0);
+ __ StoreP(r6,
+ FieldMemOperand(r3, JSGeneratorObject::kResultDonePropertyOffset),
+ r0);
// Only the value field needs a write barrier, as the other values are in the
// root set.
- __ RecordWriteField(r0, JSGeneratorObject::kResultValuePropertyOffset,
- r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWriteField(r3, JSGeneratorObject::kResultValuePropertyOffset, r5, r6,
+ kLRHasBeenSaved, kDontSaveFPRegs);
}
@@ -2421,16 +2379,16 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Expression* right_expr) {
Label done, smi_case, stub_call;
- Register scratch1 = r2;
- Register scratch2 = r3;
+ Register scratch1 = r5;
+ Register scratch2 = r6;
// Get the arguments.
- Register left = r1;
- Register right = r0;
+ Register left = r4;
+ Register right = r3;
__ pop(left);
// Perform combined smi check on both operands.
- __ orr(scratch1, left, Operand(right));
+ __ orx(scratch1, left, right);
STATIC_ASSERT(kSmiTag == 0);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
@@ -2439,85 +2397,117 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
- __ jmp(&done);
+ __ b(&done);
__ bind(&smi_case);
// Smi case. This code works the same way as the smi-smi case in the type
- // recording binary operation stub, see
+ // recording binary operation stub.
switch (op) {
case Token::SAR:
__ GetLeastBitsFromSmi(scratch1, right, 5);
- __ mov(right, Operand(left, ASR, scratch1));
- __ bic(right, right, Operand(kSmiTagMask));
+ __ ShiftRightArith(right, left, scratch1);
+ __ ClearRightImm(right, right, Operand(kSmiTagSize + kSmiShiftSize));
break;
case Token::SHL: {
- __ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSL, scratch2));
- __ TrySmiTag(right, scratch1, &stub_call);
+#if V8_TARGET_ARCH_PPC64
+ __ ShiftLeft_(right, left, scratch2);
+#else
+ __ SmiUntag(scratch1, left);
+ __ ShiftLeft_(scratch1, scratch1, scratch2);
+ // Check that the *signed* result fits in a smi
+ __ JumpIfNotSmiCandidate(scratch1, scratch2, &stub_call);
+ __ SmiTag(right, scratch1);
+#endif
break;
}
case Token::SHR: {
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
- __ mov(scratch1, Operand(scratch1, LSR, scratch2));
- __ tst(scratch1, Operand(0xc0000000));
- __ b(ne, &stub_call);
+ __ srw(scratch1, scratch1, scratch2);
+ // Unsigned shift is not allowed to produce a negative number.
+ __ JumpIfNotUnsignedSmiCandidate(scratch1, r0, &stub_call);
__ SmiTag(right, scratch1);
break;
}
- case Token::ADD:
- __ add(scratch1, left, Operand(right), SetCC);
- __ b(vs, &stub_call);
- __ mov(right, scratch1);
+ case Token::ADD: {
+ __ AddAndCheckForOverflow(scratch1, left, right, scratch2, r0);
+ __ bne(&stub_call, cr0);
+ __ mr(right, scratch1);
break;
- case Token::SUB:
- __ sub(scratch1, left, Operand(right), SetCC);
- __ b(vs, &stub_call);
- __ mov(right, scratch1);
+ }
+ case Token::SUB: {
+ __ SubAndCheckForOverflow(scratch1, left, right, scratch2, r0);
+ __ bne(&stub_call, cr0);
+ __ mr(right, scratch1);
break;
+ }
case Token::MUL: {
+ Label mul_zero;
+#if V8_TARGET_ARCH_PPC64
+ // Remove tag from both operands.
+ __ SmiUntag(ip, right);
+ __ SmiUntag(r0, left);
+ __ Mul(scratch1, r0, ip);
+ // Check for overflowing the smi range - no overflow if higher 33 bits of
+ // the result are identical.
+ __ TestIfInt32(scratch1, scratch2, ip);
+ __ bne(&stub_call);
+#else
__ SmiUntag(ip, right);
- __ smull(scratch1, scratch2, left, ip);
- __ mov(ip, Operand(scratch1, ASR, 31));
- __ cmp(ip, Operand(scratch2));
- __ b(ne, &stub_call);
- __ cmp(scratch1, Operand::Zero());
- __ mov(right, Operand(scratch1), LeaveCC, ne);
- __ b(ne, &done);
- __ add(scratch2, right, Operand(left), SetCC);
- __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl);
- __ b(mi, &stub_call);
+ __ mullw(scratch1, left, ip);
+ __ mulhw(scratch2, left, ip);
+ // Check for overflowing the smi range - no overflow if higher 33 bits of
+ // the result are identical.
+ __ TestIfInt32(scratch2, scratch1, ip);
+ __ bne(&stub_call);
+#endif
+ // Go slow on zero result to handle -0.
+ __ cmpi(scratch1, Operand::Zero());
+ __ beq(&mul_zero);
+#if V8_TARGET_ARCH_PPC64
+ __ SmiTag(right, scratch1);
+#else
+ __ mr(right, scratch1);
+#endif
+ __ b(&done);
+ // We need -0 if we were multiplying a negative number with 0 to get 0.
+ // We know one of them was zero.
+ __ bind(&mul_zero);
+ __ add(scratch2, right, left);
+ __ cmpi(scratch2, Operand::Zero());
+ __ blt(&stub_call);
+ __ LoadSmiLiteral(right, Smi::FromInt(0));
break;
}
case Token::BIT_OR:
- __ orr(right, left, Operand(right));
+ __ orx(right, left, right);
break;
case Token::BIT_AND:
- __ and_(right, left, Operand(right));
+ __ and_(right, left, right);
break;
case Token::BIT_XOR:
- __ eor(right, left, Operand(right));
+ __ xor_(right, left, right);
break;
default:
UNREACHABLE();
}
__ bind(&done);
- context()->Plug(r0);
+ context()->Plug(r3);
}
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
- // Constructor is in r0.
+ // Constructor is in r3.
DCHECK(lit != NULL);
- __ push(r0);
+ __ push(r3);
// No access check is needed here since the constructor is created by the
// class literal.
- Register scratch = r1;
- __ ldr(scratch,
- FieldMemOperand(r0, JSFunction::kPrototypeOrInitialMapOffset));
+ Register scratch = r4;
+ __ LoadP(scratch,
+ FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
__ push(scratch);
for (int i = 0; i < lit->properties()->length(); i++) {
@@ -2527,9 +2517,9 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
DCHECK(key != NULL);
if (property->is_static()) {
- __ ldr(scratch, MemOperand(sp, kPointerSize)); // constructor
+ __ LoadP(scratch, MemOperand(sp, kPointerSize)); // constructor
} else {
- __ ldr(scratch, MemOperand(sp, 0)); // prototype
+ __ LoadP(scratch, MemOperand(sp, 0)); // prototype
}
__ push(scratch);
VisitForStackValue(key);
@@ -2564,15 +2554,14 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
-void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
- Token::Value op,
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op,
OverwriteMode mode) {
- __ pop(r1);
+ __ pop(r4);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
+ JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -2590,9 +2579,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
break;
}
case NAMED_PROPERTY: {
- __ push(r0); // Preserve value.
+ __ push(r3); // Preserve value.
VisitForAccumulatorValue(prop->obj());
- __ Move(StoreDescriptor::ReceiverRegister(), r0);
+ __ Move(StoreDescriptor::ReceiverRegister(), r3);
__ pop(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
@@ -2600,46 +2589,46 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
break;
}
case NAMED_SUPER_PROPERTY: {
- __ Push(r0);
+ __ Push(r3);
VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(prop->obj()->AsSuperReference());
- // stack: value, this; r0: home_object
- Register scratch = r2;
- Register scratch2 = r3;
- __ mov(scratch, result_register()); // home_object
- __ ldr(r0, MemOperand(sp, kPointerSize)); // value
- __ ldr(scratch2, MemOperand(sp, 0)); // this
- __ str(scratch2, MemOperand(sp, kPointerSize)); // this
- __ str(scratch, MemOperand(sp, 0)); // home_object
- // stack: this, home_object; r0: value
+ // stack: value, this; r3: home_object
+ Register scratch = r5;
+ Register scratch2 = r6;
+ __ mr(scratch, result_register()); // home_object
+ __ LoadP(r3, MemOperand(sp, kPointerSize)); // value
+ __ LoadP(scratch2, MemOperand(sp, 0)); // this
+ __ StoreP(scratch2, MemOperand(sp, kPointerSize)); // this
+ __ StoreP(scratch, MemOperand(sp, 0)); // home_object
+ // stack: this, home_object; r3: value
EmitNamedSuperPropertyStore(prop);
break;
}
case KEYED_SUPER_PROPERTY: {
- __ Push(r0);
+ __ Push(r3);
VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(prop->obj()->AsSuperReference());
__ Push(result_register());
VisitForAccumulatorValue(prop->key());
- Register scratch = r2;
- Register scratch2 = r3;
- __ ldr(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
- // stack: value, this, home_object; r0: key, r3: value
- __ ldr(scratch, MemOperand(sp, kPointerSize)); // this
- __ str(scratch, MemOperand(sp, 2 * kPointerSize));
- __ ldr(scratch, MemOperand(sp, 0)); // home_object
- __ str(scratch, MemOperand(sp, kPointerSize));
- __ str(r0, MemOperand(sp, 0));
- __ Move(r0, scratch2);
- // stack: this, home_object, key; r0: value.
+ Register scratch = r5;
+ Register scratch2 = r6;
+ __ LoadP(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
+ // stack: value, this, home_object; r3: key, r6: value
+ __ LoadP(scratch, MemOperand(sp, kPointerSize)); // this
+ __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
+ __ LoadP(scratch, MemOperand(sp, 0)); // home_object
+ __ StoreP(scratch, MemOperand(sp, kPointerSize));
+ __ StoreP(r3, MemOperand(sp, 0));
+ __ Move(r3, scratch2);
+ // stack: this, home_object, key; r3: value.
EmitKeyedSuperPropertyStore(prop);
break;
}
case KEYED_PROPERTY: {
- __ push(r0); // Preserve value.
+ __ push(r3); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ Move(StoreDescriptor::NameRegister(), r0);
+ __ Move(StoreDescriptor::NameRegister(), r3);
__ Pop(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
Handle<Code> ic =
@@ -2648,19 +2637,19 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) {
break;
}
}
- context()->Plug(r0);
+ context()->Plug(r3);
}
void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
Variable* var, MemOperand location) {
- __ str(result_register(), location);
+ __ StoreP(result_register(), location, r0);
if (var->IsContextSlot()) {
// RecordWrite may destroy all its register arguments.
- __ mov(r3, result_register());
+ __ mr(r6, result_register());
int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs);
+ __ RecordWriteContextSlot(r4, offset, r6, r5, kLRHasBeenSaved,
+ kDontSaveFPRegs);
}
}
@@ -2669,24 +2658,24 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
if (var->IsUnallocated()) {
// Global var, const, or let.
__ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
- __ ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ LoadP(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
CallStoreIC();
} else if (op == Token::INIT_CONST_LEGACY) {
// Const initializers need a write barrier.
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
- __ push(r0);
- __ mov(r0, Operand(var->name()));
- __ Push(cp, r0); // Context and name.
+ __ push(r3);
+ __ mov(r3, Operand(var->name()));
+ __ Push(cp, r3); // Context and name.
__ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
- MemOperand location = VarOperand(var, r1);
- __ ldr(r2, location);
- __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
- __ b(ne, &skip);
+ MemOperand location = VarOperand(var, r4);
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ bne(&skip);
EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
}
@@ -2696,12 +2685,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
- MemOperand location = VarOperand(var, r1);
- __ ldr(r3, location);
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
- __ b(ne, &assign);
- __ mov(r3, Operand(var->name()));
- __ push(r3);
+ MemOperand location = VarOperand(var, r4);
+ __ LoadP(r6, location);
+ __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
+ __ bne(&assign);
+ __ mov(r6, Operand(var->name()));
+ __ push(r6);
__ CallRuntime(Runtime::kThrowReferenceError, 1);
// Perform the assignment.
__ bind(&assign);
@@ -2710,20 +2699,20 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
} else if (!var->is_const_mode() || op == Token::INIT_CONST) {
if (var->IsLookupSlot()) {
// Assignment to var.
- __ push(r0); // Value.
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(strict_mode())));
- __ Push(cp, r1, r0); // Context, name, strict mode.
+ __ push(r3); // Value.
+ __ mov(r4, Operand(var->name()));
+ __ mov(r3, Operand(Smi::FromInt(strict_mode())));
+ __ Push(cp, r4, r3); // Context, name, strict mode.
__ CallRuntime(Runtime::kStoreLookupSlot, 4);
} else {
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
- MemOperand location = VarOperand(var, r1);
+ MemOperand location = VarOperand(var, r4);
if (generate_debug_code_ && op == Token::INIT_LET) {
// Check for an uninitialized let binding.
- __ ldr(r2, location);
- __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization);
}
EmitStoreToStackLocalOrContextSlot(var, location);
@@ -2747,20 +2736,20 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
CallStoreIC(expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
+ context()->Plug(r3);
}
void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
// Assignment to named property of super.
- // r0 : value
+ // r3 : value
// stack : receiver ('this'), home_object
DCHECK(prop != NULL);
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
__ Push(key->value());
- __ Push(r0);
+ __ Push(r3);
__ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
: Runtime::kStoreToSuper_Sloppy),
4);
@@ -2769,11 +2758,11 @@ void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
// Assignment to named property of super.
- // r0 : value
+ // r3 : value
// stack : receiver ('this'), home_object, key
DCHECK(prop != NULL);
- __ Push(r0);
+ __ Push(r3);
__ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
: Runtime::kStoreKeyedToSuper_Sloppy),
4);
@@ -2786,13 +2775,13 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Record source code position before IC call.
SetSourcePosition(expr->position());
__ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
- DCHECK(StoreDescriptor::ValueRegister().is(r0));
+ DCHECK(StoreDescriptor::ValueRegister().is(r3));
Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
CallIC(ic, expr->AssignmentFeedbackId());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -2803,7 +2792,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (key->IsPropertyName()) {
if (!expr->IsSuperAccess()) {
VisitForAccumulatorValue(expr->obj());
- __ Move(LoadDescriptor::ReceiverRegister(), r0);
+ __ Move(LoadDescriptor::ReceiverRegister(), r3);
EmitNamedPropertyLoad(expr);
} else {
VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
@@ -2812,12 +2801,12 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
EmitNamedSuperPropertyLoad(expr);
}
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(r0);
+ context()->Plug(r3);
} else {
if (!expr->IsSuperAccess()) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ Move(LoadDescriptor::NameRegister(), r0);
+ __ Move(LoadDescriptor::NameRegister(), r3);
__ pop(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
@@ -2827,18 +2816,14 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
VisitForStackValue(expr->key());
EmitKeyedSuperPropertyLoad(expr);
}
- context()->Plug(r0);
+ context()->Plug(r3);
}
}
-void FullCodeGenerator::CallIC(Handle<Code> code,
- TypeFeedbackId ast_id) {
+void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
ic_total_count_++;
- // All calls must have a predictable size in full-codegen code to ensure that
- // the debugger can patch them correctly.
- __ Call(code, RelocInfo::CODE_TARGET, ast_id, al,
- NEVER_INLINE_TARGET_ADDRESS);
+ __ Call(code, RelocInfo::CODE_TARGET, ast_id);
}
@@ -2851,7 +2836,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Get the target function.
if (call_type == CallICState::FUNCTION) {
- { StackValueContext context(this);
+ {
+ StackValueContext context(this);
EmitVariableLoad(callee->AsVariableProxy());
PrepareForBailout(callee, NO_REGISTERS);
}
@@ -2862,13 +2848,13 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
DCHECK(!callee->AsProperty()->IsSuperAccess());
- __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ ldr(ip, MemOperand(sp, 0));
+ __ LoadP(ip, MemOperand(sp, 0));
__ push(ip);
- __ str(r0, MemOperand(sp, kPointerSize));
+ __ StoreP(r3, MemOperand(sp, kPointerSize));
}
EmitCall(expr, call_type);
@@ -2885,15 +2871,12 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
// Load the function from the receiver.
- const Register scratch = r1;
+ const Register scratch = r4;
SuperReference* super_ref = prop->obj()->AsSuperReference();
EmitLoadHomeObject(super_ref);
- __ Push(r0);
+ __ mr(scratch, r3);
VisitForAccumulatorValue(super_ref->this_var());
- __ Push(r0);
- __ Push(r0);
- __ ldr(scratch, MemOperand(sp, kPointerSize * 2));
- __ Push(scratch);
+ __ Push(scratch, r3, r3, scratch);
__ Push(key->value());
// Stack here:
@@ -2905,7 +2888,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
__ CallRuntime(Runtime::kLoadFromSuper, 3);
// Replace home_object with target function.
- __ str(r0, MemOperand(sp, kPointerSize));
+ __ StoreP(r3, MemOperand(sp, kPointerSize));
// Stack here:
// - target function
@@ -2915,8 +2898,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
// Code common for calls using the IC.
-void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
- Expression* key) {
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
// Load the key.
VisitForAccumulatorValue(key);
@@ -2924,15 +2906,15 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
// Load the function from the receiver.
DCHECK(callee->IsProperty());
- __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
- __ Move(LoadDescriptor::NameRegister(), r0);
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ Move(LoadDescriptor::NameRegister(), r3);
EmitKeyedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ ldr(ip, MemOperand(sp, 0));
+ __ LoadP(ip, MemOperand(sp, 0));
__ push(ip);
- __ str(r0, MemOperand(sp, kPointerSize));
+ __ StoreP(r3, MemOperand(sp, kPointerSize));
EmitCall(expr, CallICState::METHOD);
}
@@ -2946,14 +2928,14 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
SetSourcePosition(prop->position());
// Load the function from the receiver.
- const Register scratch = r1;
+ const Register scratch = r4;
SuperReference* super_ref = prop->obj()->AsSuperReference();
EmitLoadHomeObject(super_ref);
- __ Push(r0);
+ __ Push(r3);
VisitForAccumulatorValue(super_ref->this_var());
- __ Push(r0);
- __ Push(r0);
- __ ldr(scratch, MemOperand(sp, kPointerSize * 2));
+ __ Push(r3);
+ __ Push(r3);
+ __ LoadP(scratch, MemOperand(sp, kPointerSize * 2));
__ Push(scratch);
VisitForStackValue(prop->key());
@@ -2966,7 +2948,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
__ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
// Replace home_object with target function.
- __ str(r0, MemOperand(sp, kPointerSize));
+ __ StoreP(r3, MemOperand(sp, kPointerSize));
// Stack here:
// - target function
@@ -2979,7 +2961,8 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- { PreservePositionScope scope(masm()->positions_recorder());
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
for (int i = 0; i < arg_count; i++) {
VisitForStackValue(args->at(i));
}
@@ -2987,53 +2970,51 @@ void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
// Record source position of the IC call.
SetSourcePosition(expr->position());
- Handle<Code> ic = CallIC::initialize_stub(
- isolate(), arg_count, call_type);
- __ mov(r3, Operand(SmiFromSlot(expr->CallFeedbackSlot())));
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ Handle<Code> ic = CallIC::initialize_stub(isolate(), arg_count, call_type);
+ __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackSlot()));
+ __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
CallIC(ic);
RecordJSReturnSite(expr);
// Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0);
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, r3);
}
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // r5: copy of the first argument or undefined if it doesn't exist.
+ // r8: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ ldr(r5, MemOperand(sp, arg_count * kPointerSize));
+ __ LoadP(r8, MemOperand(sp, arg_count * kPointerSize), r0);
} else {
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
}
- // r4: the receiver of the enclosing function.
- __ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ // r7: the receiver of the enclosing function.
+ __ LoadP(r7, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // r3: the receiver of the enclosing function.
+ // r6: the receiver of the enclosing function.
int receiver_offset = 2 + info_->scope()->num_parameters();
- __ ldr(r3, MemOperand(fp, receiver_offset * kPointerSize));
+ __ LoadP(r6, MemOperand(fp, receiver_offset * kPointerSize), r0);
- // r2: strict mode.
- __ mov(r2, Operand(Smi::FromInt(strict_mode())));
+ // r5: strict mode.
+ __ LoadSmiLiteral(r5, Smi::FromInt(strict_mode()));
- // r1: the start position of the scope the calls resides in.
- __ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
+ // r4: the start position of the scope the calls resides in.
+ __ LoadSmiLiteral(r4, Smi::FromInt(scope()->start_position()));
// Do the runtime call.
- __ Push(r5);
- __ Push(r4, r3, r2, r1);
+ __ Push(r8, r7, r6, r5, r4);
__ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
}
void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
DCHECK(super_ref != NULL);
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ Push(r0);
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Push(r3);
__ CallRuntime(Runtime::kGetPrototype, 1);
}
@@ -3057,10 +3038,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
- { PreservePositionScope pos_scope(masm()->positions_recorder());
+ {
+ PreservePositionScope pos_scope(masm()->positions_recorder());
VisitForStackValue(callee);
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
- __ push(r2); // Reserved receiver slot.
+ __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+ __ push(r5); // Reserved receiver slot.
// Push the arguments.
for (int i = 0; i < arg_count; i++) {
@@ -3069,14 +3051,14 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Push a copy of the function (found below the arguments) and
// resolve eval.
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ push(r1);
+ __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ push(r4);
EmitResolvePossiblyDirectEval(arg_count);
- // The runtime call returns a pair of values in r0 (function) and
- // r1 (receiver). Touch up the stack with the right values.
- __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize));
- __ str(r1, MemOperand(sp, arg_count * kPointerSize));
+ // The runtime call returns a pair of values in r3 (function) and
+ // r4 (receiver). Touch up the stack with the right values.
+ __ StoreP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ StoreP(r4, MemOperand(sp, arg_count * kPointerSize), r0);
PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
}
@@ -3084,12 +3066,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
// Record source position for debugger.
SetSourcePosition(expr->position());
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ CallStub(&stub);
RecordJSReturnSite(expr);
// Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0);
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ context()->DropAndPlug(1, r3);
} else if (call_type == Call::GLOBAL_CALL) {
EmitCallWithLoadIC(expr);
@@ -3098,20 +3080,21 @@ void FullCodeGenerator::VisitCall(Call* expr) {
VariableProxy* proxy = callee->AsVariableProxy();
Label slow, done;
- { PreservePositionScope scope(masm()->positions_recorder());
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
// Generate code for loading from variables potentially shadowed
// by eval-introduced variables.
EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
}
__ bind(&slow);
- // Call the runtime to find the function to call (returned in r0)
+ // Call the runtime to find the function to call (returned in r3)
// and the object holding it (returned in edx).
- DCHECK(!context_register().is(r2));
- __ mov(r2, Operand(proxy->name()));
- __ Push(context_register(), r2);
+ DCHECK(!context_register().is(r5));
+ __ mov(r5, Operand(proxy->name()));
+ __ Push(context_register(), r5);
__ CallRuntime(Runtime::kLoadLookupSlot, 2);
- __ Push(r0, r1); // Function, receiver.
+ __ Push(r3, r4); // Function, receiver.
PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
// If fast case code has been generated, emit code to push the
@@ -3122,11 +3105,11 @@ void FullCodeGenerator::VisitCall(Call* expr) {
__ b(&call);
__ bind(&done);
// Push function.
- __ push(r0);
+ __ push(r3);
// The receiver is implicitly the global receiver. Indicate this
// by passing the hole to the call function stub.
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ push(r1);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ push(r4);
__ bind(&call);
}
@@ -3162,11 +3145,12 @@ void FullCodeGenerator::VisitCall(Call* expr) {
} else {
DCHECK(call_type == Call::OTHER_CALL);
// Call to an arbitrary expression not handled specially above.
- { PreservePositionScope scope(masm()->positions_recorder());
+ {
+ PreservePositionScope scope(masm()->positions_recorder());
VisitForStackValue(callee);
}
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
- __ push(r1);
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ __ push(r4);
// Emit function call.
EmitCall(expr);
}
@@ -3205,9 +3189,9 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// constructor invocation.
SetSourcePosition(expr->position());
- // Load function and argument count into r1 and r0.
- __ mov(r0, Operand(arg_count));
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize));
+ // Load function and argument count into r4 and r3.
+ __ mov(r3, Operand(arg_count));
+ __ LoadP(r4, MemOperand(sp, arg_count * kPointerSize), r0);
// Record call targets in unoptimized code.
if (FLAG_pretenuring_call_new) {
@@ -3216,13 +3200,13 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
expr->CallNewFeedbackSlot().ToInt() + 1);
}
- __ Move(r2, FeedbackVector());
- __ mov(r3, Operand(SmiFromSlot(expr->CallNewFeedbackSlot())));
+ __ Move(r5, FeedbackVector());
+ __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
__ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -3236,12 +3220,12 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ SmiTst(r0);
- Split(eq, if_true, if_false, fall_through);
+ __ TestIfSmi(r3, r0);
+ Split(eq, if_true, if_false, fall_through, cr0);
context()->Plug(if_true, if_false);
}
@@ -3257,12 +3241,12 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ NonNegativeSmiTst(r0);
- Split(eq, if_true, if_false, fall_through);
+ __ TestIfPositiveSmi(r3, r0);
+ Split(eq, if_true, if_false, fall_through, cr0);
context()->Plug(if_true, if_false);
}
@@ -3278,22 +3262,22 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
- __ JumpIfSmi(r0, if_false);
+ __ JumpIfSmi(r3, if_false);
__ LoadRoot(ip, Heap::kNullValueRootIndex);
- __ cmp(r0, ip);
- __ b(eq, if_true);
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ cmp(r3, ip);
+ __ beq(if_true);
+ __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
// Undetectable objects behave like undefined when tested with typeof.
- __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- __ b(ne, if_false);
- __ ldrb(r1, FieldMemOperand(r2, Map::kInstanceTypeOffset));
- __ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
- __ b(lt, if_false);
- __ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ lbz(r4, FieldMemOperand(r5, Map::kBitFieldOffset));
+ __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+ __ bne(if_false, cr0);
+ __ lbz(r4, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+ __ cmpi(r4, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+ __ blt(if_false);
+ __ cmpi(r4, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(le, if_true, if_false, fall_through);
@@ -3311,11 +3295,11 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
+ __ JumpIfSmi(r3, if_false);
+ __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
@@ -3333,15 +3317,15 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
- __ JumpIfSmi(r0, if_false);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
+ __ JumpIfSmi(r3, if_false);
+ __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ lbz(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+ __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ne, if_true, if_false, fall_through);
+ Split(ne, if_true, if_false, fall_through, cr0);
context()->Plug(if_true, if_false);
}
@@ -3358,22 +3342,22 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
- __ AssertNotSmi(r0);
+ __ AssertNotSmi(r3);
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset));
- __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ b(ne, &skip_lookup);
+ __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ lbz(ip, FieldMemOperand(r4, Map::kBitField2Offset));
+ __ andi(r0, ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ bne(&skip_lookup, cr0);
// Check for fast case object. Generate false result for slow case object.
- __ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset));
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ LoadP(r5, FieldMemOperand(r3, JSObject::kPropertiesOffset));
+ __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHashTableMapRootIndex);
- __ cmp(r2, ip);
- __ b(eq, if_false);
+ __ cmp(r5, ip);
+ __ beq(if_false);
// Look for valueOf name in the descriptor array, and indicate false if
// found. Since we omit an enumeration index check, if it is added via a
@@ -3381,54 +3365,56 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
Label entry, loop, done;
// Skip loop if no descriptors are valid.
- __ NumberOfOwnDescriptors(r3, r1);
- __ cmp(r3, Operand::Zero());
- __ b(eq, &done);
+ __ NumberOfOwnDescriptors(r6, r4);
+ __ cmpi(r6, Operand::Zero());
+ __ beq(&done);
- __ LoadInstanceDescriptors(r1, r4);
- // r4: descriptor array.
- // r3: valid entries in the descriptor array.
+ __ LoadInstanceDescriptors(r4, r7);
+ // r7: descriptor array.
+ // r6: valid entries in the descriptor array.
__ mov(ip, Operand(DescriptorArray::kDescriptorSize));
- __ mul(r3, r3, ip);
+ __ Mul(r6, r6, ip);
// Calculate location of the first key name.
- __ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
+ __ addi(r7, r7, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
// Calculate the end of the descriptor array.
- __ mov(r2, r4);
- __ add(r2, r2, Operand(r3, LSL, kPointerSizeLog2));
+ __ mr(r5, r7);
+ __ ShiftLeftImm(ip, r6, Operand(kPointerSizeLog2));
+ __ add(r5, r5, ip);
// Loop through all the keys in the descriptor array. If one of these is the
// string "valueOf" the result is false.
// The use of ip to store the valueOf string assumes that it is not otherwise
// used in the loop below.
__ mov(ip, Operand(isolate()->factory()->value_of_string()));
- __ jmp(&entry);
+ __ b(&entry);
__ bind(&loop);
- __ ldr(r3, MemOperand(r4, 0));
- __ cmp(r3, ip);
- __ b(eq, if_false);
- __ add(r4, r4, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
+ __ LoadP(r6, MemOperand(r7, 0));
+ __ cmp(r6, ip);
+ __ beq(if_false);
+ __ addi(r7, r7, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
__ bind(&entry);
- __ cmp(r4, Operand(r2));
- __ b(ne, &loop);
+ __ cmp(r7, r5);
+ __ bne(&loop);
__ bind(&done);
// Set the bit in the map to indicate that there is no local valueOf field.
- __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
- __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
- __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset));
+ __ lbz(r5, FieldMemOperand(r4, Map::kBitField2Offset));
+ __ ori(r5, r5, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+ __ stb(r5, FieldMemOperand(r4, Map::kBitField2Offset));
__ bind(&skip_lookup);
// If a valueOf property is not found on the object check that its
// prototype is the un-modified String prototype. If not result is false.
- __ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset));
- __ JumpIfSmi(r2, if_false);
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
- __ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset));
- __ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
- __ cmp(r2, r3);
+ __ LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset));
+ __ JumpIfSmi(r5, if_false);
+ __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset));
+ __ LoadP(r6, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ LoadP(r6, FieldMemOperand(r6, GlobalObject::kNativeContextOffset));
+ __ LoadP(r6,
+ ContextOperand(r6, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+ __ cmp(r5, r6);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -3446,11 +3432,11 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE);
+ __ JumpIfSmi(r3, if_false);
+ __ CompareObjectType(r3, r4, r5, JS_FUNCTION_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -3468,14 +3454,25 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
- __ cmp(r2, Operand(0x80000000));
- __ cmp(r1, Operand(0x00000000), eq);
+ __ CheckMap(r3, r4, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+#if V8_TARGET_ARCH_PPC64
+ __ LoadP(r4, FieldMemOperand(r3, HeapNumber::kValueOffset));
+ __ li(r5, Operand(1));
+ __ rotrdi(r5, r5, 1); // r5 = 0x80000000_00000000
+ __ cmp(r4, r5);
+#else
+ __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset));
+ __ lwz(r4, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
+ Label skip;
+ __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+ __ cmp(r5, r0);
+ __ bne(&skip);
+ __ cmpi(r4, Operand::Zero());
+ __ bind(&skip);
+#endif
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -3494,11 +3491,11 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE);
+ __ JumpIfSmi(r3, if_false);
+ __ CompareObjectType(r3, r4, r4, JS_ARRAY_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -3516,11 +3513,11 @@ void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
+ __ JumpIfSmi(r3, if_false);
+ __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -3541,15 +3538,15 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
&if_false, &fall_through);
- __ JumpIfSmi(r0, if_false);
- Register map = r1;
- Register type_reg = r2;
- __ ldr(map, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
- __ sub(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
- __ cmp(type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE));
+ __ JumpIfSmi(r3, if_false);
+ Register map = r4;
+ Register type_reg = r5;
+ __ LoadP(map, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ subi(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
+ __ cmpli(type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(ls, if_true, if_false, fall_through);
+ Split(le, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
@@ -3562,20 +3559,24 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
// Get the frame pointer for the calling frame.
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
// Skip the arguments adaptor frame if it exists.
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset), eq);
+ Label check_frame_marker;
+ __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&check_frame_marker);
+ __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
// Check the marker in the calling frame.
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset));
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+ __ bind(&check_frame_marker);
+ __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
+ STATIC_ASSERT(StackFrame::CONSTRUCT < 0x4000);
+ __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::CONSTRUCT), r0);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -3595,11 +3596,11 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
- __ pop(r1);
- __ cmp(r0, r1);
+ __ pop(r4);
+ __ cmp(r3, r4);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -3612,32 +3613,34 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
DCHECK(args->length() == 1);
// ArgumentsAccessStub expects the key in edx and the formal
- // parameter count in r0.
+ // parameter count in r3.
VisitForAccumulatorValue(args->at(0));
- __ mov(r1, r0);
- __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
+ __ mr(r4, r3);
+ __ LoadSmiLiteral(r3, Smi::FromInt(info_->scope()->num_parameters()));
ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
__ CallStub(&stub);
- context()->Plug(r0);
+ context()->Plug(r3);
}
void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
-
+ Label exit;
// Get the number of formal parameters.
- __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
+ __ LoadSmiLiteral(r3, Smi::FromInt(info_->scope()->num_parameters()));
// Check if the calling frame is an arguments adaptor frame.
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+ __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
+ __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+ __ bne(&exit);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame.
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset), eq);
+ __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
- context()->Plug(r0);
+ __ bind(&exit);
+ context()->Plug(r3);
}
@@ -3649,56 +3652,56 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
// If the object is a smi, we return null.
- __ JumpIfSmi(r0, &null);
+ __ JumpIfSmi(r3, &null);
// Check that the object is a JS object but take special care of JS
// functions to make sure they have 'Function' as their class.
// Assume that there are only two callable types, and one of them is at
// either end of the type range for JS object types. Saves extra comparisons.
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE);
- // Map is now in r0.
- __ b(lt, &null);
+ __ CompareObjectType(r3, r3, r4, FIRST_SPEC_OBJECT_TYPE);
+ // Map is now in r3.
+ __ blt(&null);
STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
FIRST_SPEC_OBJECT_TYPE + 1);
- __ b(eq, &function);
+ __ beq(&function);
- __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE));
- STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
- LAST_SPEC_OBJECT_TYPE - 1);
- __ b(eq, &function);
+ __ cmpi(r4, Operand(LAST_SPEC_OBJECT_TYPE));
+ STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_SPEC_OBJECT_TYPE - 1);
+ __ beq(&function);
// Assume that there is no larger type.
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
// Check if the constructor in the map is a JS function.
- __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset));
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
- __ b(ne, &non_function_constructor);
+ __ LoadP(r3, FieldMemOperand(r3, Map::kConstructorOffset));
+ __ CompareObjectType(r3, r4, r4, JS_FUNCTION_TYPE);
+ __ bne(&non_function_constructor);
- // r0 now contains the constructor function. Grab the
+ // r3 now contains the constructor function. Grab the
// instance class name from there.
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset));
- __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r3,
+ FieldMemOperand(r3, SharedFunctionInfo::kInstanceClassNameOffset));
__ b(&done);
// Functions have class 'Function'.
__ bind(&function);
- __ LoadRoot(r0, Heap::kFunction_stringRootIndex);
- __ jmp(&done);
+ __ LoadRoot(r3, Heap::kFunction_stringRootIndex);
+ __ b(&done);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ LoadRoot(r0, Heap::kObject_stringRootIndex);
- __ jmp(&done);
+ __ LoadRoot(r3, Heap::kObject_stringRootIndex);
+ __ b(&done);
// Non-JS objects have class null.
__ bind(&null);
- __ LoadRoot(r0, Heap::kNullValueRootIndex);
+ __ LoadRoot(r3, Heap::kNullValueRootIndex);
// All done.
__ bind(&done);
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -3711,7 +3714,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
VisitForStackValue(args->at(1));
VisitForStackValue(args->at(2));
__ CallStub(&stub);
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -3725,7 +3728,7 @@ void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
VisitForStackValue(args->at(2));
VisitForStackValue(args->at(3));
__ CallStub(&stub);
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -3736,13 +3739,14 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
Label done;
// If the object is a smi return the object.
- __ JumpIfSmi(r0, &done);
+ __ JumpIfSmi(r3, &done);
// If the object is not a value type, return the object.
- __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE);
- __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset), eq);
+ __ CompareObjectType(r3, r4, r4, JS_VALUE_TYPE);
+ __ bne(&done);
+ __ LoadP(r3, FieldMemOperand(r3, JSValue::kValueOffset));
__ bind(&done);
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -3755,41 +3759,43 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0)); // Load the object.
Label runtime, done, not_date_object;
- Register object = r0;
- Register result = r0;
- Register scratch0 = r9;
- Register scratch1 = r1;
+ Register object = r3;
+ Register result = r3;
+ Register scratch0 = r11;
+ Register scratch1 = r4;
__ JumpIfSmi(object, &not_date_object);
__ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE);
- __ b(ne, &not_date_object);
+ __ bne(&not_date_object);
if (index->value() == 0) {
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
- __ jmp(&done);
+ __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
+ __ b(&done);
} else {
if (index->value() < JSDate::kFirstUncachedField) {
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
__ mov(scratch1, Operand(stamp));
- __ ldr(scratch1, MemOperand(scratch1));
- __ ldr(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
+ __ LoadP(scratch1, MemOperand(scratch1));
+ __ LoadP(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
__ cmp(scratch1, scratch0);
- __ b(ne, &runtime);
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset +
- kPointerSize * index->value()));
- __ jmp(&done);
+ __ bne(&runtime);
+ __ LoadP(result,
+ FieldMemOperand(object, JSDate::kValueOffset +
+ kPointerSize * index->value()),
+ scratch0);
+ __ b(&done);
}
__ bind(&runtime);
__ PrepareCallCFunction(2, scratch1);
- __ mov(r1, Operand(index));
+ __ LoadSmiLiteral(r4, index);
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
- __ jmp(&done);
+ __ b(&done);
}
__ bind(&not_date_object);
__ CallRuntime(Runtime::kThrowNotDateError, 0);
__ bind(&done);
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -3797,9 +3803,9 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
- Register string = r0;
- Register index = r1;
- Register value = r2;
+ Register string = r3;
+ Register index = r4;
+ Register value = r5;
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
@@ -3807,21 +3813,20 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
__ Pop(index, value);
if (FLAG_debug_code) {
- __ SmiTst(value);
- __ Check(eq, kNonSmiValue);
- __ SmiTst(index);
- __ Check(eq, kNonSmiIndex);
+ __ TestIfSmi(value, r0);
+ __ Check(eq, kNonSmiValue, cr0);
+ __ TestIfSmi(index, r0);
+ __ Check(eq, kNonSmiIndex, cr0);
__ SmiUntag(index, index);
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
__ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
__ SmiTag(index, index);
}
- __ SmiUntag(value, value);
- __ add(ip,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ strb(value, MemOperand(ip, index, LSR, kSmiTagSize));
+ __ SmiUntag(value);
+ __ addi(ip, string, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ SmiToByteArrayOffset(r0, index);
+ __ stbx(value, MemOperand(ip, r0));
context()->Plug(string);
}
@@ -3830,9 +3835,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
- Register string = r0;
- Register index = r1;
- Register value = r2;
+ Register string = r3;
+ Register index = r4;
+ Register value = r5;
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
@@ -3840,27 +3845,24 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
__ Pop(index, value);
if (FLAG_debug_code) {
- __ SmiTst(value);
- __ Check(eq, kNonSmiValue);
- __ SmiTst(index);
- __ Check(eq, kNonSmiIndex);
+ __ TestIfSmi(value, r0);
+ __ Check(eq, kNonSmiValue, cr0);
+ __ TestIfSmi(index, r0);
+ __ Check(eq, kNonSmiIndex, cr0);
__ SmiUntag(index, index);
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
__ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
__ SmiTag(index, index);
}
- __ SmiUntag(value, value);
- __ add(ip,
- string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
- __ strh(value, MemOperand(ip, index));
+ __ SmiUntag(value);
+ __ addi(ip, string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ SmiToShortArrayOffset(r0, index);
+ __ sthx(value, MemOperand(ip, r0));
context()->Plug(string);
}
-
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
// Load the arguments on the stack and call the runtime function.
ZoneList<Expression*>* args = expr->arguments();
@@ -3869,47 +3871,47 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
VisitForStackValue(args->at(1));
MathPowStub stub(isolate(), MathPowStub::ON_STACK);
__ CallStub(&stub);
- context()->Plug(r0);
+ context()->Plug(r3);
}
void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
- VisitForStackValue(args->at(0)); // Load the object.
+ VisitForStackValue(args->at(0)); // Load the object.
VisitForAccumulatorValue(args->at(1)); // Load the value.
- __ pop(r1); // r0 = value. r1 = object.
+ __ pop(r4); // r3 = value. r4 = object.
Label done;
// If the object is a smi, return the value.
- __ JumpIfSmi(r1, &done);
+ __ JumpIfSmi(r4, &done);
// If the object is not a value type, return the value.
- __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE);
- __ b(ne, &done);
+ __ CompareObjectType(r4, r5, r5, JS_VALUE_TYPE);
+ __ bne(&done);
// Store the value.
- __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset));
+ __ StoreP(r3, FieldMemOperand(r4, JSValue::kValueOffset), r0);
// Update the write barrier. Save the value as it will be
// overwritten by the write barrier code and is needed afterward.
- __ mov(r2, r0);
- __ RecordWriteField(
- r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs);
+ __ mr(r5, r3);
+ __ RecordWriteField(r4, JSValue::kValueOffset, r5, r6, kLRHasBeenSaved,
+ kDontSaveFPRegs);
__ bind(&done);
- context()->Plug(r0);
+ context()->Plug(r3);
}
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(args->length(), 1);
- // Load the argument into r0 and call the stub.
+ // Load the argument into r3 and call the stub.
VisitForAccumulatorValue(args->at(0));
NumberToStringStub stub(isolate());
__ CallStub(&stub);
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -3919,15 +3921,15 @@ void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
Label done;
- StringCharFromCodeGenerator generator(r0, r1);
+ StringCharFromCodeGenerator generator(r3, r4);
generator.GenerateFast(masm_);
- __ jmp(&done);
+ __ b(&done);
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- context()->Plug(r1);
+ context()->Plug(r4);
}
@@ -3937,36 +3939,32 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
- Register object = r1;
- Register index = r0;
- Register result = r3;
+ Register object = r4;
+ Register index = r3;
+ Register result = r6;
__ pop(object);
Label need_conversion;
Label index_out_of_range;
Label done;
- StringCharCodeAtGenerator generator(object,
- index,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
+ StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+ &need_conversion, &index_out_of_range,
STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm_);
- __ jmp(&done);
+ __ b(&done);
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// NaN.
__ LoadRoot(result, Heap::kNanValueRootIndex);
- __ jmp(&done);
+ __ b(&done);
__ bind(&need_conversion);
// Load the undefined value into the result register, which will
// trigger conversion.
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
- __ jmp(&done);
+ __ b(&done);
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
@@ -3982,38 +3980,33 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
- Register object = r1;
- Register index = r0;
- Register scratch = r3;
- Register result = r0;
+ Register object = r4;
+ Register index = r3;
+ Register scratch = r6;
+ Register result = r3;
__ pop(object);
Label need_conversion;
Label index_out_of_range;
Label done;
- StringCharAtGenerator generator(object,
- index,
- scratch,
- result,
- &need_conversion,
- &need_conversion,
- &index_out_of_range,
- STRING_INDEX_IS_NUMBER);
+ StringCharAtGenerator generator(object, index, scratch, result,
+ &need_conversion, &need_conversion,
+ &index_out_of_range, STRING_INDEX_IS_NUMBER);
generator.GenerateFast(masm_);
- __ jmp(&done);
+ __ b(&done);
__ bind(&index_out_of_range);
// When the index is out of range, the spec requires us to return
// the empty string.
__ LoadRoot(result, Heap::kempty_stringRootIndex);
- __ jmp(&done);
+ __ b(&done);
__ bind(&need_conversion);
// Move smi zero into the result register, which will trigger
// conversion.
- __ mov(result, Operand(Smi::FromInt(0)));
- __ jmp(&done);
+ __ LoadSmiLiteral(result, Smi::FromInt(0));
+ __ b(&done);
NopRuntimeCallHelper call_helper;
generator.GenerateSlow(masm_, call_helper);
@@ -4029,10 +4022,10 @@ void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
- __ pop(r1);
+ __ pop(r4);
StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
__ CallStub(&stub);
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -4044,7 +4037,7 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
StringCompareStub stub(isolate());
__ CallStub(&stub);
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -4060,23 +4053,23 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
Label runtime, done;
// Check for non-function argument (including proxy).
- __ JumpIfSmi(r0, &runtime);
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE);
- __ b(ne, &runtime);
+ __ JumpIfSmi(r3, &runtime);
+ __ CompareObjectType(r3, r4, r4, JS_FUNCTION_TYPE);
+ __ bne(&runtime);
- // InvokeFunction requires the function in r1. Move it in there.
- __ mov(r1, result_register());
+ // InvokeFunction requires the function in r4. Move it in there.
+ __ mr(r4, result_register());
ParameterCount count(arg_count);
- __ InvokeFunction(r1, count, CALL_FUNCTION, NullCallWrapper());
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ jmp(&done);
+ __ InvokeFunction(r4, count, CALL_FUNCTION, NullCallWrapper());
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ b(&done);
__ bind(&runtime);
- __ push(r0);
+ __ push(r3);
__ CallRuntime(Runtime::kCall, args->length());
__ bind(&done);
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -4087,10 +4080,9 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
VisitForAccumulatorValue(args->at(2));
- __ pop(r1);
- __ pop(r2);
+ __ Pop(r5, r4);
__ CallStub(&stub);
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -4104,33 +4096,34 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
isolate()->native_context()->jsfunction_result_caches());
if (jsfunction_result_caches->length() <= cache_id) {
__ Abort(kAttemptToUseUndefinedCache);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- context()->Plug(r0);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ context()->Plug(r3);
return;
}
VisitForAccumulatorValue(args->at(1));
- Register key = r0;
- Register cache = r1;
- __ ldr(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
- __ ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
- __ ldr(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
- __ ldr(cache,
- FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
-
+ Register key = r3;
+ Register cache = r4;
+ __ LoadP(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+ __ LoadP(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
+ __ LoadP(cache,
+ ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+ __ LoadP(cache,
+ FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)), r0);
Label done, not_found;
- __ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
- // r2 now holds finger offset as a smi.
- __ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- // r3 now points to the start of fixed array elements.
- __ ldr(r2, MemOperand::PointerAddressFromSmiKey(r3, r2, PreIndex));
- // Note side effect of PreIndex: r3 now points to the key of the pair.
- __ cmp(key, r2);
- __ b(ne, &not_found);
-
- __ ldr(r0, MemOperand(r3, kPointerSize));
+ __ LoadP(r5, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
+ // r5 now holds finger offset as a smi.
+ __ addi(r6, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ // r6 now points to the start of fixed array elements.
+ __ SmiToPtrArrayOffset(r5, r5);
+ __ LoadPUX(r5, MemOperand(r6, r5));
+ // r6 now points to the key of the pair.
+ __ cmp(key, r5);
+ __ bne(&not_found);
+
+ __ LoadP(r3, MemOperand(r6, kPointerSize));
__ b(&done);
__ bind(&not_found);
@@ -4139,7 +4132,7 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
__ CallRuntime(Runtime::kGetFromCache, 2);
__ bind(&done);
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -4151,11 +4144,14 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
- __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
- __ tst(r0, Operand(String::kContainsCachedArrayIndexMask));
+ __ lwz(r3, FieldMemOperand(r3, String::kHashFieldOffset));
+ // PPC - assume ip is free
+ __ mov(ip, Operand(String::kContainsCachedArrayIndexMask));
+ __ and_(r0, r3, ip);
+ __ cmpi(r0, Operand::Zero());
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
@@ -4168,12 +4164,12 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- __ AssertString(r0);
+ __ AssertString(r3);
- __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset));
- __ IndexFromHash(r0, r0);
+ __ lwz(r3, FieldMemOperand(r3, String::kHashFieldOffset));
+ __ IndexFromHash(r3, r3);
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -4187,49 +4183,51 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
// All aliases of the same register have disjoint lifetimes.
- Register array = r0;
- Register elements = no_reg; // Will be r0.
- Register result = no_reg; // Will be r0.
- Register separator = r1;
- Register array_length = r2;
- Register result_pos = no_reg; // Will be r2
- Register string_length = r3;
- Register string = r4;
- Register element = r5;
- Register elements_end = r6;
- Register scratch = r9;
+ Register array = r3;
+ Register elements = no_reg; // Will be r3.
+ Register result = no_reg; // Will be r3.
+ Register separator = r4;
+ Register array_length = r5;
+ Register result_pos = no_reg; // Will be r5
+ Register string_length = r6;
+ Register string = r7;
+ Register element = r8;
+ Register elements_end = r9;
+ Register scratch1 = r10;
+ Register scratch2 = r11;
// Separator operand is on the stack.
__ pop(separator);
// Check that the array is a JSArray.
__ JumpIfSmi(array, &bailout);
- __ CompareObjectType(array, scratch, array_length, JS_ARRAY_TYPE);
- __ b(ne, &bailout);
+ __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
+ __ bne(&bailout);
// Check that the array has fast elements.
- __ CheckFastElements(scratch, array_length, &bailout);
+ __ CheckFastElements(scratch1, scratch2, &bailout);
// If the array has length zero, return the empty string.
- __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
- __ SmiUntag(array_length, SetCC);
- __ b(ne, &non_trivial_array);
- __ LoadRoot(r0, Heap::kempty_stringRootIndex);
+ __ LoadP(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
+ __ SmiUntag(array_length);
+ __ cmpi(array_length, Operand::Zero());
+ __ bne(&non_trivial_array);
+ __ LoadRoot(r3, Heap::kempty_stringRootIndex);
__ b(&done);
__ bind(&non_trivial_array);
// Get the FixedArray containing array's elements.
elements = array;
- __ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+ __ LoadP(elements, FieldMemOperand(array, JSArray::kElementsOffset));
array = no_reg; // End of array's live range.
// Check that all array elements are sequential one-byte strings, and
// accumulate the sum of their lengths, as a smi-encoded value.
- __ mov(string_length, Operand::Zero());
- __ add(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ __ li(string_length, Operand::Zero());
+ __ addi(element, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ ShiftLeftImm(elements_end, array_length, Operand(kPointerSizeLog2));
+ __ add(elements_end, element, elements_end);
// Loop condition: while (element < elements_end).
// Live values in registers:
// elements: Fixed array of strings.
@@ -4239,25 +4237,29 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
// element: Current array element.
// elements_end: Array end.
if (generate_debug_code_) {
- __ cmp(array_length, Operand::Zero());
+ __ cmpi(array_length, Operand::Zero());
__ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
}
__ bind(&loop);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
+ __ LoadP(string, MemOperand(element));
+ __ addi(element, element, Operand(kPointerSize));
__ JumpIfSmi(string, &bailout);
- __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
- __ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
- __ add(string_length, string_length, Operand(scratch), SetCC);
- __ b(vs, &bailout);
+ __ LoadP(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
+ __ LoadP(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
+
+ __ AddAndCheckForOverflow(string_length, string_length, scratch1, scratch2,
+ r0);
+ __ BranchOnOverflow(&bailout);
+
__ cmp(element, elements_end);
- __ b(lt, &loop);
+ __ blt(&loop);
// If array_length is 1, return elements[0], a string.
- __ cmp(array_length, Operand(1));
- __ b(ne, &not_size_one_array);
- __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize));
+ __ cmpi(array_length, Operand(1));
+ __ bne(&not_size_one_array);
+ __ LoadP(r3, FieldMemOperand(elements, FixedArray::kHeaderSize));
__ b(&done);
__ bind(&not_size_one_array);
@@ -4270,30 +4272,43 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
// Check that the separator is a flat one-byte string.
__ JumpIfSmi(separator, &bailout);
- __ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset));
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
- __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
+ __ LoadP(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+ __ lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
// Add (separator length times array_length) - separator length to the
- // string_length to get the length of the result string. array_length is not
- // smi but the other values are, so the result is a smi
- __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ sub(string_length, string_length, Operand(scratch));
- __ smull(scratch, ip, array_length, scratch);
+ // string_length to get the length of the result string.
+ __ LoadP(scratch1,
+ FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ sub(string_length, string_length, scratch1);
+#if V8_TARGET_ARCH_PPC64
+ __ SmiUntag(scratch1, scratch1);
+ __ Mul(scratch2, array_length, scratch1);
+ // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
+ // zero.
+ __ ShiftRightImm(ip, scratch2, Operand(31), SetRC);
+ __ bne(&bailout, cr0);
+ __ SmiTag(scratch2, scratch2);
+#else
+ // array_length is not smi but the other values are, so the result is a smi
+ __ mullw(scratch2, array_length, scratch1);
+ __ mulhw(ip, array_length, scratch1);
// Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
// zero.
- __ cmp(ip, Operand::Zero());
- __ b(ne, &bailout);
- __ tst(scratch, Operand(0x80000000));
- __ b(ne, &bailout);
- __ add(string_length, string_length, Operand(scratch), SetCC);
- __ b(vs, &bailout);
+ __ cmpi(ip, Operand::Zero());
+ __ bne(&bailout);
+ __ cmpwi(scratch2, Operand::Zero());
+ __ blt(&bailout);
+#endif
+
+ __ AddAndCheckForOverflow(string_length, string_length, scratch2, scratch1,
+ r0);
+ __ BranchOnOverflow(&bailout);
__ SmiUntag(string_length);
// Get first element in the array to free up the elements register to be used
// for the result.
- __ add(element,
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ addi(element, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
result = elements; // End of live range for elements.
elements = no_reg;
// Live values in registers:
@@ -4301,25 +4316,24 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
// separator: Separator string
// string_length: Length of result string (not smi)
// array_length: Length of the array.
- __ AllocateOneByteString(result, string_length, scratch,
- string, // used as scratch
- elements_end, // used as scratch
- &bailout);
+ __ AllocateOneByteString(result, string_length, scratch1, scratch2,
+ elements_end, &bailout);
// Prepare for looping. Set up elements_end to end of the array. Set
// result_pos to the position of the result where to write the first
// character.
- __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
+ __ ShiftLeftImm(elements_end, array_length, Operand(kPointerSizeLog2));
+ __ add(elements_end, element, elements_end);
result_pos = array_length; // End of live range for array_length.
array_length = no_reg;
- __ add(result_pos,
- result,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ addi(result_pos, result,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
// Check the length of the separator.
- __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
- __ cmp(scratch, Operand(Smi::FromInt(1)));
- __ b(eq, &one_char_separator);
- __ b(gt, &long_separator);
+ __ LoadP(scratch1,
+ FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+ __ CmpSmiLiteral(scratch1, Smi::FromInt(1), r0);
+ __ beq(&one_char_separator);
+ __ bgt(&long_separator);
// Empty separator case
__ bind(&empty_separator_loop);
@@ -4329,25 +4343,25 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
// elements_end: Array end.
// Copy next array element to the result.
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ LoadP(string, MemOperand(element));
+ __ addi(element, element, Operand(kPointerSize));
+ __ LoadP(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch);
+ __ addi(string, string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
__ cmp(element, elements_end);
- __ b(lt, &empty_separator_loop); // End while (element < elements_end).
- DCHECK(result.is(r0));
+ __ blt(&empty_separator_loop); // End while (element < elements_end).
+ DCHECK(result.is(r3));
__ b(&done);
// One-character separator case
__ bind(&one_char_separator);
// Replace separator with its one-byte character value.
- __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
+ __ lbz(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
// Jump into the loop after the code that copies the separator, so the first
// element is not preceded by a separator
- __ jmp(&one_char_separator_loop_entry);
+ __ b(&one_char_separator_loop_entry);
__ bind(&one_char_separator_loop);
// Live values in registers:
@@ -4357,20 +4371,21 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
// separator: Single separator one-byte char (in lower byte).
// Copy the separator character to the result.
- __ strb(separator, MemOperand(result_pos, 1, PostIndex));
+ __ stb(separator, MemOperand(result_pos));
+ __ addi(result_pos, result_pos, Operand(1));
// Copy next array element to the result.
__ bind(&one_char_separator_loop_entry);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ LoadP(string, MemOperand(element));
+ __ addi(element, element, Operand(kPointerSize));
+ __ LoadP(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ cmp(element, elements_end);
- __ b(lt, &one_char_separator_loop); // End while (element < elements_end).
- DCHECK(result.is(r0));
+ __ addi(string, string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ cmpl(element, elements_end);
+ __ blt(&one_char_separator_loop); // End while (element < elements_end).
+ DCHECK(result.is(r3));
__ b(&done);
// Long separator case (separator is more than one character). Entry is at the
@@ -4383,30 +4398,29 @@ void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
// separator: Separator string.
// Copy the separator to the result.
- __ ldr(string_length, FieldMemOperand(separator, String::kLengthOffset));
+ __ LoadP(string_length, FieldMemOperand(separator, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string,
- separator,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch);
+ __ addi(string, separator,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
__ bind(&long_separator);
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset));
+ __ LoadP(string, MemOperand(element));
+ __ addi(element, element, Operand(kPointerSize));
+ __ LoadP(string_length, FieldMemOperand(string, String::kLengthOffset));
__ SmiUntag(string_length);
- __ add(string,
- string,
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ CopyBytes(string, result_pos, string_length, scratch);
- __ cmp(element, elements_end);
- __ b(lt, &long_separator_loop); // End while (element < elements_end).
- DCHECK(result.is(r0));
+ __ addi(string, string,
+ Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ CopyBytes(string, result_pos, string_length, scratch1);
+ __ cmpl(element, elements_end);
+ __ blt(&long_separator_loop); // End while (element < elements_end).
+ DCHECK(result.is(r3));
__ b(&done);
__ bind(&bailout);
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ bind(&done);
- context()->Plug(r0);
+ context()->Plug(r3);
}
@@ -4415,9 +4429,9 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
ExternalReference debug_is_active =
ExternalReference::debug_is_active_address(isolate());
__ mov(ip, Operand(debug_is_active));
- __ ldrb(r0, MemOperand(ip));
- __ SmiTag(r0);
- context()->Plug(r0);
+ __ lbz(r3, MemOperand(ip));
+ __ SmiTag(r3);
+ context()->Plug(r3);
}
@@ -4436,8 +4450,9 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
if (expr->is_jsruntime()) {
// Push the builtins object as the receiver.
Register receiver = LoadDescriptor::ReceiverRegister();
- __ ldr(receiver, GlobalObjectOperand());
- __ ldr(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
+ __ LoadP(receiver, GlobalObjectOperand());
+ __ LoadP(receiver,
+ FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
__ push(receiver);
// Load the function from the receiver.
@@ -4451,9 +4466,9 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
}
// Push the target function under the receiver.
- __ ldr(ip, MemOperand(sp, 0));
+ __ LoadP(ip, MemOperand(sp, 0));
__ push(ip);
- __ str(r0, MemOperand(sp, kPointerSize));
+ __ StoreP(r3, MemOperand(sp, kPointerSize));
// Push the arguments ("left-to-right").
int arg_count = args->length();
@@ -4464,13 +4479,13 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Record source position of the IC call.
SetSourcePosition(expr->position());
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+ __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
__ CallStub(&stub);
// Restore context register.
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r0);
+ context()->DropAndPlug(1, r3);
} else {
// Push the arguments ("left-to-right").
for (int i = 0; i < arg_count; i++) {
@@ -4479,7 +4494,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Call the C runtime function.
__ CallRuntime(expr->function(), arg_count);
- context()->Plug(r0);
+ context()->Plug(r3);
}
}
@@ -4494,22 +4509,22 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (property != NULL) {
VisitForStackValue(property->obj());
VisitForStackValue(property->key());
- __ mov(r1, Operand(Smi::FromInt(strict_mode())));
- __ push(r1);
+ __ LoadSmiLiteral(r4, Smi::FromInt(strict_mode()));
+ __ push(r4);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(r0);
+ context()->Plug(r3);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode
// but "delete this" is allowed.
DCHECK(strict_mode() == SLOPPY || var->is_this());
if (var->IsUnallocated()) {
- __ ldr(r2, GlobalObjectOperand());
- __ mov(r1, Operand(var->name()));
- __ mov(r0, Operand(Smi::FromInt(SLOPPY)));
- __ Push(r2, r1, r0);
+ __ LoadP(r5, GlobalObjectOperand());
+ __ mov(r4, Operand(var->name()));
+ __ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY));
+ __ Push(r5, r4, r3);
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
- context()->Plug(r0);
+ context()->Plug(r3);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
@@ -4517,11 +4532,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else {
// Non-global variable. Call the runtime to try to delete from the
// context where the variable was introduced.
- DCHECK(!context_register().is(r2));
- __ mov(r2, Operand(var->name()));
- __ Push(context_register(), r2);
+ DCHECK(!context_register().is(r5));
+ __ mov(r5, Operand(var->name()));
+ __ Push(context_register(), r5);
__ CallRuntime(Runtime::kDeleteLookupSlot, 2);
- context()->Plug(r0);
+ context()->Plug(r3);
}
} else {
// Result of deleting non-property, non-variable reference is true.
@@ -4548,10 +4563,8 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
} else if (context()->IsTest()) {
const TestContext* test = TestContext::cast(context());
// The labels are swapped for the recursive call.
- VisitForControl(expr->expression(),
- test->false_label(),
- test->true_label(),
- test->fall_through());
+ VisitForControl(expr->expression(), test->false_label(),
+ test->true_label(), test->fall_through());
context()->Plug(test->true_label(), test->false_label());
} else {
// We handle value contexts explicitly rather than simply visiting
@@ -4560,19 +4573,17 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// for the optimizing compiler.
DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
Label materialize_true, materialize_false, done;
- VisitForControl(expr->expression(),
- &materialize_false,
- &materialize_true,
- &materialize_true);
+ VisitForControl(expr->expression(), &materialize_false,
+ &materialize_true, &materialize_true);
__ bind(&materialize_true);
PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
- __ LoadRoot(r0, Heap::kTrueValueRootIndex);
- if (context()->IsStackValue()) __ push(r0);
- __ jmp(&done);
+ __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+ if (context()->IsStackValue()) __ push(r3);
+ __ b(&done);
__ bind(&materialize_false);
PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
- __ LoadRoot(r0, Heap::kFalseValueRootIndex);
- if (context()->IsStackValue()) __ push(r0);
+ __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+ if (context()->IsStackValue()) __ push(r3);
__ bind(&done);
}
break;
@@ -4580,11 +4591,12 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
case Token::TYPEOF: {
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
- { StackValueContext context(this);
+ {
+ StackValueContext context(this);
VisitForTypeofValue(expr->expression());
}
__ CallRuntime(Runtime::kTypeof, 1);
- context()->Plug(r0);
+ context()->Plug(r3);
break;
}
@@ -4611,14 +4623,14 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
} else {
// Reserve space for result of postfix operation.
if (expr->is_postfix() && !context()->IsEffect()) {
- __ mov(ip, Operand(Smi::FromInt(0)));
+ __ LoadSmiLiteral(ip, Smi::FromInt(0));
__ push(ip);
}
switch (assign_type) {
case NAMED_PROPERTY: {
// Put the object both on the stack and in the register.
VisitForStackValue(prop->obj());
- __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+ __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
EmitNamedPropertyLoad(prop);
break;
}
@@ -4627,10 +4639,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(prop->obj()->AsSuperReference());
__ Push(result_register());
- const Register scratch = r1;
- __ ldr(scratch, MemOperand(sp, kPointerSize));
- __ Push(scratch);
- __ Push(result_register());
+ const Register scratch = r4;
+ __ LoadP(scratch, MemOperand(sp, kPointerSize));
+ __ Push(scratch, result_register());
EmitNamedSuperPropertyLoad(prop);
break;
}
@@ -4638,15 +4649,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_SUPER_PROPERTY: {
VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
EmitLoadHomeObject(prop->obj()->AsSuperReference());
- __ Push(result_register());
+ const Register scratch = r4;
+ const Register scratch1 = r5;
+ __ Move(scratch, result_register());
VisitForAccumulatorValue(prop->key());
- __ Push(result_register());
- const Register scratch = r1;
- __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch);
- __ ldr(scratch, MemOperand(sp, 2 * kPointerSize));
- __ Push(scratch);
- __ Push(result_register());
+ __ Push(scratch, result_register());
+ __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
+ __ Push(scratch1, scratch, result_register());
EmitKeyedSuperPropertyLoad(prop);
break;
}
@@ -4654,9 +4663,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
case KEYED_PROPERTY: {
VisitForStackValue(prop->obj());
VisitForStackValue(prop->key());
- __ ldr(LoadDescriptor::ReceiverRegister(),
- MemOperand(sp, 1 * kPointerSize));
- __ ldr(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+ __ LoadP(LoadDescriptor::ReceiverRegister(),
+ MemOperand(sp, 1 * kPointerSize));
+ __ LoadP(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
EmitKeyedPropertyLoad(prop);
break;
}
@@ -4681,7 +4690,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
int count_value = expr->op() == Token::INC ? 1 : -1;
if (ShouldInlineSmiCase(expr->op())) {
Label slow;
- patch_site.EmitJumpIfNotSmi(r0, &slow);
+ patch_site.EmitJumpIfNotSmi(r3, &slow);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -4691,29 +4700,32 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(r0);
+ __ push(r3);
break;
case NAMED_PROPERTY:
- __ str(r0, MemOperand(sp, kPointerSize));
+ __ StoreP(r3, MemOperand(sp, kPointerSize));
break;
case NAMED_SUPER_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
+ __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
break;
case KEYED_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
+ __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
break;
case KEYED_SUPER_PROPERTY:
- __ str(r0, MemOperand(sp, 3 * kPointerSize));
+ __ StoreP(r3, MemOperand(sp, 3 * kPointerSize));
break;
}
}
}
- __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC);
- __ b(vc, &done);
+ Register scratch1 = r4;
+ Register scratch2 = r5;
+ __ LoadSmiLiteral(scratch1, Smi::FromInt(count_value));
+ __ AddAndCheckForOverflow(r3, r3, scratch1, scratch2, r0);
+ __ BranchOnNoOverflow(&done);
// Call stub. Undo operation first.
- __ sub(r0, r0, Operand(Smi::FromInt(count_value)));
- __ jmp(&stub_call);
+ __ sub(r3, r3, scratch1);
+ __ b(&stub_call);
__ bind(&slow);
}
ToNumberStub convert_stub(isolate());
@@ -4727,28 +4739,27 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(r0);
+ __ push(r3);
break;
case NAMED_PROPERTY:
- __ str(r0, MemOperand(sp, kPointerSize));
+ __ StoreP(r3, MemOperand(sp, kPointerSize));
break;
case NAMED_SUPER_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
+ __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
break;
case KEYED_PROPERTY:
- __ str(r0, MemOperand(sp, 2 * kPointerSize));
+ __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
break;
case KEYED_SUPER_PROPERTY:
- __ str(r0, MemOperand(sp, 3 * kPointerSize));
+ __ StoreP(r3, MemOperand(sp, 3 * kPointerSize));
break;
}
}
}
-
__ bind(&stub_call);
- __ mov(r1, r0);
- __ mov(r0, Operand(Smi::FromInt(count_value)));
+ __ mr(r4, r3);
+ __ LoadSmiLiteral(r3, Smi::FromInt(count_value));
// Record position before stub call.
SetSourcePosition(expr->position());
@@ -4759,15 +4770,16 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
patch_site.EmitPatchInfo();
__ bind(&done);
- // Store the value returned in r0.
+ // Store the value returned in r3.
switch (assign_type) {
case VARIABLE:
if (expr->is_postfix()) {
- { EffectContext context(this);
+ {
+ EffectContext context(this);
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context.Plug(r0);
+ context.Plug(r3);
}
// For all contexts except EffectConstant We have the result on
// top of the stack.
@@ -4778,7 +4790,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r0);
+ context()->Plug(r3);
}
break;
case NAMED_PROPERTY: {
@@ -4792,7 +4804,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
context()->PlugTOS();
}
} else {
- context()->Plug(r0);
+ context()->Plug(r3);
}
break;
}
@@ -4803,7 +4815,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
context()->PlugTOS();
}
} else {
- context()->Plug(r0);
+ context()->Plug(r3);
}
break;
}
@@ -4814,7 +4826,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
context()->PlugTOS();
}
} else {
- context()->Plug(r0);
+ context()->Plug(r3);
}
break;
}
@@ -4830,7 +4842,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
context()->PlugTOS();
}
} else {
- context()->Plug(r0);
+ context()->Plug(r3);
}
break;
}
@@ -4844,7 +4856,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
VariableProxy* proxy = expr->AsVariableProxy();
if (proxy != NULL && proxy->var()->IsUnallocated()) {
Comment cmnt(masm_, "[ Global variable");
- __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+ __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
__ mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
if (FLAG_vector_ics) {
__ mov(VectorLoadICDescriptor::SlotRegister(),
@@ -4854,7 +4866,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
// error.
CallLoadIC(NOT_CONTEXTUAL);
PrepareForBailout(expr, TOS_REG);
- context()->Plug(r0);
+ context()->Plug(r3);
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
Comment cmnt(masm_, "[ Lookup slot");
Label done, slow;
@@ -4864,13 +4876,13 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- __ mov(r0, Operand(proxy->name()));
- __ Push(cp, r0);
+ __ mov(r3, Operand(proxy->name()));
+ __ Push(cp, r3);
__ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
PrepareForBailout(expr, TOS_REG);
__ bind(&done);
- context()->Plug(r0);
+ context()->Plug(r3);
} else {
// This expression cannot throw a reference error at the top level.
VisitInDuplicateContext(expr);
@@ -4885,70 +4897,72 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
- { AccumulatorValueContext context(this);
+ {
+ AccumulatorValueContext context(this);
VisitForTypeofValue(sub_expr);
}
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Factory* factory = isolate()->factory();
if (String::Equals(check, factory->number_string())) {
- __ JumpIfSmi(r0, if_true);
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
+ __ JumpIfSmi(r3, if_true);
+ __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r0, ip);
+ __ cmp(r3, ip);
Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->string_string())) {
- __ JumpIfSmi(r0, if_false);
+ __ JumpIfSmi(r3, if_false);
// Check for undetectable objects => false.
- __ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE);
- __ b(ge, if_false);
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
+ __ CompareObjectType(r3, r3, r4, FIRST_NONSTRING_TYPE);
+ __ bge(if_false);
+ __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
+ STATIC_ASSERT((1 << Map::kIsUndetectable) < 0x8000);
+ __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+ Split(eq, if_true, if_false, fall_through, cr0);
} else if (String::Equals(check, factory->symbol_string())) {
- __ JumpIfSmi(r0, if_false);
- __ CompareObjectType(r0, r0, r1, SYMBOL_TYPE);
+ __ JumpIfSmi(r3, if_false);
+ __ CompareObjectType(r3, r3, r4, SYMBOL_TYPE);
Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->boolean_string())) {
- __ CompareRoot(r0, Heap::kTrueValueRootIndex);
- __ b(eq, if_true);
- __ CompareRoot(r0, Heap::kFalseValueRootIndex);
+ __ CompareRoot(r3, Heap::kTrueValueRootIndex);
+ __ beq(if_true);
+ __ CompareRoot(r3, Heap::kFalseValueRootIndex);
Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->undefined_string())) {
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
- __ b(eq, if_true);
- __ JumpIfSmi(r0, if_false);
+ __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ beq(if_true);
+ __ JumpIfSmi(r3, if_false);
// Check for undetectable objects => true.
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset));
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(ne, if_true, if_false, fall_through);
+ __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+ Split(ne, if_true, if_false, fall_through, cr0);
} else if (String::Equals(check, factory->function_string())) {
- __ JumpIfSmi(r0, if_false);
+ __ JumpIfSmi(r3, if_false);
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
- __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE);
- __ b(eq, if_true);
- __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE));
+ __ CompareObjectType(r3, r3, r4, JS_FUNCTION_TYPE);
+ __ beq(if_true);
+ __ cmpi(r4, Operand(JS_FUNCTION_PROXY_TYPE));
Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->object_string())) {
- __ JumpIfSmi(r0, if_false);
- __ CompareRoot(r0, Heap::kNullValueRootIndex);
- __ b(eq, if_true);
+ __ JumpIfSmi(r3, if_false);
+ __ CompareRoot(r3, Heap::kNullValueRootIndex);
+ __ beq(if_true);
// Check for JS objects => true.
- __ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(lt, if_false);
- __ CompareInstanceType(r0, r1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
- __ b(gt, if_false);
+ __ CompareObjectType(r3, r3, r4, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ blt(if_false);
+ __ CompareInstanceType(r3, r4, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+ __ bgt(if_false);
// Check for undetectable objects => false.
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset));
- __ tst(r1, Operand(1 << Map::kIsUndetectable));
- Split(eq, if_true, if_false, fall_through);
+ __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
+ __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+ Split(eq, if_true, if_false, fall_through, cr0);
} else {
- if (if_false != fall_through) __ jmp(if_false);
+ if (if_false != fall_through) __ b(if_false);
}
context()->Plug(if_true, if_false);
}
@@ -4968,8 +4982,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
Token::Value op = expr->op();
VisitForStackValue(expr->left());
@@ -4979,7 +4993,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r0, ip);
+ __ cmp(r3, ip);
Split(eq, if_true, if_false, fall_through);
break;
@@ -4989,7 +5003,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
// The stub returns 0 for true.
- __ tst(r0, r0);
+ __ cmpi(r3, Operand::Zero());
Split(eq, if_true, if_false, fall_through);
break;
}
@@ -4997,15 +5011,15 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cond = CompareIC::ComputeCondition(op);
- __ pop(r1);
+ __ pop(r4);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
- __ orr(r2, r0, Operand(r1));
- patch_site.EmitJumpIfNotSmi(r2, &slow_case);
- __ cmp(r1, r0);
+ __ orx(r5, r3, r4);
+ patch_site.EmitJumpIfNotSmi(r5, &slow_case);
+ __ cmp(r4, r3);
Split(cond, if_true, if_false, NULL);
__ bind(&slow_case);
}
@@ -5016,7 +5030,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ cmp(r0, Operand::Zero());
+ __ cmpi(r3, Operand::Zero());
Split(cond, if_true, if_false, fall_through);
}
}
@@ -5034,22 +5048,22 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
- context()->PrepareTest(&materialize_true, &materialize_false,
- &if_true, &if_false, &fall_through);
+ context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+ &if_false, &fall_through);
VisitForAccumulatorValue(sub_expr);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
if (expr->op() == Token::EQ_STRICT) {
- Heap::RootListIndex nil_value = nil == kNullValue ?
- Heap::kNullValueRootIndex :
- Heap::kUndefinedValueRootIndex;
- __ LoadRoot(r1, nil_value);
- __ cmp(r0, r1);
+ Heap::RootListIndex nil_value = nil == kNullValue
+ ? Heap::kNullValueRootIndex
+ : Heap::kUndefinedValueRootIndex;
+ __ LoadRoot(r4, nil_value);
+ __ cmp(r3, r4);
Split(eq, if_true, if_false, fall_through);
} else {
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
CallIC(ic, expr->CompareOperationFeedbackId());
- __ cmp(r0, Operand(0));
+ __ cmpi(r3, Operand::Zero());
Split(ne, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
@@ -5057,29 +5071,25 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(r0);
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ context()->Plug(r3);
}
-Register FullCodeGenerator::result_register() {
- return r0;
-}
+Register FullCodeGenerator::result_register() { return r3; }
-Register FullCodeGenerator::context_register() {
- return cp;
-}
+Register FullCodeGenerator::context_register() { return cp; }
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
- DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
- __ str(value, MemOperand(fp, frame_offset));
+ DCHECK_EQ(static_cast<int>(POINTER_SIZE_ALIGN(frame_offset)), frame_offset);
+ __ StoreP(value, MemOperand(fp, frame_offset), r0);
}
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
- __ ldr(dst, ContextOperand(cp, context_index));
+ __ LoadP(dst, ContextOperand(cp, context_index), r0);
}
@@ -5091,15 +5101,15 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// as their closure, not the anonymous closure containing the global
// code. Pass a smi sentinel and let the runtime look up the empty
// function.
- __ mov(ip, Operand(Smi::FromInt(0)));
+ __ LoadSmiLiteral(ip, Smi::FromInt(0));
} else if (declaration_scope->is_eval_scope()) {
// Contexts created by a call to eval have the same closure as the
// context calling eval, not the anonymous closure containing the eval
// code. Fetch it from the context.
- __ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
+ __ LoadP(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
} else {
DCHECK(declaration_scope->is_function_scope());
- __ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
__ push(ip);
}
@@ -5109,69 +5119,72 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
// Non-local control flow support.
void FullCodeGenerator::EnterFinallyBlock() {
- DCHECK(!result_register().is(r1));
+ DCHECK(!result_register().is(r4));
// Store result register while executing finally block.
__ push(result_register());
// Cook return address in link register to stack (smi encoded Code* delta)
- __ sub(r1, lr, Operand(masm_->CodeObject()));
- __ SmiTag(r1);
+ __ mflr(r4);
+ __ mov(ip, Operand(masm_->CodeObject()));
+ __ sub(r4, r4, ip);
+ __ SmiTag(r4);
// Store result register while executing finally block.
- __ push(r1);
+ __ push(r4);
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(ip, Operand(pending_message_obj));
- __ ldr(r1, MemOperand(ip));
- __ push(r1);
+ __ LoadP(r4, MemOperand(ip));
+ __ push(r4);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ mov(ip, Operand(has_pending_message));
- STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
- __ ldrb(r1, MemOperand(ip));
- __ SmiTag(r1);
- __ push(r1);
+ __ lbz(r4, MemOperand(ip));
+ __ SmiTag(r4);
+ __ push(r4);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ mov(ip, Operand(pending_message_script));
- __ ldr(r1, MemOperand(ip));
- __ push(r1);
+ __ LoadP(r4, MemOperand(ip));
+ __ push(r4);
}
void FullCodeGenerator::ExitFinallyBlock() {
- DCHECK(!result_register().is(r1));
+ DCHECK(!result_register().is(r4));
// Restore pending message from stack.
- __ pop(r1);
+ __ pop(r4);
ExternalReference pending_message_script =
ExternalReference::address_of_pending_message_script(isolate());
__ mov(ip, Operand(pending_message_script));
- __ str(r1, MemOperand(ip));
+ __ StoreP(r4, MemOperand(ip));
- __ pop(r1);
- __ SmiUntag(r1);
+ __ pop(r4);
+ __ SmiUntag(r4);
ExternalReference has_pending_message =
ExternalReference::address_of_has_pending_message(isolate());
__ mov(ip, Operand(has_pending_message));
- STATIC_ASSERT(sizeof(bool) == 1); // NOLINT(runtime/sizeof)
- __ strb(r1, MemOperand(ip));
+ __ stb(r4, MemOperand(ip));
- __ pop(r1);
+ __ pop(r4);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(ip, Operand(pending_message_obj));
- __ str(r1, MemOperand(ip));
+ __ StoreP(r4, MemOperand(ip));
// Restore result register from stack.
- __ pop(r1);
+ __ pop(r4);
// Uncook return address and return.
__ pop(result_register());
- __ SmiUntag(r1);
- __ add(pc, r1, Operand(masm_->CodeObject()));
+ __ SmiUntag(r4);
+ __ mov(ip, Operand(masm_->CodeObject()));
+ __ add(ip, ip, r4);
+ __ mtctr(ip);
+ __ bctr();
}
@@ -5180,8 +5193,7 @@ void FullCodeGenerator::ExitFinallyBlock() {
#define __ ACCESS_MASM(masm())
FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
- int* stack_depth,
- int* context_length) {
+ int* stack_depth, int* context_length) {
// The macros used here must preserve the result register.
// Because the handler block contains the context of the finally
@@ -5191,151 +5203,81 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
__ Drop(*stack_depth); // Down to the handler block.
if (*context_length > 0) {
// Restore the context to its dedicated register and the stack.
- __ ldr(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ LoadP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
+ __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
}
__ PopTryHandler();
- __ bl(finally_entry_);
+ __ b(finally_entry_, SetLK);
*stack_depth = 0;
*context_length = 0;
return previous_;
}
-
#undef __
-static Address GetInterruptImmediateLoadAddress(Address pc) {
- Address load_address = pc - 2 * Assembler::kInstrSize;
- if (!FLAG_enable_ool_constant_pool) {
- DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
- } else if (Assembler::IsLdrPpRegOffset(Memory::int32_at(load_address))) {
- // This is an extended constant pool lookup.
- if (CpuFeatures::IsSupported(ARMv7)) {
- load_address -= 2 * Assembler::kInstrSize;
- DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
- DCHECK(Assembler::IsMovT(
- Memory::int32_at(load_address + Assembler::kInstrSize)));
- } else {
- load_address -= 4 * Assembler::kInstrSize;
- DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
- DCHECK(Assembler::IsOrrImmed(
- Memory::int32_at(load_address + Assembler::kInstrSize)));
- DCHECK(Assembler::IsOrrImmed(
- Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
- DCHECK(Assembler::IsOrrImmed(
- Memory::int32_at(load_address + 3 * Assembler::kInstrSize)));
- }
- } else if (CpuFeatures::IsSupported(ARMv7) &&
- Assembler::IsMovT(Memory::int32_at(load_address))) {
- // This is a movw / movt immediate load.
- load_address -= Assembler::kInstrSize;
- DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
- } else if (!CpuFeatures::IsSupported(ARMv7) &&
- Assembler::IsOrrImmed(Memory::int32_at(load_address))) {
- // This is a mov / orr immediate load.
- load_address -= 3 * Assembler::kInstrSize;
- DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
- DCHECK(Assembler::IsOrrImmed(
- Memory::int32_at(load_address + Assembler::kInstrSize)));
- DCHECK(Assembler::IsOrrImmed(
- Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
- } else {
- // This is a small constant pool lookup.
- DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
- }
- return load_address;
-}
-
-
-void BackEdgeTable::PatchAt(Code* unoptimized_code,
- Address pc,
+void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
BackEdgeState target_state,
Code* replacement_code) {
- Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
- Address branch_address = pc_immediate_load_address - Assembler::kInstrSize;
- CodePatcher patcher(branch_address, 1);
+ Address mov_address = Assembler::target_address_from_return_address(pc);
+ Address cmp_address = mov_address - 2 * Assembler::kInstrSize;
+ CodePatcher patcher(cmp_address, 1);
+
switch (target_state) {
- case INTERRUPT:
- {
+ case INTERRUPT: {
// <decrement profiling counter>
- // bpl ok
- // ; load interrupt stub address into ip - either of (for ARMv7):
- // ; <small cp load> | <extended cp load> | <immediate load>
- // ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm
- // | movt ip, #imm | movw ip, #imm
- // | ldr ip, [pp, ip]
- // ; or (for ARMv6):
- // ; <small cp load> | <extended cp load> | <immediate load>
- // ldr ip, [pc/pp, #imm] | mov ip, #imm | mov ip, #imm
- // | orr ip, ip, #imm> | orr ip, ip, #imm
- // | orr ip, ip, #imm> | orr ip, ip, #imm
- // | orr ip, ip, #imm> | orr ip, ip, #imm
- // blx ip
+ // cmpi r6, 0
+ // bge <ok> ;; not changed
+ // mov r12, <interrupt stub address>
+ // mtlr r12
+ // blrl
// <reset profiling counter>
// ok-label
-
- // Calculate branch offset to the ok-label - this is the difference
- // between the branch address and |pc| (which points at <blx ip>) plus
- // kProfileCounterResetSequence instructions
- int branch_offset = pc - Instruction::kPCReadOffset - branch_address +
- kProfileCounterResetSequenceLength;
- patcher.masm()->b(branch_offset, pl);
+ patcher.masm()->cmpi(r6, Operand::Zero());
break;
}
case ON_STACK_REPLACEMENT:
case OSR_AFTER_STACK_CHECK:
// <decrement profiling counter>
- // mov r0, r0 (NOP)
- // ; load on-stack replacement address into ip - either of (for ARMv7):
- // ; <small cp load> | <extended cp load> | <immediate load>
- // ldr ip, [pc/pp, #imm] | movw ip, #imm | movw ip, #imm
- // | movt ip, #imm> | movw ip, #imm
- // | ldr ip, [pp, ip]
- // ; or (for ARMv6):
- // ; <small cp load> | <extended cp load> | <immediate load>
- // ldr ip, [pc/pp, #imm] | mov ip, #imm | mov ip, #imm
- // | orr ip, ip, #imm> | orr ip, ip, #imm
- // | orr ip, ip, #imm> | orr ip, ip, #imm
- // | orr ip, ip, #imm> | orr ip, ip, #imm
- // blx ip
+ // crset
+ // bge <ok> ;; not changed
+ // mov r12, <on-stack replacement address>
+ // mtlr r12
+ // blrl
// <reset profiling counter>
- // ok-label
- patcher.masm()->nop();
+ // ok-label ----- pc_after points here
+
+ // Set the LT bit such that bge is a NOP
+ patcher.masm()->crset(Assembler::encode_crbit(cr7, CR_LT));
break;
}
- // Replace the call address.
- Assembler::set_target_address_at(pc_immediate_load_address, unoptimized_code,
- replacement_code->entry());
+ // Replace the stack check address in the mov sequence with the
+ // entry address of the replacement code.
+ Assembler::set_target_address_at(mov_address, unoptimized_code,
+ replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, pc_immediate_load_address, replacement_code);
+ unoptimized_code, mov_address, replacement_code);
}
BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
- Isolate* isolate,
- Code* unoptimized_code,
- Address pc) {
- DCHECK(Assembler::IsBlxIp(Memory::int32_at(pc - Assembler::kInstrSize)));
-
- Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
- Address branch_address = pc_immediate_load_address - Assembler::kInstrSize;
- Address interrupt_address = Assembler::target_address_at(
- pc_immediate_load_address, unoptimized_code);
-
- if (Assembler::IsBranch(Assembler::instr_at(branch_address))) {
- DCHECK(interrupt_address ==
- isolate->builtins()->InterruptCheck()->entry());
+ Isolate* isolate, Code* unoptimized_code, Address pc) {
+ Address mov_address = Assembler::target_address_from_return_address(pc);
+ Address cmp_address = mov_address - 2 * Assembler::kInstrSize;
+ Address interrupt_address =
+ Assembler::target_address_at(mov_address, unoptimized_code);
+
+ if (Assembler::IsCmpImmediate(Assembler::instr_at(cmp_address))) {
+ DCHECK(interrupt_address == isolate->builtins()->InterruptCheck()->entry());
return INTERRUPT;
}
- DCHECK(Assembler::IsNop(Assembler::instr_at(branch_address)));
+ DCHECK(Assembler::IsCrSet(Assembler::instr_at(cmp_address)));
- if (interrupt_address ==
- isolate->builtins()->OnStackReplacement()->entry()) {
+ if (interrupt_address == isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
}
@@ -5343,8 +5285,6 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
isolate->builtins()->OsrAfterStackCheck()->entry());
return OSR_AFTER_STACK_CHECK;
}
-
-
-} } // namespace v8::internal
-
-#endif // V8_TARGET_ARCH_ARM
+}
+} // namespace v8::internal
+#endif // V8_TARGET_ARCH_PPC
« no previous file with comments | « src/ppc/frames-ppc.cc ('k') | src/ppc/interface-descriptors-ppc.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698