Chromium Code Reviews

Unified Diff: src/full-codegen/s390/full-codegen-s390.cc

Issue 1764153002: S390: Initial impl of full-codegen (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: rebased on master Created 4 years, 9 months ago
Index: src/full-codegen/s390/full-codegen-s390.cc
diff --git a/src/full-codegen/ppc/full-codegen-ppc.cc b/src/full-codegen/s390/full-codegen-s390.cc
similarity index 78%
copy from src/full-codegen/ppc/full-codegen-ppc.cc
copy to src/full-codegen/s390/full-codegen-s390.cc
index 632d985f17b7c51f475522cdbcda074c2626ba6f..5932f9955b634f97d7ac09234112f41086ed5126 100644
--- a/src/full-codegen/ppc/full-codegen-ppc.cc
+++ b/src/full-codegen/s390/full-codegen-s390.cc
@@ -1,8 +1,8 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#if V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_S390
#include "src/ast/scopes.h"
#include "src/code-factory.h"
@@ -13,8 +13,8 @@
#include "src/ic/ic.h"
#include "src/parsing/parser.h"
-#include "src/ppc/code-stubs-ppc.h"
-#include "src/ppc/macro-assembler-ppc.h"
+#include "src/s390/code-stubs-s390.h"
+#include "src/s390/macro-assembler-s390.h"
namespace v8 {
namespace internal {
@@ -27,7 +27,7 @@ namespace internal {
// marker is a cmpi rx, #yyy instruction, and x * 0x0000ffff + yyy (raw 16 bit
// immediate value is used) is the delta from the pc to the first instruction of
// the patchable code.
-// See PatchInlinedSmiCode in ic-ppc.cc for the code that patches it
+// See PatchInlinedSmiCode in ic-s390.cc for the code that patches it
class JumpPatchSite BASE_EMBEDDED {
public:
explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
@@ -42,34 +42,41 @@ class JumpPatchSite BASE_EMBEDDED {
// the inlined smi code.
void EmitJumpIfNotSmi(Register reg, Label* target) {
DCHECK(!patch_site_.is_bound() && !info_emitted_);
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
__ bind(&patch_site_);
- __ cmp(reg, reg, cr0);
- __ beq(target, cr0); // Always taken before patched.
+ __ CmpP(reg, reg);
+// Emit a nop to reserve extra space for patching on 31-bit,
+// as the TestIfSmi sequence uses a 4-byte TMLL
+#ifndef V8_TARGET_ARCH_S390X
+ __ nop();
+#endif
+ __ beq(target); // Always taken before patched.
}
// When initially emitting this ensure that a jump is never generated to skip
// the inlined smi code.
void EmitJumpIfSmi(Register reg, Label* target) {
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
DCHECK(!patch_site_.is_bound() && !info_emitted_);
__ bind(&patch_site_);
- __ cmp(reg, reg, cr0);
- __ bne(target, cr0); // Never taken before patched.
+ __ CmpP(reg, reg);
+// Emit a nop to reserve extra space for patching on 31-bit,
+// as the TestIfSmi sequence uses a 4-byte TMLL
+#ifndef V8_TARGET_ARCH_S390X
+ __ nop();
+#endif
+ __ bne(target); // Never taken before patched.
}
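
[Note: a quick sketch of the size accounting behind the extra nop. The instruction lengths below are assumed from the z/Architecture RR and RI encodings, not stated in this patch: on 31-bit the unpatched CmpP(reg, reg) assembles to a 2-byte CR, while the patched-in TestIfSmi sequence needs a 4-byte TMLL, so a 2-byte nop pads the site to the required width. On 64-bit CmpP emits a 4-byte CGR, which is why the padding is guarded by #ifndef V8_TARGET_ARCH_S390X.]

// Assumed encodings; illustrative only, not part of the patch.
constexpr int kCRSize = 2;    // CR r1,r2 (RR format)
constexpr int kNopSize = 2;   // 2-byte nop on 31-bit
constexpr int kTMLLSize = 4;  // TMLL r1,i16 (RI format)
static_assert(kCRSize + kNopSize == kTMLLSize,
              "31-bit patch site must be wide enough for TMLL");
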
void EmitPatchInfo() {
if (patch_site_.is_bound()) {
- int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
- Register reg;
- // I believe this is using reg as the high bits of of the offset
- reg.set_code(delta_to_patch_site / kOff16Mask);
- __ cmpi(reg, Operand(delta_to_patch_site % kOff16Mask));
+ int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
+ DCHECK(is_int16(delta_to_patch_site));
+ __ chi(r0, Operand(delta_to_patch_site));
#ifdef DEBUG
info_emitted_ = true;
#endif
} else {
- __ nop(); // Signals no inlined code.
+ __ nop();
+ __ nop();
}
}
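
[Note: the distance from the chi back to the patch site is encoded directly in the instruction's signed 16-bit immediate, which PatchInlinedSmiCode later reads back; two nops signal that no inlined smi code exists. A minimal sketch of the encoding contract follows; the helper names are illustrative, not V8's.]

#include <cassert>
#include <cstdint>

constexpr bool is_int16(std::intptr_t v) { return v >= -32768 && v <= 32767; }

// Immediate embedded in `chi r0, <imm>`; the patcher recovers the distance
// back to the patch site from this value.
std::int16_t EncodePatchDelta(std::intptr_t delta_to_patch_site) {
  assert(is_int16(delta_to_patch_site));  // mirrors the DCHECK above
  return static_cast<std::int16_t>(delta_to_patch_site);
}
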
@@ -82,23 +89,22 @@ class JumpPatchSite BASE_EMBEDDED {
#endif
};
-
// Generate code for a JS function. On entry to the function the receiver
// and arguments have been pushed on the stack left to right. The actual
// argument count matches the formal parameter count expected by the
// function.
//
// The live registers are:
-// o r4: the JS function object being called (i.e., ourselves)
-// o r6: the new target value
+// o r3: the JS function object being called (i.e., ourselves)
+// o r5: the new target value
// o cp: our context
-// o fp: our caller's frame pointer (aka r31)
+// o fp: our caller's frame pointer
// o sp: stack pointer
// o lr: return address
// o ip: our own function entry (required by the prologue)
//
// The function builds a JS frame. Please see JavaScriptFrameConstants in
-// frames-ppc.h for its layout.
+// frames-s390.h for its layout.
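
[Note: for readers diffing against the PPC original, the port shifts every JS-visible register down by one, since the zSeries ABI designates r2 as the return/result register. The mapping below is inferred from the substitutions throughout this patch.]

// Inferred PPC -> S390 register mapping used throughout this file.
struct RegMap { const char* ppc; const char* s390; const char* role; };
constexpr RegMap kPortMap[] = {
    {"r3", "r2", "result / return value"},
    {"r4", "r3", "JS function (callee)"},
    {"r5", "r4", "scratch"},
    {"r6", "r5", "new.target"},
    {"r7", "r6", "scratch"},
};
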
void FullCodeGenerator::Generate() {
CompilationInfo* info = info_;
profiling_counter_ = isolate()->factory()->NewCell(
@@ -110,9 +116,9 @@ void FullCodeGenerator::Generate() {
if (FLAG_debug_code && info->ExpectsJSReceiverAsReceiver()) {
int receiver_offset = info->scope()->num_parameters() * kPointerSize;
- __ LoadP(r5, MemOperand(sp, receiver_offset), r0);
- __ AssertNotSmi(r5);
- __ CompareObjectType(r5, r5, no_reg, FIRST_JS_RECEIVER_TYPE);
+ __ LoadP(r4, MemOperand(sp, receiver_offset), r0);
+ __ AssertNotSmi(r4);
+ __ CompareObjectType(r4, r4, no_reg, FIRST_JS_RECEIVER_TYPE);
__ Assert(ge, kSloppyFunctionExpectsJSReceiverReceiver);
}
@@ -122,12 +128,6 @@ void FullCodeGenerator::Generate() {
FrameScope frame_scope(masm_, StackFrame::MANUAL);
int prologue_offset = masm_->pc_offset();
- if (prologue_offset) {
- // Prologue logic requires it's starting address in ip and the
- // corresponding offset from the function entry.
- prologue_offset += Instruction::kInstrSize;
- __ addi(ip, ip, Operand(prologue_offset));
- }
info->set_prologue_offset(prologue_offset);
__ Prologue(info->GeneratePreagedPrologue(), ip, prologue_offset);
@@ -140,10 +140,10 @@ void FullCodeGenerator::Generate() {
if (locals_count > 0) {
if (locals_count >= 128) {
Label ok;
- __ Add(ip, sp, -(locals_count * kPointerSize), r0);
+ __ AddP(ip, sp, Operand(-(locals_count * kPointerSize)));
__ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
- __ cmpl(ip, r5);
- __ bc_short(ge, &ok);
+ __ CmpLogicalP(ip, r5);
+ __ bge(&ok, Label::kNear);
__ CallRuntime(Runtime::kThrowStackOverflow);
__ bind(&ok);
}
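
[Note: the check above computes the would-be stack pointer after allocating all locals and bails out to Runtime::kThrowStackOverflow if it dips below the real stack limit; CmpLogicalP makes the comparison unsigned. In plain C++ terms, as a sketch rather than the V8 API:]

#include <cstddef>
#include <cstdint>

// ip = sp - locals_count * kPointerSize; CmpLogicalP(ip, limit); bge(&ok)
bool WouldOverflowStack(std::uintptr_t sp, int locals_count,
                        std::size_t kPointerSize, std::uintptr_t real_limit) {
  return sp - locals_count * kPointerSize < real_limit;  // unsigned compare
}
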
@@ -151,35 +151,40 @@ void FullCodeGenerator::Generate() {
int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
if (locals_count >= kMaxPushes) {
int loop_iterations = locals_count / kMaxPushes;
- __ mov(r5, Operand(loop_iterations));
- __ mtctr(r5);
+ __ mov(r4, Operand(loop_iterations));
Label loop_header;
__ bind(&loop_header);
// Do pushes.
+ // TODO(joransiu): Use MVC for better performance
+ __ lay(sp, MemOperand(sp, -kMaxPushes * kPointerSize));
for (int i = 0; i < kMaxPushes; i++) {
- __ push(ip);
+ __ StoreP(ip, MemOperand(sp, i * kPointerSize));
}
// Continue loop if not done.
- __ bdnz(&loop_header);
+ __ BranchOnCount(r4, &loop_header);
}
int remaining = locals_count % kMaxPushes;
// Emit the remaining pushes.
- for (int i = 0; i < remaining; i++) {
- __ push(ip);
+ // TODO(joransiu): Use MVC for better performance
+ if (remaining > 0) {
+ __ lay(sp, MemOperand(sp, -remaining * kPointerSize));
+ for (int i = 0; i < remaining; i++) {
+ __ StoreP(ip, MemOperand(sp, i * kPointerSize));
+ }
}
}
}
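
[Note: the initialization above replaces PPC's CTR-driven loop (mtctr/bdnz) with BranchOnCount, and bumps sp once per batch with lay instead of once per push. The batching arithmetic, sketched in plain C++ with the stack modeled as a vector:]

#include <cstdint>
#include <vector>

// Sketch of the batching above: one sp adjustment (lay) per batch of stores.
void InitLocals(std::vector<std::uintptr_t>& stack, std::uintptr_t undef,
                int locals_count, bool optimize_for_size) {
  const int kMaxPushes = optimize_for_size ? 4 : 32;
  for (int it = 0; it < locals_count / kMaxPushes; ++it)       // BranchOnCount
    stack.insert(stack.end(), kMaxPushes, undef);              // lay + StorePs
  stack.insert(stack.end(), locals_count % kMaxPushes, undef); // tail batch
}
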
- bool function_in_register_r4 = true;
+ bool function_in_register_r3 = true;
// Possibly allocate a local context.
if (info->scope()->num_heap_slots() > 0) {
- // Argument to NewContext is the function, which is still in r4.
+ // Argument to NewContext is the function, which is still in r3.
Comment cmnt(masm_, "[ Allocate context");
bool need_write_barrier = true;
int slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
if (info->scope()->is_script_scope()) {
- __ push(r4);
+ __ push(r3);
__ Push(info->scope()->GetScopeInfo(info->isolate()));
__ CallRuntime(Runtime::kNewScriptContext);
PrepareForBailoutForId(BailoutId::ScriptContext(), TOS_REG);
@@ -187,7 +192,7 @@ void FullCodeGenerator::Generate() {
DCHECK_NULL(info->scope()->new_target_var());
} else {
if (info->scope()->new_target_var() != nullptr) {
- __ push(r6); // Preserve new target.
+ __ push(r5); // Preserve new target.
}
if (slots <= FastNewContextStub::kMaximumSlots) {
FastNewContextStub stub(isolate(), slots);
@@ -195,18 +200,18 @@ void FullCodeGenerator::Generate() {
// Result of FastNewContextStub is always in new space.
need_write_barrier = false;
} else {
- __ push(r4);
+ __ push(r3);
__ CallRuntime(Runtime::kNewFunctionContext);
}
if (info->scope()->new_target_var() != nullptr) {
- __ pop(r6); // Preserve new target.
+ __ pop(r5); // Preserve new target.
}
}
- function_in_register_r4 = false;
- // Context is returned in r3. It replaces the context passed to us.
+ function_in_register_r3 = false;
+ // Context is returned in r2. It replaces the context passed to us.
// It's saved in the stack and kept live in cp.
- __ mr(cp, r3);
- __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ LoadRR(cp, r2);
+ __ StoreP(r2, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info->scope()->num_parameters();
int first_parameter = info->scope()->has_this_declaration() ? -1 : 0;
@@ -216,18 +221,18 @@ void FullCodeGenerator::Generate() {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
- __ LoadP(r3, MemOperand(fp, parameter_offset), r0);
+ __ LoadP(r2, MemOperand(fp, parameter_offset), r0);
// Store it in the context.
MemOperand target = ContextMemOperand(cp, var->index());
- __ StoreP(r3, target, r0);
+ __ StoreP(r2, target);
// Update the write barrier.
if (need_write_barrier) {
- __ RecordWriteContextSlot(cp, target.offset(), r3, r5,
+ __ RecordWriteContextSlot(cp, target.offset(), r2, r4,
kLRHasBeenSaved, kDontSaveFPRegs);
} else if (FLAG_debug_code) {
Label done;
- __ JumpIfInNewSpace(cp, r3, &done);
+ __ JumpIfInNewSpace(cp, r2, &done);
__ Abort(kExpectedNewSpaceObject);
__ bind(&done);
}
@@ -245,18 +250,18 @@ void FullCodeGenerator::Generate() {
Variable* this_function_var = scope()->this_function_var();
if (this_function_var != nullptr) {
Comment cmnt(masm_, "[ This function");
- if (!function_in_register_r4) {
- __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ if (!function_in_register_r3) {
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
// The write barrier clobbers register again, keep it marked as such.
}
- SetVar(this_function_var, r4, r3, r5);
+ SetVar(this_function_var, r3, r2, r4);
}
// Possibly set up a local binding to the new target value.
Variable* new_target_var = scope()->new_target_var();
if (new_target_var != nullptr) {
Comment cmnt(masm_, "[ new.target");
- SetVar(new_target_var, r6, r3, r5);
+ SetVar(new_target_var, r5, r2, r4);
}
// Possibly allocate RestParameters
@@ -264,35 +269,37 @@ void FullCodeGenerator::Generate() {
Variable* rest_param = scope()->rest_parameter(&rest_index);
if (rest_param) {
Comment cmnt(masm_, "[ Allocate rest parameter array");
- if (!function_in_register_r4) {
- __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+ if (!function_in_register_r3) {
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
FastNewRestParameterStub stub(isolate());
__ CallStub(&stub);
- function_in_register_r4 = false;
- SetVar(rest_param, r3, r4, r5);
+
+ function_in_register_r3 = false;
+ SetVar(rest_param, r2, r3, r4);
}
Variable* arguments = scope()->arguments();
if (arguments != NULL) {
// Function uses arguments object.
Comment cmnt(masm_, "[ Allocate arguments object");
- if (!function_in_register_r4) {
+ if (!function_in_register_r3) {
// Load this again, if it's used by the local context below.
- __ LoadP(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
if (is_strict(language_mode()) || !has_simple_parameters()) {
FastNewStrictArgumentsStub stub(isolate());
__ CallStub(&stub);
} else if (literal()->has_duplicate_parameters()) {
- __ Push(r4);
+ __ Push(r3);
__ CallRuntime(Runtime::kNewSloppyArguments_Generic);
} else {
FastNewSloppyArgumentsStub stub(isolate());
__ CallStub(&stub);
}
- SetVar(arguments, r3, r4, r5);
+ SetVar(arguments, r2, r3, r4);
}
if (FLAG_trace) {
@@ -302,7 +309,9 @@ void FullCodeGenerator::Generate() {
// Visit the declarations and body unless there is an illegal
// redeclaration.
if (scope()->HasIllegalRedeclaration()) {
- EmitIllegalRedeclaration();
+ Comment cmnt(masm_, "[ Declarations");
+ VisitForEffect(scope()->GetIllegalRedeclaration());
+
} else {
PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
{
@@ -320,8 +329,8 @@ void FullCodeGenerator::Generate() {
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
Label ok;
__ LoadRoot(ip, Heap::kStackLimitRootIndex);
- __ cmpl(sp, ip);
- __ bc_short(ge, &ok);
+ __ CmpLogicalP(sp, ip);
+ __ bge(&ok, Label::kNear);
__ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
__ bind(&ok);
}
@@ -338,37 +347,35 @@ void FullCodeGenerator::Generate() {
// the body.
{
Comment cmnt(masm_, "[ return <undefined>;");
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
}
EmitReturnSequence();
-
- if (HasStackOverflow()) {
- masm_->AbortConstantPoolBuilding();
- }
}
-
void FullCodeGenerator::ClearAccumulator() {
- __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ __ LoadSmiLiteral(r2, Smi::FromInt(0));
}
-
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
- __ mov(r5, Operand(profiling_counter_));
- __ LoadP(r6, FieldMemOperand(r5, Cell::kValueOffset));
- __ SubSmiLiteral(r6, r6, Smi::FromInt(delta), r0);
- __ StoreP(r6, FieldMemOperand(r5, Cell::kValueOffset), r0);
+ __ mov(r4, Operand(profiling_counter_));
+ intptr_t smi_delta = reinterpret_cast<intptr_t>(Smi::FromInt(delta));
+ if (CpuFeatures::IsSupported(GENERAL_INSTR_EXT) && is_int8(-smi_delta)) {
+ __ AddP(FieldMemOperand(r4, Cell::kValueOffset), Operand(-smi_delta));
+ __ LoadP(r5, FieldMemOperand(r4, Cell::kValueOffset));
+ } else {
+ __ LoadP(r5, FieldMemOperand(r4, Cell::kValueOffset));
+ __ SubSmiLiteral(r5, r5, Smi::FromInt(delta), r0);
+ __ StoreP(r5, FieldMemOperand(r4, Cell::kValueOffset));
+ }
}
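
[Note: two details worth calling out in the decrement above: the counter cell holds a smi, so the delta is pre-tagged via the reinterpret_cast of Smi::FromInt, and when the general-instructions-extension facility is present and the negated tagged delta fits in a signed byte, a single add-to-storage (AddP on a MemOperand, i.e. ASI) replaces the load/sub/store triple. A sketch of the tagging assumption, assuming V8's smi layouts of the time (value << 1 on 31-bit, value << 32 on 64-bit) and a 64-bit host:]

#include <cstdint>

constexpr std::intptr_t SmiFromInt(int v, bool is_64bit) {
  return is_64bit ? static_cast<std::intptr_t>(v) << 32   // assumes 64-bit host
                  : static_cast<std::intptr_t>(v) << 1;
}
constexpr bool is_int8(std::intptr_t v) { return v >= -128 && v <= 127; }

// Small deltas qualify for the ASI fast path on 31-bit; on 64-bit the
// shifted tag makes any nonzero delta too wide for the 8-bit immediate.
static_assert(is_int8(-SmiFromInt(5, false)), "31-bit: fast path eligible");
static_assert(!is_int8(-SmiFromInt(5, true)), "64-bit: falls back to slow path");
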
-
void FullCodeGenerator::EmitProfilingCounterReset() {
int reset_value = FLAG_interrupt_budget;
- __ mov(r5, Operand(profiling_counter_));
- __ LoadSmiLiteral(r6, Smi::FromInt(reset_value));
- __ StoreP(r6, FieldMemOperand(r5, Cell::kValueOffset), r0);
+ __ mov(r4, Operand(profiling_counter_));
+ __ LoadSmiLiteral(r5, Smi::FromInt(reset_value));
+ __ StoreP(r5, FieldMemOperand(r4, Cell::kValueOffset));
}
-
void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Back edge bookkeeping");
@@ -380,11 +387,8 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
int weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
EmitProfilingCounterDecrement(weight);
{
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
- Assembler::BlockConstantPoolEntrySharingScope prevent_entry_sharing(masm_);
// BackEdgeTable::PatchAt manipulates this sequence.
- __ cmpi(r6, Operand::Zero());
- __ bc_short(ge, &ok);
+ __ bge(&ok, Label::kNear);
__ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
// Record a mapping of this PC offset to the OSR id. This is used to find
@@ -414,15 +418,15 @@ void FullCodeGenerator::EmitProfilingCounterHandlingForReturnSequence(
}
EmitProfilingCounterDecrement(weight);
Label ok;
- __ cmpi(r6, Operand::Zero());
+ __ CmpP(r5, Operand::Zero());
__ bge(&ok);
// Don't need to save result register if we are going to do a tail call.
if (!is_tail_call) {
- __ push(r3);
+ __ push(r2);
}
__ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
if (!is_tail_call) {
- __ pop(r3);
+ __ pop(r2);
}
EmitProfilingCounterReset();
__ bind(&ok);
@@ -436,8 +440,8 @@ void FullCodeGenerator::EmitReturnSequence() {
__ bind(&return_label_);
if (FLAG_trace) {
// Push the return value on the stack as the parameter.
- // Runtime::TraceExit returns its parameter in r3
- __ push(r3);
+ // Runtime::TraceExit returns its parameter in r2
+ __ push(r2);
__ CallRuntime(Runtime::kTraceExit);
}
EmitProfilingCounterHandlingForReturnSequence(false);
@@ -445,40 +449,37 @@ void FullCodeGenerator::EmitReturnSequence() {
// Make sure that the constant pool is not emitted inside of the return
// sequence.
{
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+ // Here we use masm_-> instead of the __ macro to prevent the code
+ // coverage tool from instrumenting us, as we rely on the code size here.
int32_t arg_count = info_->scope()->num_parameters() + 1;
int32_t sp_delta = arg_count * kPointerSize;
SetReturnPosition(literal());
__ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
- __ blr();
+
+ __ Ret();
}
}
}
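
[Note: LeaveFrame tears down the frame and drops the arguments plus the receiver in one sp adjustment, and PPC's blr becomes S390's Ret (branch to r14). The delta computation, spelled out as a sketch:]

// sp_delta covers the formal parameters plus the receiver slot.
int ReturnSpDelta(int num_parameters, int kPointerSize) {
  int arg_count = num_parameters + 1;  // +1: the receiver
  return arg_count * kPointerSize;     // bytes dropped by LeaveFrame
}
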
-
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
codegen()->GetVar(result_register(), var);
codegen()->PushOperand(result_register());
}
-
void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {}
-
void FullCodeGenerator::AccumulatorValueContext::Plug(
Heap::RootListIndex index) const {
__ LoadRoot(result_register(), index);
}
-
void FullCodeGenerator::StackValueContext::Plug(
Heap::RootListIndex index) const {
__ LoadRoot(result_register(), index);
codegen()->PushOperand(result_register());
}
-
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
false_label_);
@@ -494,27 +495,23 @@ void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
}
}
-
void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {}
-
void FullCodeGenerator::AccumulatorValueContext::Plug(
Handle<Object> lit) const {
__ mov(result_register(), Operand(lit));
}
-
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
// Immediates cannot be pushed directly.
__ mov(result_register(), Operand(lit));
codegen()->PushOperand(result_register());
}
-
void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
false_label_);
- DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectable());
+ DCHECK(lit->IsNull() || lit->IsUndefined() || !lit->IsUndetectableObject());
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
if (false_label_ != fall_through_) __ b(false_label_);
} else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -538,7 +535,6 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
}
}
-
void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
Register reg) const {
DCHECK(count > 0);
@@ -546,53 +542,47 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
__ StoreP(reg, MemOperand(sp, 0));
}
-
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
Label* materialize_false) const {
DCHECK(materialize_true == materialize_false);
__ bind(materialize_true);
}
-
void FullCodeGenerator::AccumulatorValueContext::Plug(
Label* materialize_true, Label* materialize_false) const {
Label done;
__ bind(materialize_true);
__ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
- __ b(&done);
+ __ b(&done, Label::kNear);
__ bind(materialize_false);
__ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
__ bind(&done);
}
-
void FullCodeGenerator::StackValueContext::Plug(
Label* materialize_true, Label* materialize_false) const {
Label done;
__ bind(materialize_true);
__ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ b(&done);
+ __ b(&done, Label::kNear);
__ bind(materialize_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ bind(&done);
codegen()->PushOperand(ip);
}
-
void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
Label* materialize_false) const {
DCHECK(materialize_true == true_label_);
DCHECK(materialize_false == false_label_);
}
-
void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
__ LoadRoot(result_register(), value_root_index);
}
-
void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
Heap::RootListIndex value_root_index =
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
@@ -600,7 +590,6 @@ void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
codegen()->PushOperand(ip);
}
-
void FullCodeGenerator::TestContext::Plug(bool flag) const {
codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
false_label_);
@@ -611,29 +600,26 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const {
}
}
-
void FullCodeGenerator::DoTest(Expression* condition, Label* if_true,
Label* if_false, Label* fall_through) {
- Handle<Code> ic = ToBooleanICStub::GetUninitialized(isolate());
+ Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
CallIC(ic, condition->test_id());
__ CompareRoot(result_register(), Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
}
-
void FullCodeGenerator::Split(Condition cond, Label* if_true, Label* if_false,
- Label* fall_through, CRegister cr) {
+ Label* fall_through) {
if (if_false == fall_through) {
- __ b(cond, if_true, cr);
+ __ b(cond, if_true);
} else if (if_true == fall_through) {
- __ b(NegateCondition(cond), if_false, cr);
+ __ b(NegateCondition(cond), if_false);
} else {
- __ b(cond, if_true, cr);
+ __ b(cond, if_true);
__ b(if_false);
}
}
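
[Note: the PPC version took a CRegister argument because POWER has eight condition-register fields; S390 has a single condition code, so the parameter disappears. The fall-through logic is unchanged: one conditional branch when a target immediately follows, two branches otherwise. Condition negation, sketched with an assumed encoding; V8's real Condition type differs.]

enum Condition { eq, ne, lt, ge, gt, le };

inline Condition NegateCondition(Condition c) {
  switch (c) {
    case eq: return ne;  case ne: return eq;
    case lt: return ge;  case ge: return lt;
    case gt: return le;  case le: return gt;
  }
  return eq;  // unreachable
}
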
-
MemOperand FullCodeGenerator::StackOperand(Variable* var) {
DCHECK(var->IsStackAllocated());
// Offset is negative because higher indexes are at lower addresses.
@@ -647,7 +633,6 @@ MemOperand FullCodeGenerator::StackOperand(Variable* var) {
return MemOperand(fp, offset);
}
-
MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
DCHECK(var->IsContextSlot() || var->IsStackAllocated());
if (var->IsContextSlot()) {
@@ -659,14 +644,12 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
}
}
-
void FullCodeGenerator::GetVar(Register dest, Variable* var) {
// Use destination as scratch.
MemOperand location = VarOperand(var, dest);
__ LoadP(dest, location, r0);
}
-
void FullCodeGenerator::SetVar(Variable* var, Register src, Register scratch0,
Register scratch1) {
DCHECK(var->IsContextSlot() || var->IsStackAllocated());
@@ -674,7 +657,7 @@ void FullCodeGenerator::SetVar(Variable* var, Register src, Register scratch0,
DCHECK(!scratch0.is(scratch1));
DCHECK(!scratch1.is(src));
MemOperand location = VarOperand(var, scratch0);
- __ StoreP(src, location, r0);
+ __ StoreP(src, location);
// Emit the write barrier code if the location is in the heap.
if (var->IsContextSlot()) {
@@ -683,7 +666,6 @@ void FullCodeGenerator::SetVar(Variable* var, Register src, Register scratch0,
}
}
-
void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
bool should_normalize,
Label* if_true,
@@ -697,29 +679,26 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
if (should_normalize) __ b(&skip);
PrepareForBailout(expr, TOS_REG);
if (should_normalize) {
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r3, ip);
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, NULL);
__ bind(&skip);
}
}
-
void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
// The variable in the declaration always resides in the current function
// context.
DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
if (FLAG_debug_code) {
// Check that we're not inside a with or catch context.
- __ LoadP(r4, FieldMemOperand(cp, HeapObject::kMapOffset));
- __ CompareRoot(r4, Heap::kWithContextMapRootIndex);
+ __ LoadP(r3, FieldMemOperand(cp, HeapObject::kMapOffset));
+ __ CompareRoot(r3, Heap::kWithContextMapRootIndex);
__ Check(ne, kDeclarationInWithContext);
- __ CompareRoot(r4, Heap::kCatchContextMapRootIndex);
+ __ CompareRoot(r3, Heap::kCatchContextMapRootIndex);
__ Check(ne, kDeclarationInCatchContext);
}
}
-
void FullCodeGenerator::VisitVariableDeclaration(
VariableDeclaration* declaration) {
// If it was not possible to allocate the variable at compile time, we
@@ -753,7 +732,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
Comment cmnt(masm_, "[ VariableDeclaration");
EmitDebugCheckDeclarationContext(variable);
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
- __ StoreP(ip, ContextMemOperand(cp, variable->index()), r0);
+ __ StoreP(ip, ContextMemOperand(cp, variable->index()));
// No write barrier since the_hole_value is in old space.
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
}
@@ -761,7 +740,7 @@ void FullCodeGenerator::VisitVariableDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ VariableDeclaration");
- __ mov(r5, Operand(variable->name()));
+ __ mov(r4, Operand(variable->name()));
// Declaration nodes are always introduced in one of four modes.
DCHECK(IsDeclaredVariableMode(mode));
// Push initial value, if any.
@@ -769,11 +748,11 @@ void FullCodeGenerator::VisitVariableDeclaration(
// 'undefined') because we may have a (legal) redeclaration and we
// must not destroy the current value.
if (hole_init) {
- __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r2, Heap::kTheHoleValueRootIndex);
} else {
- __ LoadSmiLiteral(r3, Smi::FromInt(0)); // Indicates no initial value.
+ __ LoadSmiLiteral(r2, Smi::FromInt(0)); // Indicates no initial value.
}
- __ Push(r5, r3);
+ __ Push(r4, r2);
__ Push(Smi::FromInt(variable->DeclarationPropertyAttributes()));
__ CallRuntime(Runtime::kDeclareLookupSlot);
break;
@@ -781,7 +760,6 @@ void FullCodeGenerator::VisitVariableDeclaration(
}
}
-
void FullCodeGenerator::VisitFunctionDeclaration(
FunctionDeclaration* declaration) {
VariableProxy* proxy = declaration->proxy();
@@ -810,11 +788,10 @@ void FullCodeGenerator::VisitFunctionDeclaration(
Comment cmnt(masm_, "[ FunctionDeclaration");
EmitDebugCheckDeclarationContext(variable);
VisitForAccumulatorValue(declaration->fun());
- __ StoreP(result_register(), ContextMemOperand(cp, variable->index()),
- r0);
+ __ StoreP(result_register(), ContextMemOperand(cp, variable->index()));
int offset = Context::SlotOffset(variable->index());
// We know that we have written a function, which is not a smi.
- __ RecordWriteContextSlot(cp, offset, result_register(), r5,
+ __ RecordWriteContextSlot(cp, offset, result_register(), r4,
kLRHasBeenSaved, kDontSaveFPRegs,
EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
@@ -823,8 +800,8 @@ void FullCodeGenerator::VisitFunctionDeclaration(
case VariableLocation::LOOKUP: {
Comment cmnt(masm_, "[ FunctionDeclaration");
- __ mov(r5, Operand(variable->name()));
- PushOperand(r5);
+ __ mov(r4, Operand(variable->name()));
+ PushOperand(r4);
// Push initial value for function declaration.
VisitForStackValue(declaration->fun());
PushOperand(Smi::FromInt(variable->DeclarationPropertyAttributes()));
@@ -834,17 +811,15 @@ void FullCodeGenerator::VisitFunctionDeclaration(
}
}
-
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
// Call the runtime to declare the globals.
- __ mov(r4, Operand(pairs));
- __ LoadSmiLiteral(r3, Smi::FromInt(DeclareGlobalsFlags()));
- __ Push(r4, r3);
+ __ mov(r3, Operand(pairs));
+ __ LoadSmiLiteral(r2, Smi::FromInt(DeclareGlobalsFlags()));
+ __ Push(r3, r2);
__ CallRuntime(Runtime::kDeclareGlobals);
// Return value is ignored.
}
-
void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Call the runtime to declare the modules.
__ Push(descriptions);
@@ -852,7 +827,6 @@ void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
// Return value is ignored.
}
-
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Comment cmnt(masm_, "[ SwitchStatement");
Breakable nested_statement(this, stmt);
@@ -885,15 +859,16 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
VisitForAccumulatorValue(clause->label());
// Perform the comparison as if via '==='.
- __ LoadP(r4, MemOperand(sp, 0)); // Switch value.
+ __ LoadP(r3, MemOperand(sp, 0)); // Switch value.
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
- __ orx(r5, r4, r3);
- patch_site.EmitJumpIfNotSmi(r5, &slow_case);
+ __ LoadRR(r4, r2);
+ __ OrP(r4, r3);
+ patch_site.EmitJumpIfNotSmi(r4, &slow_case);
- __ cmp(r4, r3);
+ __ CmpP(r3, r2);
__ bne(&next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target());
@@ -910,14 +885,13 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
Label skip;
__ b(&skip);
PrepareForBailout(clause, TOS_REG);
- __ LoadRoot(ip, Heap::kTrueValueRootIndex);
- __ cmp(r3, ip);
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
__ bne(&next_test);
__ Drop(1);
__ b(clause->body_target());
__ bind(&skip);
- __ cmpi(r3, Operand::Zero());
+ __ CmpP(r2, Operand::Zero());
__ bne(&next_test);
__ Drop(1); // Switch value is no longer needed.
__ b(clause->body_target());
@@ -946,7 +920,6 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
}
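
[Note: the inlined smi case above (LoadRR + OrP feeding EmitJumpIfNotSmi) relies on V8's tagging scheme: smis have a clear low bit, so OR-ing the two operands tests both tags at once. A sketch:]

#include <cstdint>

// V8 tagging: low bit 0 = smi, low bit 1 = heap object pointer.
constexpr bool BothSmis(std::intptr_t a, std::intptr_t b) {
  return ((a | b) & 1) == 0;  // any set tag bit means "not both smis"
}
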
-
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
Comment cmnt(masm_, "[ ForInStatement");
SetStatementPosition(stmt, SKIP_BREAK);
@@ -965,19 +938,19 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// If the object is null or undefined, skip over the loop, otherwise convert
// it to a JS receiver. See ECMA-262 version 5, section 12.6.4.
Label convert, done_convert;
- __ JumpIfSmi(r3, &convert);
- __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
+ __ JumpIfSmi(r2, &convert);
+ __ CompareObjectType(r2, r3, r3, FIRST_JS_RECEIVER_TYPE);
__ bge(&done_convert);
- __ CompareRoot(r3, Heap::kNullValueRootIndex);
+ __ CompareRoot(r2, Heap::kNullValueRootIndex);
__ beq(&exit);
- __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
__ beq(&exit);
__ bind(&convert);
ToObjectStub stub(isolate());
__ CallStub(&stub);
__ bind(&done_convert);
PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
- __ push(r3);
+ __ push(r2);
// Check cache validity in generated code. This is a fast case for
// the JSObject::IsSimpleEnum cache validity checks. If we cannot
@@ -991,12 +964,12 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// The enum cache is valid. Load the map of the object being
// iterated over and use the cache for the iteration.
Label use_cache;
- __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
__ b(&use_cache);
// Get the set of properties to enumerate.
__ bind(&call_runtime);
- __ push(r3); // Duplicate the enumerable object on the stack.
+ __ push(r2); // Duplicate the enumerable object on the stack.
__ CallRuntime(Runtime::kForInEnumerate);
PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
@@ -1004,96 +977,104 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// modification check. Otherwise, we got a fixed array, and we have
// to do a slow check.
Label fixed_array;
- __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kMetaMapRootIndex);
- __ cmp(r5, ip);
+ __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ CompareRoot(r4, Heap::kMetaMapRootIndex);
__ bne(&fixed_array);
- // We got a map in register r3. Get the enumeration cache from it.
+ // We got a map in register r2. Get the enumeration cache from it.
Label no_descriptors;
__ bind(&use_cache);
- __ EnumLength(r4, r3);
- __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
- __ beq(&no_descriptors);
+ __ EnumLength(r3, r2);
+ __ CmpSmiLiteral(r3, Smi::FromInt(0), r0);
+ __ beq(&no_descriptors, Label::kNear);
- __ LoadInstanceDescriptors(r3, r5);
- __ LoadP(r5, FieldMemOperand(r5, DescriptorArray::kEnumCacheOffset));
- __ LoadP(r5,
- FieldMemOperand(r5, DescriptorArray::kEnumCacheBridgeCacheOffset));
+ __ LoadInstanceDescriptors(r2, r4);
+ __ LoadP(r4, FieldMemOperand(r4, DescriptorArray::kEnumCacheOffset));
+ __ LoadP(r4,
+ FieldMemOperand(r4, DescriptorArray::kEnumCacheBridgeCacheOffset));
// Set up the four remaining stack slots.
- __ push(r3); // Map.
- __ LoadSmiLiteral(r3, Smi::FromInt(0));
+ __ push(r2); // Map.
+ __ LoadSmiLiteral(r2, Smi::FromInt(0));
// Push enumeration cache, enumeration cache length (as smi) and zero.
- __ Push(r5, r4, r3);
+ __ Push(r4, r3, r2);
__ b(&loop);
__ bind(&no_descriptors);
__ Drop(1);
__ b(&exit);
- // We got a fixed array in register r3. Iterate through that.
+ // We got a fixed array in register r2. Iterate through that.
__ bind(&fixed_array);
- __ LoadSmiLiteral(r4, Smi::FromInt(1)); // Smi(1) indicates slow check
- __ Push(r4, r3); // Smi and array
- __ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
- __ Push(r4); // Fixed array length (as smi).
+ int const vector_index = SmiFromSlot(slot)->value();
+ __ EmitLoadTypeFeedbackVector(r3);
+ __ mov(r4, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ __ StoreP(
+ r4, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(vector_index)), r0);
+ __ LoadSmiLiteral(r3, Smi::FromInt(1)); // Smi(1) indicates slow check
+ __ Push(r3, r2); // Smi and array
+ __ LoadP(r3, FieldMemOperand(r2, FixedArray::kLengthOffset));
+ __ Push(r3); // Fixed array length (as smi).
PrepareForBailoutForId(stmt->PrepareId(), NO_REGISTERS);
- __ LoadSmiLiteral(r3, Smi::FromInt(0));
- __ Push(r3); // Initial index.
+ __ LoadSmiLiteral(r2, Smi::FromInt(0));
+ __ Push(r2); // Initial index.
// Generate code for doing the condition check.
__ bind(&loop);
SetExpressionAsStatementPosition(stmt->each());
- // Load the current count to r3, load the length to r4.
- __ LoadP(r3, MemOperand(sp, 0 * kPointerSize));
- __ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
- __ cmpl(r3, r4); // Compare to the array length.
+ // Load the current count to r2, load the length to r3.
+ __ LoadP(r2, MemOperand(sp, 0 * kPointerSize));
+ __ LoadP(r3, MemOperand(sp, 1 * kPointerSize));
+ __ CmpLogicalP(r2, r3); // Compare to the array length.
__ bge(loop_statement.break_label());
- // Get the current entry of the array into register r6.
- __ LoadP(r5, MemOperand(sp, 2 * kPointerSize));
- __ addi(r5, r5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ SmiToPtrArrayOffset(r6, r3);
- __ LoadPX(r6, MemOperand(r6, r5));
+ // Get the current entry of the array into register r5.
+ __ LoadP(r4, MemOperand(sp, 2 * kPointerSize));
+ __ AddP(r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+ __ SmiToPtrArrayOffset(r5, r2);
+ __ LoadP(r5, MemOperand(r5, r4));
// Get the expected map from the stack or a smi in the
- // permanent slow case into register r5.
- __ LoadP(r5, MemOperand(sp, 3 * kPointerSize));
+ // permanent slow case into register r4.
+ __ LoadP(r4, MemOperand(sp, 3 * kPointerSize));
// Check if the expected map still matches that of the enumerable.
// If not, we may have to filter the key.
Label update_each;
- __ LoadP(r4, MemOperand(sp, 4 * kPointerSize));
- __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
- __ cmp(r7, r5);
+ __ LoadP(r3, MemOperand(sp, 4 * kPointerSize));
+ __ LoadP(r6, FieldMemOperand(r3, HeapObject::kMapOffset));
+ __ CmpP(r6, r4);
__ beq(&update_each);
- // We need to filter the key, record slow-path here.
- int const vector_index = SmiFromSlot(slot)->value();
- __ EmitLoadTypeFeedbackVector(r3);
- __ mov(r5, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+ // We might get here from TurboFan or Crankshaft when something in the
+ // for-in loop body deopts, and only now notice in fullcodegen that we
+ // can no longer use the enum cache, i.e. we left fast mode. Better to
+ // record this information here, in case we later OSR back into this loop
+ // or reoptimize the whole function without rerunning the loop with the
+ // slow-mode object in fullcodegen (which would result in a deopt loop).
+ __ EmitLoadTypeFeedbackVector(r2);
+ __ mov(r4, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
__ StoreP(
- r5, FieldMemOperand(r3, FixedArray::OffsetOfElementAt(vector_index)), r0);
+ r4, FieldMemOperand(r2, FixedArray::OffsetOfElementAt(vector_index)), r0);
// Convert the entry to a string or (smi) 0 if it isn't a property
// any more. If the property has been removed while iterating, we
// just skip it.
- __ Push(r4, r6); // Enumerable and current entry.
+ __ Push(r3, r5); // Enumerable and current entry.
__ CallRuntime(Runtime::kForInFilter);
PrepareForBailoutForId(stmt->FilterId(), TOS_REG);
- __ mr(r6, r3);
+ __ LoadRR(r5, r2);
__ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- __ cmp(r3, r0);
+ __ CmpP(r2, r0);
__ beq(loop_statement.continue_label());
// Update the 'each' property or variable from the possibly filtered
- // entry in register r6.
+ // entry in register r5.
__ bind(&update_each);
- __ mr(result_register(), r6);
+ __ LoadRR(result_register(), r5);
// Perform the assignment as if via '='.
{
EffectContext context(this);
@@ -1109,9 +1090,9 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
// Generate code for the going to the next element by incrementing
// the index (smi) stored on top of the stack.
__ bind(loop_statement.continue_label());
- __ pop(r3);
- __ AddSmiLiteral(r3, r3, Smi::FromInt(1), r0);
- __ push(r3);
+ __ pop(r2);
+ __ AddSmiLiteral(r2, r2, Smi::FromInt(1), r0);
+ __ push(r2);
EmitBackEdgeBookkeeping(stmt, &loop);
__ b(&loop);
@@ -1126,7 +1107,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
decrement_loop_depth();
}
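
[Note: the loads at fixed sp offsets above imply the following five-slot loop layout, reconstructed from the code rather than documented in the patch:]

// Distances from sp, in pointers, while the for-in loop runs.
enum ForInSlot {
  kIndexSlot = 0,       // current index (smi)
  kLengthSlot = 1,      // cache/array length (smi)
  kKeysSlot = 2,        // enum cache bridge or fixed array of keys
  kMapSlot = 3,         // expected map, or Smi(1) on the slow path
  kEnumerableSlot = 4,  // the object being iterated
};
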
-
void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
@@ -1139,12 +1119,11 @@ void FullCodeGenerator::EmitSetHomeObject(Expression* initializer, int offset,
CallStoreIC();
}
-
void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
int offset,
FeedbackVectorSlot slot) {
DCHECK(NeedsHomeObject(initializer));
- __ Move(StoreDescriptor::ReceiverRegister(), r3);
+ __ Move(StoreDescriptor::ReceiverRegister(), r2);
__ mov(StoreDescriptor::NameRegister(),
Operand(isolate()->factory()->home_object_symbol()));
__ LoadP(StoreDescriptor::ValueRegister(),
@@ -1153,13 +1132,12 @@ void FullCodeGenerator::EmitSetHomeObjectAccumulator(Expression* initializer,
CallStoreIC();
}
-
void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
TypeofMode typeof_mode,
Label* slow) {
Register current = cp;
- Register next = r4;
- Register temp = r5;
+ Register next = r3;
+ Register temp = r4;
Scope* s = scope();
while (s != NULL) {
@@ -1188,9 +1166,8 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
__ bind(&loop);
// Terminate at native context.
__ LoadP(temp, FieldMemOperand(next, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
- __ cmp(temp, ip);
- __ beq(&fast);
+ __ CompareRoot(temp, Heap::kNativeContextMapRootIndex);
+ __ beq(&fast, Label::kNear);
// Check that extension is "the hole".
__ LoadP(temp, ContextMemOperand(next, Context::EXTENSION_INDEX));
__ JumpIfNotRoot(temp, Heap::kTheHoleValueRootIndex, slow);
@@ -1205,13 +1182,12 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
EmitGlobalVariableLoad(proxy, typeof_mode);
}
-
MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
Label* slow) {
DCHECK(var->IsContextSlot());
Register context = cp;
- Register next = r6;
- Register temp = r7;
+ Register next = r5;
+ Register temp = r6;
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
if (s->num_heap_slots() > 0) {
@@ -1235,7 +1211,6 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
return ContextMemOperand(context, var->index());
}
-
void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
TypeofMode typeof_mode,
Label* slow, Label* done) {
@@ -1250,16 +1225,16 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
__ b(done);
} else if (var->mode() == DYNAMIC_LOCAL) {
Variable* local = var->local_if_not_shadowed();
- __ LoadP(r3, ContextSlotOperandCheckExtensions(local, slow));
+ __ LoadP(r2, ContextSlotOperandCheckExtensions(local, slow));
if (local->mode() == LET || local->mode() == CONST ||
local->mode() == CONST_LEGACY) {
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
__ bne(done);
if (local->mode() == CONST_LEGACY) {
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
} else { // LET || CONST
- __ mov(r3, Operand(var->name()));
- __ push(r3);
+ __ mov(r2, Operand(var->name()));
+ __ push(r2);
__ CallRuntime(Runtime::kThrowReferenceError);
}
}
@@ -1267,7 +1242,6 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
}
}
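
[Note: EmitDynamicLookupFastCase gives uninitialized bindings the usual hole semantics: legacy const reads yield undefined, while let/const reads throw a ReferenceError. A behavioral sketch in plain C++; the types and names are mine, with nullopt standing in for the-hole on input and undefined on output.]

#include <optional>
#include <stdexcept>

enum class Mode { kLet, kConst, kConstLegacy };

std::optional<int> LoadHoleChecked(std::optional<int> slot, Mode mode) {
  if (slot.has_value()) return slot;                     // initialized binding
  if (mode == Mode::kConstLegacy) return std::nullopt;   // hole -> undefined
  throw std::runtime_error("ReferenceError");            // LET || CONST
}
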
-
void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
Variable* var = proxy->var();
@@ -1280,7 +1254,6 @@ void FullCodeGenerator::EmitGlobalVariableLoad(VariableProxy* proxy,
CallLoadIC(typeof_mode);
}
-
void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
TypeofMode typeof_mode) {
// Record position before possible IC call.
@@ -1295,7 +1268,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
case VariableLocation::UNALLOCATED: {
Comment cmnt(masm_, "[ Global variable");
EmitGlobalVariableLoad(proxy, typeof_mode);
- context()->Plug(r3);
+ context()->Plug(r2);
break;
}
@@ -1308,22 +1281,22 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
if (NeedsHoleCheckForLoad(proxy)) {
Label done;
// Let and const need a read barrier.
- GetVar(r3, var);
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+ GetVar(r2, var);
+ __ CompareRoot(r2, Heap::kTheHoleValueRootIndex);
__ bne(&done);
if (var->mode() == LET || var->mode() == CONST) {
// Throw a reference error when using an uninitialized let/const
// binding in harmony mode.
- __ mov(r3, Operand(var->name()));
- __ push(r3);
+ __ mov(r2, Operand(var->name()));
+ __ push(r2);
__ CallRuntime(Runtime::kThrowReferenceError);
} else {
// Uninitialized legacy const bindings are unholed.
DCHECK(var->mode() == CONST_LEGACY);
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
}
__ bind(&done);
- context()->Plug(r3);
+ context()->Plug(r2);
break;
}
context()->Plug(var);
@@ -1344,29 +1317,27 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy,
: Runtime::kLoadLookupSlotInsideTypeof;
__ CallRuntime(function_id);
__ bind(&done);
- context()->Plug(r3);
+ context()->Plug(r2);
}
}
}
-
void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
Comment cmnt(masm_, "[ RegExpLiteral");
- __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
- __ mov(r4, Operand(expr->pattern()));
- __ LoadSmiLiteral(r3, Smi::FromInt(expr->flags()));
+ __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+ __ mov(r3, Operand(expr->pattern()));
+ __ LoadSmiLiteral(r2, Smi::FromInt(expr->flags()));
FastCloneRegExpStub stub(isolate());
__ CallStub(&stub);
- context()->Plug(r3);
+ context()->Plug(r2);
}
-
void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
Expression* expression = (property == NULL) ? NULL : property->value();
if (expression == NULL) {
- __ LoadRoot(r4, Heap::kNullValueRootIndex);
- PushOperand(r4);
+ __ LoadRoot(r3, Heap::kNullValueRootIndex);
+ PushOperand(r3);
} else {
VisitForStackValue(expression);
if (NeedsHomeObject(expression)) {
@@ -1378,18 +1349,17 @@ void FullCodeGenerator::EmitAccessor(ObjectLiteralProperty* property) {
}
}
-
void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Comment cmnt(masm_, "[ ObjectLiteral");
Handle<FixedArray> constant_properties = expr->constant_properties();
- __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
- __ mov(r4, Operand(constant_properties));
+ __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+ __ mov(r3, Operand(constant_properties));
int flags = expr->ComputeFlags();
- __ LoadSmiLiteral(r3, Smi::FromInt(flags));
+ __ LoadSmiLiteral(r2, Smi::FromInt(flags));
if (MustCreateObjectLiteralWithRuntime(expr)) {
- __ Push(r6, r5, r4, r3);
+ __ Push(r5, r4, r3, r2);
__ CallRuntime(Runtime::kCreateObjectLiteral);
} else {
FastCloneShallowObjectStub stub(isolate(), expr->properties_count());
@@ -1398,7 +1368,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
// If result_saved is true the result is on top of the stack. If
- // result_saved is false the result is in r3.
+ // result_saved is false the result is in r2.
bool result_saved = false;
AccessorTable accessor_table(zone());
@@ -1411,7 +1381,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Literal* key = property->key()->AsLiteral();
Expression* value = property->value();
if (!result_saved) {
- PushOperand(r3); // Save result on stack
+ PushOperand(r2); // Save result on stack
result_saved = true;
}
switch (property->kind()) {
@@ -1426,7 +1396,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (key->value()->IsInternalizedString()) {
if (property->emit_store()) {
VisitForAccumulatorValue(value);
- DCHECK(StoreDescriptor::ValueRegister().is(r3));
+ DCHECK(StoreDescriptor::ValueRegister().is(r2));
__ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
__ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
EmitLoadStoreICSlot(property->GetSlot(0));
@@ -1442,16 +1412,16 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
}
// Duplicate receiver on stack.
- __ LoadP(r3, MemOperand(sp));
- PushOperand(r3);
+ __ LoadP(r2, MemOperand(sp));
+ PushOperand(r2);
VisitForStackValue(key);
VisitForStackValue(value);
if (property->emit_store()) {
if (NeedsHomeObject(value)) {
EmitSetHomeObject(value, 2, property->GetSlot());
}
- __ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY)); // PropertyAttributes
- PushOperand(r3);
+ __ LoadSmiLiteral(r2, Smi::FromInt(SLOPPY)); // PropertyAttributes
+ PushOperand(r2);
CallRuntimeWithOperands(Runtime::kSetProperty);
} else {
DropOperands(3);
@@ -1459,8 +1429,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
break;
case ObjectLiteral::Property::PROTOTYPE:
// Duplicate receiver on stack.
- __ LoadP(r3, MemOperand(sp));
- PushOperand(r3);
+ __ LoadP(r2, MemOperand(sp));
+ PushOperand(r2);
VisitForStackValue(value);
DCHECK(property->emit_store());
CallRuntimeWithOperands(Runtime::kInternalSetPrototype);
@@ -1484,13 +1454,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
// each pair of corresponding getters and setters.
for (AccessorTable::Iterator it = accessor_table.begin();
it != accessor_table.end(); ++it) {
- __ LoadP(r3, MemOperand(sp)); // Duplicate receiver.
- PushOperand(r3);
+ __ LoadP(r2, MemOperand(sp)); // Duplicate receiver.
+ PushOperand(r2);
VisitForStackValue(it->first);
EmitAccessor(it->second->getter);
EmitAccessor(it->second->setter);
- __ LoadSmiLiteral(r3, Smi::FromInt(NONE));
- PushOperand(r3);
+ __ LoadSmiLiteral(r2, Smi::FromInt(NONE));
+ PushOperand(r2);
CallRuntimeWithOperands(Runtime::kDefineAccessorPropertyUnchecked);
}
@@ -1508,12 +1478,12 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
Expression* value = property->value();
if (!result_saved) {
- PushOperand(r3); // Save result on the stack
+ PushOperand(r2); // Save result on the stack
result_saved = true;
}
- __ LoadP(r3, MemOperand(sp)); // Duplicate receiver.
- PushOperand(r3);
+ __ LoadP(r2, MemOperand(sp)); // Duplicate receiver.
+ PushOperand(r2);
if (property->kind() == ObjectLiteral::Property::PROTOTYPE) {
DCHECK(!property->is_computed_name());
@@ -1561,19 +1531,18 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
if (expr->has_function()) {
DCHECK(result_saved);
- __ LoadP(r3, MemOperand(sp));
- __ push(r3);
+ __ LoadP(r2, MemOperand(sp));
+ __ push(r2);
__ CallRuntime(Runtime::kToFastProperties);
}
if (result_saved) {
context()->PlugTOS();
} else {
- context()->Plug(r3);
+ context()->Plug(r2);
}
}
-
void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Comment cmnt(masm_, "[ ArrayLiteral");
@@ -1590,12 +1559,12 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
- __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
- __ mov(r4, Operand(constant_elements));
+ __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ LoadSmiLiteral(r4, Smi::FromInt(expr->literal_index()));
+ __ mov(r3, Operand(constant_elements));
if (MustCreateArrayLiteralWithRuntime(expr)) {
- __ LoadSmiLiteral(r3, Smi::FromInt(expr->ComputeFlags()));
- __ Push(r6, r5, r4, r3);
+ __ LoadSmiLiteral(r2, Smi::FromInt(expr->ComputeFlags()));
+ __ Push(r5, r4, r3, r2);
__ CallRuntime(Runtime::kCreateArrayLiteral);
} else {
FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
@@ -1618,7 +1587,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
if (!result_saved) {
- PushOperand(r3);
+ PushOperand(r2);
result_saved = true;
}
VisitForAccumulatorValue(subexpr);
@@ -1640,13 +1609,13 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
// (inclusive) and these elements gets appended to the array. Note that the
// number elements an iterable produces is unknown ahead of time.
if (array_index < length && result_saved) {
- PopOperand(r3);
+ PopOperand(r2);
result_saved = false;
}
for (; array_index < length; array_index++) {
Expression* subexpr = subexprs->at(array_index);
- PushOperand(r3);
+ PushOperand(r2);
DCHECK(!subexpr->IsSpread());
VisitForStackValue(subexpr);
CallRuntimeWithOperands(Runtime::kAppendElement);
@@ -1657,11 +1626,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
if (result_saved) {
context()->PlugTOS();
} else {
- context()->Plug(r3);
+ context()->Plug(r2);
}
}
-
void FullCodeGenerator::VisitAssignment(Assignment* expr) {
DCHECK(expr->target()->IsValidReferenceExpressionOrThis());
@@ -1692,22 +1660,22 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
property->obj()->AsSuperPropertyReference()->home_object());
PushOperand(result_register());
if (expr->is_compound()) {
- const Register scratch = r4;
+ const Register scratch = r3;
__ LoadP(scratch, MemOperand(sp, kPointerSize));
PushOperands(scratch, result_register());
}
break;
case KEYED_SUPER_PROPERTY: {
- const Register scratch = r4;
+ const Register scratch = r3;
VisitForStackValue(
property->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
property->obj()->AsSuperPropertyReference()->home_object());
- __ mr(scratch, result_register());
+ __ LoadRR(scratch, result_register());
VisitForAccumulatorValue(property->key());
PushOperands(scratch, result_register());
if (expr->is_compound()) {
- const Register scratch1 = r5;
+ const Register scratch1 = r4;
__ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
PushOperands(scratch1, scratch, result_register());
}
@@ -1757,7 +1725,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
Token::Value op = expr->binary_op();
- PushOperand(r3); // Left operand goes on the stack.
+ PushOperand(r2); // Left operand goes on the stack.
VisitForAccumulatorValue(expr->value());
AccumulatorValueContext context(this);
@@ -1782,18 +1750,18 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
expr->op(), expr->AssignmentSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r3);
+ context()->Plug(r2);
break;
case NAMED_PROPERTY:
EmitNamedPropertyAssignment(expr);
break;
case NAMED_SUPER_PROPERTY:
EmitNamedSuperPropertyStore(property);
- context()->Plug(r3);
+ context()->Plug(r2);
break;
case KEYED_SUPER_PROPERTY:
EmitKeyedSuperPropertyStore(property);
- context()->Plug(r3);
+ context()->Plug(r2);
break;
case KEYED_PROPERTY:
EmitKeyedPropertyAssignment(expr);
@@ -1801,7 +1769,6 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) {
}
}
-
void FullCodeGenerator::VisitYield(Yield* expr) {
Comment cmnt(masm_, "[ Yield");
SetExpressionPosition(expr);
@@ -1810,150 +1777,163 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
// this. It stays on the stack while we update the iterator.
VisitForStackValue(expr->expression());
- Label suspend, continuation, post_runtime, resume;
-
- __ b(&suspend);
- __ bind(&continuation);
- // When we arrive here, the stack top is the resume mode and
- // result_register() holds the input value (the argument given to the
- // respective resume operation).
- __ RecordGeneratorContinuation();
- __ pop(r4);
- __ CmpSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::RETURN), r0);
- __ bne(&resume);
- __ push(result_register());
- EmitCreateIteratorResult(true);
- EmitUnwindAndReturn();
-
- __ bind(&suspend);
- OperandStackDepthIncrement(1); // Not popped on this path.
- VisitForAccumulatorValue(expr->generator_object());
- DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
- __ LoadSmiLiteral(r4, Smi::FromInt(continuation.pos()));
- __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
- r0);
- __ StoreP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset), r0);
- __ mr(r4, cp);
- __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
- kLRHasBeenSaved, kDontSaveFPRegs);
- __ addi(r4, fp, Operand(StandardFrameConstants::kExpressionsOffset));
- __ cmp(sp, r4);
- __ beq(&post_runtime);
- __ push(r3); // generator object
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
- __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- __ bind(&post_runtime);
- PopOperand(result_register());
- EmitReturnSequence();
+ switch (expr->yield_kind()) {
+ case Yield::kSuspend:
+ // Pop value from top-of-stack slot; box result into result register.
+ EmitCreateIteratorResult(false);
+ PushOperand(result_register());
+ // Fall through.
+ case Yield::kInitial: {
+ Label suspend, continuation, post_runtime, resume;
+
+ __ b(&suspend, Label::kNear);
+ __ bind(&continuation);
+ // When we arrive here, the stack top is the resume mode and
+ // result_register() holds the input value (the argument given to the
+ // respective resume operation).
+ __ RecordGeneratorContinuation();
+ __ pop(r3);
+ __ CmpSmiLiteral(r3, Smi::FromInt(JSGeneratorObject::RETURN), r0);
+ __ bne(&resume);
+ __ push(result_register());
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
+
+ __ bind(&suspend);
+ OperandStackDepthIncrement(1); // Not popped on this path.
+ VisitForAccumulatorValue(expr->generator_object());
+ DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+ __ LoadSmiLiteral(r3, Smi::FromInt(continuation.pos()));
+ __ StoreP(r3,
+ FieldMemOperand(r2, JSGeneratorObject::kContinuationOffset));
+ __ StoreP(cp, FieldMemOperand(r2, JSGeneratorObject::kContextOffset));
+ __ LoadRR(r3, cp);
+ __ RecordWriteField(r2, JSGeneratorObject::kContextOffset, r3, r4,
+ kLRHasBeenSaved, kDontSaveFPRegs);
+ __ AddP(r3, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+ __ CmpP(sp, r3);
+ __ beq(&post_runtime);
+ __ push(r2); // generator object
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+ __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ bind(&post_runtime);
+ PopOperand(result_register());
+ EmitReturnSequence();
+
+ __ bind(&resume);
+ context()->Plug(result_register());
+ break;
+ }
- __ bind(&resume);
- context()->Plug(result_register());
-}
+ case Yield::kFinal: {
+ // Pop value from top-of-stack slot, box result into result register.
+ EmitCreateIteratorResult(true);
+ EmitUnwindAndReturn();
+ break;
+ }
+ case Yield::kDelegating:
+ UNREACHABLE();
+ }
+}
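The suspend path above records continuation.pos() into the generator object so a later resume can branch straight back to the instruction after the yield. A minimal standalone C++ sketch of that bookkeeping (hypothetical Generator record and resume points; V8 keeps the same data in JSGeneratorObject's continuation and context fields):

#include <cstdio>

// Hypothetical stand-in for JSGeneratorObject: the continuation slot holds
// the resume point while suspended and kGeneratorExecuting while running.
struct Generator {
  int continuation;  // resume point recorded at the yield
  int input;         // value handed to the resume operation
};
enum { kGeneratorExecuting = -1 };

int RunToNextYield(Generator* g, int resume_point) {
  g->continuation = kGeneratorExecuting;  // mark as running, like the fast resume path
  switch (resume_point) {
    case 0:                 // initial entry
      g->continuation = 1;  // record the label position before suspending
      return 10;            // value yielded to the caller
    case 1:                 // resumed after the first yield
      return g->input + 1;  // the sent value becomes the yield expression's result
  }
  return -1;
}

int main() {
  Generator g{0, 0};
  printf("%d\n", RunToNextYield(&g, 0));               // 10
  g.input = 41;
  printf("%d\n", RunToNextYield(&g, g.continuation));  // 42
  return 0;
}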
void FullCodeGenerator::EmitGeneratorResume(
Expression* generator, Expression* value,
JSGeneratorObject::ResumeMode resume_mode) {
- // The value stays in r3, and is ultimately read by the resumed generator, as
+ // The value stays in r2, and is ultimately read by the resumed generator, as
// if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
// is read to throw the value when the resumed generator is already closed.
- // r4 will hold the generator object until the activation has been resumed.
+ // r3 will hold the generator object until the activation has been resumed.
VisitForStackValue(generator);
VisitForAccumulatorValue(value);
- PopOperand(r4);
+ PopOperand(r3);
// Store input value into generator object.
__ StoreP(result_register(),
- FieldMemOperand(r4, JSGeneratorObject::kInputOffset), r0);
- __ mr(r5, result_register());
- __ RecordWriteField(r4, JSGeneratorObject::kInputOffset, r5, r6,
+ FieldMemOperand(r3, JSGeneratorObject::kInputOffset), r0);
+ __ LoadRR(r4, result_register());
+ __ RecordWriteField(r3, JSGeneratorObject::kInputOffset, r4, r5,
kLRHasBeenSaved, kDontSaveFPRegs);
// Load suspended function and context.
- __ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
- __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+ __ LoadP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset));
+ __ LoadP(r6, FieldMemOperand(r3, JSGeneratorObject::kFunctionOffset));
// Load receiver and store as the first argument.
- __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
- __ push(r5);
+ __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kReceiverOffset));
+ __ push(r4);
// Push holes for the rest of the arguments to the generator function.
- __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
- __ LoadWordArith(
- r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
- __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ LoadP(r5, FieldMemOperand(r6, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadW(
+ r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset));
+ __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
Label argument_loop, push_frame;
-#if V8_TARGET_ARCH_PPC64
- __ cmpi(r6, Operand::Zero());
- __ beq(&push_frame);
+#if V8_TARGET_ARCH_S390X
+ __ CmpP(r5, Operand::Zero());
+ __ beq(&push_frame, Label::kNear);
#else
- __ SmiUntag(r6, SetRC);
- __ beq(&push_frame, cr0);
+ __ SmiUntag(r5);
+ __ beq(&push_frame, Label::kNear);
#endif
- __ mtctr(r6);
+ __ LoadRR(r0, r5);
__ bind(&argument_loop);
- __ push(r5);
- __ bdnz(&argument_loop);
+ __ push(r4);
+ __ SubP(r0, Operand(1));
+ __ bne(&argument_loop);
// Enter a new JavaScript frame, and initialize its slots as they were when
// the generator was suspended.
Label resume_frame, done;
__ bind(&push_frame);
- __ b(&resume_frame, SetLK);
+ __ b(r14, &resume_frame); // brasl
__ b(&done);
__ bind(&resume_frame);
- // lr = return address.
+ // r14 = return address.
// fp = caller's frame pointer.
// cp = callee's context,
- // r7 = callee's JS function.
- __ PushFixedFrame(r7);
+ // r6 = callee's JS function.
+ __ PushFixedFrame(r6);
// Adjust FP to point to saved FP.
- __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+ __ lay(fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp));
// Load the operand stack size.
- __ LoadP(r6, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset));
- __ LoadP(r6, FieldMemOperand(r6, FixedArray::kLengthOffset));
- __ SmiUntag(r6, SetRC);
+ __ LoadP(r5, FieldMemOperand(r3, JSGeneratorObject::kOperandStackOffset));
+ __ LoadP(r5, FieldMemOperand(r5, FixedArray::kLengthOffset));
+ __ SmiUntag(r5);
// If we are sending a value and there is no operand stack, we can jump back
// in directly.
Label call_resume;
if (resume_mode == JSGeneratorObject::NEXT) {
Label slow_resume;
- __ bne(&slow_resume, cr0);
- __ LoadP(ip, FieldMemOperand(r7, JSFunction::kCodeEntryOffset));
- {
- ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
- if (FLAG_enable_embedded_constant_pool) {
- __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(ip);
- }
- __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
- __ SmiUntag(r5);
- __ add(ip, ip, r5);
- __ LoadSmiLiteral(r5,
- Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
- __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset),
- r0);
- __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- __ Jump(ip);
- __ bind(&slow_resume);
- }
+ __ bne(&slow_resume, Label::kNear);
+ __ LoadP(ip, FieldMemOperand(r6, JSFunction::kCodeEntryOffset));
+ __ LoadP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
+ __ SmiUntag(r4);
+ __ AddP(ip, ip, r4);
+ __ LoadSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+ __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset));
+ __ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
+ __ Jump(ip);
+ __ bind(&slow_resume);
} else {
- __ beq(&call_resume, cr0);
+ __ beq(&call_resume);
}
// Otherwise, we push holes for the operand stack and call the runtime to fix
// up the stack and the handlers.
Label operand_loop;
- __ mtctr(r6);
+ __ LoadRR(r0, r5);
__ bind(&operand_loop);
- __ push(r5);
- __ bdnz(&operand_loop);
+ __ push(r4);
+ __ SubP(r0, Operand(1));
+ __ bne(&operand_loop);
__ bind(&call_resume);
__ Push(Smi::FromInt(resume_mode)); // Consumed in continuation.
- DCHECK(!result_register().is(r4));
- __ Push(r4, result_register());
+ DCHECK(!result_register().is(r3));
+ __ Push(r3, result_register());
__ Push(Smi::FromInt(resume_mode));
__ CallRuntime(Runtime::kResumeJSGeneratorObject);
// Not reached: the runtime call returns elsewhere.
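Since s390 has no counter register, the PPC mtctr/bdnz loops above become an explicit copy, decrement, and branch on r0. A sketch of the hole-pushing loop's semantics (std::vector standing in for the machine stack, an arbitrary sentinel for the hole root):

#include <cstdint>
#include <vector>

constexpr uintptr_t kTheHole = 0xF00D;  // arbitrary stand-in for the hole root

// The emitted loop pushes first and decrements after, so it requires
// count > 0 on entry, which the beq to push_frame just before it guarantees.
void PushHoles(std::vector<uintptr_t>* stack, int count) {
  for (int n = count; n > 0; --n) {  // LoadRR r0, r5 ... SubP r0, 1; bne
    stack->push_back(kTheHole);      // push r4 (holds the hole root)
  }
}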
@@ -1989,8 +1969,8 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
if (FLAG_debug_code) {
int expected_diff = StandardFrameConstants::kFixedFrameSizeFromFp +
operand_stack_depth_ * kPointerSize;
- __ sub(r3, fp, sp);
- __ cmpi(r3, Operand(expected_diff));
+ __ SubP(r2, fp, sp);
+ __ CmpP(r2, Operand(expected_diff));
__ Assert(eq, kUnexpectedStackDepth);
}
}
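The invariant being asserted: fp minus sp must equal the fixed frame size plus the tracked operand depth. A sketch with assumed constants (the real values come from StandardFrameConstants):

#include <cassert>
#include <cstdint>

constexpr intptr_t kPointerSize = 8;                          // s390x assumption
constexpr intptr_t kFixedFrameSizeFromFp = 2 * kPointerSize;  // assumed value

void CheckOperandStackDepth(intptr_t fp, intptr_t sp, int operand_stack_depth) {
  intptr_t expected_diff =
      kFixedFrameSizeFromFp + operand_stack_depth * kPointerSize;
  assert(fp - sp == expected_diff);  // SubP r2, fp, sp; CmpP; Assert(eq)
}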
@@ -1998,7 +1978,7 @@ void FullCodeGenerator::EmitOperandStackDepthCheck() {
void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
Label allocate, done_allocate;
- __ Allocate(JSIteratorResult::kSize, r3, r5, r6, &allocate, TAG_OBJECT);
+ __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &allocate, TAG_OBJECT);
__ b(&done_allocate);
__ bind(&allocate);
@@ -2006,35 +1986,35 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
__ CallRuntime(Runtime::kAllocateInNewSpace);
__ bind(&done_allocate);
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r4);
- PopOperand(r5);
- __ LoadRoot(r6,
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r3);
+ PopOperand(r4);
+ __ LoadRoot(r5,
done ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
- __ LoadRoot(r7, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r4, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
- __ StoreP(r5, FieldMemOperand(r3, JSIteratorResult::kValueOffset), r0);
- __ StoreP(r6, FieldMemOperand(r3, JSIteratorResult::kDoneOffset), r0);
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r3, FieldMemOperand(r2, HeapObject::kMapOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
+ __ StoreP(r4, FieldMemOperand(r2, JSIteratorResult::kValueOffset), r0);
+ __ StoreP(r5, FieldMemOperand(r2, JSIteratorResult::kDoneOffset), r0);
}
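The object built here is the standard {value, done} iterator result: five tagged slots written in the order of the StoreP sequence. A layout sketch (plain pointers standing in for tagged values):

#include <cstddef>

struct JSIteratorResultLayout {
  void* map;         // iterator-result map from the native context (r3)
  void* properties;  // empty fixed array (r6)
  void* elements;    // empty fixed array (r6)
  void* value;       // popped operand (r4)
  void* done;        // true/false root (r5)
};
// Matches the STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize)
// in EmitCreateIterResultObject below.
static_assert(sizeof(JSIteratorResultLayout) == 5 * sizeof(void*),
              "five tagged slots");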
-
void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
Token::Value op,
Expression* left_expr,
Expression* right_expr) {
Label done, smi_case, stub_call;
- Register scratch1 = r5;
- Register scratch2 = r6;
+ Register scratch1 = r4;
+ Register scratch2 = r5;
// Get the arguments.
- Register left = r4;
- Register right = r3;
+ Register left = r3;
+ Register right = r2;
PopOperand(left);
// Perform combined smi check on both operands.
- __ orx(scratch1, left, right);
+ __ LoadRR(scratch1, right);
+ __ OrP(scratch1, left);
STATIC_ASSERT(kSmiTag == 0);
JumpPatchSite patch_site(masm_);
patch_site.EmitJumpIfSmi(scratch1, &smi_case);
@@ -2051,16 +2031,16 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
switch (op) {
case Token::SAR:
__ GetLeastBitsFromSmi(scratch1, right, 5);
- __ ShiftRightArith(right, left, scratch1);
+ __ ShiftRightArithP(right, left, scratch1);
__ ClearRightImm(right, right, Operand(kSmiTagSize + kSmiShiftSize));
break;
case Token::SHL: {
__ GetLeastBitsFromSmi(scratch2, right, 5);
-#if V8_TARGET_ARCH_PPC64
- __ ShiftLeft_(right, left, scratch2);
+#if V8_TARGET_ARCH_S390X
+ __ ShiftLeftP(right, left, scratch2);
#else
__ SmiUntag(scratch1, left);
- __ ShiftLeft_(scratch1, scratch1, scratch2);
+ __ ShiftLeftP(scratch1, scratch1, scratch2);
// Check that the *signed* result fits in a smi
__ JumpIfNotSmiCandidate(scratch1, scratch2, &stub_call);
__ SmiTag(right, scratch1);
@@ -2070,7 +2050,7 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
case Token::SHR: {
__ SmiUntag(scratch1, left);
__ GetLeastBitsFromSmi(scratch2, right, 5);
- __ srw(scratch1, scratch1, scratch2);
+ __ srl(scratch1, scratch2);
// Unsigned shift is not allowed to produce a negative number.
__ JumpIfNotUnsignedSmiCandidate(scratch1, r0, &stub_call);
__ SmiTag(right, scratch1);
@@ -2079,77 +2059,78 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
case Token::ADD: {
__ AddAndCheckForOverflow(scratch1, left, right, scratch2, r0);
__ BranchOnOverflow(&stub_call);
- __ mr(right, scratch1);
+ __ LoadRR(right, scratch1);
break;
}
case Token::SUB: {
__ SubAndCheckForOverflow(scratch1, left, right, scratch2, r0);
__ BranchOnOverflow(&stub_call);
- __ mr(right, scratch1);
+ __ LoadRR(right, scratch1);
break;
}
case Token::MUL: {
Label mul_zero;
-#if V8_TARGET_ARCH_PPC64
+#if V8_TARGET_ARCH_S390X
// Remove tag from both operands.
__ SmiUntag(ip, right);
- __ SmiUntag(r0, left);
- __ Mul(scratch1, r0, ip);
+ __ SmiUntag(scratch2, left);
+ __ mr_z(scratch1, ip);
// Check for overflowing the smi range - no overflow if higher 33 bits of
// the result are identical.
- __ TestIfInt32(scratch1, r0);
+ __ lr(ip, scratch2); // 32-bit load
+ __ sra(ip, Operand(31));
+ __ cr_z(ip, scratch1); // 32-bit compare
__ bne(&stub_call);
#else
__ SmiUntag(ip, right);
- __ mullw(scratch1, left, ip);
- __ mulhw(scratch2, left, ip);
+ __ LoadRR(scratch2, left); // load into low order of reg pair
+ __ mr_z(scratch1, ip); // R4:R5 = R5 * ip
// Check for overflowing the smi range - no overflow if higher 33 bits of
// the result are identical.
- __ TestIfInt32(scratch2, scratch1, ip);
+ __ TestIfInt32(scratch1, scratch2, ip);
__ bne(&stub_call);
#endif
// Go slow on zero result to handle -0.
- __ cmpi(scratch1, Operand::Zero());
- __ beq(&mul_zero);
-#if V8_TARGET_ARCH_PPC64
- __ SmiTag(right, scratch1);
+ __ chi(scratch2, Operand::Zero());
+ __ beq(&mul_zero, Label::kNear);
+#if V8_TARGET_ARCH_S390X
+ __ SmiTag(right, scratch2);
#else
- __ mr(right, scratch1);
+ __ LoadRR(right, scratch2);
#endif
__ b(&done);
// We need -0 if we were multiplying a negative number with 0 to get 0.
// We know one of them was zero.
__ bind(&mul_zero);
- __ add(scratch2, right, left);
- __ cmpi(scratch2, Operand::Zero());
+ __ AddP(scratch2, right, left);
+ __ CmpP(scratch2, Operand::Zero());
__ blt(&stub_call);
__ LoadSmiLiteral(right, Smi::FromInt(0));
break;
}
case Token::BIT_OR:
- __ orx(right, left, right);
+ __ OrP(right, left);
break;
case Token::BIT_AND:
- __ and_(right, left, right);
+ __ AndP(right, left);
break;
case Token::BIT_XOR:
- __ xor_(right, left, right);
+ __ XorP(right, left);
break;
default:
UNREACHABLE();
}
__ bind(&done);
- context()->Plug(r3);
+ context()->Plug(r2);
}
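The MUL fast path multiplies the untagged operands in 64 bits and keeps the result only if it sign-extends from 32 bits (the "higher 33 bits identical" test), bailing to the stub when the product is 0 and an operand was negative, since only the stub can produce -0. A standalone sketch of those checks (C++17 for std::optional):

#include <cstdint>
#include <optional>

std::optional<int32_t> SmiMulFastPath(int32_t left, int32_t right) {
  int64_t product = static_cast<int64_t>(left) * right;
  // "Higher 33 bits identical": the product must sign-extend from 32 bits.
  if (product != static_cast<int32_t>(product)) return std::nullopt;
  // A zero result with a negative operand must yield -0, which smis can't
  // represent; left + right is just the nonzero operand here (one is zero).
  if (product == 0 && static_cast<int64_t>(left) + right < 0)
    return std::nullopt;
  return static_cast<int32_t>(product);
}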
-
void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
for (int i = 0; i < lit->properties()->length(); i++) {
ObjectLiteral::Property* property = lit->properties()->at(i);
Expression* value = property->value();
- Register scratch = r4;
+ Register scratch = r3;
if (property->is_static()) {
__ LoadP(scratch, MemOperand(sp, kPointerSize)); // constructor
} else {
@@ -2164,7 +2145,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
// not need to do this for every property.
if (property->is_static() && property->is_computed_name()) {
__ CallRuntime(Runtime::kThrowIfStaticPrototype);
- __ push(r3);
+ __ push(r2);
}
VisitForStackValue(value);
@@ -2199,17 +2180,15 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
}
}
-
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op) {
- PopOperand(r4);
+ PopOperand(r3);
Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op).code();
JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code.
CallIC(code, expr->BinaryOperationFeedbackId());
patch_site.EmitPatchInfo();
- context()->Plug(r3);
+ context()->Plug(r2);
}
-
void FullCodeGenerator::EmitAssignment(Expression* expr,
FeedbackVectorSlot slot) {
DCHECK(expr->IsValidReferenceExpressionOrThis());
@@ -2225,9 +2204,9 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_PROPERTY: {
- PushOperand(r3); // Preserve value.
+ PushOperand(r2); // Preserve value.
VisitForAccumulatorValue(prop->obj());
- __ Move(StoreDescriptor::ReceiverRegister(), r3);
+ __ Move(StoreDescriptor::ReceiverRegister(), r2);
PopOperand(StoreDescriptor::ValueRegister()); // Restore value.
__ mov(StoreDescriptor::NameRegister(),
Operand(prop->key()->AsLiteral()->value()));
@@ -2236,47 +2215,47 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
case NAMED_SUPER_PROPERTY: {
- PushOperand(r3);
+ PushOperand(r2);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- // stack: value, this; r3: home_object
- Register scratch = r5;
- Register scratch2 = r6;
- __ mr(scratch, result_register()); // home_object
- __ LoadP(r3, MemOperand(sp, kPointerSize)); // value
+ // stack: value, this; r2: home_object
+ Register scratch = r4;
+ Register scratch2 = r5;
+ __ LoadRR(scratch, result_register()); // home_object
+ __ LoadP(r2, MemOperand(sp, kPointerSize)); // value
__ LoadP(scratch2, MemOperand(sp, 0)); // this
__ StoreP(scratch2, MemOperand(sp, kPointerSize)); // this
__ StoreP(scratch, MemOperand(sp, 0)); // home_object
- // stack: this, home_object; r3: value
+ // stack: this, home_object; r2: value
EmitNamedSuperPropertyStore(prop);
break;
}
case KEYED_SUPER_PROPERTY: {
- PushOperand(r3);
+ PushOperand(r2);
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForStackValue(
prop->obj()->AsSuperPropertyReference()->home_object());
VisitForAccumulatorValue(prop->key());
- Register scratch = r5;
- Register scratch2 = r6;
+ Register scratch = r4;
+ Register scratch2 = r5;
__ LoadP(scratch2, MemOperand(sp, 2 * kPointerSize)); // value
- // stack: value, this, home_object; r3: key, r6: value
+ // stack: value, this, home_object; r2: key, r5: value
__ LoadP(scratch, MemOperand(sp, kPointerSize)); // this
__ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
__ LoadP(scratch, MemOperand(sp, 0)); // home_object
__ StoreP(scratch, MemOperand(sp, kPointerSize));
- __ StoreP(r3, MemOperand(sp, 0));
- __ Move(r3, scratch2);
- // stack: this, home_object, key; r3: value.
+ __ StoreP(r2, MemOperand(sp, 0));
+ __ Move(r2, scratch2);
+ // stack: this, home_object, key; r2: value.
EmitKeyedSuperPropertyStore(prop);
break;
}
case KEYED_PROPERTY: {
- PushOperand(r3); // Preserve value.
+ PushOperand(r2); // Preserve value.
VisitForStackValue(prop->obj());
VisitForAccumulatorValue(prop->key());
- __ Move(StoreDescriptor::NameRegister(), r3);
+ __ Move(StoreDescriptor::NameRegister(), r2);
PopOperands(StoreDescriptor::ValueRegister(),
StoreDescriptor::ReceiverRegister());
EmitLoadStoreICSlot(slot);
@@ -2286,23 +2265,21 @@ void FullCodeGenerator::EmitAssignment(Expression* expr,
break;
}
}
- context()->Plug(r3);
+ context()->Plug(r2);
}
-
void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
Variable* var, MemOperand location) {
- __ StoreP(result_register(), location, r0);
+ __ StoreP(result_register(), location);
if (var->IsContextSlot()) {
// RecordWrite may destroy all its register arguments.
- __ mr(r6, result_register());
+ __ LoadRR(r5, result_register());
int offset = Context::SlotOffset(var->index());
- __ RecordWriteContextSlot(r4, offset, r6, r5, kLRHasBeenSaved,
+ __ RecordWriteContextSlot(r3, offset, r5, r4, kLRHasBeenSaved,
kDontSaveFPRegs);
}
}
-
void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
FeedbackVectorSlot slot) {
if (var->IsUnallocated()) {
@@ -2317,12 +2294,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label assign;
- MemOperand location = VarOperand(var, r4);
- __ LoadP(r6, location);
- __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
+ MemOperand location = VarOperand(var, r3);
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
__ bne(&assign);
- __ mov(r6, Operand(var->name()));
- __ push(r6);
+ __ mov(r5, Operand(var->name()));
+ __ push(r5);
__ CallRuntime(Runtime::kThrowReferenceError);
// Perform the assignment.
__ bind(&assign);
@@ -2333,12 +2310,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
DCHECK(!var->IsLookupSlot());
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label const_error;
- MemOperand location = VarOperand(var, r4);
- __ LoadP(r6, location);
- __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
- __ bne(&const_error);
- __ mov(r6, Operand(var->name()));
- __ push(r6);
+ MemOperand location = VarOperand(var, r3);
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ bne(&const_error, Label::kNear);
+ __ mov(r5, Operand(var->name()));
+ __ push(r5);
__ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&const_error);
__ CallRuntime(Runtime::kThrowConstAssignError);
@@ -2347,12 +2324,12 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// Initializing assignment to const {this} needs a write barrier.
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label uninitialized_this;
- MemOperand location = VarOperand(var, r4);
- __ LoadP(r6, location);
- __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
+ MemOperand location = VarOperand(var, r3);
+ __ LoadP(r5, location);
+ __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
__ beq(&uninitialized_this);
- __ mov(r4, Operand(var->name()));
- __ push(r4);
+ __ mov(r3, Operand(var->name()));
+ __ push(r3);
__ CallRuntime(Runtime::kThrowReferenceError);
__ bind(&uninitialized_this);
EmitStoreToStackLocalOrContextSlot(var, location);
@@ -2362,7 +2339,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
if (var->IsLookupSlot()) {
// Assignment to var.
__ Push(var->name());
- __ Push(r3);
+ __ Push(r2);
__ CallRuntime(is_strict(language_mode())
? Runtime::kStoreLookupSlot_Strict
: Runtime::kStoreLookupSlot_Sloppy);
@@ -2370,11 +2347,11 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// Assignment to var or initializing assignment to let/const in harmony
// mode.
DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
- MemOperand location = VarOperand(var, r4);
+ MemOperand location = VarOperand(var, r3);
if (FLAG_debug_code && var->mode() == LET && op == Token::INIT) {
// Check for an uninitialized let binding.
- __ LoadP(r5, location);
- __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ __ LoadP(r4, location);
+ __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
__ Check(eq, kLetBindingReInitialization);
}
EmitStoreToStackLocalOrContextSlot(var, location);
@@ -2383,16 +2360,16 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
// Const initializers need a write barrier.
DCHECK(!var->IsParameter()); // No const parameters.
if (var->IsLookupSlot()) {
- __ push(r3);
- __ mov(r3, Operand(var->name()));
- __ Push(cp, r3); // Context and name.
+ __ push(r2);
+ __ mov(r2, Operand(var->name()));
+ __ Push(cp, r2); // Context and name.
__ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot);
} else {
DCHECK(var->IsStackAllocated() || var->IsContextSlot());
Label skip;
- MemOperand location = VarOperand(var, r4);
- __ LoadP(r5, location);
- __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+ MemOperand location = VarOperand(var, r3);
+ __ LoadP(r4, location);
+ __ CompareRoot(r4, Heap::kTheHoleValueRootIndex);
__ bne(&skip);
EmitStoreToStackLocalOrContextSlot(var, location);
__ bind(&skip);
@@ -2407,7 +2384,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op,
}
}
-
void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a named store IC.
Property* prop = expr->target()->AsProperty();
@@ -2421,44 +2397,41 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
CallStoreIC();
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r3);
+ context()->Plug(r2);
}
-
void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
// Assignment to named property of super.
- // r3 : value
+ // r2 : value
// stack : receiver ('this'), home_object
DCHECK(prop != NULL);
Literal* key = prop->key()->AsLiteral();
DCHECK(key != NULL);
PushOperand(key->value());
- PushOperand(r3);
+ PushOperand(r2);
CallRuntimeWithOperands((is_strict(language_mode())
? Runtime::kStoreToSuper_Strict
: Runtime::kStoreToSuper_Sloppy));
}
-
void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
- // Assignment to named property of super.
+ // Assignment to keyed property of super.
- // r3 : value
+ // r2 : value
// stack : receiver ('this'), home_object, key
DCHECK(prop != NULL);
- PushOperand(r3);
+ PushOperand(r2);
CallRuntimeWithOperands((is_strict(language_mode())
? Runtime::kStoreKeyedToSuper_Strict
: Runtime::kStoreKeyedToSuper_Sloppy));
}
-
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
// Assignment to a property, using a keyed store IC.
PopOperands(StoreDescriptor::ReceiverRegister(),
StoreDescriptor::NameRegister());
- DCHECK(StoreDescriptor::ValueRegister().is(r3));
+ DCHECK(StoreDescriptor::ValueRegister().is(r2));
Handle<Code> ic =
CodeFactory::KeyedStoreIC(isolate(), language_mode()).code();
@@ -2466,10 +2439,9 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
CallIC(ic);
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r3);
+ context()->Plug(r2);
}
-
void FullCodeGenerator::VisitProperty(Property* expr) {
Comment cmnt(masm_, "[ Property");
SetExpressionPosition(expr);
@@ -2479,7 +2451,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (key->IsPropertyName()) {
if (!expr->IsSuperAccess()) {
VisitForAccumulatorValue(expr->obj());
- __ Move(LoadDescriptor::ReceiverRegister(), r3);
+ __ Move(LoadDescriptor::ReceiverRegister(), r2);
EmitNamedPropertyLoad(expr);
} else {
VisitForStackValue(expr->obj()->AsSuperPropertyReference()->this_var());
@@ -2491,7 +2463,7 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
if (!expr->IsSuperAccess()) {
VisitForStackValue(expr->obj());
VisitForAccumulatorValue(expr->key());
- __ Move(LoadDescriptor::NameRegister(), r3);
+ __ Move(LoadDescriptor::NameRegister(), r2);
PopOperand(LoadDescriptor::ReceiverRegister());
EmitKeyedPropertyLoad(expr);
} else {
@@ -2503,16 +2475,14 @@ void FullCodeGenerator::VisitProperty(Property* expr) {
}
}
PrepareForBailoutForId(expr->LoadId(), TOS_REG);
- context()->Plug(r3);
+ context()->Plug(r2);
}
-
void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
ic_total_count_++;
__ Call(code, RelocInfo::CODE_TARGET, ast_id);
}
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
@@ -2527,8 +2497,8 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
}
// Push undefined as receiver. This is patched in the method prologue if it
// is a sloppy mode method.
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
- PushOperand(r0);
+ __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
+ PushOperand(r1);
convert_mode = ConvertReceiverMode::kNullOrUndefined;
} else {
// Load the function from the receiver.
@@ -2538,16 +2508,15 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
EmitNamedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
- __ LoadP(r0, MemOperand(sp, 0));
- PushOperand(r0);
- __ StoreP(r3, MemOperand(sp, kPointerSize));
+ __ LoadP(r1, MemOperand(sp, 0));
+ PushOperand(r1);
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
convert_mode = ConvertReceiverMode::kNotNullOrUndefined;
}
EmitCall(expr, convert_mode);
}
-
void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
@@ -2558,12 +2527,12 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
Literal* key = prop->key()->AsLiteral();
DCHECK(!key->value()->IsSmi());
// Load the function from the receiver.
- const Register scratch = r4;
+ const Register scratch = r3;
SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
VisitForAccumulatorValue(super_ref->home_object());
- __ mr(scratch, r3);
+ __ LoadRR(scratch, r2);
VisitForAccumulatorValue(super_ref->this_var());
- PushOperands(scratch, r3, r3, scratch);
+ PushOperands(scratch, r2, r2, scratch);
PushOperand(key->value());
// Stack here:
@@ -2575,7 +2544,7 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
CallRuntimeWithOperands(Runtime::kLoadFromSuper);
// Replace home_object with target function.
- __ StoreP(r3, MemOperand(sp, kPointerSize));
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
// Stack here:
// - target function
@@ -2583,7 +2552,6 @@ void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
EmitCall(expr);
}
-
// Code common for calls using the IC.
void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
// Load the key.
@@ -2594,19 +2562,18 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
// Load the function from the receiver.
DCHECK(callee->IsProperty());
__ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
- __ Move(LoadDescriptor::NameRegister(), r3);
+ __ Move(LoadDescriptor::NameRegister(), r2);
EmitKeyedPropertyLoad(callee->AsProperty());
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
// Push the target function under the receiver.
__ LoadP(ip, MemOperand(sp, 0));
PushOperand(ip);
- __ StoreP(r3, MemOperand(sp, kPointerSize));
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
EmitCall(expr, ConvertReceiverMode::kNotNullOrUndefined);
}
-
void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
Expression* callee = expr->expression();
DCHECK(callee->IsProperty());
@@ -2615,12 +2582,12 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
SetExpressionPosition(prop);
// Load the function from the receiver.
- const Register scratch = r4;
+ const Register scratch = r3;
SuperPropertyReference* super_ref = prop->obj()->AsSuperPropertyReference();
VisitForAccumulatorValue(super_ref->home_object());
- __ mr(scratch, r3);
+ __ LoadRR(scratch, r2);
VisitForAccumulatorValue(super_ref->this_var());
- PushOperands(scratch, r3, r3, scratch);
+ PushOperands(scratch, r2, r2, scratch);
VisitForStackValue(prop->key());
// Stack here:
@@ -2632,7 +2599,7 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
CallRuntimeWithOperands(Runtime::kLoadKeyedFromSuper);
// Replace home_object with target function.
- __ StoreP(r3, MemOperand(sp, kPointerSize));
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
// Stack here:
// - target function
@@ -2640,7 +2607,6 @@ void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
EmitCall(expr);
}
-
void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
// Load the arguments.
ZoneList<Expression*>* args = expr->arguments();
@@ -2662,8 +2628,8 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
Handle<Code> ic =
CodeFactory::CallIC(isolate(), arg_count, mode, expr->tail_call_mode())
.code();
- __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackICSlot()));
- __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallFeedbackICSlot()));
+ __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
// Don't assign a type feedback id to the IC, since type feedback is provided
// by the vector above.
CallIC(ic);
@@ -2672,33 +2638,31 @@ void FullCodeGenerator::EmitCall(Call* expr, ConvertReceiverMode mode) {
RecordJSReturnSite(expr);
// Restore context register.
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r3);
+ context()->DropAndPlug(1, r2);
}
-
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
- // r7: copy of the first argument or undefined if it doesn't exist.
+ // r6: copy of the first argument or undefined if it doesn't exist.
if (arg_count > 0) {
- __ LoadP(r7, MemOperand(sp, arg_count * kPointerSize), r0);
+ __ LoadP(r6, MemOperand(sp, arg_count * kPointerSize), r0);
} else {
- __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+ __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
}
- // r6: the receiver of the enclosing function.
- __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ // r5: the receiver of the enclosing function.
+ __ LoadP(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- // r5: language mode.
- __ LoadSmiLiteral(r5, Smi::FromInt(language_mode()));
+ // r4: language mode.
+ __ LoadSmiLiteral(r4, Smi::FromInt(language_mode()));
- // r4: the start position of the scope the calls resides in.
- __ LoadSmiLiteral(r4, Smi::FromInt(scope()->start_position()));
+ // r3: the start position of the scope the call resides in.
+ __ LoadSmiLiteral(r3, Smi::FromInt(scope()->start_position()));
// Do the runtime call.
- __ Push(r7, r6, r5, r4);
+ __ Push(r6, r5, r4, r3);
__ CallRuntime(Runtime::kResolvePossiblyDirectEval);
}
-
// See http://www.ecma-international.org/ecma-262/6.0/#sec-function-calls.
void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
VariableProxy* callee = expr->expression()->AsVariableProxy();
@@ -2710,11 +2674,11 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
EmitDynamicLookupFastCase(callee, NOT_INSIDE_TYPEOF, &slow, &done);
__ bind(&slow);
- // Call the runtime to find the function to call (returned in r3) and
- // the object holding it (returned in r4).
+ // Call the runtime to find the function to call (returned in r2) and
+ // the object holding it (returned in r3).
__ Push(callee->name());
__ CallRuntime(Runtime::kLoadLookupSlotForCall);
- PushOperands(r3, r4); // Function, receiver.
+ PushOperands(r2, r3); // Function, receiver.
PrepareForBailoutForId(expr->LookupId(), NO_REGISTERS);
// If fast case code has been generated, emit code to push the function
@@ -2724,23 +2688,22 @@ void FullCodeGenerator::PushCalleeAndWithBaseObject(Call* expr) {
__ b(&call);
__ bind(&done);
// Push function.
- __ push(r3);
+ __ push(r2);
// Pass undefined as the receiver, which is the WithBaseObject of a
// non-object environment record. If the callee is sloppy, it will patch
// it up to be the global receiver.
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
- __ push(r4);
+ __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+ __ push(r3);
__ bind(&call);
}
} else {
VisitForStackValue(callee);
// refEnv.WithBaseObject()
- __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
- PushOperand(r5); // Reserved receiver slot.
+ __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+ PushOperand(r4); // Reserved receiver slot.
}
}
-
void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
// to resolve the function we need to call. Then we call the resolved
@@ -2757,19 +2720,19 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
// Push a copy of the function (found below the arguments) and
// resolve eval.
- __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- __ push(r4);
+ __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ push(r3);
EmitResolvePossiblyDirectEval(arg_count);
// Touch up the stack with the resolved function.
- __ StoreP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ StoreP(r2, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
PrepareForBailoutForId(expr->EvalId(), NO_REGISTERS);
// Record source position for debugger.
SetCallPosition(expr);
- __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- __ mov(r3, Operand(arg_count));
+ __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ mov(r2, Operand(arg_count));
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kAny,
expr->tail_call_mode()),
RelocInfo::CODE_TARGET);
@@ -2777,10 +2740,9 @@ void FullCodeGenerator::EmitPossiblyEvalCall(Call* expr) {
RecordJSReturnSite(expr);
// Restore context register.
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r3);
+ context()->DropAndPlug(1, r2);
}
-
void FullCodeGenerator::VisitCallNew(CallNew* expr) {
Comment cmnt(masm_, "[ CallNew");
// According to ECMA-262, section 11.2.2, page 44, the function
@@ -2804,13 +2766,13 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load function and argument count into r4 and r3.
- __ mov(r3, Operand(arg_count));
- __ LoadP(r4, MemOperand(sp, arg_count * kPointerSize), r0);
+ // Load function and argument count into r3 and r2.
+ __ mov(r2, Operand(arg_count));
+ __ LoadP(r3, MemOperand(sp, arg_count * kPointerSize), r0);
// Record call targets in unoptimized code.
- __ EmitLoadTypeFeedbackVector(r5);
- __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallNewFeedbackSlot()));
+ __ EmitLoadTypeFeedbackVector(r4);
+ __ LoadSmiLiteral(r5, SmiFromSlot(expr->CallNewFeedbackSlot()));
CallConstructStub stub(isolate());
__ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
@@ -2818,10 +2780,9 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) {
PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
// Restore context register.
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(r3);
+ context()->Plug(r2);
}
-
void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
SuperCallReference* super_call_ref =
expr->expression()->AsSuperCallReference();
@@ -2848,13 +2809,13 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// constructor invocation.
SetConstructCallPosition(expr);
- // Load new target into r6.
+ // Load new target into r5.
VisitForAccumulatorValue(super_call_ref->new_target_var());
- __ mr(r6, result_register());
+ __ LoadRR(r5, result_register());
- // Load function and argument count into r1 and r0.
+ // Load function and argument count into r3 and r2.
- __ mov(r3, Operand(arg_count));
- __ LoadP(r4, MemOperand(sp, arg_count * kPointerSize));
+ __ mov(r2, Operand(arg_count));
+ __ LoadP(r3, MemOperand(sp, arg_count * kPointerSize));
__ Call(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
@@ -2863,17 +2824,16 @@ void FullCodeGenerator::EmitSuperConstructorCall(Call* expr) {
// Restore context register.
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->Plug(r3);
+ context()->Plug(r2);
}
-
void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
Label materialize_true, materialize_false;
Label* if_true = NULL;
Label* if_false = NULL;
Label* fall_through = NULL;
@@ -2881,13 +2841,12 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
&if_false, &fall_through);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ TestIfSmi(r3, r0);
- Split(eq, if_true, if_false, fall_through, cr0);
+ __ TestIfSmi(r2);
+ Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
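With kSmiTag == 0 and a one-bit tag, the whole %_IsSmi test reduces to checking the low bit, which is what TestIfSmi (TMLL on s390) does. Equivalent C++:

#include <cstdint>

// A tagged word is a smi iff its low tag bit is clear (kSmiTag == 0).
inline bool IsSmi(intptr_t tagged) { return (tagged & 1) == 0; }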
-
void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2901,15 +2860,14 @@ void FullCodeGenerator::EmitIsJSReceiver(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
&if_false, &fall_through);
- __ JumpIfSmi(r3, if_false);
- __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE);
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r3, r3, FIRST_JS_RECEIVER_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(ge, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-
void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2923,15 +2881,14 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
&if_false, &fall_through);
- __ JumpIfSmi(r3, if_false);
- __ CompareObjectType(r3, r4, r4, JS_ARRAY_TYPE);
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r3, r3, JS_ARRAY_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-
void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2945,15 +2902,14 @@ void FullCodeGenerator::EmitIsTypedArray(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
&if_false, &fall_through);
- __ JumpIfSmi(r3, if_false);
- __ CompareObjectType(r3, r4, r4, JS_TYPED_ARRAY_TYPE);
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r3, r3, JS_TYPED_ARRAY_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-
void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2967,15 +2923,14 @@ void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
&if_false, &fall_through);
- __ JumpIfSmi(r3, if_false);
- __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE);
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r3, r3, JS_REGEXP_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-
void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -2989,15 +2944,14 @@ void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
&if_false, &fall_through);
- __ JumpIfSmi(r3, if_false);
- __ CompareObjectType(r3, r4, r4, JS_PROXY_TYPE);
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r3, r3, JS_PROXY_TYPE);
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
-
void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3006,50 +2960,49 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
VisitForAccumulatorValue(args->at(0));
// If the object is not a JSReceiver, we return null.
- __ JumpIfSmi(r3, &null);
+ __ JumpIfSmi(r2, &null);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(r3, r3, r4, FIRST_JS_RECEIVER_TYPE);
- // Map is now in r3.
+ __ CompareObjectType(r2, r2, r3, FIRST_JS_RECEIVER_TYPE);
+ // Map is now in r2.
__ blt(&null);
// Return 'Function' for JSFunction objects.
- __ cmpi(r4, Operand(JS_FUNCTION_TYPE));
+ __ CmpP(r3, Operand(JS_FUNCTION_TYPE));
__ beq(&function);
// Check if the constructor in the map is a JS function.
- Register instance_type = r5;
- __ GetMapConstructor(r3, r3, r4, instance_type);
- __ cmpi(instance_type, Operand(JS_FUNCTION_TYPE));
- __ bne(&non_function_constructor);
+ Register instance_type = r4;
+ __ GetMapConstructor(r2, r2, r3, instance_type);
+ __ CmpP(instance_type, Operand(JS_FUNCTION_TYPE));
+ __ bne(&non_function_constructor, Label::kNear);
- // r3 now contains the constructor function. Grab the
+ // r2 now contains the constructor function. Grab the
// instance class name from there.
- __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
- __ LoadP(r3,
- FieldMemOperand(r3, SharedFunctionInfo::kInstanceClassNameOffset));
- __ b(&done);
+ __ LoadP(r2, FieldMemOperand(r2, JSFunction::kSharedFunctionInfoOffset));
+ __ LoadP(r2,
+ FieldMemOperand(r2, SharedFunctionInfo::kInstanceClassNameOffset));
+ __ b(&done, Label::kNear);
// Functions have class 'Function'.
__ bind(&function);
- __ LoadRoot(r3, Heap::kFunction_stringRootIndex);
- __ b(&done);
+ __ LoadRoot(r2, Heap::kFunction_stringRootIndex);
+ __ b(&done, Label::kNear);
// Objects with a non-function constructor have class 'Object'.
__ bind(&non_function_constructor);
- __ LoadRoot(r3, Heap::kObject_stringRootIndex);
- __ b(&done);
+ __ LoadRoot(r2, Heap::kObject_stringRootIndex);
+ __ b(&done, Label::kNear);
// Non-JS objects have class null.
__ bind(&null);
- __ LoadRoot(r3, Heap::kNullValueRootIndex);
+ __ LoadRoot(r2, Heap::kNullValueRootIndex);
// All done.
__ bind(&done);
- context()->Plug(r3);
+ context()->Plug(r2);
}
-
void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
@@ -3057,24 +3010,23 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
Label done;
// If the object is a smi return the object.
- __ JumpIfSmi(r3, &done);
+ __ JumpIfSmi(r2, &done);
// If the object is not a value type, return the object.
- __ CompareObjectType(r3, r4, r4, JS_VALUE_TYPE);
- __ bne(&done);
- __ LoadP(r3, FieldMemOperand(r3, JSValue::kValueOffset));
+ __ CompareObjectType(r2, r3, r3, JS_VALUE_TYPE);
+ __ bne(&done, Label::kNear);
+ __ LoadP(r2, FieldMemOperand(r2, JSValue::kValueOffset));
__ bind(&done);
- context()->Plug(r3);
+ context()->Plug(r2);
}
-
void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
- Register string = r3;
- Register index = r4;
- Register value = r5;
+ Register string = r2;
+ Register index = r3;
+ Register value = r4;
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
@@ -3082,31 +3034,30 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
PopOperands(index, value);
if (FLAG_debug_code) {
- __ TestIfSmi(value, r0);
+ __ TestIfSmi(value);
__ Check(eq, kNonSmiValue, cr0);
- __ TestIfSmi(index, r0);
+ __ TestIfSmi(index);
__ Check(eq, kNonSmiIndex, cr0);
- __ SmiUntag(index, index);
+ __ SmiUntag(index);
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
__ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
- __ SmiTag(index, index);
+ __ SmiTag(index);
}
__ SmiUntag(value);
- __ addi(ip, string, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
- __ SmiToByteArrayOffset(r0, index);
- __ stbx(value, MemOperand(ip, r0));
+ __ AddP(ip, string, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+ __ SmiToByteArrayOffset(r1, index);
+ __ StoreByte(value, MemOperand(ip, r1));
context()->Plug(string);
}
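SmiToByteArrayOffset turns the still-tagged index into a byte offset in one shift. A sketch assuming the 64-bit smi layout (the smi value lives in the upper 32 bits, so untagging is an arithmetic shift right by 32; the 31-bit build shifts by 1 instead):

#include <cstdint>

constexpr int kSmiShift64 = 32;  // 64-bit smi layout assumption

// One byte per element, so the untagged index is already the byte offset.
inline int64_t SmiToByteArrayOffset(int64_t tagged_index) {
  return tagged_index >> kSmiShift64;
}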
-
void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(3, args->length());
- Register string = r3;
- Register index = r4;
- Register value = r5;
+ Register string = r2;
+ Register index = r3;
+ Register value = r4;
VisitForStackValue(args->at(0)); // index
VisitForStackValue(args->at(1)); // value
@@ -3114,9 +3065,9 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
PopOperands(index, value);
if (FLAG_debug_code) {
- __ TestIfSmi(value, r0);
+ __ TestIfSmi(value);
__ Check(eq, kNonSmiValue, cr0);
- __ TestIfSmi(index, r0);
+ __ TestIfSmi(index);
__ Check(eq, kNonSmiIndex, cr0);
__ SmiUntag(index, index);
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
@@ -3125,37 +3076,35 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
}
__ SmiUntag(value);
- __ addi(ip, string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ SmiToShortArrayOffset(r0, index);
- __ sthx(value, MemOperand(ip, r0));
+ __ SmiToShortArrayOffset(r1, index);
+ __ StoreHalfWord(value, MemOperand(r1, string, SeqTwoByteString::kHeaderSize -
+ kHeapObjectTag));
context()->Plug(string);
}
-
void FullCodeGenerator::EmitToInteger(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
- // Load the argument into r3 and convert it.
+ // Load the argument into r2 and convert it.
VisitForAccumulatorValue(args->at(0));
// Convert the object to an integer.
Label done_convert;
- __ JumpIfSmi(r3, &done_convert);
- __ Push(r3);
+ __ JumpIfSmi(r2, &done_convert);
+ __ Push(r2);
__ CallRuntime(Runtime::kToInteger);
__ bind(&done_convert);
- context()->Plug(r3);
+ context()->Plug(r2);
}
-
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
Label done;
- StringCharFromCodeGenerator generator(r3, r4);
+ StringCharFromCodeGenerator generator(r2, r3);
generator.GenerateFast(masm_);
__ b(&done);
@@ -3163,19 +3112,18 @@ void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
generator.GenerateSlow(masm_, call_helper);
__ bind(&done);
- context()->Plug(r4);
+ context()->Plug(r3);
}
-
void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
- Register object = r4;
- Register index = r3;
- Register result = r6;
+ Register object = r3;
+ Register index = r2;
+ Register result = r5;
PopOperand(object);
@@ -3207,17 +3155,16 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
context()->Plug(result);
}
-
void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForAccumulatorValue(args->at(1));
- Register object = r4;
- Register index = r3;
- Register scratch = r6;
- Register result = r3;
+ Register object = r3;
+ Register index = r2;
+ Register scratch = r5;
+ Register result = r2;
PopOperand(object);
@@ -3249,7 +3196,6 @@ void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
context()->Plug(result);
}
-
void FullCodeGenerator::EmitCall(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_LE(2, args->length());
@@ -3258,20 +3204,19 @@ void FullCodeGenerator::EmitCall(CallRuntime* expr) {
VisitForStackValue(arg);
}
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
- // Move target to r4.
+ // Move target to r3.
int const argc = args->length() - 2;
- __ LoadP(r4, MemOperand(sp, (argc + 1) * kPointerSize));
+ __ LoadP(r3, MemOperand(sp, (argc + 1) * kPointerSize));
// Call the target.
- __ mov(r3, Operand(argc));
+ __ mov(r2, Operand(argc));
__ Call(isolate()->builtins()->Call(), RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(argc + 1);
// Restore context register.
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Discard the function left on TOS.
- context()->DropAndPlug(1, r3);
+ context()->DropAndPlug(1, r2);
}
-
void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
VisitForAccumulatorValue(args->at(0));
@@ -3283,53 +3228,47 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
&if_false, &fall_through);
- __ lwz(r3, FieldMemOperand(r3, String::kHashFieldOffset));
- // PPC - assume ip is free
- __ mov(ip, Operand(String::kContainsCachedArrayIndexMask));
- __ and_(r0, r3, ip, SetRC);
+ __ LoadlW(r2, FieldMemOperand(r2, String::kHashFieldOffset));
+ __ AndP(r0, r2, Operand(String::kContainsCachedArrayIndexMask));
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- Split(eq, if_true, if_false, fall_through, cr0);
+ Split(eq, if_true, if_false, fall_through);
context()->Plug(if_true, if_false);
}
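A string's hash field doubles as a cache for an array index, and a single mask test tells whether one is present (mask bits clear means cached). Sketch with an illustrative mask constant, not V8's actual value:

#include <cstdint>

constexpr uint32_t kContainsCachedArrayIndexMask = 1u << 31;  // illustrative only

inline bool HasCachedArrayIndex(uint32_t hash_field) {
  // AndP r0, r2, Operand(mask); Split(eq, ...): zero means an index is cached.
  return (hash_field & kContainsCachedArrayIndexMask) == 0;
}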
-
void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK(args->length() == 1);
VisitForAccumulatorValue(args->at(0));
- __ AssertString(r3);
+ __ AssertString(r2);
- __ lwz(r3, FieldMemOperand(r3, String::kHashFieldOffset));
- __ IndexFromHash(r3, r3);
+ __ LoadlW(r2, FieldMemOperand(r2, String::kHashFieldOffset));
+ __ IndexFromHash(r2, r2);
- context()->Plug(r3);
+ context()->Plug(r2);
}
-
void FullCodeGenerator::EmitGetSuperConstructor(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(1, args->length());
VisitForAccumulatorValue(args->at(0));
- __ AssertFunction(r3);
- __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadP(r3, FieldMemOperand(r3, Map::kPrototypeOffset));
- context()->Plug(r3);
+ __ AssertFunction(r2);
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ LoadP(r2, FieldMemOperand(r2, Map::kPrototypeOffset));
+ context()->Plug(r2);
}
-
void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
DCHECK(expr->arguments()->length() == 0);
ExternalReference debug_is_active =
ExternalReference::debug_is_active_address(isolate());
__ mov(ip, Operand(debug_is_active));
- __ lbz(r3, MemOperand(ip));
- __ SmiTag(r3);
- context()->Plug(r3);
+ __ LoadlB(r2, MemOperand(ip));
+ __ SmiTag(r2);
+ context()->Plug(r2);
}
-
void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
DCHECK_EQ(2, args->length());
@@ -3338,15 +3277,15 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
Label runtime, done;
- __ Allocate(JSIteratorResult::kSize, r3, r5, r6, &runtime, TAG_OBJECT);
- __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r4);
- __ Pop(r5, r6);
- __ LoadRoot(r7, Heap::kEmptyFixedArrayRootIndex);
- __ StoreP(r4, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
- __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
- __ StoreP(r5, FieldMemOperand(r3, JSIteratorResult::kValueOffset), r0);
- __ StoreP(r6, FieldMemOperand(r3, JSIteratorResult::kDoneOffset), r0);
+ __ Allocate(JSIteratorResult::kSize, r2, r4, r5, &runtime, TAG_OBJECT);
+ __ LoadNativeContextSlot(Context::ITERATOR_RESULT_MAP_INDEX, r3);
+ __ Pop(r4, r5);
+ __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+ __ StoreP(r3, FieldMemOperand(r2, HeapObject::kMapOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0);
+ __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0);
+ __ StoreP(r4, FieldMemOperand(r2, JSIteratorResult::kValueOffset), r0);
+ __ StoreP(r5, FieldMemOperand(r2, JSIteratorResult::kDoneOffset), r0);
STATIC_ASSERT(JSIteratorResult::kSize == 5 * kPointerSize);
__ b(&done);
@@ -3354,32 +3293,29 @@ void FullCodeGenerator::EmitCreateIterResultObject(CallRuntime* expr) {
CallRuntimeWithOperands(Runtime::kCreateIterResultObject);
__ bind(&done);
- context()->Plug(r3);
+ context()->Plug(r2);
}
-
void FullCodeGenerator::EmitLoadJSRuntimeFunction(CallRuntime* expr) {
// Push undefined as the receiver.
- __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
- PushOperand(r3);
+ __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
+ PushOperand(r2);
- __ LoadNativeContextSlot(expr->context_index(), r3);
+ __ LoadNativeContextSlot(expr->context_index(), r2);
}
-
void FullCodeGenerator::EmitCallJSRuntimeFunction(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
SetCallPosition(expr);
- __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
- __ mov(r3, Operand(arg_count));
+ __ LoadP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+ __ mov(r2, Operand(arg_count));
__ Call(isolate()->builtins()->Call(ConvertReceiverMode::kNullOrUndefined),
RelocInfo::CODE_TARGET);
OperandStackDepthDecrement(arg_count + 1);
}
-
void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
ZoneList<Expression*>* args = expr->arguments();
int arg_count = args->length();
@@ -3391,7 +3327,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Push the target function under the receiver.
__ LoadP(ip, MemOperand(sp, 0));
PushOperand(ip);
- __ StoreP(r3, MemOperand(sp, kPointerSize));
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
// Push the arguments ("left-to-right").
for (int i = 0; i < arg_count; i++) {
@@ -3404,7 +3340,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
// Restore context register.
__ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
- context()->DropAndPlug(1, r3);
+ context()->DropAndPlug(1, r2);
} else {
const Runtime::Function* function = expr->function();
@@ -3427,13 +3363,12 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
PrepareForBailoutForId(expr->CallId(), NO_REGISTERS);
__ CallRuntime(expr->function(), arg_count);
OperandStackDepthDecrement(arg_count);
- context()->Plug(r3);
+ context()->Plug(r2);
}
}
}
}
-
void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
switch (expr->op()) {
case Token::DELETE: {
@@ -3447,7 +3382,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
CallRuntimeWithOperands(is_strict(language_mode())
? Runtime::kDeleteProperty_Strict
: Runtime::kDeleteProperty_Sloppy);
- context()->Plug(r3);
+ context()->Plug(r2);
} else if (proxy != NULL) {
Variable* var = proxy->var();
// Delete of an unqualified identifier is disallowed in strict mode but
@@ -3455,11 +3390,11 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
bool is_this = var->HasThisName(isolate());
DCHECK(is_sloppy(language_mode()) || is_this);
if (var->IsUnallocatedOrGlobalSlot()) {
- __ LoadGlobalObject(r5);
- __ mov(r4, Operand(var->name()));
- __ Push(r5, r4);
+ __ LoadGlobalObject(r4);
+ __ mov(r3, Operand(var->name()));
+ __ Push(r4, r3);
__ CallRuntime(Runtime::kDeleteProperty_Sloppy);
- context()->Plug(r3);
+ context()->Plug(r2);
} else if (var->IsStackAllocated() || var->IsContextSlot()) {
// Result of deleting non-global, non-dynamic variables is false.
// The subexpression does not have side effects.
@@ -3469,7 +3404,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
// context where the variable was introduced.
__ Push(var->name());
__ CallRuntime(Runtime::kDeleteLookupSlot);
- context()->Plug(r3);
+ context()->Plug(r2);
}
} else {
// Result of deleting non-property, non-variable reference is true.
@@ -3511,13 +3446,13 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
if (!context()->IsAccumulatorValue()) OperandStackDepthIncrement(1);
__ bind(&materialize_true);
PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
- __ LoadRoot(r3, Heap::kTrueValueRootIndex);
- if (context()->IsStackValue()) __ push(r3);
+ __ LoadRoot(r2, Heap::kTrueValueRootIndex);
+ if (context()->IsStackValue()) __ push(r2);
__ b(&done);
__ bind(&materialize_false);
PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
- __ LoadRoot(r3, Heap::kFalseValueRootIndex);
- if (context()->IsStackValue()) __ push(r3);
+ __ LoadRoot(r2, Heap::kFalseValueRootIndex);
+ if (context()->IsStackValue()) __ push(r2);
__ bind(&done);
}
break;
@@ -3529,10 +3464,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
AccumulatorValueContext context(this);
VisitForTypeofValue(expr->expression());
}
- __ mr(r6, r3);
+ __ LoadRR(r5, r2);
TypeofStub typeof_stub(isolate());
__ CallStub(&typeof_stub);
- context()->Plug(r3);
+ context()->Plug(r2);
break;
}
@@ -3541,7 +3476,6 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
}
}
-
void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
DCHECK(expr->expression()->IsValidReferenceExpressionOrThis());
@@ -3575,7 +3509,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
PushOperand(result_register());
- const Register scratch = r4;
+ const Register scratch = r3;
__ LoadP(scratch, MemOperand(sp, kPointerSize));
PushOperands(scratch, result_register());
EmitNamedSuperPropertyLoad(prop);
@@ -3586,9 +3520,9 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
VisitForStackValue(prop->obj()->AsSuperPropertyReference()->this_var());
VisitForAccumulatorValue(
prop->obj()->AsSuperPropertyReference()->home_object());
- const Register scratch = r4;
- const Register scratch1 = r5;
- __ mr(scratch, result_register());
+ const Register scratch = r3;
+ const Register scratch1 = r4;
+ __ LoadRR(scratch, result_register());
VisitForAccumulatorValue(prop->key());
PushOperands(scratch, result_register());
__ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
@@ -3627,7 +3561,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
int count_value = expr->op() == Token::INC ? 1 : -1;
if (ShouldInlineSmiCase(expr->op())) {
Label slow;
- patch_site.EmitJumpIfNotSmi(r3, &slow);
+ patch_site.EmitJumpIfNotSmi(r2, &slow);
// Save result for postfix expressions.
if (expr->is_postfix()) {
@@ -3637,31 +3571,31 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- __ push(r3);
+ __ push(r2);
break;
case NAMED_PROPERTY:
- __ StoreP(r3, MemOperand(sp, kPointerSize));
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
break;
case NAMED_SUPER_PROPERTY:
- __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
+ __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
break;
case KEYED_PROPERTY:
- __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
+ __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
break;
case KEYED_SUPER_PROPERTY:
- __ StoreP(r3, MemOperand(sp, 3 * kPointerSize));
+ __ StoreP(r2, MemOperand(sp, 3 * kPointerSize));
break;
}
}
}
- Register scratch1 = r4;
- Register scratch2 = r5;
+ Register scratch1 = r3;
+ Register scratch2 = r4;
__ LoadSmiLiteral(scratch1, Smi::FromInt(count_value));
- __ AddAndCheckForOverflow(r3, r3, scratch1, scratch2, r0);
+ __ AddAndCheckForOverflow(r2, r2, scratch1, scratch2, r0);
__ BranchOnNoOverflow(&done);
// Call stub. Undo operation first.
- __ sub(r3, r3, scratch1);
+ __ SubP(r2, r2, scratch1);
__ b(&stub_call);
__ bind(&slow);
}
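The inlined fast path above adds the smi constant, branches away if AddAndCheckForOverflow detected overflow, and SubP-undoes the add before falling into the stub. A hedged C++ sketch of the same contract (SmiCount is illustrative; the generated code undoes the add in place rather than deferring the write):

#include <cstdint>

// Returns false and leaves *value untouched on overflow, matching the
// undo-then-call-stub path in the generated code.
bool SmiCount(intptr_t* value, intptr_t smi_delta) {
  intptr_t result;
  if (__builtin_add_overflow(*value, smi_delta, &result)) return false;
  *value = result;
  return true;
}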
@@ -3679,27 +3613,27 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
// of the stack.
switch (assign_type) {
case VARIABLE:
- PushOperand(r3);
+ PushOperand(r2);
break;
case NAMED_PROPERTY:
- __ StoreP(r3, MemOperand(sp, kPointerSize));
+ __ StoreP(r2, MemOperand(sp, kPointerSize));
break;
case NAMED_SUPER_PROPERTY:
- __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
+ __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
break;
case KEYED_PROPERTY:
- __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
+ __ StoreP(r2, MemOperand(sp, 2 * kPointerSize));
break;
case KEYED_SUPER_PROPERTY:
- __ StoreP(r3, MemOperand(sp, 3 * kPointerSize));
+ __ StoreP(r2, MemOperand(sp, 3 * kPointerSize));
break;
}
}
}
__ bind(&stub_call);
- __ mr(r4, r3);
- __ LoadSmiLiteral(r3, Smi::FromInt(count_value));
+ __ LoadRR(r3, r2);
+ __ LoadSmiLiteral(r2, Smi::FromInt(count_value));
SetExpressionPosition(expr);
@@ -3708,7 +3642,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
patch_site.EmitPatchInfo();
__ bind(&done);
- // Store the value returned in r3.
+ // Store the value returned in r2.
switch (assign_type) {
case VARIABLE:
if (expr->is_postfix()) {
@@ -3717,7 +3651,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context.Plug(r3);
+ context.Plug(r2);
}
// For all contexts except EffectContext we have the result on
// top of the stack.
@@ -3728,7 +3662,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
Token::ASSIGN, expr->CountSlot());
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
- context()->Plug(r3);
+ context()->Plug(r2);
}
break;
case NAMED_PROPERTY: {
@@ -3743,7 +3677,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
context()->PlugTOS();
}
} else {
- context()->Plug(r3);
+ context()->Plug(r2);
}
break;
}
@@ -3754,7 +3688,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
context()->PlugTOS();
}
} else {
- context()->Plug(r3);
+ context()->Plug(r2);
}
break;
}
@@ -3765,7 +3699,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
context()->PlugTOS();
}
} else {
- context()->Plug(r3);
+ context()->Plug(r2);
}
break;
}
@@ -3782,14 +3716,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
context()->PlugTOS();
}
} else {
- context()->Plug(r3);
+ context()->Plug(r2);
}
break;
}
}
}
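The switches over assign_type above store the saved postfix value at a depth determined by how many operands the upcoming store consumes. A small table capturing those offsets (SavedValueDepth is a hypothetical mirror of the StoreP displacements, in pointer-size units):

enum AssignType { VARIABLE, NAMED_PROPERTY, NAMED_SUPER_PROPERTY,
                  KEYED_PROPERTY, KEYED_SUPER_PROPERTY };

// VARIABLE pushes a fresh slot; the others overwrite below the operands.
constexpr int SavedValueDepth(AssignType t) {
  return t == VARIABLE ? 0
       : t == NAMED_PROPERTY ? 1
       : t == KEYED_SUPER_PROPERTY ? 3
       : 2;  // NAMED_SUPER_PROPERTY and KEYED_PROPERTY
}

static_assert(SavedValueDepth(KEYED_SUPER_PROPERTY) == 3,
              "this + home object + key sit above the saved value");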
-
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Expression* sub_expr,
Handle<String> check) {
@@ -3808,60 +3741,57 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
Factory* factory = isolate()->factory();
if (String::Equals(check, factory->number_string())) {
- __ JumpIfSmi(r3, if_true);
- __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
- __ cmp(r3, ip);
+ __ JumpIfSmi(r2, if_true);
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ CompareRoot(r2, Heap::kHeapNumberMapRootIndex);
Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->string_string())) {
- __ JumpIfSmi(r3, if_false);
- __ CompareObjectType(r3, r3, r4, FIRST_NONSTRING_TYPE);
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r2, r3, FIRST_NONSTRING_TYPE);
Split(lt, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->symbol_string())) {
- __ JumpIfSmi(r3, if_false);
- __ CompareObjectType(r3, r3, r4, SYMBOL_TYPE);
+ __ JumpIfSmi(r2, if_false);
+ __ CompareObjectType(r2, r2, r3, SYMBOL_TYPE);
Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->boolean_string())) {
- __ CompareRoot(r3, Heap::kTrueValueRootIndex);
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
__ beq(if_true);
- __ CompareRoot(r3, Heap::kFalseValueRootIndex);
+ __ CompareRoot(r2, Heap::kFalseValueRootIndex);
Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->undefined_string())) {
- __ CompareRoot(r3, Heap::kNullValueRootIndex);
+ __ CompareRoot(r2, Heap::kNullValueRootIndex);
__ beq(if_false);
- __ JumpIfSmi(r3, if_false);
+ __ JumpIfSmi(r2, if_false);
// Check for undetectable objects => true.
- __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
- Split(ne, if_true, if_false, fall_through, cr0);
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ tm(FieldMemOperand(r2, Map::kBitFieldOffset),
+ Operand(1 << Map::kIsUndetectable));
+ Split(ne, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->function_string())) {
- __ JumpIfSmi(r3, if_false);
- __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ andi(r4, r4,
+ __ JumpIfSmi(r2, if_false);
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset));
+ __ LoadlB(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
+ __ AndP(r3, r3,
Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- __ cmpi(r4, Operand(1 << Map::kIsCallable));
+ __ CmpP(r3, Operand(1 << Map::kIsCallable));
Split(eq, if_true, if_false, fall_through);
} else if (String::Equals(check, factory->object_string())) {
- __ JumpIfSmi(r3, if_false);
- __ CompareRoot(r3, Heap::kNullValueRootIndex);
+ __ JumpIfSmi(r2, if_false);
+ __ CompareRoot(r2, Heap::kNullValueRootIndex);
__ beq(if_true);
STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
- __ CompareObjectType(r3, r3, r4, FIRST_JS_RECEIVER_TYPE);
+ __ CompareObjectType(r2, r2, r3, FIRST_JS_RECEIVER_TYPE);
__ blt(if_false);
- // Check for callable or undetectable objects => false.
- __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ andi(r0, r4,
- Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
- Split(eq, if_true, if_false, fall_through, cr0);
+ __ tm(FieldMemOperand(r2, Map::kBitFieldOffset),
+ Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
+ Split(eq, if_true, if_false, fall_through);
// clang-format off
#define SIMD128_TYPE(TYPE, Type, type, lane_count, lane_type) \
} else if (String::Equals(check, factory->type##_string())) { \
- __ JumpIfSmi(r3, if_false); \
- __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset)); \
- __ CompareRoot(r3, Heap::k##Type##MapRootIndex); \
+ __ JumpIfSmi(r2, if_false); \
+ __ LoadP(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); \
+ __ CompareRoot(r2, Heap::k##Type##MapRootIndex); \
Split(eq, if_true, if_false, fall_through);
SIMD128_TYPES(SIMD128_TYPE)
#undef SIMD128_TYPE
@@ -3872,7 +3802,6 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
context()->Plug(if_true, if_false);
}
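Several branches above use TM to test Map bit-field flags directly in memory, replacing the PPC load-byte/and-immediate pair. A sketch of the two predicates being computed (the bit positions are placeholders; src/objects.h defines the real Map::kIsCallable and Map::kIsUndetectable):

#include <cstdint>

constexpr uint8_t kIsCallable = 1 << 0;      // placeholder positions
constexpr uint8_t kIsUndetectable = 1 << 4;

bool TypeofIsUndefined(uint8_t map_bit_field) {
  return (map_bit_field & kIsUndetectable) != 0;  // the ne split
}

bool TypeofIsFunction(uint8_t map_bit_field) {
  // Callable and not undetectable: mask both bits, require exactly one.
  return (map_bit_field & (kIsCallable | kIsUndetectable)) == kIsCallable;
}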
-
void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
Comment cmnt(masm_, "[ CompareOperation");
SetExpressionPosition(expr);
@@ -3897,17 +3826,17 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
VisitForStackValue(expr->right());
CallRuntimeWithOperands(Runtime::kHasProperty);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ CompareRoot(r3, Heap::kTrueValueRootIndex);
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
case Token::INSTANCEOF: {
VisitForAccumulatorValue(expr->right());
- PopOperand(r4);
+ PopOperand(r3);
InstanceOfStub stub(isolate());
__ CallStub(&stub);
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
- __ CompareRoot(r3, Heap::kTrueValueRootIndex);
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
Split(eq, if_true, if_false, fall_through);
break;
}
@@ -3915,15 +3844,16 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
default: {
VisitForAccumulatorValue(expr->right());
Condition cond = CompareIC::ComputeCondition(op);
- PopOperand(r4);
+ PopOperand(r3);
bool inline_smi_code = ShouldInlineSmiCase(op);
JumpPatchSite patch_site(masm_);
if (inline_smi_code) {
Label slow_case;
- __ orx(r5, r3, r4);
- patch_site.EmitJumpIfNotSmi(r5, &slow_case);
- __ cmp(r4, r3);
+ __ LoadRR(r4, r3);
+ __ OrP(r4, r2);
+ patch_site.EmitJumpIfNotSmi(r4, &slow_case);
+ __ CmpP(r3, r2);
Split(cond, if_true, if_false, NULL);
__ bind(&slow_case);
}
@@ -3932,7 +3862,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
CallIC(ic, expr->CompareOperationFeedbackId());
patch_site.EmitPatchInfo();
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
- __ cmpi(r3, Operand::Zero());
+ __ CmpP(r2, Operand::Zero());
Split(cond, if_true, if_false, fall_through);
}
}
@@ -3942,7 +3872,6 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
context()->Plug(if_true, if_false);
}
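The inline smi case above ORs the two tagged operands and jumps to the slow path if the result is not a smi; since V8 smis keep a zero tag bit and tagging preserves order, the tagged words can then be compared directly. In sketch form (BothSmis/TaggedLess are illustrative names):

#include <cstdint>

bool BothSmis(intptr_t a, intptr_t b) { return ((a | b) & 1) == 0; }

bool TaggedLess(intptr_t a, intptr_t b) {
  // Only valid after BothSmis(a, b); mirrors CmpP(r3, r2) + Split(cond).
  return a < b;
}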
-
void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Expression* sub_expr,
NilValue nil) {
@@ -3959,47 +3888,38 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
Heap::RootListIndex nil_value = nil == kNullValue
? Heap::kNullValueRootIndex
: Heap::kUndefinedValueRootIndex;
- __ LoadRoot(r4, nil_value);
- __ cmp(r3, r4);
+ __ CompareRoot(r2, nil_value);
Split(eq, if_true, if_false, fall_through);
} else {
- __ JumpIfSmi(r3, if_false);
- __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
- __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
- __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
- Split(ne, if_true, if_false, fall_through, cr0);
+ Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+ CallIC(ic, expr->CompareOperationFeedbackId());
+ __ CompareRoot(r2, Heap::kTrueValueRootIndex);
+ Split(eq, if_true, if_false, fall_through);
}
context()->Plug(if_true, if_false);
}
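Where the PPC code open-coded the undetectable-map test for the non-strict case, this port delegates to CompareNilICStub and merely compares the stub's boolean result against true. Assuming the usual JS abstract-equality rules the stub implements, the sloppy predicate amounts to:

struct Value {
  bool is_null, is_undefined, is_undetectable;
};

// expr == null and expr == undefined (sloppy) accept null, undefined,
// and undetectable objects alike; the strict forms match one value only.
bool CompareNilSloppy(const Value& v) {
  return v.is_null || v.is_undefined || v.is_undetectable;
}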
-
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
- __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
- context()->Plug(r3);
+ __ LoadP(r2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ context()->Plug(r2);
}
-
-Register FullCodeGenerator::result_register() { return r3; }
-
+Register FullCodeGenerator::result_register() { return r2; }
Register FullCodeGenerator::context_register() { return cp; }
-
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
DCHECK_EQ(static_cast<int>(POINTER_SIZE_ALIGN(frame_offset)), frame_offset);
- __ StoreP(value, MemOperand(fp, frame_offset), r0);
+ __ StoreP(value, MemOperand(fp, frame_offset));
}
-
void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
__ LoadP(dst, ContextMemOperand(cp, context_index), r0);
}
-
void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
Scope* closure_scope = scope()->ClosureScope();
- if (closure_scope->is_script_scope() ||
- closure_scope->is_module_scope()) {
+ if (closure_scope->is_script_scope() || closure_scope->is_module_scope()) {
// Contexts nested in the native context have a canonical empty function
// as their closure, not the anonymous closure containing the global
// code.
@@ -4016,51 +3936,53 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
PushOperand(ip);
}
-
// ----------------------------------------------------------------------------
// Non-local control flow support.
void FullCodeGenerator::EnterFinallyBlock() {
- DCHECK(!result_register().is(r4));
+ DCHECK(!result_register().is(r3));
// Store pending message while executing finally block.
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(ip, Operand(pending_message_obj));
- __ LoadP(r4, MemOperand(ip));
- PushOperand(r4);
+ __ LoadP(r3, MemOperand(ip));
+ PushOperand(r3);
ClearPendingMessage();
}
-
void FullCodeGenerator::ExitFinallyBlock() {
- DCHECK(!result_register().is(r4));
+ DCHECK(!result_register().is(r3));
// Restore pending message from stack.
- PopOperand(r4);
+ PopOperand(r3);
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
__ mov(ip, Operand(pending_message_obj));
- __ StoreP(r4, MemOperand(ip));
+ __ StoreP(r3, MemOperand(ip));
}
-
void FullCodeGenerator::ClearPendingMessage() {
- DCHECK(!result_register().is(r4));
+ DCHECK(!result_register().is(r3));
ExternalReference pending_message_obj =
ExternalReference::address_of_pending_message_obj(isolate());
- __ LoadRoot(r4, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
__ mov(ip, Operand(pending_message_obj));
- __ StoreP(r4, MemOperand(ip));
+ __ StoreP(r3, MemOperand(ip));
}
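Taken together, the three functions above implement a save/clear/restore protocol for the isolate's pending message around finally blocks. A self-contained model (PendingMessageScope is illustrative; the real slot is an external reference and the stack is the machine operand stack):

#include <stack>

struct PendingMessageScope {
  const void* slot = nullptr;        // pending_message_obj stand-in
  std::stack<const void*> operands;  // operand-stack stand-in

  void EnterFinally() {   // LoadP + PushOperand, then ClearPendingMessage
    operands.push(slot);
    slot = nullptr;       // the-hole in the real code
  }
  void ExitFinally() {    // PopOperand + StoreP
    slot = operands.top();
    operands.pop();
  }
};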
+void FullCodeGenerator::EmitLoadStoreICSlot(FeedbackVectorSlot slot) {
+ DCHECK(!slot.IsInvalid());
+ __ mov(VectorStoreICTrampolineDescriptor::SlotRegister(),
+ Operand(SmiFromSlot(slot)));
+}
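EmitLoadStoreICSlot materializes the feedback-vector slot as a smi immediate. Assuming the usual V8 smi scheme (the value shifted past a zero tag bit, into the upper word on 64-bit targets), the encoding is:

#include <cstdint>

constexpr int64_t SmiTag(int64_t value, int tag_shift) {
  return value << tag_shift;
}

static_assert(SmiTag(5, 1) == 0xA, "31-bit: shift by one, tag bit clear");
static_assert(SmiTag(5, 32) == 0x500000000LL, "64-bit: value in high word");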
void FullCodeGenerator::DeferredCommands::EmitCommands() {
- DCHECK(!result_register().is(r4));
- // Restore the accumulator (r3) and token (r4).
- __ Pop(r4, result_register());
+ DCHECK(!result_register().is(r3));
+ // Restore the accumulator (r2) and token (r3).
+ __ Pop(r3, result_register());
for (DeferredCommand cmd : commands_) {
Label skip;
- __ CmpSmiLiteral(r4, Smi::FromInt(cmd.token), r0);
+ __ CmpSmiLiteral(r3, Smi::FromInt(cmd.token), r0);
__ bne(&skip);
switch (cmd.command) {
case kReturn:
@@ -4083,67 +4005,75 @@ void FullCodeGenerator::DeferredCommands::EmitCommands() {
#undef __
+#if V8_TARGET_ARCH_S390X
+static const FourByteInstr kInterruptBranchInstruction = 0xA7A40011;
+static const FourByteInstr kOSRBranchInstruction = 0xA7040011;
+static const int16_t kBackEdgeBranchOffset = 0x11 * 2;
+#else
+static const FourByteInstr kInterruptBranchInstruction = 0xA7A4000D;
+static const FourByteInstr kOSRBranchInstruction = 0xA704000D;
+static const int16_t kBackEdgeBranchOffset = 0xD * 2;
+#endif
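Those constants decode as BRC (branch relative on condition): opcode byte 0xA7, a 4-bit condition mask, op-suffix 0x4, then a signed 16-bit offset counted in halfwords, which is why kBackEdgeBranchOffset doubles the immediate. A worked check (EncodeBRC is a sketch, not patch code):

#include <cstdint>

// Mask 0xA branches on greater-or-equal; mask 0x0 never branches (NOP).
constexpr uint32_t EncodeBRC(uint32_t mask, uint16_t halfword_offset) {
  return (0xA7u << 24) | (mask << 20) | (0x4u << 16) | halfword_offset;
}

static_assert(EncodeBRC(0xA, 0x11) == 0xA7A40011, "64-bit interrupt form");
static_assert(EncodeBRC(0x0, 0x11) == 0xA7040011, "64-bit NOP (OSR) form");
static_assert(EncodeBRC(0xA, 0x0D) == 0xA7A4000D, "31-bit interrupt form");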
void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
BackEdgeState target_state,
Code* replacement_code) {
- Address mov_address = Assembler::target_address_from_return_address(pc);
- Address cmp_address = mov_address - 2 * Assembler::kInstrSize;
+ Address call_address = Assembler::target_address_from_return_address(pc);
+ Address branch_address = call_address - 4;
Isolate* isolate = unoptimized_code->GetIsolate();
- CodePatcher patcher(isolate, cmp_address, 1);
+ CodePatcher patcher(isolate, branch_address, 4);
switch (target_state) {
case INTERRUPT: {
// <decrement profiling counter>
- // cmpi r6, 0
- // bge <ok> ;; not changed
- // mov r12, <interrupt stub address>
- // mtlr r12
- // blrl
+ // bge <ok> ;; patched to GE BRC
+ // brasl r14, <interrupt stub address>
// <reset profiling counter>
// ok-label
- patcher.masm()->cmpi(r6, Operand::Zero());
+ patcher.masm()->brc(ge, Operand(kBackEdgeBranchOffset));
break;
}
case ON_STACK_REPLACEMENT:
case OSR_AFTER_STACK_CHECK:
// <decrement profiling counter>
- // crset
- // bge <ok> ;; not changed
- // mov r12, <on-stack replacement address>
- // mtlr r12
- // blrl
+ // brc 0x0, <ok> ;; patched to NOP BRC
+ // brasl r14, <interrupt stub address>
// <reset profiling counter>
// ok-label ----- pc_after points here
-
- // Set the LT bit such that bge is a NOP
- patcher.masm()->crset(Assembler::encode_crbit(cr7, CR_LT));
+ patcher.masm()->brc(CC_NOP, Operand(kBackEdgeBranchOffset));
break;
}
// Replace the stack check address in the call sequence with the
// entry address of the replacement code.
- Assembler::set_target_address_at(isolate, mov_address, unoptimized_code,
+ Assembler::set_target_address_at(isolate, call_address, unoptimized_code,
replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
- unoptimized_code, mov_address, replacement_code);
+ unoptimized_code, call_address, replacement_code);
}
-
BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
Isolate* isolate, Code* unoptimized_code, Address pc) {
- Address mov_address = Assembler::target_address_from_return_address(pc);
- Address cmp_address = mov_address - 2 * Assembler::kInstrSize;
+ Address call_address = Assembler::target_address_from_return_address(pc);
+ Address branch_address = call_address - 4;
Address interrupt_address =
- Assembler::target_address_at(mov_address, unoptimized_code);
-
- if (Assembler::IsCmpImmediate(Assembler::instr_at(cmp_address))) {
+ Assembler::target_address_at(call_address, unoptimized_code);
+
+ DCHECK(BRC == Instruction::S390OpcodeValue(branch_address));
+ // For interrupt, we expect a branch on greater-than-or-equal,
+ // i.e. BRC 0xa, +XXXX (0xA7A4XXXX)
+ FourByteInstr br_instr = Instruction::InstructionBits(
+ reinterpret_cast<const byte*>(branch_address));
+ if (kInterruptBranchInstruction == br_instr) {
DCHECK(interrupt_address == isolate->builtins()->InterruptCheck()->entry());
return INTERRUPT;
}
- DCHECK(Assembler::IsCrSet(Assembler::instr_at(cmp_address)));
+ // Expect BRC to have been patched to a NOP branch,
+ // i.e. BRC 0x0, +XXXX (0xA704XXXX)
+ USE(kOSRBranchInstruction);
+ DCHECK(kOSRBranchInstruction == br_instr);
if (interrupt_address == isolate->builtins()->OnStackReplacement()->entry()) {
return ON_STACK_REPLACEMENT;
@@ -4153,6 +4083,7 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
isolate->builtins()->OsrAfterStackCheck()->entry());
return OSR_AFTER_STACK_CHECK;
}
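In sketch form, the classification above reads the 4-byte branch and keys off its condition mask, then disambiguates the two OSR states by the patched call target (Classify is illustrative; the real code compares full instruction words and builtin entries):

#include <cstdint>

enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT, OSR_AFTER_STACK_CHECK };

BackEdgeState Classify(uint32_t brc_instr, bool target_is_osr) {
  if (((brc_instr >> 20) & 0xF) == 0xA) return INTERRUPT;  // live GE branch
  return target_is_osr ? ON_STACK_REPLACEMENT : OSR_AFTER_STACK_CHECK;
}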
+
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_PPC
+#endif // V8_TARGET_ARCH_S390