Index: src/a64/full-codegen-a64.cc |
diff --git a/src/arm/full-codegen-arm.cc b/src/a64/full-codegen-a64.cc |
similarity index 61% |
copy from src/arm/full-codegen-arm.cc |
copy to src/a64/full-codegen-a64.cc |
index dbce0edf97521e528667161cb9223586e5c0f22f..e14670bbd3585660ea64f6f15e49e54a419d0d52 100644 |
--- a/src/arm/full-codegen-arm.cc |
+++ b/src/a64/full-codegen-a64.cc |
@@ -1,4 +1,4 @@ |
-// Copyright 2012 the V8 project authors. All rights reserved. |
+// Copyright 2013 the V8 project authors. All rights reserved. |
// Redistribution and use in source and binary forms, with or without |
// modification, are permitted provided that the following conditions are |
// met: |
@@ -27,7 +27,7 @@ |
#include "v8.h" |
-#if V8_TARGET_ARCH_ARM |
+#if V8_TARGET_ARCH_A64 |
#include "code-stubs.h" |
#include "codegen.h" |
@@ -39,92 +39,90 @@ |
#include "scopes.h" |
#include "stub-cache.h" |
-#include "arm/code-stubs-arm.h" |
-#include "arm/macro-assembler-arm.h" |
+#include "a64/code-stubs-a64.h" |
+#include "a64/macro-assembler-a64.h" |
namespace v8 { |
namespace internal { |
#define __ ACCESS_MASM(masm_) |
- |
-// A patch site is a location in the code which it is possible to patch. This |
-// class has a number of methods to emit the code which is patchable and the |
-// method EmitPatchInfo to record a marker back to the patchable code. This |
-// marker is a cmp rx, #yyy instruction, and x * 0x00000fff + yyy (raw 12 bit |
-// immediate value is used) is the delta from the pc to the first instruction of |
-// the patchable code. |
class JumpPatchSite BASE_EMBEDDED { |
public: |
- explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) { |
+ explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm), reg_(NoReg) { |
#ifdef DEBUG |
info_emitted_ = false; |
#endif |
} |
~JumpPatchSite() { |
- ASSERT(patch_site_.is_bound() == info_emitted_); |
+ if (patch_site_.is_bound()) { |
+ ASSERT(info_emitted_); |
+ } else { |
+ ASSERT(reg_.IsNone()); |
+ } |
} |
- // When initially emitting this ensure that a jump is always generated to skip |
- // the inlined smi code. |
void EmitJumpIfNotSmi(Register reg, Label* target) { |
- ASSERT(!patch_site_.is_bound() && !info_emitted_); |
- Assembler::BlockConstPoolScope block_const_pool(masm_); |
+ // This code will be patched by PatchInlinedSmiCode, in ic-a64.cc. |
+ InstructionAccurateScope scope(masm_, 1); |
+ ASSERT(!info_emitted_); |
+ ASSERT(reg.Is64Bits()); |
+ ASSERT(!reg.Is(csp)); |
+ reg_ = reg; |
__ bind(&patch_site_); |
- __ cmp(reg, Operand(reg)); |
- __ b(eq, target); // Always taken before patched. |
+ __ tbz(xzr, 0, target); // Always taken before patched. |
} |
- // When initially emitting this ensure that a jump is never generated to skip |
- // the inlined smi code. |
void EmitJumpIfSmi(Register reg, Label* target) { |
- ASSERT(!patch_site_.is_bound() && !info_emitted_); |
- Assembler::BlockConstPoolScope block_const_pool(masm_); |
+ // This code will be patched by PatchInlinedSmiCode, in ic-a64.cc. |
+ InstructionAccurateScope scope(masm_, 1); |
+ ASSERT(!info_emitted_); |
+ ASSERT(reg.Is64Bits()); |
+ ASSERT(!reg.Is(csp)); |
+ reg_ = reg; |
__ bind(&patch_site_); |
- __ cmp(reg, Operand(reg)); |
- __ b(ne, target); // Never taken before patched. |
+ __ tbnz(xzr, 0, target); // Never taken before patched. |
+ } |
+ |
+ void EmitJumpIfEitherNotSmi(Register reg1, Register reg2, Label* target) { |
+ // We need to use ip0, so don't allow access to the MacroAssembler. |
+ InstructionAccurateScope scope(masm_); |
+ __ orr(ip0, reg1, reg2); |
+ EmitJumpIfNotSmi(ip0, target); |
} |
void EmitPatchInfo() { |
- // Block literal pool emission whilst recording patch site information. |
- Assembler::BlockConstPoolScope block_const_pool(masm_); |
- if (patch_site_.is_bound()) { |
- int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_); |
- Register reg; |
- reg.set_code(delta_to_patch_site / kOff12Mask); |
- __ cmp_raw_immediate(reg, delta_to_patch_site % kOff12Mask); |
+ Assembler::BlockConstPoolScope scope(masm_); |
+ InlineSmiCheckInfo::Emit(masm_, reg_, &patch_site_); |
#ifdef DEBUG |
- info_emitted_ = true; |
+ info_emitted_ = true; |
#endif |
- } else { |
- __ nop(); // Signals no inlined code. |
- } |
} |
private: |
MacroAssembler* masm_; |
Label patch_site_; |
+ Register reg_; |
#ifdef DEBUG |
bool info_emitted_; |
#endif |
}; |
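
For context on the new patch-site scheme (the removed ARM comment above described the old cmp-based encoding), the A64 placeholder is a single tbz/tbnz on bit 0 of xzr. Bit 0 of xzr is always clear, so tbz is unconditionally taken and tbnz never is, matching the "before patched" comments in the emitters. PatchInlinedSmiCode in ic-a64.cc is then expected to retarget the test at the recorded register's smi tag bit; a sketch of the idea (annotation only, the patching code itself is not part of this diff):

    // EmitJumpIfNotSmi site:
    //   before patching: tbz  xzr, #0, target   // Always taken: smi path disabled.
    //   after patching:  tbnz reg_, #0, target  // Taken iff reg_'s smi tag bit is set.
    // EmitJumpIfSmi is the mirror image: tbnz xzr -> tbz reg_.

EmitJumpIfEitherNotSmi relies on the same assumed smi encoding (tag bit 0 clear for smis): reg1 | reg2 has bit 0 set iff at least one value is a heap object, so a single patched test covers both registers. It uses ip0 because the MacroAssembler reserves that register as scratch, hence the InstructionAccurateScope guarding against the macro assembler clobbering it.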
-// Generate code for a JS function. On entry to the function the receiver |
-// and arguments have been pushed on the stack left to right. The actual |
+// Generate code for a JS function. On entry to the function the receiver |
+// and arguments have been pushed on the stack left to right. The actual |
// argument count matches the formal parameter count expected by the |
// function. |
// |
// The live registers are: |
-// o r1: the JS function object being called (i.e., ourselves) |
-// o cp: our context |
-// o pp: our caller's constant pool pointer (if FLAG_enable_ool_constant_pool) |
-// o fp: our caller's frame pointer |
-// o sp: stack pointer |
-// o lr: return address |
+// - x1: the JS function object being called (i.e. ourselves). |
+// - cp: our context. |
+// - fp: our caller's frame pointer. |
+// - jssp: stack pointer. |
+// - lr: return address. |
// |
-// The function builds a JS frame. Please see JavaScriptFrameConstants in |
+// The function builds a JS frame. See JavaScriptFrameConstants in |
// frames-arm.h for its layout. |
void FullCodeGenerator::Generate() { |
CompilationInfo* info = info_; |
@@ -136,14 +134,14 @@ void FullCodeGenerator::Generate() { |
profiling_counter_ = isolate()->factory()->NewCell( |
Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate())); |
SetFunctionPosition(function()); |
- Comment cmnt(masm_, "[ function compiled by full code generator"); |
+ Comment cmnt(masm_, "[ Function compiled by full code generator"); |
ProfileEntryHookStub::MaybeCallEntryHook(masm_); |
#ifdef DEBUG |
if (strlen(FLAG_stop_at) > 0 && |
info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) { |
- __ stop("stop-at"); |
+ __ Debug("stop-at", __LINE__, BREAK); |
} |
#endif |
@@ -152,75 +150,65 @@ void FullCodeGenerator::Generate() { |
// object). |
if (info->is_classic_mode() && !info->is_native()) { |
Label ok; |
- int receiver_offset = info->scope()->num_parameters() * kPointerSize; |
- __ ldr(r2, MemOperand(sp, receiver_offset)); |
- __ CompareRoot(r2, Heap::kUndefinedValueRootIndex); |
- __ b(ne, &ok); |
- |
- __ ldr(r2, GlobalObjectOperand()); |
- __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset)); |
+ int receiver_offset = info->scope()->num_parameters() * kXRegSizeInBytes; |
+ __ Peek(x10, receiver_offset); |
+ __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok); |
- __ str(r2, MemOperand(sp, receiver_offset)); |
+ __ Ldr(x10, GlobalObjectMemOperand()); |
+ __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset)); |
+ __ Poke(x10, receiver_offset); |
- __ bind(&ok); |
+ __ Bind(&ok); |
} |
- // Open a frame scope to indicate that there is a frame on the stack. The |
- // MANUAL indicates that the scope shouldn't actually generate code to set up |
- // the frame (that is done below). |
+ |
+ // Open a frame scope to indicate that there is a frame on the stack. |
+ // The MANUAL indicates that the scope shouldn't actually generate code |
+ // to set up the frame because we do it manually below. |
FrameScope frame_scope(masm_, StackFrame::MANUAL); |
+ // This call emits the following sequence in a way that can be patched for |
+ // code ageing support: |
+ // Push(lr, fp, cp, x1); |
+ // Add(fp, jssp, 2 * kPointerSize); |
info->set_prologue_offset(masm_->pc_offset()); |
__ Prologue(BUILD_FUNCTION_FRAME); |
info->AddNoFrameRange(0, masm_->pc_offset()); |
- __ LoadConstantPoolPointerRegister(); |
+ // Reserve space on the stack for locals. |
{ Comment cmnt(masm_, "[ Allocate locals"); |
int locals_count = info->scope()->num_stack_slots(); |
// Generators allocate locals, if any, in context slots. |
ASSERT(!info->function()->is_generator() || locals_count == 0); |
+ |
if (locals_count > 0) { |
- // Emit a loop to initialize stack cells for locals when optimizing for |
- // size. Otherwise, unroll the loop for maximum performance. |
- __ LoadRoot(r9, Heap::kUndefinedValueRootIndex); |
- if (FLAG_optimize_for_size && locals_count > 4) { |
- Label loop; |
- __ mov(r2, Operand(locals_count)); |
- __ bind(&loop); |
- __ sub(r2, r2, Operand(1), SetCC); |
- __ push(r9); |
- __ b(&loop, ne); |
- } else { |
- for (int i = 0; i < locals_count; i++) { |
- __ push(r9); |
- } |
- } |
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex); |
+ __ PushMultipleTimes(locals_count, x10); |
} |
} |
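
A sketch of the assumed semantics of the PushMultipleTimes helper used above (the real macro in macro-assembler-a64.cc may unroll or pair stores to keep jssp 16-byte aligned; this is illustrative only):

    // Assumed behaviour: push 'src' onto the stack 'count' times.
    void MacroAssembler::PushMultipleTimes(int count, Register src) {
      for (int i = 0; i < count; i++) {
        Push(src);
      }
    }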
- bool function_in_register = true; |
+ bool function_in_register_x1 = true; |
- // Possibly allocate a local context. |
int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS; |
if (heap_slots > 0) { |
- // Argument to NewContext is the function, which is still in r1. |
+ // Argument to NewContext is the function, which is still in x1. |
Comment cmnt(masm_, "[ Allocate context"); |
if (FLAG_harmony_scoping && info->scope()->is_global_scope()) { |
- __ push(r1); |
- __ Push(info->scope()->GetScopeInfo()); |
+ __ Mov(x10, Operand(info->scope()->GetScopeInfo())); |
+ __ Push(x1, x10); |
__ CallRuntime(Runtime::kNewGlobalContext, 2); |
} else if (heap_slots <= FastNewContextStub::kMaximumSlots) { |
FastNewContextStub stub(heap_slots); |
__ CallStub(&stub); |
} else { |
- __ push(r1); |
+ __ Push(x1); |
__ CallRuntime(Runtime::kNewFunctionContext, 1); |
} |
- function_in_register = false; |
- // Context is returned in r0. It replaces the context passed to us. |
+ function_in_register_x1 = false; |
+ // Context is returned in x0. It replaces the context passed to us. |
// It's saved in the stack and kept live in cp. |
- __ mov(cp, r0); |
- __ str(r0, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ Mov(cp, x0); |
+ __ Str(x0, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
// Copy any necessary parameters into the context. |
int num_parameters = info->scope()->num_parameters(); |
for (int i = 0; i < num_parameters; i++) { |
@@ -229,14 +217,14 @@ void FullCodeGenerator::Generate() { |
int parameter_offset = StandardFrameConstants::kCallerSPOffset + |
(num_parameters - 1 - i) * kPointerSize; |
// Load parameter from stack. |
- __ ldr(r0, MemOperand(fp, parameter_offset)); |
+ __ Ldr(x10, MemOperand(fp, parameter_offset)); |
// Store it in the context. |
- MemOperand target = ContextOperand(cp, var->index()); |
- __ str(r0, target); |
+ MemOperand target = ContextMemOperand(cp, var->index()); |
+ __ Str(x10, target); |
// Update the write barrier. |
__ RecordWriteContextSlot( |
- cp, target.offset(), r0, r3, kLRHasBeenSaved, kDontSaveFPRegs); |
+ cp, target.offset(), x10, x11, kLRHasBeenSaved, kDontSaveFPRegs); |
} |
} |
} |
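
RecordWriteContextSlot is assumed to emit the generational write barrier for context-slot stores like the Str above; conceptually (a sketch, not the real macro):

    // If the stored value is a new-space object reachable from an old-space
    // context, the slot must be recorded so the GC can later find the edge.
    if (InNewSpace(value) && !InNewSpace(context_object)) {
      RememberSlot(context_object + offset);  // Hypothetical helper name.
    }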
@@ -245,23 +233,22 @@ void FullCodeGenerator::Generate() { |
if (arguments != NULL) { |
// Function uses arguments object. |
Comment cmnt(masm_, "[ Allocate arguments object"); |
- if (!function_in_register) { |
+ if (!function_in_register_x1) { |
// Load this again, if it's used by the local context below. |
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
} else { |
- __ mov(r3, r1); |
+ __ Mov(x3, x1); |
} |
// Receiver is just before the parameters on the caller's stack. |
int num_parameters = info->scope()->num_parameters(); |
int offset = num_parameters * kPointerSize; |
- __ add(r2, fp, |
- Operand(StandardFrameConstants::kCallerSPOffset + offset)); |
- __ mov(r1, Operand(Smi::FromInt(num_parameters))); |
- __ Push(r3, r2, r1); |
+ __ Add(x2, fp, StandardFrameConstants::kCallerSPOffset + offset); |
+ __ Mov(x1, Operand(Smi::FromInt(num_parameters))); |
+ __ Push(x3, x2, x1); |
// Arguments to ArgumentsAccessStub: |
// function, receiver address, parameter count. |
- // The stub will rewrite receiever and parameter count if the previous |
+ // The stub will rewrite receiver and parameter count if the previous |
// stack frame was an arguments adapter frame. |
ArgumentsAccessStub::Type type; |
if (!is_classic_mode()) { |
@@ -274,13 +261,14 @@ void FullCodeGenerator::Generate() { |
ArgumentsAccessStub stub(type); |
__ CallStub(&stub); |
- SetVar(arguments, r0, r1, r2); |
+ SetVar(arguments, x0, x1, x2); |
} |
if (FLAG_trace) { |
__ CallRuntime(Runtime::kTraceEnter, 0); |
} |
+ |
// Visit the declarations and body unless there is an illegal |
// redeclaration. |
if (scope()->HasIllegalRedeclaration()) { |
@@ -290,8 +278,6 @@ void FullCodeGenerator::Generate() { |
} else { |
PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS); |
{ Comment cmnt(masm_, "[ Declarations"); |
- // For named function expressions, declare the function name as a |
- // constant. |
if (scope()->is_function_scope() && scope()->function() != NULL) { |
VariableDeclaration* function = scope()->function(); |
ASSERT(function->proxy()->var()->mode() == CONST || |
@@ -301,29 +287,30 @@ void FullCodeGenerator::Generate() { |
} |
VisitDeclarations(scope()->declarations()); |
} |
+ } |
- { Comment cmnt(masm_, "[ Stack check"); |
- PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS); |
- Label ok; |
- __ LoadRoot(ip, Heap::kStackLimitRootIndex); |
- __ cmp(sp, Operand(ip)); |
- __ b(hs, &ok); |
- PredictableCodeSizeScope predictable(masm_, 2 * Assembler::kInstrSize); |
- __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET); |
- __ bind(&ok); |
- } |
+ { Comment cmnt(masm_, "[ Stack check"); |
+ PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS); |
+ Label ok; |
+ ASSERT(jssp.Is(__ StackPointer())); |
+ __ CompareRoot(jssp, Heap::kStackLimitRootIndex); |
+ __ B(hs, &ok); |
+ PredictableCodeSizeScope predictable(masm_, |
+ Assembler::kCallSizeWithRelocation); |
+ __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET); |
+ __ Bind(&ok); |
+ } |
- { Comment cmnt(masm_, "[ Body"); |
- ASSERT(loop_depth() == 0); |
- VisitStatements(function()->body()); |
- ASSERT(loop_depth() == 0); |
- } |
+ { Comment cmnt(masm_, "[ Body"); |
+ ASSERT(loop_depth() == 0); |
+ VisitStatements(function()->body()); |
+ ASSERT(loop_depth() == 0); |
} |
// Always emit a 'return undefined' in case control fell off the end of |
// the body. |
{ Comment cmnt(masm_, "[ return <undefined>;"); |
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); |
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex); |
} |
EmitReturnSequence(); |
@@ -334,15 +321,15 @@ void FullCodeGenerator::Generate() { |
void FullCodeGenerator::ClearAccumulator() { |
- __ mov(r0, Operand(Smi::FromInt(0))); |
+ __ Mov(x0, Operand(Smi::FromInt(0))); |
} |
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) { |
- __ mov(r2, Operand(profiling_counter_)); |
- __ ldr(r3, FieldMemOperand(r2, Cell::kValueOffset)); |
- __ sub(r3, r3, Operand(Smi::FromInt(delta)), SetCC); |
- __ str(r3, FieldMemOperand(r2, Cell::kValueOffset)); |
+ __ Mov(x2, Operand(profiling_counter_)); |
+ __ Ldr(x3, FieldMemOperand(x2, Cell::kValueOffset)); |
+ __ Subs(x3, x3, Operand(Smi::FromInt(delta))); |
+ __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset)); |
} |
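
Two notes on the counter machinery, since the A64 version drops ARM's explicit SetCC: Subs always sets the condition flags, so callers can branch on the decrement result directly. The weight fed into this decrement by the back-edge code below is distance / kCodeSizeMultiplier, clamped to [1, kMaxBackEdgeWeight]; with illustrative numbers (not taken from the A64 port), a back edge 1200 bytes away and kCodeSizeMultiplier == 150 give weight == Min(kMaxBackEdgeWeight, Max(1, 1200 / 150)) == 8, so each iteration subtracts Smi 8 and InterruptCheck runs only once the counter goes negative (the B(pl, ...) skips the call while the result is non-negative).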
@@ -352,14 +339,15 @@ void FullCodeGenerator::EmitProfilingCounterReset() { |
// Detect debug break requests as soon as possible. |
reset_value = FLAG_interrupt_budget >> 4; |
} |
- __ mov(r2, Operand(profiling_counter_)); |
- __ mov(r3, Operand(Smi::FromInt(reset_value))); |
- __ str(r3, FieldMemOperand(r2, Cell::kValueOffset)); |
+ __ Mov(x2, Operand(profiling_counter_)); |
+ __ Mov(x3, Operand(Smi::FromInt(reset_value))); |
+ __ Str(x3, FieldMemOperand(x2, Cell::kValueOffset)); |
} |
void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, |
Label* back_edge_target) { |
+ ASSERT(jssp.Is(__ StackPointer())); |
Comment cmnt(masm_, "[ Back edge bookkeeping"); |
// Block literal pools whilst emitting back edge code. |
Assembler::BlockConstPoolScope block_const_pool(masm_); |
@@ -370,7 +358,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, |
int weight = Min(kMaxBackEdgeWeight, |
Max(1, distance / kCodeSizeMultiplier)); |
EmitProfilingCounterDecrement(weight); |
- __ b(pl, &ok); |
+ __ B(pl, &ok); |
__ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET); |
// Record a mapping of this PC offset to the OSR id. This is used to find |
@@ -380,7 +368,7 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, |
EmitProfilingCounterReset(); |
- __ bind(&ok); |
+ __ Bind(&ok); |
PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS); |
// Record a mapping of the OSR id to this PC. This is used if the OSR |
// entry becomes the target of a bailout. We don't expect it to be, but |
@@ -391,15 +379,18 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt, |
void FullCodeGenerator::EmitReturnSequence() { |
Comment cmnt(masm_, "[ Return sequence"); |
+ |
if (return_label_.is_bound()) { |
- __ b(&return_label_); |
+ __ B(&return_label_); |
+ |
} else { |
- __ bind(&return_label_); |
+ __ Bind(&return_label_); |
if (FLAG_trace) { |
// Push the return value on the stack as the parameter. |
- // Runtime::TraceExit returns its parameter in r0. |
- __ push(r0); |
+ // Runtime::TraceExit returns its parameter in x0. |
+ __ Push(result_register()); |
__ CallRuntime(Runtime::kTraceExit, 1); |
+ ASSERT(x0.Is(result_register())); |
} |
// Pretend that the exit is a backwards jump to the entry. |
int weight = 1; |
@@ -412,39 +403,41 @@ void FullCodeGenerator::EmitReturnSequence() { |
} |
EmitProfilingCounterDecrement(weight); |
Label ok; |
- __ b(pl, &ok); |
- __ push(r0); |
+ __ B(pl, &ok); |
+ __ Push(x0); |
__ Call(isolate()->builtins()->InterruptCheck(), |
RelocInfo::CODE_TARGET); |
- __ pop(r0); |
+ __ Pop(x0); |
EmitProfilingCounterReset(); |
- __ bind(&ok); |
+ __ Bind(&ok); |
-#ifdef DEBUG |
- // Add a label for checking the size of the code used for returning. |
- Label check_exit_codesize; |
- __ bind(&check_exit_codesize); |
-#endif |
// Make sure that the constant pool is not emitted inside of the return |
- // sequence. |
- { Assembler::BlockConstPoolScope block_const_pool(masm_); |
- int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize; |
+ // sequence. This sequence can get patched when the debugger is used. See |
+ // debug-a64.cc:BreakLocationIterator::SetDebugBreakAtReturn(). |
+ { |
+ InstructionAccurateScope scope(masm_, |
+ Assembler::kJSRetSequenceInstructions); |
CodeGenerator::RecordPositions(masm_, function()->end_position() - 1); |
- // TODO(svenpanne) The code below is sometimes 4 words, sometimes 5! |
- PredictableCodeSizeScope predictable(masm_, -1); |
__ RecordJSReturn(); |
- int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT); |
- __ add(sp, sp, Operand(sp_delta)); |
- __ Jump(lr); |
+ // This code is generated using Assembler methods rather than Macro |
+ // Assembler methods because it will be patched later on, and so the size |
+ // of the generated code must be consistent. |
+ const Register& current_sp = __ StackPointer(); |
+      // Nothing ensures 16-byte alignment here.
+ ASSERT(!current_sp.Is(csp)); |
+ __ mov(current_sp, fp); |
+ int no_frame_start = masm_->pc_offset(); |
+ __ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSizeInBytes, PostIndex)); |
+ // Drop the arguments and receiver and return. |
+ // TODO(all): This implementation is overkill as it supports 2**31+1 |
+ // arguments, consider how to improve it without creating a security |
+ // hole. |
+ __ LoadLiteral(ip0, 3 * kInstructionSize); |
+ __ add(current_sp, current_sp, ip0); |
+ __ ret(); |
+ __ dc64(kXRegSizeInBytes * (info_->scope()->num_parameters() + 1)); |
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); |
} |
- |
-#ifdef DEBUG |
- // Check that the size of the code used for returning is large enough |
- // for the debugger's requirements. |
- ASSERT(Assembler::kJSReturnSequenceInstructions <= |
- masm_->InstructionsGeneratedSince(&check_exit_codesize)); |
-#endif |
} |
} |
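
An annotated sketch of the fixed-size return sequence emitted above (mnemonics approximate the macro-assembler output; the literal offset follows from LoadLiteral(ip0, 3 * kInstructionSize) with 4-byte instructions):

    mov  jssp, fp                     // Reset the stack pointer to the frame pointer.
    ldp  fp, lr, [jssp], #16          // Restore caller fp/lr and pop the frame.
    ldr  ip0, drop_size               // LoadLiteral: pc-relative load of the word below.
    add  jssp, jssp, ip0              // Drop the receiver and the arguments.
    ret                               // Return to the caller.
    drop_size: .quad (num_parameters + 1) * 8   // The dc64 word; 8 == kXRegSizeInBytes.

Keeping every instruction at a fixed size is what lets the debugger overwrite this sequence in place, which is why raw assembler methods are used here instead of macros.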
@@ -463,7 +456,7 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const { |
void FullCodeGenerator::StackValueContext::Plug(Variable* var) const { |
ASSERT(var->IsStackAllocated() || var->IsContextSlot()); |
codegen()->GetVar(result_register(), var); |
- __ push(result_register()); |
+ __ Push(result_register()); |
} |
@@ -477,6 +470,7 @@ void FullCodeGenerator::TestContext::Plug(Variable* var) const { |
void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const { |
+ // Root values have no side effects. |
} |
@@ -489,21 +483,19 @@ void FullCodeGenerator::AccumulatorValueContext::Plug( |
void FullCodeGenerator::StackValueContext::Plug( |
Heap::RootListIndex index) const { |
__ LoadRoot(result_register(), index); |
- __ push(result_register()); |
+ __ Push(result_register()); |
} |
void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const { |
- codegen()->PrepareForBailoutBeforeSplit(condition(), |
- true, |
- true_label_, |
+ codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_, |
false_label_); |
if (index == Heap::kUndefinedValueRootIndex || |
index == Heap::kNullValueRootIndex || |
index == Heap::kFalseValueRootIndex) { |
- if (false_label_ != fall_through_) __ b(false_label_); |
+ if (false_label_ != fall_through_) __ B(false_label_); |
} else if (index == Heap::kTrueValueRootIndex) { |
- if (true_label_ != fall_through_) __ b(true_label_); |
+ if (true_label_ != fall_through_) __ B(true_label_); |
} else { |
__ LoadRoot(result_register(), index); |
codegen()->DoTest(this); |
@@ -517,14 +509,14 @@ void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const { |
void FullCodeGenerator::AccumulatorValueContext::Plug( |
Handle<Object> lit) const { |
- __ mov(result_register(), Operand(lit)); |
+ __ Mov(result_register(), Operand(lit)); |
} |
void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const { |
// Immediates cannot be pushed directly. |
- __ mov(result_register(), Operand(lit)); |
- __ push(result_register()); |
+ __ Mov(result_register(), Operand(lit)); |
+ __ Push(result_register()); |
} |
@@ -535,24 +527,24 @@ void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const { |
false_label_); |
ASSERT(!lit->IsUndetectableObject()); // There are no undetectable literals. |
if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) { |
- if (false_label_ != fall_through_) __ b(false_label_); |
+ if (false_label_ != fall_through_) __ B(false_label_); |
} else if (lit->IsTrue() || lit->IsJSObject()) { |
- if (true_label_ != fall_through_) __ b(true_label_); |
+ if (true_label_ != fall_through_) __ B(true_label_); |
} else if (lit->IsString()) { |
if (String::cast(*lit)->length() == 0) { |
- if (false_label_ != fall_through_) __ b(false_label_); |
+ if (false_label_ != fall_through_) __ B(false_label_); |
} else { |
- if (true_label_ != fall_through_) __ b(true_label_); |
+ if (true_label_ != fall_through_) __ B(true_label_); |
} |
} else if (lit->IsSmi()) { |
if (Smi::cast(*lit)->value() == 0) { |
- if (false_label_ != fall_through_) __ b(false_label_); |
+ if (false_label_ != fall_through_) __ B(false_label_); |
} else { |
- if (true_label_ != fall_through_) __ b(true_label_); |
+ if (true_label_ != fall_through_) __ B(true_label_); |
} |
} else { |
// For simplicity we always test the accumulator register. |
- __ mov(result_register(), Operand(lit)); |
+ __ Mov(result_register(), Operand(lit)); |
codegen()->DoTest(this); |
} |
} |
@@ -578,7 +570,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count, |
Register reg) const { |
ASSERT(count > 0); |
if (count > 1) __ Drop(count - 1); |
- __ str(reg, MemOperand(sp, 0)); |
+ __ Poke(reg, 0); |
} |
@@ -587,7 +579,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count, |
ASSERT(count > 0); |
// For simplicity we always test the accumulator register. |
__ Drop(count); |
- __ Move(result_register(), reg); |
+ __ Mov(result_register(), reg); |
codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL); |
codegen()->DoTest(this); |
} |
@@ -596,7 +588,7 @@ void FullCodeGenerator::TestContext::DropAndPlug(int count, |
void FullCodeGenerator::EffectContext::Plug(Label* materialize_true, |
Label* materialize_false) const { |
ASSERT(materialize_true == materialize_false); |
- __ bind(materialize_true); |
+ __ Bind(materialize_true); |
} |
@@ -604,12 +596,12 @@ void FullCodeGenerator::AccumulatorValueContext::Plug( |
Label* materialize_true, |
Label* materialize_false) const { |
Label done; |
- __ bind(materialize_true); |
+ __ Bind(materialize_true); |
__ LoadRoot(result_register(), Heap::kTrueValueRootIndex); |
- __ jmp(&done); |
- __ bind(materialize_false); |
+ __ B(&done); |
+ __ Bind(materialize_false); |
__ LoadRoot(result_register(), Heap::kFalseValueRootIndex); |
- __ bind(&done); |
+ __ Bind(&done); |
} |
@@ -617,13 +609,13 @@ void FullCodeGenerator::StackValueContext::Plug( |
Label* materialize_true, |
Label* materialize_false) const { |
Label done; |
- __ bind(materialize_true); |
- __ LoadRoot(ip, Heap::kTrueValueRootIndex); |
- __ jmp(&done); |
- __ bind(materialize_false); |
- __ LoadRoot(ip, Heap::kFalseValueRootIndex); |
- __ bind(&done); |
- __ push(ip); |
+ __ Bind(materialize_true); |
+ __ LoadRoot(x10, Heap::kTrueValueRootIndex); |
+ __ B(&done); |
+ __ Bind(materialize_false); |
+ __ LoadRoot(x10, Heap::kFalseValueRootIndex); |
+ __ Bind(&done); |
+ __ Push(x10); |
} |
@@ -648,8 +640,8 @@ void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const { |
void FullCodeGenerator::StackValueContext::Plug(bool flag) const { |
Heap::RootListIndex value_root_index = |
flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex; |
- __ LoadRoot(ip, value_root_index); |
- __ push(ip); |
+ __ LoadRoot(x10, value_root_index); |
+ __ Push(x10); |
} |
@@ -659,9 +651,13 @@ void FullCodeGenerator::TestContext::Plug(bool flag) const { |
true_label_, |
false_label_); |
if (flag) { |
- if (true_label_ != fall_through_) __ b(true_label_); |
+ if (true_label_ != fall_through_) { |
+ __ B(true_label_); |
+ } |
} else { |
- if (false_label_ != fall_through_) __ b(false_label_); |
+ if (false_label_ != fall_through_) { |
+ __ B(false_label_); |
+ } |
} |
} |
@@ -672,30 +668,33 @@ void FullCodeGenerator::DoTest(Expression* condition, |
Label* fall_through) { |
Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate()); |
CallIC(ic, condition->test_id()); |
- __ tst(result_register(), result_register()); |
- Split(ne, if_true, if_false, fall_through); |
+ __ CompareAndSplit(result_register(), 0, ne, if_true, if_false, fall_through); |
} |
+// If (cond), branch to if_true. |
+// If (!cond), branch to if_false. |
+// fall_through is used as an optimization in cases where only one branch |
+// instruction is necessary. |
void FullCodeGenerator::Split(Condition cond, |
Label* if_true, |
Label* if_false, |
Label* fall_through) { |
if (if_false == fall_through) { |
- __ b(cond, if_true); |
+ __ B(cond, if_true); |
} else if (if_true == fall_through) { |
- __ b(NegateCondition(cond), if_false); |
+ ASSERT(if_false != fall_through); |
+ __ B(InvertCondition(cond), if_false); |
} else { |
- __ b(cond, if_true); |
- __ b(if_false); |
+ __ B(cond, if_true); |
+ __ B(if_false); |
} |
} |
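
A small usage sketch (labels hypothetical) showing the effect of the fall_through parameter:

    Label if_true, if_false;
    __ Cmp(x0, x1);
    // if_false doubles as the fall-through, so Split emits just "b.eq if_true";
    // execution falls straight into the false case when the condition fails.
    Split(eq, &if_true, &if_false, &if_false);
    __ Bind(&if_false);
    // False case: reached with no extra branch instruction.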
MemOperand FullCodeGenerator::StackOperand(Variable* var) { |
- ASSERT(var->IsStackAllocated()); |
// Offset is negative because higher indexes are at lower addresses. |
- int offset = -var->index() * kPointerSize; |
+ int offset = -var->index() * kXRegSizeInBytes; |
// Adjust by a (parameter or local) base offset. |
if (var->IsParameter()) { |
offset += (info_->scope()->num_parameters() + 1) * kPointerSize; |
@@ -711,7 +710,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) { |
if (var->IsContextSlot()) { |
int context_chain_length = scope()->ContextChainLength(var->scope()); |
__ LoadContext(scratch, context_chain_length); |
- return ContextOperand(scratch, var->index()); |
+ return ContextMemOperand(scratch, var->index()); |
} else { |
return StackOperand(var); |
} |
@@ -721,7 +720,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) { |
void FullCodeGenerator::GetVar(Register dest, Variable* var) { |
// Use destination as scratch. |
MemOperand location = VarOperand(var, dest); |
- __ ldr(dest, location); |
+ __ Ldr(dest, location); |
} |
@@ -730,14 +729,13 @@ void FullCodeGenerator::SetVar(Variable* var, |
Register scratch0, |
Register scratch1) { |
ASSERT(var->IsContextSlot() || var->IsStackAllocated()); |
- ASSERT(!scratch0.is(src)); |
- ASSERT(!scratch0.is(scratch1)); |
- ASSERT(!scratch1.is(src)); |
+ ASSERT(!AreAliased(src, scratch0, scratch1)); |
MemOperand location = VarOperand(var, scratch0); |
- __ str(src, location); |
+ __ Str(src, location); |
// Emit the write barrier code if the location is in the heap. |
if (var->IsContextSlot()) { |
+ // scratch0 contains the correct context. |
__ RecordWriteContextSlot(scratch0, |
location.offset(), |
src, |
@@ -757,14 +755,16 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr, |
// preparation to avoid preparing with the same AST id twice. |
if (!context()->IsTest() || !info_->IsOptimizable()) return; |
+ // TODO(all): Investigate to see if there is something to work on here. |
Label skip; |
- if (should_normalize) __ b(&skip); |
+ if (should_normalize) { |
+ __ B(&skip); |
+ } |
PrepareForBailout(expr, TOS_REG); |
if (should_normalize) { |
- __ LoadRoot(ip, Heap::kTrueValueRootIndex); |
- __ cmp(r0, ip); |
+ __ CompareRoot(x0, Heap::kTrueValueRootIndex); |
Split(eq, if_true, if_false, NULL); |
- __ bind(&skip); |
+ __ Bind(&skip); |
} |
} |
@@ -775,10 +775,10 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) { |
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); |
if (generate_debug_code_) { |
// Check that we're not inside a with or catch context. |
- __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset)); |
- __ CompareRoot(r1, Heap::kWithContextMapRootIndex); |
+ __ Ldr(x1, FieldMemOperand(cp, HeapObject::kMapOffset)); |
+ __ CompareRoot(x1, Heap::kWithContextMapRootIndex); |
__ Check(ne, kDeclarationInWithContext); |
- __ CompareRoot(r1, Heap::kCatchContextMapRootIndex); |
+ __ CompareRoot(x1, Heap::kCatchContextMapRootIndex); |
__ Check(ne, kDeclarationInCatchContext); |
} |
} |
@@ -792,7 +792,8 @@ void FullCodeGenerator::VisitVariableDeclaration( |
VariableProxy* proxy = declaration->proxy(); |
VariableMode mode = declaration->mode(); |
Variable* variable = proxy->var(); |
- bool hole_init = mode == CONST || mode == CONST_HARMONY || mode == LET; |
+ bool hole_init = (mode == CONST) || (mode == CONST_HARMONY) || (mode == LET); |
+ |
switch (variable->location()) { |
case Variable::UNALLOCATED: |
globals_->Add(variable->name(), zone()); |
@@ -806,8 +807,8 @@ void FullCodeGenerator::VisitVariableDeclaration( |
case Variable::LOCAL: |
if (hole_init) { |
Comment cmnt(masm_, "[ VariableDeclaration"); |
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
- __ str(ip, StackOperand(variable)); |
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex); |
+ __ Str(x10, StackOperand(variable)); |
} |
break; |
@@ -815,8 +816,8 @@ void FullCodeGenerator::VisitVariableDeclaration( |
if (hole_init) { |
Comment cmnt(masm_, "[ VariableDeclaration"); |
EmitDebugCheckDeclarationContext(variable); |
- __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
- __ str(ip, ContextOperand(cp, variable->index())); |
+ __ LoadRoot(x10, Heap::kTheHoleValueRootIndex); |
+ __ Str(x10, ContextMemOperand(cp, variable->index())); |
// No write barrier since the_hole_value is in old space. |
PrepareForBailoutForId(proxy->id(), NO_REGISTERS); |
} |
@@ -824,22 +825,22 @@ void FullCodeGenerator::VisitVariableDeclaration( |
case Variable::LOOKUP: { |
Comment cmnt(masm_, "[ VariableDeclaration"); |
- __ mov(r2, Operand(variable->name())); |
+ __ Mov(x2, Operand(variable->name())); |
// Declaration nodes are always introduced in one of four modes. |
ASSERT(IsDeclaredVariableMode(mode)); |
- PropertyAttributes attr = |
- IsImmutableVariableMode(mode) ? READ_ONLY : NONE; |
- __ mov(r1, Operand(Smi::FromInt(attr))); |
+ PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY |
+ : NONE; |
+ __ Mov(x1, Operand(Smi::FromInt(attr))); |
// Push initial value, if any. |
// Note: For variables we must not push an initial value (such as |
// 'undefined') because we may have a (legal) redeclaration and we |
// must not destroy the current value. |
if (hole_init) { |
- __ LoadRoot(r0, Heap::kTheHoleValueRootIndex); |
- __ Push(cp, r2, r1, r0); |
+ __ LoadRoot(x0, Heap::kTheHoleValueRootIndex); |
+ __ Push(cp, x2, x1, x0); |
} else { |
- __ mov(r0, Operand(Smi::FromInt(0))); // Indicates no initial value. |
- __ Push(cp, r2, r1, r0); |
+ // Pushing 0 (xzr) indicates no initial value. |
+ __ Push(cp, x2, x1, xzr); |
} |
__ CallRuntime(Runtime::kDeclareContextSlot, 4); |
break; |
@@ -857,7 +858,7 @@ void FullCodeGenerator::VisitFunctionDeclaration( |
globals_->Add(variable->name(), zone()); |
Handle<SharedFunctionInfo> function = |
Compiler::BuildFunctionInfo(declaration->fun(), script()); |
- // Check for stack-overflow exception. |
+ // Check for stack overflow exception. |
if (function.is_null()) return SetStackOverflow(); |
globals_->Add(function, zone()); |
break; |
@@ -865,23 +866,23 @@ void FullCodeGenerator::VisitFunctionDeclaration( |
case Variable::PARAMETER: |
case Variable::LOCAL: { |
- Comment cmnt(masm_, "[ FunctionDeclaration"); |
+ Comment cmnt(masm_, "[ Function Declaration"); |
VisitForAccumulatorValue(declaration->fun()); |
- __ str(result_register(), StackOperand(variable)); |
+ __ Str(result_register(), StackOperand(variable)); |
break; |
} |
case Variable::CONTEXT: { |
- Comment cmnt(masm_, "[ FunctionDeclaration"); |
+ Comment cmnt(masm_, "[ Function Declaration"); |
EmitDebugCheckDeclarationContext(variable); |
VisitForAccumulatorValue(declaration->fun()); |
- __ str(result_register(), ContextOperand(cp, variable->index())); |
+ __ Str(result_register(), ContextMemOperand(cp, variable->index())); |
int offset = Context::SlotOffset(variable->index()); |
// We know that we have written a function, which is not a smi. |
__ RecordWriteContextSlot(cp, |
offset, |
result_register(), |
- r2, |
+ x2, |
kLRHasBeenSaved, |
kDontSaveFPRegs, |
EMIT_REMEMBERED_SET, |
@@ -891,10 +892,10 @@ void FullCodeGenerator::VisitFunctionDeclaration( |
} |
case Variable::LOOKUP: { |
- Comment cmnt(masm_, "[ FunctionDeclaration"); |
- __ mov(r2, Operand(variable->name())); |
- __ mov(r1, Operand(Smi::FromInt(NONE))); |
- __ Push(cp, r2, r1); |
+ Comment cmnt(masm_, "[ Function Declaration"); |
+ __ Mov(x2, Operand(variable->name())); |
+ __ Mov(x1, Operand(Smi::FromInt(NONE))); |
+ __ Push(cp, x2, x1); |
// Push initial value for function declaration. |
VisitForStackValue(declaration->fun()); |
__ CallRuntime(Runtime::kDeclareContextSlot, 4); |
@@ -913,24 +914,24 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { |
EmitDebugCheckDeclarationContext(variable); |
// Load instance object. |
- __ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope())); |
- __ ldr(r1, ContextOperand(r1, variable->interface()->Index())); |
- __ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX)); |
+ __ LoadContext(x1, scope_->ContextChainLength(scope_->GlobalScope())); |
+ __ Ldr(x1, ContextMemOperand(x1, variable->interface()->Index())); |
+ __ Ldr(x1, ContextMemOperand(x1, Context::EXTENSION_INDEX)); |
// Assign it. |
- __ str(r1, ContextOperand(cp, variable->index())); |
+ __ Str(x1, ContextMemOperand(cp, variable->index())); |
// We know that we have written a module, which is not a smi. |
__ RecordWriteContextSlot(cp, |
Context::SlotOffset(variable->index()), |
- r1, |
- r3, |
+ x1, |
+ x3, |
kLRHasBeenSaved, |
kDontSaveFPRegs, |
EMIT_REMEMBERED_SET, |
OMIT_SMI_CHECK); |
PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS); |
- // Traverse into body. |
+    // Traverse into body.
Visit(declaration->module()); |
} |
@@ -965,10 +966,13 @@ void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) { |
void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) { |
// Call the runtime to declare the globals. |
- // The context is the first argument. |
- __ mov(r1, Operand(pairs)); |
- __ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags()))); |
- __ Push(cp, r1, r0); |
+ __ Mov(x11, Operand(pairs)); |
+ Register flags = xzr; |
+ if (Smi::FromInt(DeclareGlobalsFlags())) { |
+ flags = x10; |
+ __ Mov(flags, Operand(Smi::FromInt(DeclareGlobalsFlags()))); |
+ } |
+ __ Push(cp, x11, flags); |
__ CallRuntime(Runtime::kDeclareGlobals, 3); |
// Return value is ignored. |
} |
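
The xzr trick above leans on the assumption that Smi::FromInt(0) has an all-zero bit pattern (kSmiTag == 0 with a zero payload), so pushing xzr pushes Smi 0 without materializing it in a scratch register; only when DeclareGlobalsFlags() is non-zero is the flags smi built in x10.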
@@ -983,6 +987,7 @@ void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) { |
void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { |
+ ASM_LOCATION("FullCodeGenerator::VisitSwitchStatement"); |
Comment cmnt(masm_, "[ SwitchStatement"); |
Breakable nested_statement(this, stmt); |
SetStatementPosition(stmt); |
@@ -1007,26 +1012,24 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { |
} |
Comment cmnt(masm_, "[ Case comparison"); |
- __ bind(&next_test); |
+ __ Bind(&next_test); |
next_test.Unuse(); |
// Compile the label expression. |
VisitForAccumulatorValue(clause->label()); |
// Perform the comparison as if via '==='. |
- __ ldr(r1, MemOperand(sp, 0)); // Switch value. |
- bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT); |
+ __ Peek(x1, 0); // Switch value. |
+ |
JumpPatchSite patch_site(masm_); |
- if (inline_smi_code) { |
+ if (ShouldInlineSmiCase(Token::EQ_STRICT)) { |
Label slow_case; |
- __ orr(r2, r1, r0); |
- patch_site.EmitJumpIfNotSmi(r2, &slow_case); |
- |
- __ cmp(r1, r0); |
- __ b(ne, &next_test); |
+ patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case); |
+ __ Cmp(x1, x0); |
+ __ B(ne, &next_test); |
__ Drop(1); // Switch value is no longer needed. |
- __ b(clause->body_target()); |
- __ bind(&slow_case); |
+ __ B(clause->body_target()); |
+ __ Bind(&slow_case); |
} |
// Record position before stub call for type feedback. |
@@ -1036,48 +1039,47 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { |
patch_site.EmitPatchInfo(); |
Label skip; |
- __ b(&skip); |
+ __ B(&skip); |
PrepareForBailout(clause, TOS_REG); |
- __ LoadRoot(ip, Heap::kTrueValueRootIndex); |
- __ cmp(r0, ip); |
- __ b(ne, &next_test); |
+ __ JumpIfNotRoot(x0, Heap::kTrueValueRootIndex, &next_test); |
__ Drop(1); |
- __ jmp(clause->body_target()); |
- __ bind(&skip); |
+ __ B(clause->body_target()); |
+ __ Bind(&skip); |
- __ cmp(r0, Operand::Zero()); |
- __ b(ne, &next_test); |
+ __ Cbnz(x0, &next_test); |
__ Drop(1); // Switch value is no longer needed. |
- __ b(clause->body_target()); |
+ __ B(clause->body_target()); |
} |
// Discard the test value and jump to the default if present, otherwise to |
// the end of the statement. |
- __ bind(&next_test); |
+ __ Bind(&next_test); |
__ Drop(1); // Switch value is no longer needed. |
if (default_clause == NULL) { |
- __ b(nested_statement.break_label()); |
+ __ B(nested_statement.break_label()); |
} else { |
- __ b(default_clause->body_target()); |
+ __ B(default_clause->body_target()); |
} |
// Compile all the case bodies. |
for (int i = 0; i < clauses->length(); i++) { |
Comment cmnt(masm_, "[ Case body"); |
CaseClause* clause = clauses->at(i); |
- __ bind(clause->body_target()); |
+ __ Bind(clause->body_target()); |
PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS); |
VisitStatements(clause->statements()); |
} |
- __ bind(nested_statement.break_label()); |
+ __ Bind(nested_statement.break_label()); |
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); |
} |
void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { |
+ ASM_LOCATION("FullCodeGenerator::VisitForInStatement"); |
Comment cmnt(masm_, "[ ForInStatement"); |
int slot = stmt->ForInFeedbackSlot(); |
+ // TODO(all): This visitor probably needs better comments and a revisit. |
SetStatementPosition(stmt); |
Label loop, exit; |
@@ -1087,149 +1089,140 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { |
// Get the object to enumerate over. If the object is null or undefined, skip |
// over the loop. See ECMA-262 version 5, section 12.6.4. |
VisitForAccumulatorValue(stmt->enumerable()); |
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
- __ cmp(r0, ip); |
- __ b(eq, &exit); |
- Register null_value = r5; |
+ __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit); |
+ Register null_value = x15; |
__ LoadRoot(null_value, Heap::kNullValueRootIndex); |
- __ cmp(r0, null_value); |
- __ b(eq, &exit); |
+ __ Cmp(x0, null_value); |
+ __ B(eq, &exit); |
PrepareForBailoutForId(stmt->PrepareId(), TOS_REG); |
// Convert the object to a JS object. |
Label convert, done_convert; |
- __ JumpIfSmi(r0, &convert); |
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); |
- __ b(ge, &done_convert); |
- __ bind(&convert); |
- __ push(r0); |
+ __ JumpIfSmi(x0, &convert); |
+ __ JumpIfObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE, &done_convert, ge); |
+ __ Bind(&convert); |
+ __ Push(x0); |
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); |
- __ bind(&done_convert); |
- __ push(r0); |
+ __ Bind(&done_convert); |
+ __ Push(x0); |
// Check for proxies. |
Label call_runtime; |
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
- __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE); |
- __ b(le, &call_runtime); |
+ __ JumpIfObjectType(x0, x10, x11, LAST_JS_PROXY_TYPE, &call_runtime, le); |
// Check cache validity in generated code. This is a fast case for |
// the JSObject::IsSimpleEnum cache validity checks. If we cannot |
// guarantee cache validity, call the runtime system to check cache |
// validity or get the property names in a fixed array. |
- __ CheckEnumCache(null_value, &call_runtime); |
+ __ CheckEnumCache(x0, null_value, x10, x11, x12, x13, &call_runtime); |
// The enum cache is valid. Load the map of the object being |
// iterated over and use the cache for the iteration. |
Label use_cache; |
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); |
- __ b(&use_cache); |
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset)); |
+ __ B(&use_cache); |
// Get the set of properties to enumerate. |
- __ bind(&call_runtime); |
- __ push(r0); // Duplicate the enumerable object on the stack. |
+ __ Bind(&call_runtime); |
+ __ Push(x0); // Duplicate the enumerable object on the stack. |
__ CallRuntime(Runtime::kGetPropertyNamesFast, 1); |
// If we got a map from the runtime call, we can do a fast |
// modification check. Otherwise, we got a fixed array, and we have |
// to do a slow check. |
- Label fixed_array; |
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); |
- __ LoadRoot(ip, Heap::kMetaMapRootIndex); |
- __ cmp(r2, ip); |
- __ b(ne, &fixed_array); |
+ Label fixed_array, no_descriptors; |
+ __ Ldr(x2, FieldMemOperand(x0, HeapObject::kMapOffset)); |
+ __ JumpIfNotRoot(x2, Heap::kMetaMapRootIndex, &fixed_array); |
- // We got a map in register r0. Get the enumeration cache from it. |
- Label no_descriptors; |
- __ bind(&use_cache); |
+ // We got a map in register x0. Get the enumeration cache from it. |
+ __ Bind(&use_cache); |
- __ EnumLength(r1, r0); |
- __ cmp(r1, Operand(Smi::FromInt(0))); |
- __ b(eq, &no_descriptors); |
+ __ EnumLengthUntagged(x1, x0); |
+ __ Cbz(x1, &no_descriptors); |
- __ LoadInstanceDescriptors(r0, r2); |
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheOffset)); |
- __ ldr(r2, FieldMemOperand(r2, DescriptorArray::kEnumCacheBridgeCacheOffset)); |
+ __ LoadInstanceDescriptors(x0, x2); |
+ __ Ldr(x2, FieldMemOperand(x2, DescriptorArray::kEnumCacheOffset)); |
+ __ Ldr(x2, |
+ FieldMemOperand(x2, DescriptorArray::kEnumCacheBridgeCacheOffset)); |
// Set up the four remaining stack slots. |
- __ push(r0); // Map. |
- __ mov(r0, Operand(Smi::FromInt(0))); |
+ __ Push(x0); // Map. |
+ __ Mov(x0, Operand(Smi::FromInt(0))); |
// Push enumeration cache, enumeration cache length (as smi) and zero. |
- __ Push(r2, r1, r0); |
- __ jmp(&loop); |
+ __ SmiTag(x1); |
+ __ Push(x2, x1, x0); |
+ __ B(&loop); |
- __ bind(&no_descriptors); |
+ __ Bind(&no_descriptors); |
__ Drop(1); |
- __ jmp(&exit); |
+ __ B(&exit); |
- // We got a fixed array in register r0. Iterate through that. |
- Label non_proxy; |
- __ bind(&fixed_array); |
+ // We got a fixed array in register x0. Iterate through that. |
+ __ Bind(&fixed_array); |
Handle<Object> feedback = Handle<Object>( |
Smi::FromInt(TypeFeedbackInfo::kForInFastCaseMarker), |
isolate()); |
StoreFeedbackVectorSlot(slot, feedback); |
- __ Move(r1, FeedbackVector()); |
- __ mov(r2, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker))); |
- __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot))); |
+ __ LoadObject(x1, FeedbackVector()); |
+ __ Mov(x10, Operand(Smi::FromInt(TypeFeedbackInfo::kForInSlowCaseMarker))); |
+ __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot))); |
- __ mov(r1, Operand(Smi::FromInt(1))); // Smi indicates slow check |
- __ ldr(r2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object |
+ __ Mov(x1, Operand(Smi::FromInt(1))); // Smi indicates slow check. |
+ __ Peek(x10, 0); // Get enumerated object. |
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
- __ CompareObjectType(r2, r3, r3, LAST_JS_PROXY_TYPE); |
- __ b(gt, &non_proxy); |
- __ mov(r1, Operand(Smi::FromInt(0))); // Zero indicates proxy |
- __ bind(&non_proxy); |
- __ Push(r1, r0); // Smi and array |
- __ ldr(r1, FieldMemOperand(r0, FixedArray::kLengthOffset)); |
- __ mov(r0, Operand(Smi::FromInt(0))); |
- __ Push(r1, r0); // Fixed array length (as smi) and initial index. |
+ // TODO(all): similar check was done already. Can we avoid it here? |
+ __ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE); |
+ ASSERT(Smi::FromInt(0) == 0); |
+ __ CzeroX(x1, le); // Zero indicates proxy. |
+ __ Push(x1, x0); // Smi and array |
+ __ Ldr(x1, FieldMemOperand(x0, FixedArray::kLengthOffset)); |
+ __ Push(x1, xzr); // Fixed array length (as smi) and initial index. |
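
At this point the for-in stack layout, inferred from the pushes above, is as follows (byte offsets from jssp, matching the Peek offsets used in the loop below):

    jssp[0]  : current index (smi, initially 0)
    jssp[8]  : key array length (smi)
    jssp[16] : FixedArray of keys (or the enum cache bridge cache)
    jssp[24] : map of the enumerable (fast case), or Smi 1/0 slow-case/proxy marker
    jssp[32] : the enumerable object itself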
// Generate code for doing the condition check. |
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS); |
- __ bind(&loop); |
- // Load the current count to r0, load the length to r1. |
- __ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize)); |
- __ cmp(r0, r1); // Compare to the array length. |
- __ b(hs, loop_statement.break_label()); |
+ __ Bind(&loop); |
+ // Load the current count to x0, load the length to x1. |
+ __ PeekPair(x0, x1, 0); |
+ __ Cmp(x0, x1); // Compare to the array length. |
+ __ B(hs, loop_statement.break_label()); |
// Get the current entry of the array into register r3. |
- __ ldr(r2, MemOperand(sp, 2 * kPointerSize)); |
- __ add(r2, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
- __ ldr(r3, MemOperand::PointerAddressFromSmiKey(r2, r0)); |
+ __ Peek(x10, 2 * kXRegSizeInBytes); |
+ __ Add(x10, x10, Operand::UntagSmiAndScale(x0, kPointerSizeLog2)); |
+ __ Ldr(x3, MemOperand(x10, FixedArray::kHeaderSize - kHeapObjectTag)); |
// Get the expected map from the stack or a smi in the |
- // permanent slow case into register r2. |
- __ ldr(r2, MemOperand(sp, 3 * kPointerSize)); |
+  // permanent slow case into register x2.
+ __ Peek(x2, 3 * kXRegSizeInBytes); |
// Check if the expected map still matches that of the enumerable. |
// If not, we may have to filter the key. |
Label update_each; |
- __ ldr(r1, MemOperand(sp, 4 * kPointerSize)); |
- __ ldr(r4, FieldMemOperand(r1, HeapObject::kMapOffset)); |
- __ cmp(r4, Operand(r2)); |
- __ b(eq, &update_each); |
+ __ Peek(x1, 4 * kXRegSizeInBytes); |
+ __ Ldr(x11, FieldMemOperand(x1, HeapObject::kMapOffset)); |
+ __ Cmp(x11, x2); |
+ __ B(eq, &update_each); |
// For proxies, no filtering is done. |
// TODO(rossberg): What if only a prototype is a proxy? Not specified yet. |
- __ cmp(r2, Operand(Smi::FromInt(0))); |
- __ b(eq, &update_each); |
+ STATIC_ASSERT(kSmiTag == 0); |
+ __ Cbz(x2, &update_each); |
// Convert the entry to a string or (smi) 0 if it isn't a property |
// any more. If the property has been removed while iterating, we |
// just skip it. |
- __ push(r1); // Enumerable. |
- __ push(r3); // Current entry. |
+ __ Push(x1, x3); |
__ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION); |
- __ mov(r3, Operand(r0), SetCC); |
- __ b(eq, loop_statement.continue_label()); |
+ __ Mov(x3, x0); |
+ __ Cbz(x0, loop_statement.continue_label()); |
// Update the 'each' property or variable from the possibly filtered |
- // entry in register r3. |
- __ bind(&update_each); |
- __ mov(result_register(), r3); |
+ // entry in register x3. |
+ __ Bind(&update_each); |
+ __ Mov(result_register(), x3); |
// Perform the assignment as if via '='. |
{ EffectContext context(this); |
EmitAssignment(stmt->each()); |
@@ -1238,23 +1231,24 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { |
// Generate code for the body of the loop. |
Visit(stmt->body()); |
- // Generate code for the going to the next element by incrementing |
+ // Generate code for going to the next element by incrementing |
// the index (smi) stored on top of the stack. |
- __ bind(loop_statement.continue_label()); |
- __ pop(r0); |
- __ add(r0, r0, Operand(Smi::FromInt(1))); |
- __ push(r0); |
+ __ Bind(loop_statement.continue_label()); |
+ // TODO(all): We could use a callee saved register to avoid popping. |
+ __ Pop(x0); |
+ __ Add(x0, x0, Operand(Smi::FromInt(1))); |
+ __ Push(x0); |
EmitBackEdgeBookkeeping(stmt, &loop); |
- __ b(&loop); |
+ __ B(&loop); |
// Remove the pointers stored on the stack. |
- __ bind(loop_statement.break_label()); |
+ __ Bind(loop_statement.break_label()); |
__ Drop(5); |
// Exit and decrement the loop depth. |
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); |
- __ bind(&exit); |
+ __ Bind(&exit); |
decrement_loop_depth(); |
} |
@@ -1270,24 +1264,25 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) { |
VisitForAccumulatorValue(stmt->assign_iterator()); |
// As with for-in, skip the loop if the iterator is null or undefined. |
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); |
- __ b(eq, loop_statement.break_label()); |
- __ CompareRoot(r0, Heap::kNullValueRootIndex); |
- __ b(eq, loop_statement.break_label()); |
+ Register iterator = x0; |
+ __ JumpIfRoot(iterator, Heap::kUndefinedValueRootIndex, |
+ loop_statement.break_label()); |
+ __ JumpIfRoot(iterator, Heap::kNullValueRootIndex, |
+ loop_statement.break_label()); |
// Convert the iterator to a JS object. |
Label convert, done_convert; |
- __ JumpIfSmi(r0, &convert); |
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); |
- __ b(ge, &done_convert); |
- __ bind(&convert); |
- __ push(r0); |
+ __ JumpIfSmi(iterator, &convert); |
+ __ CompareObjectType(iterator, x1, x1, FIRST_SPEC_OBJECT_TYPE); |
+ __ B(ge, &done_convert); |
+ __ Bind(&convert); |
+ __ Push(iterator); |
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); |
- __ bind(&done_convert); |
- __ push(r0); |
+ __ Bind(&done_convert); |
+ __ Push(iterator); |
// Loop entry. |
- __ bind(loop_statement.continue_label()); |
+ __ Bind(loop_statement.continue_label()); |
// result = iterator.next() |
VisitForEffect(stmt->next_result()); |
@@ -1298,7 +1293,7 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) { |
loop_statement.break_label(), |
&result_not_done, |
&result_not_done); |
- __ bind(&result_not_done); |
+ __ Bind(&result_not_done); |
// each = result.value |
VisitForEffect(stmt->assign_each()); |
@@ -1309,39 +1304,39 @@ void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) { |
// Check stack before looping. |
PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS); |
EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label()); |
- __ jmp(loop_statement.continue_label()); |
+ __ B(loop_statement.continue_label()); |
// Exit and decrement the loop depth. |
PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS); |
- __ bind(loop_statement.break_label()); |
+ __ Bind(loop_statement.break_label()); |
decrement_loop_depth(); |
} |
void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info, |
bool pretenure) { |
- // Use the fast case closure allocation code that allocates in new |
- // space for nested functions that don't need literals cloning. If |
- // we're running with the --always-opt or the --prepare-always-opt |
- // flag, we need to use the runtime function so that the new function |
- // we are creating here gets a chance to have its code optimized and |
- // doesn't just get a copy of the existing unoptimized code. |
+ // Use the fast case closure allocation code that allocates in new space for |
+ // nested functions that don't need literals cloning. If we're running with |
+ // the --always-opt or the --prepare-always-opt flag, we need to use the |
+ // runtime function so that the new function we are creating here gets a |
+ // chance to have its code optimized and doesn't just get a copy of the |
+ // existing unoptimized code. |
if (!FLAG_always_opt && |
!FLAG_prepare_always_opt && |
!pretenure && |
scope()->is_function_scope() && |
info->num_literals() == 0) { |
FastNewClosureStub stub(info->language_mode(), info->is_generator()); |
- __ mov(r2, Operand(info)); |
+ __ Mov(x2, Operand(info)); |
__ CallStub(&stub); |
} else { |
- __ mov(r0, Operand(info)); |
- __ LoadRoot(r1, pretenure ? Heap::kTrueValueRootIndex |
- : Heap::kFalseValueRootIndex); |
- __ Push(cp, r0, r1); |
+ __ Mov(x11, Operand(info)); |
+ __ LoadRoot(x10, pretenure ? Heap::kTrueValueRootIndex |
+ : Heap::kFalseValueRootIndex); |
+ __ Push(cp, x11, x10); |
__ CallRuntime(Runtime::kNewClosure, 3); |
} |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
@@ -1355,20 +1350,19 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var, |
TypeofState typeof_state, |
Label* slow) { |
Register current = cp; |
- Register next = r1; |
- Register temp = r2; |
+ Register next = x10; |
+ Register temp = x11; |
Scope* s = scope(); |
while (s != NULL) { |
if (s->num_heap_slots() > 0) { |
if (s->calls_non_strict_eval()) { |
// Check that extension is NULL. |
- __ ldr(temp, ContextOperand(current, Context::EXTENSION_INDEX)); |
- __ tst(temp, temp); |
- __ b(ne, slow); |
+ __ Ldr(temp, ContextMemOperand(current, Context::EXTENSION_INDEX)); |
+ __ Cbnz(temp, slow); |
} |
// Load next context in chain. |
- __ ldr(next, ContextOperand(current, Context::PREVIOUS_INDEX)); |
+ __ Ldr(next, ContextMemOperand(current, Context::PREVIOUS_INDEX)); |
// Walk the rest of the chain without clobbering cp. |
current = next; |
} |
@@ -1380,30 +1374,25 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var, |
if (s->is_eval_scope()) { |
Label loop, fast; |
- if (!current.is(next)) { |
- __ Move(next, current); |
- } |
- __ bind(&loop); |
+ __ Mov(next, current); |
+ |
+ __ Bind(&loop); |
// Terminate at native context. |
- __ ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset)); |
- __ LoadRoot(ip, Heap::kNativeContextMapRootIndex); |
- __ cmp(temp, ip); |
- __ b(eq, &fast); |
+ __ Ldr(temp, FieldMemOperand(next, HeapObject::kMapOffset)); |
+ __ JumpIfRoot(temp, Heap::kNativeContextMapRootIndex, &fast); |
// Check that extension is NULL. |
- __ ldr(temp, ContextOperand(next, Context::EXTENSION_INDEX)); |
- __ tst(temp, temp); |
- __ b(ne, slow); |
+ __ Ldr(temp, ContextMemOperand(next, Context::EXTENSION_INDEX)); |
+ __ Cbnz(temp, slow); |
// Load next context in chain. |
- __ ldr(next, ContextOperand(next, Context::PREVIOUS_INDEX)); |
- __ b(&loop); |
- __ bind(&fast); |
+ __ Ldr(next, ContextMemOperand(next, Context::PREVIOUS_INDEX)); |
+ __ B(&loop); |
+ __ Bind(&fast); |
} |
- __ ldr(r0, GlobalObjectOperand()); |
- __ mov(r2, Operand(var->name())); |
- ContextualMode mode = (typeof_state == INSIDE_TYPEOF) |
- ? NOT_CONTEXTUAL |
- : CONTEXTUAL; |
+ __ Ldr(x0, GlobalObjectMemOperand()); |
+ __ Mov(x2, Operand(var->name())); |
+ ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL |
+ : CONTEXTUAL; |
CallLoadIC(mode); |
} |
@@ -1412,31 +1401,29 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var, |
Label* slow) { |
ASSERT(var->IsContextSlot()); |
Register context = cp; |
- Register next = r3; |
- Register temp = r4; |
+ Register next = x10; |
+ Register temp = x11; |
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) { |
if (s->num_heap_slots() > 0) { |
if (s->calls_non_strict_eval()) { |
// Check that extension is NULL. |
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX)); |
- __ tst(temp, temp); |
- __ b(ne, slow); |
+ __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX)); |
+ __ Cbnz(temp, slow); |
} |
- __ ldr(next, ContextOperand(context, Context::PREVIOUS_INDEX)); |
+ __ Ldr(next, ContextMemOperand(context, Context::PREVIOUS_INDEX)); |
// Walk the rest of the chain without clobbering cp. |
context = next; |
} |
} |
// Check that last extension is NULL. |
- __ ldr(temp, ContextOperand(context, Context::EXTENSION_INDEX)); |
- __ tst(temp, temp); |
- __ b(ne, slow); |
+ __ Ldr(temp, ContextMemOperand(context, Context::EXTENSION_INDEX)); |
+ __ Cbnz(temp, slow); |
// This function is used only for loads, not stores, so it's safe to |
// return a cp-based operand (the write barrier cannot be allowed to |
// destroy the cp register). |
- return ContextOperand(context, var->index()); |
+ return ContextMemOperand(context, var->index()); |
} |
@@ -1451,24 +1438,23 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, |
// containing the eval. |
if (var->mode() == DYNAMIC_GLOBAL) { |
EmitLoadGlobalCheckExtensions(var, typeof_state, slow); |
- __ jmp(done); |
+ __ B(done); |
} else if (var->mode() == DYNAMIC_LOCAL) { |
Variable* local = var->local_if_not_shadowed(); |
- __ ldr(r0, ContextSlotOperandCheckExtensions(local, slow)); |
+ __ Ldr(x0, ContextSlotOperandCheckExtensions(local, slow)); |
if (local->mode() == LET || |
local->mode() == CONST || |
local->mode() == CONST_HARMONY) { |
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex); |
+ __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, done); |
if (local->mode() == CONST) { |
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); |
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex); |
} else { // LET || CONST_HARMONY |
- __ b(ne, done); |
- __ mov(r0, Operand(var->name())); |
- __ push(r0); |
+ __ Mov(x0, Operand(var->name())); |
+ __ Push(x0); |
__ CallRuntime(Runtime::kThrowReferenceError, 1); |
} |
} |
- __ jmp(done); |
+ __ B(done); |
} |
} |
@@ -1483,12 +1469,12 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { |
switch (var->location()) { |
case Variable::UNALLOCATED: { |
Comment cmnt(masm_, "Global variable"); |
- // Use inline caching. Variable name is passed in r2 and the global |
- // object (receiver) in r0. |
- __ ldr(r0, GlobalObjectOperand()); |
- __ mov(r2, Operand(var->name())); |
+ // Use inline caching. Variable name is passed in x2 and the global |
+ // object (receiver) in x0. |
+ __ Ldr(x0, GlobalObjectMemOperand()); |
+ __ Mov(x2, Operand(var->name())); |
CallLoadIC(CONTEXTUAL); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
break; |
} |
@@ -1535,23 +1521,23 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { |
if (!skip_init_check) { |
// Let and const need a read barrier. |
- GetVar(r0, var); |
- __ CompareRoot(r0, Heap::kTheHoleValueRootIndex); |
+ GetVar(x0, var); |
+ Label done; |
+ __ JumpIfNotRoot(x0, Heap::kTheHoleValueRootIndex, &done); |
if (var->mode() == LET || var->mode() == CONST_HARMONY) { |
// Throw a reference error when using an uninitialized let/const |
// binding in harmony mode. |
- Label done; |
- __ b(ne, &done); |
- __ mov(r0, Operand(var->name())); |
- __ push(r0); |
+ __ Mov(x0, Operand(var->name())); |
+ __ Push(x0); |
__ CallRuntime(Runtime::kThrowReferenceError, 1); |
- __ bind(&done); |
+ __ Bind(&done); |
} else { |
// Uninitialized const bindings outside of harmony mode are unholed. |
ASSERT(var->mode() == CONST); |
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq); |
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex); |
+ __ Bind(&done); |
} |
- context()->Plug(r0); |
+ context()->Plug(x0); |
break; |
} |
} |
@@ -1561,16 +1547,17 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { |
case Variable::LOOKUP: { |
Label done, slow; |
- // Generate code for loading from variables potentially shadowed |
- // by eval-introduced variables. |
+ // Generate code for loading from variables potentially shadowed by |
+ // eval-introduced variables. |
EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done); |
- __ bind(&slow); |
+ __ Bind(&slow); |
Comment cmnt(masm_, "Lookup variable"); |
- __ mov(r1, Operand(var->name())); |
- __ Push(cp, r1); // Context and name. |
+ __ Mov(x1, Operand(var->name())); |
+ __ Push(cp, x1); // Context and name. |
__ CallRuntime(Runtime::kLoadContextSlot, 2); |
- __ bind(&done); |
- context()->Plug(r0); |
+ __ Bind(&done); |
+ context()->Plug(x0); |
+ break; |
} |
} |
} |
@@ -1580,56 +1567,54 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { |
Comment cmnt(masm_, "[ RegExpLiteral"); |
Label materialized; |
// Registers will be used as follows: |
- // r5 = materialized value (RegExp literal) |
- // r4 = JS function, literals array |
- // r3 = literal index |
- // r2 = RegExp pattern |
- // r1 = RegExp flags |
- // r0 = RegExp literal clone |
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
- __ ldr(r4, FieldMemOperand(r0, JSFunction::kLiteralsOffset)); |
+ // x5 = materialized value (RegExp literal) |
+ // x4 = literals array (loaded via the JS function in x10) |
+ // x3 = literal index |
+ // x2 = RegExp pattern |
+ // x1 = RegExp flags |
+ // x0 = RegExp literal clone |
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
+ __ Ldr(x4, FieldMemOperand(x10, JSFunction::kLiteralsOffset)); |
int literal_offset = |
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize; |
- __ ldr(r5, FieldMemOperand(r4, literal_offset)); |
- __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
- __ cmp(r5, ip); |
- __ b(ne, &materialized); |
+ __ Ldr(x5, FieldMemOperand(x4, literal_offset)); |
+ __ JumpIfNotRoot(x5, Heap::kUndefinedValueRootIndex, &materialized); |
// Create regexp literal using runtime function. |
- // Result will be in r0. |
- __ mov(r3, Operand(Smi::FromInt(expr->literal_index()))); |
- __ mov(r2, Operand(expr->pattern())); |
- __ mov(r1, Operand(expr->flags())); |
- __ Push(r4, r3, r2, r1); |
+ // Result will be in x0. |
+ __ Mov(x3, Operand(Smi::FromInt(expr->literal_index()))); |
+ __ Mov(x2, Operand(expr->pattern())); |
+ __ Mov(x1, Operand(expr->flags())); |
+ __ Push(x4, x3, x2, x1); |
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); |
- __ mov(r5, r0); |
+ __ Mov(x5, x0); |
- __ bind(&materialized); |
+ __ Bind(&materialized); |
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; |
Label allocated, runtime_allocate; |
- __ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT); |
- __ jmp(&allocated); |
+ __ Allocate(size, x0, x2, x3, &runtime_allocate, TAG_OBJECT); |
+ __ B(&allocated); |
- __ bind(&runtime_allocate); |
- __ mov(r0, Operand(Smi::FromInt(size))); |
- __ Push(r5, r0); |
+ __ Bind(&runtime_allocate); |
+ __ Mov(x10, Operand(Smi::FromInt(size))); |
+ __ Push(x5, x10); |
__ CallRuntime(Runtime::kAllocateInNewSpace, 1); |
- __ pop(r5); |
+ __ Pop(x5); |
- __ bind(&allocated); |
+ __ Bind(&allocated); |
// After this, registers are used as follows: |
- // r0: Newly allocated regexp. |
- // r5: Materialized regexp. |
- // r2: temp. |
- __ CopyFields(r0, r5, d0, size / kPointerSize); |
- context()->Plug(r0); |
+ // x0: Newly allocated regexp. |
+ // x5: Materialized regexp. |
+ // x10, x11, x12: temps. |
+ __ CopyFields(x0, x5, CPURegList(x10, x11, x12), size / kPointerSize); |
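+ // CopyFields on A64 takes a list of X-register temps where the ARM |
+ // version used the d0 FP register as its scratch. |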
+ context()->Plug(x0); |
} |
void FullCodeGenerator::EmitAccessor(Expression* expression) { |
if (expression == NULL) { |
- __ LoadRoot(r1, Heap::kNullValueRootIndex); |
- __ push(r1); |
+ __ LoadRoot(x10, Heap::kNullValueRootIndex); |
+ __ Push(x10); |
} else { |
VisitForStackValue(expression); |
} |
@@ -1641,23 +1626,25 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { |
expr->BuildConstantProperties(isolate()); |
Handle<FixedArray> constant_properties = expr->constant_properties(); |
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset)); |
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index()))); |
- __ mov(r1, Operand(constant_properties)); |
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
+ __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset)); |
+ __ Mov(x2, Operand(Smi::FromInt(expr->literal_index()))); |
+ __ Mov(x1, Operand(constant_properties)); |
int flags = expr->fast_elements() |
? ObjectLiteral::kFastElements |
: ObjectLiteral::kNoFlags; |
flags |= expr->has_function() |
? ObjectLiteral::kHasFunction |
: ObjectLiteral::kNoFlags; |
- __ mov(r0, Operand(Smi::FromInt(flags))); |
+ __ Mov(x0, Operand(Smi::FromInt(flags))); |
int properties_count = constant_properties->length() / 2; |
+ const int max_cloned_properties = |
+ FastCloneShallowObjectStub::kMaximumClonedProperties; |
if ((FLAG_track_double_fields && expr->may_store_doubles()) || |
- expr->depth() > 1 || Serializer::enabled() || |
- flags != ObjectLiteral::kFastElements || |
- properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) { |
- __ Push(r3, r2, r1, r0); |
+ (expr->depth() > 1) || Serializer::enabled() || |
+ (flags != ObjectLiteral::kFastElements) || |
+ (properties_count > max_cloned_properties)) { |
+ __ Push(x3, x2, x1, x0); |
__ CallRuntime(Runtime::kCreateObjectLiteral, 4); |
} else { |
FastCloneShallowObjectStub stub(properties_count); |
@@ -1665,7 +1652,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { |
} |
// If result_saved is true the result is on top of the stack. If |
- // result_saved is false the result is in r0. |
+ // result_saved is false the result is in x0. |
bool result_saved = false; |
// Mark all computed expressions that are bound to a key that |
@@ -1681,7 +1668,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { |
Literal* key = property->key(); |
Expression* value = property->value(); |
if (!result_saved) { |
- __ push(r0); // Save result on stack |
+ __ Push(x0); // Save result on stack |
result_saved = true; |
} |
switch (property->kind()) { |
@@ -1694,8 +1681,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { |
if (key->value()->IsInternalizedString()) { |
if (property->emit_store()) { |
VisitForAccumulatorValue(value); |
- __ mov(r2, Operand(key->value())); |
- __ ldr(r1, MemOperand(sp)); |
+ __ Mov(x2, Operand(key->value())); |
+ __ Peek(x1, 0); |
CallStoreIC(key->LiteralFeedbackId()); |
PrepareForBailoutForId(key->id(), NO_REGISTERS); |
} else { |
@@ -1704,13 +1691,13 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { |
break; |
} |
// Duplicate receiver on stack. |
- __ ldr(r0, MemOperand(sp)); |
- __ push(r0); |
+ __ Peek(x0, 0); |
+ __ Push(x0); |
VisitForStackValue(key); |
VisitForStackValue(value); |
if (property->emit_store()) { |
- __ mov(r0, Operand(Smi::FromInt(NONE))); // PropertyAttributes |
- __ push(r0); |
+ __ Mov(x0, Operand(Smi::FromInt(NONE))); // PropertyAttributes |
+ __ Push(x0); |
__ CallRuntime(Runtime::kSetProperty, 4); |
} else { |
__ Drop(3); |
@@ -1718,8 +1705,10 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { |
break; |
case ObjectLiteral::Property::PROTOTYPE: |
// Duplicate receiver on stack. |
- __ ldr(r0, MemOperand(sp)); |
- __ push(r0); |
+ __ Peek(x0, 0); |
+ // TODO(jbramley): This push shouldn't be necessary if we don't call the |
+ // runtime below. In that case, skip it. |
+ __ Push(x0); |
VisitForStackValue(value); |
if (property->emit_store()) { |
__ CallRuntime(Runtime::kSetPrototype, 2); |
@@ -1727,7 +1716,6 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { |
__ Drop(2); |
} |
break; |
- |
case ObjectLiteral::Property::GETTER: |
accessor_table.lookup(key)->second->getter = value; |
break; |
@@ -1742,27 +1730,27 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { |
for (AccessorTable::Iterator it = accessor_table.begin(); |
it != accessor_table.end(); |
++it) { |
- __ ldr(r0, MemOperand(sp)); // Duplicate receiver. |
- __ push(r0); |
- VisitForStackValue(it->first); |
- EmitAccessor(it->second->getter); |
- EmitAccessor(it->second->setter); |
- __ mov(r0, Operand(Smi::FromInt(NONE))); |
- __ push(r0); |
- __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5); |
+ __ Peek(x10, 0); // Duplicate receiver. |
+ __ Push(x10); |
+ VisitForStackValue(it->first); |
+ EmitAccessor(it->second->getter); |
+ EmitAccessor(it->second->setter); |
+ __ Mov(x10, Operand(Smi::FromInt(NONE))); |
+ __ Push(x10); |
+ __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5); |
} |
if (expr->has_function()) { |
ASSERT(result_saved); |
- __ ldr(r0, MemOperand(sp)); |
- __ push(r0); |
+ __ Peek(x0, 0); |
+ __ Push(x0); |
__ CallRuntime(Runtime::kToFastProperties, 1); |
} |
if (result_saved) { |
context()->PlugTOS(); |
} else { |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
} |
@@ -1771,9 +1759,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { |
Comment cmnt(masm_, "[ ArrayLiteral"); |
expr->BuildConstantElements(isolate()); |
- int flags = expr->depth() == 1 |
- ? ArrayLiteral::kShallowElements |
- : ArrayLiteral::kNoFlags; |
+ int flags = (expr->depth() == 1) ? ArrayLiteral::kShallowElements |
+ : ArrayLiteral::kNoFlags; |
ZoneList<Expression*>* subexprs = expr->values(); |
int length = subexprs->length(); |
@@ -1792,10 +1779,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { |
allocation_site_mode = DONT_TRACK_ALLOCATION_SITE; |
} |
- __ ldr(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
- __ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset)); |
- __ mov(r2, Operand(Smi::FromInt(expr->literal_index()))); |
- __ mov(r1, Operand(constant_elements)); |
+ __ Ldr(x3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
+ __ Ldr(x3, FieldMemOperand(x3, JSFunction::kLiteralsOffset)); |
+ // TODO(jbramley): Can these Operand constructors be implicit? |
+ __ Mov(x2, Operand(Smi::FromInt(expr->literal_index()))); |
+ __ Mov(x1, Operand(constant_elements)); |
if (has_fast_elements && constant_elements_values->map() == |
isolate()->heap()->fixed_cow_array_map()) { |
FastCloneShallowArrayStub stub( |
@@ -1804,11 +1792,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { |
length); |
__ CallStub(&stub); |
__ IncrementCounter( |
- isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2); |
- } else if (expr->depth() > 1 || Serializer::enabled() || |
+ isolate()->counters()->cow_arrays_created_stub(), 1, x10, x11); |
+ } else if ((expr->depth() > 1) || Serializer::enabled() || |
length > FastCloneShallowArrayStub::kMaximumClonedLength) { |
- __ mov(r0, Operand(Smi::FromInt(flags))); |
- __ Push(r3, r2, r1, r0); |
+ __ Mov(x0, Operand(Smi::FromInt(flags))); |
+ __ Push(x3, x2, x1, x0); |
__ CallRuntime(Runtime::kCreateArrayLiteral, 4); |
} else { |
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) || |
@@ -1835,7 +1823,7 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { |
if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue; |
if (!result_saved) { |
- __ push(r0); |
+ __ Push(x0); |
__ Push(Smi::FromInt(expr->literal_index())); |
result_saved = true; |
} |
@@ -1843,15 +1831,15 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { |
if (IsFastObjectElementsKind(constant_elements_kind)) { |
int offset = FixedArray::kHeaderSize + (i * kPointerSize); |
- __ ldr(r6, MemOperand(sp, kPointerSize)); // Copy of array literal. |
- __ ldr(r1, FieldMemOperand(r6, JSObject::kElementsOffset)); |
- __ str(result_register(), FieldMemOperand(r1, offset)); |
+ __ Peek(x6, kPointerSize); // Copy of array literal. |
+ __ Ldr(x1, FieldMemOperand(x6, JSObject::kElementsOffset)); |
+ __ Str(result_register(), FieldMemOperand(x1, offset)); |
// Update the write barrier for the array store. |
- __ RecordWriteField(r1, offset, result_register(), r2, |
+ __ RecordWriteField(x1, offset, result_register(), x10, |
kLRHasBeenSaved, kDontSaveFPRegs, |
EMIT_REMEMBERED_SET, INLINE_SMI_CHECK); |
} else { |
- __ mov(r3, Operand(Smi::FromInt(i))); |
+ __ Mov(x3, Operand(Smi::FromInt(i))); |
StoreArrayLiteralElementStub stub; |
__ CallStub(&stub); |
} |
@@ -1860,10 +1848,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { |
} |
if (result_saved) { |
- __ pop(); // literal index |
+ __ Drop(1); // literal index |
context()->PlugTOS(); |
} else { |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
} |
@@ -1897,7 +1885,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { |
if (expr->is_compound()) { |
// We need the receiver both on the stack and in the accumulator. |
VisitForAccumulatorValue(property->obj()); |
- __ push(result_register()); |
+ __ Push(result_register()); |
} else { |
VisitForStackValue(property->obj()); |
} |
@@ -1906,8 +1894,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { |
if (expr->is_compound()) { |
VisitForStackValue(property->obj()); |
VisitForAccumulatorValue(property->key()); |
- __ ldr(r1, MemOperand(sp, 0)); |
- __ push(r0); |
+ __ Peek(x1, 0); |
+ __ Push(x0); |
} else { |
VisitForStackValue(property->obj()); |
VisitForStackValue(property->key()); |
@@ -1936,7 +1924,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { |
} |
Token::Value op = expr->binary_op(); |
- __ push(r0); // Left operand goes on the stack. |
+ __ Push(x0); // Left operand goes on the stack. |
VisitForAccumulatorValue(expr->value()); |
OverwriteMode mode = expr->value()->ResultOverwriteAllowed() |
@@ -1969,7 +1957,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { |
EmitVariableAssignment(expr->target()->AsVariableProxy()->var(), |
expr->op()); |
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
break; |
case NAMED_PROPERTY: |
EmitNamedPropertyAssignment(expr); |
@@ -1981,312 +1969,11 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { |
} |
-void FullCodeGenerator::VisitYield(Yield* expr) { |
- Comment cmnt(masm_, "[ Yield"); |
- // Evaluate yielded value first; the initial iterator definition depends on |
- // this. It stays on the stack while we update the iterator. |
- VisitForStackValue(expr->expression()); |
- |
- switch (expr->yield_kind()) { |
- case Yield::SUSPEND: |
- // Pop value from top-of-stack slot; box result into result register. |
- EmitCreateIteratorResult(false); |
- __ push(result_register()); |
- // Fall through. |
- case Yield::INITIAL: { |
- Label suspend, continuation, post_runtime, resume; |
- |
- __ jmp(&suspend); |
- |
- __ bind(&continuation); |
- __ jmp(&resume); |
- |
- __ bind(&suspend); |
- VisitForAccumulatorValue(expr->generator_object()); |
- ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos())); |
- __ mov(r1, Operand(Smi::FromInt(continuation.pos()))); |
- __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset)); |
- __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset)); |
- __ mov(r1, cp); |
- __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2, |
- kLRHasBeenSaved, kDontSaveFPRegs); |
- __ add(r1, fp, Operand(StandardFrameConstants::kExpressionsOffset)); |
- __ cmp(sp, r1); |
- __ b(eq, &post_runtime); |
- __ push(r0); // generator object |
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
- __ bind(&post_runtime); |
- __ pop(result_register()); |
- EmitReturnSequence(); |
- |
- __ bind(&resume); |
- context()->Plug(result_register()); |
- break; |
- } |
- |
- case Yield::FINAL: { |
- VisitForAccumulatorValue(expr->generator_object()); |
- __ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed))); |
- __ str(r1, FieldMemOperand(result_register(), |
- JSGeneratorObject::kContinuationOffset)); |
- // Pop value from top-of-stack slot, box result into result register. |
- EmitCreateIteratorResult(true); |
- EmitUnwindBeforeReturn(); |
- EmitReturnSequence(); |
- break; |
- } |
- |
- case Yield::DELEGATING: { |
- VisitForStackValue(expr->generator_object()); |
- |
- // Initial stack layout is as follows: |
- // [sp + 1 * kPointerSize] iter |
- // [sp + 0 * kPointerSize] g |
- |
- Label l_catch, l_try, l_suspend, l_continuation, l_resume; |
- Label l_next, l_call, l_loop; |
- // Initial send value is undefined. |
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); |
- __ b(&l_next); |
- |
- // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; } |
- __ bind(&l_catch); |
- handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos())); |
- __ LoadRoot(r2, Heap::kthrow_stringRootIndex); // "throw" |
- __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter |
- __ Push(r2, r3, r0); // "throw", iter, except |
- __ jmp(&l_call); |
- |
- // try { received = %yield result } |
- // Shuffle the received result above a try handler and yield it without |
- // re-boxing. |
- __ bind(&l_try); |
- __ pop(r0); // result |
- __ PushTryHandler(StackHandler::CATCH, expr->index()); |
- const int handler_size = StackHandlerConstants::kSize; |
- __ push(r0); // result |
- __ jmp(&l_suspend); |
- __ bind(&l_continuation); |
- __ jmp(&l_resume); |
- __ bind(&l_suspend); |
- const int generator_object_depth = kPointerSize + handler_size; |
- __ ldr(r0, MemOperand(sp, generator_object_depth)); |
- __ push(r0); // g |
- ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos())); |
- __ mov(r1, Operand(Smi::FromInt(l_continuation.pos()))); |
- __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset)); |
- __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset)); |
- __ mov(r1, cp); |
- __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2, |
- kLRHasBeenSaved, kDontSaveFPRegs); |
- __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
- __ pop(r0); // result |
- EmitReturnSequence(); |
- __ bind(&l_resume); // received in r0 |
- __ PopTryHandler(); |
- |
- // receiver = iter; f = 'next'; arg = received; |
- __ bind(&l_next); |
- __ LoadRoot(r2, Heap::knext_stringRootIndex); // "next" |
- __ ldr(r3, MemOperand(sp, 1 * kPointerSize)); // iter |
- __ Push(r2, r3, r0); // "next", iter, received |
- |
- // result = receiver[f](arg); |
- __ bind(&l_call); |
- __ ldr(r1, MemOperand(sp, kPointerSize)); |
- __ ldr(r0, MemOperand(sp, 2 * kPointerSize)); |
- Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); |
- CallIC(ic, TypeFeedbackId::None()); |
- __ mov(r1, r0); |
- __ str(r1, MemOperand(sp, 2 * kPointerSize)); |
- CallFunctionStub stub(1, CALL_AS_METHOD); |
- __ CallStub(&stub); |
- |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
- __ Drop(1); // The function is still on the stack; drop it. |
- |
- // if (!result.done) goto l_try; |
- __ bind(&l_loop); |
- __ push(r0); // save result |
- __ LoadRoot(r2, Heap::kdone_stringRootIndex); // "done" |
- CallLoadIC(NOT_CONTEXTUAL); // result.done in r0 |
- Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate()); |
- CallIC(bool_ic); |
- __ cmp(r0, Operand(0)); |
- __ b(eq, &l_try); |
- |
- // result.value |
- __ pop(r0); // result |
- __ LoadRoot(r2, Heap::kvalue_stringRootIndex); // "value" |
- CallLoadIC(NOT_CONTEXTUAL); // result.value in r0 |
- context()->DropAndPlug(2, r0); // drop iter and g |
- break; |
- } |
- } |
-} |
- |
- |
-void FullCodeGenerator::EmitGeneratorResume(Expression *generator, |
- Expression *value, |
- JSGeneratorObject::ResumeMode resume_mode) { |
- // The value stays in r0, and is ultimately read by the resumed generator, as |
- // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it |
- // is read to throw the value when the resumed generator is already closed. |
- // r1 will hold the generator object until the activation has been resumed. |
- VisitForStackValue(generator); |
- VisitForAccumulatorValue(value); |
- __ pop(r1); |
- |
- // Check generator state. |
- Label wrong_state, closed_state, done; |
- __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset)); |
- STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0); |
- STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0); |
- __ cmp(r3, Operand(Smi::FromInt(0))); |
- __ b(eq, &closed_state); |
- __ b(lt, &wrong_state); |
- |
- // Load suspended function and context. |
- __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset)); |
- __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset)); |
- |
- // Load receiver and store as the first argument. |
- __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kReceiverOffset)); |
- __ push(r2); |
- |
- // Push holes for the rest of the arguments to the generator function. |
- __ ldr(r3, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); |
- __ ldr(r3, |
- FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset)); |
- __ LoadRoot(r2, Heap::kTheHoleValueRootIndex); |
- Label push_argument_holes, push_frame; |
- __ bind(&push_argument_holes); |
- __ sub(r3, r3, Operand(Smi::FromInt(1)), SetCC); |
- __ b(mi, &push_frame); |
- __ push(r2); |
- __ jmp(&push_argument_holes); |
- |
- // Enter a new JavaScript frame, and initialize its slots as they were when |
- // the generator was suspended. |
- Label resume_frame; |
- __ bind(&push_frame); |
- __ bl(&resume_frame); |
- __ jmp(&done); |
- __ bind(&resume_frame); |
- // lr = return address. |
- // fp = caller's frame pointer. |
- // pp = caller's constant pool (if FLAG_enable_ool_constant_pool), |
- // cp = callee's context, |
- // r4 = callee's JS function. |
- __ PushFixedFrame(r4); |
- // Adjust FP to point to saved FP. |
- __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); |
- |
- // Load the operand stack size. |
- __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kOperandStackOffset)); |
- __ ldr(r3, FieldMemOperand(r3, FixedArray::kLengthOffset)); |
- __ SmiUntag(r3); |
- |
- // If we are sending a value and there is no operand stack, we can jump back |
- // in directly. |
- if (resume_mode == JSGeneratorObject::NEXT) { |
- Label slow_resume; |
- __ cmp(r3, Operand(0)); |
- __ b(ne, &slow_resume); |
- __ ldr(r3, FieldMemOperand(r4, JSFunction::kCodeEntryOffset)); |
- __ ldr(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset)); |
- __ SmiUntag(r2); |
- __ add(r3, r3, r2); |
- __ mov(r2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))); |
- __ str(r2, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset)); |
- __ Jump(r3); |
- __ bind(&slow_resume); |
- } |
- |
- // Otherwise, we push holes for the operand stack and call the runtime to fix |
- // up the stack and the handlers. |
- Label push_operand_holes, call_resume; |
- __ bind(&push_operand_holes); |
- __ sub(r3, r3, Operand(1), SetCC); |
- __ b(mi, &call_resume); |
- __ push(r2); |
- __ b(&push_operand_holes); |
- __ bind(&call_resume); |
- ASSERT(!result_register().is(r1)); |
- __ Push(r1, result_register()); |
- __ Push(Smi::FromInt(resume_mode)); |
- __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3); |
- // Not reached: the runtime call returns elsewhere. |
- __ stop("not-reached"); |
- |
- // Reach here when generator is closed. |
- __ bind(&closed_state); |
- if (resume_mode == JSGeneratorObject::NEXT) { |
- // Return completed iterator result when generator is closed. |
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
- __ push(r2); |
- // Pop value from top-of-stack slot; box result into result register. |
- EmitCreateIteratorResult(true); |
- } else { |
- // Throw the provided value. |
- __ push(r0); |
- __ CallRuntime(Runtime::kThrow, 1); |
- } |
- __ jmp(&done); |
- |
- // Throw error if we attempt to operate on a running generator. |
- __ bind(&wrong_state); |
- __ push(r1); |
- __ CallRuntime(Runtime::kThrowGeneratorStateError, 1); |
- |
- __ bind(&done); |
- context()->Plug(result_register()); |
-} |
- |
- |
-void FullCodeGenerator::EmitCreateIteratorResult(bool done) { |
- Label gc_required; |
- Label allocated; |
- |
- Handle<Map> map(isolate()->native_context()->generator_result_map()); |
- |
- __ Allocate(map->instance_size(), r0, r2, r3, &gc_required, TAG_OBJECT); |
- __ jmp(&allocated); |
- |
- __ bind(&gc_required); |
- __ Push(Smi::FromInt(map->instance_size())); |
- __ CallRuntime(Runtime::kAllocateInNewSpace, 1); |
- __ ldr(context_register(), |
- MemOperand(fp, StandardFrameConstants::kContextOffset)); |
- |
- __ bind(&allocated); |
- __ mov(r1, Operand(map)); |
- __ pop(r2); |
- __ mov(r3, Operand(isolate()->factory()->ToBoolean(done))); |
- __ mov(r4, Operand(isolate()->factory()->empty_fixed_array())); |
- ASSERT_EQ(map->instance_size(), 5 * kPointerSize); |
- __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); |
- __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset)); |
- __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); |
- __ str(r2, |
- FieldMemOperand(r0, JSGeneratorObject::kResultValuePropertyOffset)); |
- __ str(r3, |
- FieldMemOperand(r0, JSGeneratorObject::kResultDonePropertyOffset)); |
- |
- // Only the value field needs a write barrier, as the other values are in the |
- // root set. |
- __ RecordWriteField(r0, JSGeneratorObject::kResultValuePropertyOffset, |
- r2, r3, kLRHasBeenSaved, kDontSaveFPRegs); |
-} |
- |
- |
void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) { |
SetSourcePosition(prop->position()); |
Literal* key = prop->key()->AsLiteral(); |
- __ mov(r2, Operand(key->value())); |
- // Call load IC. It has arguments receiver and property name r0 and r2. |
+ __ Mov(x2, Operand(key->value())); |
+ // Call load IC, with the receiver in x0 and the property name in x2. |
CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId()); |
} |
@@ -2304,109 +1991,118 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, |
OverwriteMode mode, |
Expression* left_expr, |
Expression* right_expr) { |
- Label done, smi_case, stub_call; |
- |
- Register scratch1 = r2; |
- Register scratch2 = r3; |
+ Label done, both_smis, stub_call; |
// Get the arguments. |
- Register left = r1; |
- Register right = r0; |
- __ pop(left); |
+ Register left = x1; |
+ Register right = x0; |
+ Register result = x0; |
+ __ Pop(left); |
// Perform combined smi check on both operands. |
- __ orr(scratch1, left, Operand(right)); |
- STATIC_ASSERT(kSmiTag == 0); |
+ __ Orr(x10, left, right); |
JumpPatchSite patch_site(masm_); |
- patch_site.EmitJumpIfSmi(scratch1, &smi_case); |
+ patch_site.EmitJumpIfSmi(x10, &both_smis); |
- __ bind(&stub_call); |
+ __ Bind(&stub_call); |
BinaryOpICStub stub(op, mode); |
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); |
- patch_site.EmitPatchInfo(); |
- __ jmp(&done); |
+ { |
+ Assembler::BlockConstPoolScope scope(masm_); |
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); |
+ patch_site.EmitPatchInfo(); |
+ } |
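+ // The scope above blocks constant pool emission so that the patch-info |
+ // marker stays at a fixed offset from the patchable branch. |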
+ __ B(&done); |
- __ bind(&smi_case); |
- // Smi case. This code works the same way as the smi-smi case in the type |
+ __ Bind(&both_smis); |
+ // Smi case. This code works in the same way as the smi-smi case in the type |
// recording binary operation stub, see |
+ // BinaryOpStub::GenerateSmiSmiOperation for comments. |
+ // TODO(all): That doesn't exist any more. Where are the comments? |
+ // |
+ // The set of operations that need to be supported here is controlled by |
+ // FullCodeGenerator::ShouldInlineSmiCase(). |
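+ // |
+ // On A64 a smi keeps its 32-bit payload in the upper half of the register |
+ // (value << kSmiShift, with kSmiShift == 32). As a sketch, Token::SAR below |
+ // works on the still-tagged values: |
+ //   Ubfx(right, right, kSmiShift, 5)   // shift amount = payload & 0x1f |
+ //   Asr(result, left, right)           // shift the tagged value |
+ //   Bic(result, result, kSmiShiftMask) // clear bits shifted into the tag |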
switch (op) { |
case Token::SAR: |
- __ GetLeastBitsFromSmi(scratch1, right, 5); |
- __ mov(right, Operand(left, ASR, scratch1)); |
- __ bic(right, right, Operand(kSmiTagMask)); |
+ __ Ubfx(right, right, kSmiShift, 5); |
+ __ Asr(result, left, right); |
+ __ Bic(result, result, kSmiShiftMask); |
break; |
- case Token::SHL: { |
- __ SmiUntag(scratch1, left); |
- __ GetLeastBitsFromSmi(scratch2, right, 5); |
- __ mov(scratch1, Operand(scratch1, LSL, scratch2)); |
- __ TrySmiTag(right, scratch1, &stub_call); |
+ case Token::SHL: |
+ __ Ubfx(right, right, kSmiShift, 5); |
+ __ Lsl(result, left, right); |
break; |
- } |
case Token::SHR: { |
- __ SmiUntag(scratch1, left); |
- __ GetLeastBitsFromSmi(scratch2, right, 5); |
- __ mov(scratch1, Operand(scratch1, LSR, scratch2)); |
- __ tst(scratch1, Operand(0xc0000000)); |
- __ b(ne, &stub_call); |
- __ SmiTag(right, scratch1); |
+ Label right_not_zero; |
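+ // >>> produces an unsigned result; with a zero shift count a negative |
+ // input yields a value above the smi payload range, so call the stub. |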
+ __ Cbnz(right, &right_not_zero); |
+ __ Tbnz(left, kXSignBit, &stub_call); |
+ __ Bind(&right_not_zero); |
+ __ Ubfx(right, right, kSmiShift, 5); |
+ __ Lsr(result, left, right); |
+ __ Bic(result, result, kSmiShiftMask); |
break; |
} |
case Token::ADD: |
- __ add(scratch1, left, Operand(right), SetCC); |
- __ b(vs, &stub_call); |
- __ mov(right, scratch1); |
+ __ Adds(x10, left, right); |
+ __ B(vs, &stub_call); |
+ __ Mov(result, x10); |
break; |
case Token::SUB: |
- __ sub(scratch1, left, Operand(right), SetCC); |
- __ b(vs, &stub_call); |
- __ mov(right, scratch1); |
+ __ Subs(x10, left, right); |
+ __ B(vs, &stub_call); |
+ __ Mov(result, x10); |
break; |
case Token::MUL: { |
- __ SmiUntag(ip, right); |
- __ smull(scratch1, scratch2, left, ip); |
- __ mov(ip, Operand(scratch1, ASR, 31)); |
- __ cmp(ip, Operand(scratch2)); |
- __ b(ne, &stub_call); |
- __ cmp(scratch1, Operand::Zero()); |
- __ mov(right, Operand(scratch1), LeaveCC, ne); |
- __ b(ne, &done); |
- __ add(scratch2, right, Operand(left), SetCC); |
- __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl); |
- __ b(mi, &stub_call); |
+ Label not_minus_zero, done; |
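+ // Both operands are tagged as (payload << 32), so the 128-bit product is |
+ // the mathematical product shifted up by 64: Smulh's high half is exactly |
+ // the untagged 64-bit result. |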
+ __ Smulh(x10, left, right); |
+ __ Cbnz(x10, ¬_minus_zero); |
+ __ Eor(x11, left, right); |
+ __ Tbnz(x11, kXSignBit, &stub_call); |
+ STATIC_ASSERT(kSmiTag == 0); |
+ __ Mov(result, x10); |
+ __ B(&done); |
+ __ Bind(¬_minus_zero); |
+ __ Cls(x11, x10); |
+ __ Cmp(x11, kXRegSize - kSmiShift); |
+ __ B(lt, &stub_call); |
+ __ SmiTag(result, x10); |
+ __ Bind(&done); |
break; |
} |
case Token::BIT_OR: |
- __ orr(right, left, Operand(right)); |
+ __ Orr(result, left, right); |
break; |
case Token::BIT_AND: |
- __ and_(right, left, Operand(right)); |
+ __ And(result, left, right); |
break; |
case Token::BIT_XOR: |
- __ eor(right, left, Operand(right)); |
+ __ Eor(result, left, right); |
break; |
default: |
UNREACHABLE(); |
} |
- __ bind(&done); |
- context()->Plug(r0); |
+ __ Bind(&done); |
+ context()->Plug(x0); |
} |
void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, |
Token::Value op, |
OverwriteMode mode) { |
- __ pop(r1); |
+ __ Pop(x1); |
BinaryOpICStub stub(op, mode); |
- JumpPatchSite patch_site(masm_); // unbound, signals no inlined smi code. |
- CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); |
- patch_site.EmitPatchInfo(); |
- context()->Plug(r0); |
+ JumpPatchSite patch_site(masm_); // Unbound, signals no inlined smi code. |
+ { |
+ Assembler::BlockConstPoolScope scope(masm_); |
+ CallIC(stub.GetCode(isolate()), expr->BinaryOperationFeedbackId()); |
+ patch_site.EmitPatchInfo(); |
+ } |
+ context()->Plug(x0); |
} |
void FullCodeGenerator::EmitAssignment(Expression* expr) { |
- // Invalid left-hand sides are rewritten by the parser to have a 'throw |
+ // Invalid left-hand sides are rewritten to have a 'throw |
// ReferenceError' on the left-hand side. |
if (!expr->IsValidLeftHandSide()) { |
VisitForEffect(expr); |
@@ -2432,20 +2128,22 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) { |
break; |
} |
case NAMED_PROPERTY: { |
- __ push(r0); // Preserve value. |
+ __ Push(x0); // Preserve value. |
VisitForAccumulatorValue(prop->obj()); |
- __ mov(r1, r0); |
- __ pop(r0); // Restore value. |
- __ mov(r2, Operand(prop->key()->AsLiteral()->value())); |
+ // TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid |
+ // this copy. |
+ __ Mov(x1, x0); |
+ __ Pop(x0); // Restore value. |
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value())); |
CallStoreIC(); |
break; |
} |
case KEYED_PROPERTY: { |
- __ push(r0); // Preserve value. |
+ __ Push(x0); // Preserve value. |
VisitForStackValue(prop->obj()); |
VisitForAccumulatorValue(prop->key()); |
- __ mov(r1, r0); |
- __ Pop(r0, r2); // r0 = restored value. |
+ __ Mov(x1, x0); |
+ __ Pop(x2, x0); |
Handle<Code> ic = is_classic_mode() |
? isolate()->builtins()->KeyedStoreIC_Initialize() |
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); |
@@ -2453,24 +2151,28 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) { |
break; |
} |
} |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
void FullCodeGenerator::EmitVariableAssignment(Variable* var, |
Token::Value op) { |
+ ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment"); |
if (var->IsUnallocated()) { |
// Global var, const, or let. |
- __ mov(r2, Operand(var->name())); |
- __ ldr(r1, GlobalObjectOperand()); |
+ __ Mov(x2, Operand(var->name())); |
+ __ Ldr(x1, GlobalObjectMemOperand()); |
CallStoreIC(); |
+ |
} else if (op == Token::INIT_CONST) { |
// Const initializers need a write barrier. |
ASSERT(!var->IsParameter()); // No const parameters. |
if (var->IsStackLocal()) { |
- __ ldr(r1, StackOperand(var)); |
- __ CompareRoot(r1, Heap::kTheHoleValueRootIndex); |
- __ str(result_register(), StackOperand(var), eq); |
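+ // A64 has no conditionally executed stores, so the ARM str(..., eq) above |
+ // becomes an explicit branch around the store. |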
+ Label skip; |
+ __ Ldr(x1, StackOperand(var)); |
+ __ JumpIfNotRoot(x1, Heap::kTheHoleValueRootIndex, &skip); |
+ __ Str(result_register(), StackOperand(var)); |
+ __ Bind(&skip); |
} else { |
ASSERT(var->IsContextSlot() || var->IsLookupSlot()); |
// Like var declarations, const declarations are hoisted to function |
@@ -2478,39 +2180,38 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, |
// able to drill a hole to that function context, even from inside a |
// 'with' context. We thus bypass the normal static scope lookup for |
// var->IsContextSlot(). |
- __ push(r0); |
- __ mov(r0, Operand(var->name())); |
- __ Push(cp, r0); // Context and name. |
+ __ Push(x0); |
+ __ Mov(x0, Operand(var->name())); |
+ __ Push(cp, x0); // Context and name. |
__ CallRuntime(Runtime::kInitializeConstContextSlot, 3); |
} |
} else if (var->mode() == LET && op != Token::INIT_LET) { |
// Non-initializing assignment to let variable needs a write barrier. |
if (var->IsLookupSlot()) { |
- __ push(r0); // Value. |
- __ mov(r1, Operand(var->name())); |
- __ mov(r0, Operand(Smi::FromInt(language_mode()))); |
- __ Push(cp, r1, r0); // Context, name, strict mode. |
+ __ Push(x0, cp); // Value, context. |
+ __ Mov(x11, Operand(var->name())); |
+ __ Mov(x10, Operand(Smi::FromInt(language_mode()))); |
+ __ Push(x11, x10); // Name, strict mode. |
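+ // The four arguments reach Runtime::kStoreContextSlot deepest-first: |
+ // value, context, name, strict mode. |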
__ CallRuntime(Runtime::kStoreContextSlot, 4); |
} else { |
ASSERT(var->IsStackAllocated() || var->IsContextSlot()); |
Label assign; |
- MemOperand location = VarOperand(var, r1); |
- __ ldr(r3, location); |
- __ CompareRoot(r3, Heap::kTheHoleValueRootIndex); |
- __ b(ne, &assign); |
- __ mov(r3, Operand(var->name())); |
- __ push(r3); |
+ MemOperand location = VarOperand(var, x1); |
+ __ Ldr(x10, location); |
+ __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign); |
+ __ Mov(x10, Operand(var->name())); |
+ __ Push(x10); |
__ CallRuntime(Runtime::kThrowReferenceError, 1); |
// Perform the assignment. |
- __ bind(&assign); |
- __ str(result_register(), location); |
+ __ Bind(&assign); |
+ __ Str(result_register(), location); |
if (var->IsContextSlot()) { |
// RecordWrite may destroy all its register arguments. |
- __ mov(r3, result_register()); |
+ __ Mov(x10, result_register()); |
int offset = Context::SlotOffset(var->index()); |
__ RecordWriteContextSlot( |
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs); |
+ x1, offset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs); |
} |
} |
@@ -2518,27 +2219,29 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, |
// Assignment to var or initializing assignment to let/const |
// in harmony mode. |
if (var->IsStackAllocated() || var->IsContextSlot()) { |
- MemOperand location = VarOperand(var, r1); |
- if (generate_debug_code_ && op == Token::INIT_LET) { |
- // Check for an uninitialized let binding. |
- __ ldr(r2, location); |
- __ CompareRoot(r2, Heap::kTheHoleValueRootIndex); |
+ MemOperand location = VarOperand(var, x1); |
+ if (FLAG_debug_code && op == Token::INIT_LET) { |
+ __ Ldr(x10, location); |
+ __ CompareRoot(x10, Heap::kTheHoleValueRootIndex); |
__ Check(eq, kLetBindingReInitialization); |
} |
// Perform the assignment. |
- __ str(r0, location); |
+ __ Str(x0, location); |
if (var->IsContextSlot()) { |
- __ mov(r3, r0); |
+ __ Mov(x10, x0); |
int offset = Context::SlotOffset(var->index()); |
__ RecordWriteContextSlot( |
- r1, offset, r3, r2, kLRHasBeenSaved, kDontSaveFPRegs); |
+ x1, offset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs); |
} |
} else { |
ASSERT(var->IsLookupSlot()); |
- __ push(r0); // Value. |
- __ mov(r1, Operand(var->name())); |
- __ mov(r0, Operand(Smi::FromInt(language_mode()))); |
- __ Push(cp, r1, r0); // Context, name, strict mode. |
+ __ Mov(x11, Operand(var->name())); |
+ __ Mov(x10, Operand(Smi::FromInt(language_mode()))); |
+ // jssp[0] : mode. |
+ // jssp[8] : name. |
+ // jssp[16] : context. |
+ // jssp[24] : value. |
+ __ Push(x0, cp, x11, x10); |
__ CallRuntime(Runtime::kStoreContextSlot, 4); |
} |
} |
@@ -2547,6 +2250,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, |
void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { |
+ ASM_LOCATION("FullCodeGenerator::EmitNamedPropertyAssignment"); |
// Assignment to a property, using a named store IC. |
Property* prop = expr->target()->AsProperty(); |
ASSERT(prop != NULL); |
@@ -2554,22 +2258,24 @@ void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) { |
// Record source code position before IC call. |
SetSourcePosition(expr->position()); |
- __ mov(r2, Operand(prop->key()->AsLiteral()->value())); |
- __ pop(r1); |
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value())); |
+ __ Pop(x1); |
CallStoreIC(expr->AssignmentFeedbackId()); |
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { |
+ ASM_LOCATION("FullCodeGenerator::EmitKeyedPropertyAssignment"); |
// Assignment to a property, using a keyed store IC. |
// Record source code position before IC call. |
SetSourcePosition(expr->position()); |
- __ Pop(r2, r1); // r1 = key. |
+ // TODO(all): Could we pass this in registers rather than on the stack? |
+ __ Pop(x1, x2); // Key and object holding the property. |
Handle<Code> ic = is_classic_mode() |
? isolate()->builtins()->KeyedStoreIC_Initialize() |
@@ -2577,7 +2283,7 @@ void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) { |
CallIC(ic, expr->AssignmentFeedbackId()); |
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
@@ -2589,13 +2295,13 @@ void FullCodeGenerator::VisitProperty(Property* expr) { |
VisitForAccumulatorValue(expr->obj()); |
EmitNamedPropertyLoad(expr); |
PrepareForBailoutForId(expr->LoadId(), TOS_REG); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} else { |
VisitForStackValue(expr->obj()); |
VisitForAccumulatorValue(expr->key()); |
- __ pop(r1); |
+ __ Pop(x1); |
EmitKeyedPropertyLoad(expr); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
} |
@@ -2605,13 +2311,14 @@ void FullCodeGenerator::CallIC(Handle<Code> code, |
ic_total_count_++; |
// All calls must have a predictable size in full-codegen code to ensure that |
// the debugger can patch them correctly. |
- __ Call(code, RelocInfo::CODE_TARGET, ast_id, al, |
- NEVER_INLINE_TARGET_ADDRESS); |
+ __ Call(code, RelocInfo::CODE_TARGET, ast_id); |
} |
// Code common for calls using the IC. |
void FullCodeGenerator::EmitCallWithIC(Call* expr) { |
+ ASM_LOCATION("EmitCallWithIC"); |
+ |
Expression* callee = expr->expression(); |
ZoneList<Expression*>* args = expr->arguments(); |
int arg_count = args->length(); |
@@ -2630,13 +2337,12 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) { |
} else { |
// Load the function from the receiver. |
ASSERT(callee->IsProperty()); |
- __ ldr(r0, MemOperand(sp, 0)); |
+ __ Peek(x0, 0); |
EmitNamedPropertyLoad(callee->AsProperty()); |
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); |
// Push the target function under the receiver. |
- __ ldr(ip, MemOperand(sp, 0)); |
- __ push(ip); |
- __ str(r0, MemOperand(sp, kPointerSize)); |
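+ // Rather than duplicating the receiver and overwriting the copy as the |
+ // ARM code did, pop the receiver and push function and receiver back in |
+ // the right order. |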
+ __ Pop(x10); |
+ __ Push(x0, x10); |
flags = CALL_AS_METHOD; |
} |
@@ -2650,15 +2356,15 @@ void FullCodeGenerator::EmitCallWithIC(Call* expr) { |
// Record source position for debugger. |
SetSourcePosition(expr->position()); |
CallFunctionStub stub(arg_count, flags); |
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
+ __ Peek(x1, (arg_count + 1) * kPointerSize); |
__ CallStub(&stub); |
RecordJSReturnSite(expr); |
// Restore context register. |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
- context()->DropAndPlug(1, r0); |
+ context()->DropAndPlug(1, x0); |
} |
@@ -2674,14 +2380,13 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr, |
// Load the function from the receiver. |
ASSERT(callee->IsProperty()); |
- __ ldr(r1, MemOperand(sp, 0)); |
+ __ Peek(x1, 0); |
EmitKeyedPropertyLoad(callee->AsProperty()); |
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); |
// Push the target function under the receiver. |
- __ ldr(ip, MemOperand(sp, 0)); |
- __ push(ip); |
- __ str(r0, MemOperand(sp, kPointerSize)); |
+ __ Pop(x10); |
+ __ Push(x0, x10); |
{ PreservePositionScope scope(masm()->positions_recorder()); |
for (int i = 0; i < arg_count; i++) { |
@@ -2692,14 +2397,14 @@ void FullCodeGenerator::EmitKeyedCallWithIC(Call* expr, |
// Record source position for debugger. |
SetSourcePosition(expr->position()); |
CallFunctionStub stub(arg_count, CALL_AS_METHOD); |
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
+ __ Peek(x1, (arg_count + 1) * kPointerSize); |
__ CallStub(&stub); |
RecordJSReturnSite(expr); |
// Restore context register. |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
- context()->DropAndPlug(1, r0); |
+ context()->DropAndPlug(1, x0); |
} |
@@ -2718,40 +2423,46 @@ void FullCodeGenerator::EmitCallWithStub(Call* expr) { |
Handle<Object> uninitialized = |
TypeFeedbackInfo::UninitializedSentinel(isolate()); |
StoreFeedbackVectorSlot(expr->CallFeedbackSlot(), uninitialized); |
- __ Move(r2, FeedbackVector()); |
- __ mov(r3, Operand(Smi::FromInt(expr->CallFeedbackSlot()))); |
+ __ LoadObject(x2, FeedbackVector()); |
+ __ Mov(x3, Operand(Smi::FromInt(expr->CallFeedbackSlot()))); |
// Record call targets in unoptimized code. |
CallFunctionStub stub(arg_count, RECORD_CALL_TARGET); |
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
+ __ Peek(x1, (arg_count + 1) * kXRegSizeInBytes); |
__ CallStub(&stub); |
RecordJSReturnSite(expr); |
// Restore context register. |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
- context()->DropAndPlug(1, r0); |
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ context()->DropAndPlug(1, x0); |
} |
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) { |
- // r4: copy of the first argument or undefined if it doesn't exist. |
+ ASM_LOCATION("FullCodeGenerator::EmitResolvePossiblyDirectEval"); |
+ // Prepare to push a copy of the first argument or undefined if it doesn't |
+ // exist. |
if (arg_count > 0) { |
- __ ldr(r4, MemOperand(sp, arg_count * kPointerSize)); |
+ __ Peek(x10, arg_count * kXRegSizeInBytes); |
} else { |
- __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); |
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex); |
} |
- // r3: the receiver of the enclosing function. |
+ // Prepare to push the receiver of the enclosing function. |
int receiver_offset = 2 + info_->scope()->num_parameters(); |
- __ ldr(r3, MemOperand(fp, receiver_offset * kPointerSize)); |
+ __ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize)); |
+ |
+ // Push. |
+ __ Push(x10, x11); |
- // r2: the language mode. |
- __ mov(r2, Operand(Smi::FromInt(language_mode()))); |
+ // Prepare to push the language mode. |
+ __ Mov(x10, Operand(Smi::FromInt(language_mode()))); |
+ // Prepare to push the start position of the scope the call resides in. |
+ __ Mov(x11, Operand(Smi::FromInt(scope()->start_position()))); |
- // r1: the start position of the scope the calls resides in. |
- __ mov(r1, Operand(Smi::FromInt(scope()->start_position()))); |
+ // Push. |
+ __ Push(x10, x11); |
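+ // The fifth argument, a copy of the function being called, was already |
+ // pushed by VisitCall before entering this helper. |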
// Do the runtime call. |
- __ Push(r4, r3, r2, r1); |
__ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5); |
} |
@@ -2775,10 +2486,11 @@ void FullCodeGenerator::VisitCall(Call* expr) { |
ZoneList<Expression*>* args = expr->arguments(); |
int arg_count = args->length(); |
- { PreservePositionScope pos_scope(masm()->positions_recorder()); |
+ { |
+ PreservePositionScope pos_scope(masm()->positions_recorder()); |
VisitForStackValue(callee); |
- __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
- __ push(r2); // Reserved receiver slot. |
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex); |
+ __ Push(x10); // Reserved receiver slot. |
// Push the arguments. |
for (int i = 0; i < arg_count; i++) { |
@@ -2787,25 +2499,27 @@ void FullCodeGenerator::VisitCall(Call* expr) { |
// Push a copy of the function (found below the arguments) and |
// resolve eval. |
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
- __ push(r1); |
+ __ Peek(x10, (arg_count + 1) * kPointerSize); |
+ __ Push(x10); |
EmitResolvePossiblyDirectEval(arg_count); |
- // The runtime call returns a pair of values in r0 (function) and |
- // r1 (receiver). Touch up the stack with the right values. |
- __ str(r0, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
- __ str(r1, MemOperand(sp, arg_count * kPointerSize)); |
+ // The runtime call returns a pair of values in x0 (function) and |
+ // x1 (receiver). Touch up the stack with the right values. |
+ __ PokePair(x1, x0, arg_count * kPointerSize); |
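+ // PokePair stores x1 at the given offset from jssp and x0 one slot above |
+ // it, replacing the two separate str instructions needed on ARM. |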
} |
// Record source position for debugger. |
SetSourcePosition(expr->position()); |
+ |
+ // Call the evaluated function. |
CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS); |
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
+ __ Peek(x1, (arg_count + 1) * kXRegSizeInBytes); |
__ CallStub(&stub); |
RecordJSReturnSite(expr); |
// Restore context register. |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
- context()->DropAndPlug(1, r0); |
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ context()->DropAndPlug(1, x0); |
+ |
} else if (call_type == Call::GLOBAL_CALL) { |
EmitCallWithIC(expr); |
@@ -2820,29 +2534,29 @@ void FullCodeGenerator::VisitCall(Call* expr) { |
EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done); |
} |
- __ bind(&slow); |
- // Call the runtime to find the function to call (returned in r0) |
- // and the object holding it (returned in edx). |
- ASSERT(!context_register().is(r2)); |
- __ mov(r2, Operand(proxy->name())); |
- __ Push(context_register(), r2); |
+ __ Bind(&slow); |
+ // Call the runtime to find the function to call (returned in x0) |
+ // and the object holding it (returned in x1). |
+ __ Push(context_register()); |
+ __ Mov(x10, Operand(proxy->name())); |
+ __ Push(x10); |
__ CallRuntime(Runtime::kLoadContextSlot, 2); |
- __ Push(r0, r1); // Function, receiver. |
+ __ Push(x0, x1); // Function, receiver. |
// If fast case code has been generated, emit code to push the |
// function and receiver and have the slow path jump around this |
// code. |
if (done.is_linked()) { |
Label call; |
- __ b(&call); |
- __ bind(&done); |
+ __ B(&call); |
+ __ Bind(&done); |
// Push function. |
- __ push(r0); |
+ __ Push(x0); |
// The receiver is implicitly the global receiver. Indicate this |
- // by passing the hole to the call function stub. |
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); |
- __ push(r1); |
- __ bind(&call); |
+ // by passing undefined to the call function stub. |
+ __ LoadRoot(x1, Heap::kUndefinedValueRootIndex); |
+ __ Push(x1); |
+ __ Bind(&call); |
} |
// The receiver is either the global receiver or an object found |
@@ -2858,14 +2572,15 @@ void FullCodeGenerator::VisitCall(Call* expr) { |
} else { |
EmitKeyedCallWithIC(expr, property->key()); |
} |
+ |
} else { |
ASSERT(call_type == Call::OTHER_CALL); |
// Call to an arbitrary expression not handled specially above. |
{ PreservePositionScope scope(masm()->positions_recorder()); |
VisitForStackValue(callee); |
} |
- __ LoadRoot(r1, Heap::kUndefinedValueRootIndex); |
- __ push(r1); |
+ __ LoadRoot(x1, Heap::kUndefinedValueRootIndex); |
+ __ Push(x1); |
// Emit function call. |
EmitCallWithStub(expr); |
} |
@@ -2899,21 +2614,21 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) { |
// constructor invocation. |
SetSourcePosition(expr->position()); |
- // Load function and argument count into r1 and r0. |
- __ mov(r0, Operand(arg_count)); |
- __ ldr(r1, MemOperand(sp, arg_count * kPointerSize)); |
+ // Load function and argument count into x1 and x0. |
+ __ Mov(x0, arg_count); |
+ __ Peek(x1, arg_count * kXRegSizeInBytes); |
// Record call targets in unoptimized code. |
Handle<Object> uninitialized = |
TypeFeedbackInfo::UninitializedSentinel(isolate()); |
StoreFeedbackVectorSlot(expr->CallNewFeedbackSlot(), uninitialized); |
- __ Move(r2, FeedbackVector()); |
- __ mov(r3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot()))); |
+ __ LoadObject(x2, FeedbackVector()); |
+ __ Mov(x3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot()))); |
CallConstructStub stub(RECORD_CALL_TARGET); |
__ Call(stub.GetCode(isolate()), RelocInfo::CONSTRUCT_CALL); |
PrepareForBailoutForId(expr->ReturnId(), TOS_REG); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
@@ -2931,8 +2646,7 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) { |
&if_true, &if_false, &fall_through); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
- __ SmiTst(r0); |
- Split(eq, if_true, if_false, fall_through); |
+ __ TestAndSplit(x0, kSmiTagMask, if_true, if_false, fall_through); |
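+ // TestAndSplit branches to if_true when (x0 & kSmiTagMask) == 0 and to |
+ // if_false otherwise, mirroring the ARM SmiTst/Split(eq) sequence. |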
context()->Plug(if_true, if_false); |
} |
@@ -2952,8 +2666,8 @@ void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) { |
&if_true, &if_false, &fall_through); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
- __ NonNegativeSmiTst(r0); |
- Split(eq, if_true, if_false, fall_through); |
+ __ TestAndSplit(x0, kSmiTagMask | (0x80000000UL << kSmiShift), if_true, |
+ if_false, fall_through); |
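+ // The mask combines the smi tag bits with bit 63, the sign bit of the |
+ // 32-bit payload after tagging; both must be clear. |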
context()->Plug(if_true, if_false); |
} |
@@ -2972,19 +2686,16 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) { |
context()->PrepareTest(&materialize_true, &materialize_false, |
&if_true, &if_false, &fall_through); |
- __ JumpIfSmi(r0, if_false); |
- __ LoadRoot(ip, Heap::kNullValueRootIndex); |
- __ cmp(r0, ip); |
- __ b(eq, if_true); |
- __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); |
+ __ JumpIfSmi(x0, if_false); |
+ __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true); |
+ __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset)); |
// Undetectable objects behave like undefined when tested with typeof. |
- __ ldrb(r1, FieldMemOperand(r2, Map::kBitFieldOffset)); |
- __ tst(r1, Operand(1 << Map::kIsUndetectable)); |
- __ b(ne, if_false); |
- __ ldrb(r1, FieldMemOperand(r2, Map::kInstanceTypeOffset)); |
- __ cmp(r1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
- __ b(lt, if_false); |
- __ cmp(r1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); |
+ __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset)); |
+ __ Tbnz(x11, Map::kIsUndetectable, if_false); |
+ __ Ldrb(x12, FieldMemOperand(x10, Map::kInstanceTypeOffset)); |
+ __ Cmp(x12, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); |
+ __ B(lt, if_false); |
+ __ Cmp(x12, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
Split(le, if_true, if_false, fall_through); |
@@ -3005,8 +2716,8 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) { |
context()->PrepareTest(&materialize_true, &materialize_false, |
&if_true, &if_false, &fall_through); |
- __ JumpIfSmi(r0, if_false); |
- __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE); |
+ __ JumpIfSmi(x0, if_false); |
+ __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
Split(ge, if_true, if_false, fall_through); |
@@ -3015,6 +2726,7 @@ void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) { |
void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) { |
+ ASM_LOCATION("FullCodeGenerator::EmitIsUndetectableObject"); |
ZoneList<Expression*>* args = expr->arguments(); |
ASSERT(args->length() == 1); |
@@ -3027,10 +2739,10 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) { |
context()->PrepareTest(&materialize_true, &materialize_false, |
&if_true, &if_false, &fall_through); |
- __ JumpIfSmi(r0, if_false); |
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); |
- __ ldrb(r1, FieldMemOperand(r1, Map::kBitFieldOffset)); |
- __ tst(r1, Operand(1 << Map::kIsUndetectable)); |
+ __ JumpIfSmi(x0, if_false); |
+ __ Ldr(x10, FieldMemOperand(x0, HeapObject::kMapOffset)); |
+ __ Ldrb(x11, FieldMemOperand(x10, Map::kBitFieldOffset)); |
+ __ Tst(x11, 1 << Map::kIsUndetectable); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
Split(ne, if_true, if_false, fall_through); |
@@ -3042,7 +2754,6 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( |
CallRuntime* expr) { |
ZoneList<Expression*>* args = expr->arguments(); |
ASSERT(args->length() == 1); |
- |
VisitForAccumulatorValue(args->at(0)); |
Label materialize_true, materialize_false, skip_lookup; |
@@ -3052,74 +2763,91 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( |
context()->PrepareTest(&materialize_true, &materialize_false, |
&if_true, &if_false, &fall_through); |
- __ AssertNotSmi(r0); |
+ Register object = x0; |
+ __ AssertNotSmi(object); |
- __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); |
- __ ldrb(ip, FieldMemOperand(r1, Map::kBitField2Offset)); |
- __ tst(ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); |
- __ b(ne, &skip_lookup); |
+ Register map = x10; |
+ Register bitfield2 = x11; |
+ __ Ldr(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
+ __ Ldrb(bitfield2, FieldMemOperand(map, Map::kBitField2Offset)); |
+ __ Tbnz(bitfield2, Map::kStringWrapperSafeForDefaultValueOf, &skip_lookup); |
// Check for fast case object. Generate false result for slow case object. |
- __ ldr(r2, FieldMemOperand(r0, JSObject::kPropertiesOffset)); |
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); |
- __ LoadRoot(ip, Heap::kHashTableMapRootIndex); |
- __ cmp(r2, ip); |
- __ b(eq, if_false); |
- |
- // Look for valueOf name in the descriptor array, and indicate false if |
- // found. Since we omit an enumeration index check, if it is added via a |
- // transition that shares its descriptor array, this is a false positive. |
- Label entry, loop, done; |
+ Register props = x12; |
+ Register props_map = x12; |
+ Register hash_table_map = x13; |
+ __ Ldr(props, FieldMemOperand(object, JSObject::kPropertiesOffset)); |
+ __ Ldr(props_map, FieldMemOperand(props, HeapObject::kMapOffset)); |
+ __ LoadRoot(hash_table_map, Heap::kHashTableMapRootIndex); |
+ __ Cmp(props_map, hash_table_map); |
+ __ B(eq, if_false); |
+ |
+ // Look for valueOf name in the descriptor array, and indicate false if found. |
+ // Since we omit an enumeration index check, if it is added via a transition |
+ // that shares its descriptor array, this is a false positive. |
+ Label loop, done; |
// Skip loop if no descriptors are valid. |
- __ NumberOfOwnDescriptors(r3, r1); |
- __ cmp(r3, Operand::Zero()); |
- __ b(eq, &done); |
- |
- __ LoadInstanceDescriptors(r1, r4); |
- // r4: descriptor array. |
- // r3: valid entries in the descriptor array. |
- __ mov(ip, Operand(DescriptorArray::kDescriptorSize)); |
- __ mul(r3, r3, ip); |
+ Register descriptors = x12; |
+ Register descriptors_length = x13; |
+ __ NumberOfOwnDescriptors(descriptors_length, map); |
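+ // Cbz branches directly if the register is zero, replacing the ARM
+ // cmp/b(eq) pair.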
+ __ Cbz(descriptors_length, &done); |
+ |
+ __ LoadInstanceDescriptors(map, descriptors); |
+ |
+ // Compute the size, in pointers, of the valid part of the descriptor
+ // array; the array's end is derived from it below.
+ Register descriptors_end = x14; |
+ __ Mov(x15, DescriptorArray::kDescriptorSize); |
+ __ Mul(descriptors_length, descriptors_length, x15); |
// Calculate location of the first key name. |
- __ add(r4, r4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag)); |
+ __ Add(descriptors, descriptors, |
+ DescriptorArray::kFirstOffset - kHeapObjectTag); |
// Calculate the end of the descriptor array. |
- __ mov(r2, r4); |
- __ add(r2, r2, Operand::PointerOffsetFromSmiKey(r3)); |
+ __ Add(descriptors_end, descriptors, |
+ Operand(descriptors_length, LSL, kPointerSizeLog2)); |
// Loop through all the keys in the descriptor array. If one of these is the |
// string "valueOf" the result is false. |
- // The use of ip to store the valueOf string assumes that it is not otherwise |
- // used in the loop below. |
- __ mov(ip, Operand(isolate()->factory()->value_of_string())); |
- __ jmp(&entry); |
- __ bind(&loop); |
- __ ldr(r3, MemOperand(r4, 0)); |
- __ cmp(r3, ip); |
- __ b(eq, if_false); |
- __ add(r4, r4, Operand(DescriptorArray::kDescriptorSize * kPointerSize)); |
- __ bind(&entry); |
- __ cmp(r4, Operand(r2)); |
- __ b(ne, &loop); |
- |
- __ bind(&done); |
+ // TODO(all): optimise this loop to combine the add and ldr into an |
+ // addressing mode. |
+ Register valueof_string = x1; |
+ __ Mov(valueof_string, Operand(isolate()->factory()->value_of_string())); |
+ __ Bind(&loop); |
+ __ Ldr(x15, MemOperand(descriptors)); |
+ __ Cmp(x15, valueof_string); |
+ __ B(eq, if_false); |
+ __ Add(descriptors, descriptors, |
+ DescriptorArray::kDescriptorSize * kPointerSize); |
+ __ Cmp(descriptors, descriptors_end); |
+ __ B(ne, &loop); |
+ |
+ __ Bind(&done); |
// Set the bit in the map to indicate that there is no local valueOf field. |
- __ ldrb(r2, FieldMemOperand(r1, Map::kBitField2Offset)); |
- __ orr(r2, r2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf)); |
- __ strb(r2, FieldMemOperand(r1, Map::kBitField2Offset)); |
- |
- __ bind(&skip_lookup); |
- |
- // If a valueOf property is not found on the object check that its |
- // prototype is the un-modified String prototype. If not result is false. |
- __ ldr(r2, FieldMemOperand(r1, Map::kPrototypeOffset)); |
- __ JumpIfSmi(r2, if_false); |
- __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); |
- __ ldr(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); |
- __ ldr(r3, FieldMemOperand(r3, GlobalObject::kNativeContextOffset)); |
- __ ldr(r3, ContextOperand(r3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX)); |
- __ cmp(r2, r3); |
+ __ Ldrb(x2, FieldMemOperand(map, Map::kBitField2Offset)); |
+ __ Orr(x2, x2, 1 << Map::kStringWrapperSafeForDefaultValueOf); |
+ __ Strb(x2, FieldMemOperand(map, Map::kBitField2Offset)); |
+ |
+ __ Bind(&skip_lookup); |
+ |
+ // If a valueOf property is not found on the object, check that its
+ // prototype is the unmodified String prototype. If not, the result is false.
+ Register prototype = x1; |
+ Register global_idx = x2; |
+ Register native_context = x2; |
+ Register string_proto = x3; |
+ Register proto_map = x4; |
+ __ Ldr(prototype, FieldMemOperand(map, Map::kPrototypeOffset)); |
+ __ JumpIfSmi(prototype, if_false); |
+ __ Ldr(proto_map, FieldMemOperand(prototype, HeapObject::kMapOffset)); |
+ __ Ldr(global_idx, GlobalObjectMemOperand()); |
+ __ Ldr(native_context, |
+ FieldMemOperand(global_idx, GlobalObject::kNativeContextOffset)); |
+ __ Ldr(string_proto, |
+ ContextMemOperand(native_context, |
+ Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX)); |
+ __ Cmp(proto_map, string_proto); |
+ |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
Split(eq, if_true, if_false, fall_through); |
@@ -3140,8 +2868,8 @@ void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) { |
context()->PrepareTest(&materialize_true, &materialize_false, |
&if_true, &if_false, &fall_through); |
- __ JumpIfSmi(r0, if_false); |
- __ CompareObjectType(r0, r1, r2, JS_FUNCTION_TYPE); |
+ __ JumpIfSmi(x0, if_false); |
+ __ CompareObjectType(x0, x10, x11, JS_FUNCTION_TYPE); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
Split(eq, if_true, if_false, fall_through); |
@@ -3162,14 +2890,15 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) { |
context()->PrepareTest(&materialize_true, &materialize_false, |
&if_true, &if_false, &fall_through); |
- __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK); |
- __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
- __ ldr(r1, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); |
- __ cmp(r2, Operand(0x80000000)); |
- __ cmp(r1, Operand(0x00000000), eq); |
+ // Only a HeapNumber can be -0.0, so return false if we have something else. |
+ __ CheckMap(x0, x1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK); |
+ |
+ // Test the bit pattern. |
+ __ Ldr(x10, FieldMemOperand(x0, HeapNumber::kValueOffset)); |
+ __ Cmp(x10, 1); // Set V on 0x8000000000000000. |
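+ // The bit pattern of -0.0 is 0x8000000000000000 (INT64_MIN), and
+ // subtracting 1 from INT64_MIN is the only case where this Cmp sets the
+ // overflow flag, so the 'vs' split below selects exactly -0.0.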
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
- Split(eq, if_true, if_false, fall_through); |
+ Split(vs, if_true, if_false, fall_through); |
context()->Plug(if_true, if_false); |
} |
@@ -3188,8 +2917,8 @@ void FullCodeGenerator::EmitIsArray(CallRuntime* expr) { |
context()->PrepareTest(&materialize_true, &materialize_false, |
&if_true, &if_false, &fall_through); |
- __ JumpIfSmi(r0, if_false); |
- __ CompareObjectType(r0, r1, r1, JS_ARRAY_TYPE); |
+ __ JumpIfSmi(x0, if_false); |
+ __ CompareObjectType(x0, x10, x11, JS_ARRAY_TYPE); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
Split(eq, if_true, if_false, fall_through); |
@@ -3210,8 +2939,8 @@ void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) { |
context()->PrepareTest(&materialize_true, &materialize_false, |
&if_true, &if_false, &fall_through); |
- __ JumpIfSmi(r0, if_false); |
- __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); |
+ __ JumpIfSmi(x0, if_false); |
+ __ CompareObjectType(x0, x10, x11, JS_REGEXP_TYPE); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
Split(eq, if_true, if_false, fall_through); |
@@ -3231,16 +2960,19 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) { |
&if_true, &if_false, &fall_through); |
// Get the frame pointer for the calling frame. |
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
+ __ Ldr(x2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
// Skip the arguments adaptor frame if it exists. |
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kContextOffset)); |
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
- __ ldr(r2, MemOperand(r2, StandardFrameConstants::kCallerFPOffset), eq); |
+ Label check_frame_marker; |
+ __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kContextOffset)); |
+ __ Cmp(x1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
+ __ B(ne, &check_frame_marker); |
+ __ Ldr(x2, MemOperand(x2, StandardFrameConstants::kCallerFPOffset)); |
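+ // (A64 has no conditionally executed loads, so the ARM 'ldr ..., eq' is
+ // replaced by an explicit branch around the load.)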
// Check the marker in the calling frame. |
- __ ldr(r1, MemOperand(r2, StandardFrameConstants::kMarkerOffset)); |
- __ cmp(r1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); |
+ __ Bind(&check_frame_marker); |
+ __ Ldr(x1, MemOperand(x2, StandardFrameConstants::kMarkerOffset)); |
+ __ Cmp(x1, Operand(Smi::FromInt(StackFrame::CONSTRUCT))); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
Split(eq, if_true, if_false, fall_through); |
@@ -3263,8 +2995,8 @@ void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) { |
context()->PrepareTest(&materialize_true, &materialize_false, |
&if_true, &if_false, &fall_through); |
- __ pop(r1); |
- __ cmp(r0, r1); |
+ __ Pop(x1); |
+ __ Cmp(x0, x1); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
Split(eq, if_true, if_false, fall_through); |
@@ -3276,37 +3008,39 @@ void FullCodeGenerator::EmitArguments(CallRuntime* expr) { |
ZoneList<Expression*>* args = expr->arguments(); |
ASSERT(args->length() == 1); |
- // ArgumentsAccessStub expects the key in edx and the formal |
- // parameter count in r0. |
+ // ArgumentsAccessStub expects the key in x1 and the formal parameter
+ // count in x0.
VisitForAccumulatorValue(args->at(0)); |
- __ mov(r1, r0); |
- __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters()))); |
+ __ Mov(x1, x0); |
+ __ Mov(x0, Operand(Smi::FromInt(info_->scope()->num_parameters()))); |
ArgumentsAccessStub stub(ArgumentsAccessStub::READ_ELEMENT); |
__ CallStub(&stub); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) { |
ASSERT(expr->arguments()->length() == 0); |
- |
+ Label exit; |
// Get the number of formal parameters. |
- __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters()))); |
+ __ Mov(x0, Operand(Smi::FromInt(info_->scope()->num_parameters()))); |
// Check if the calling frame is an arguments adaptor frame. |
- __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
- __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); |
- __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
+ __ Ldr(x12, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
+ __ Ldr(x13, MemOperand(x12, StandardFrameConstants::kContextOffset)); |
+ __ Cmp(x13, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
+ __ B(ne, &exit); |
// Arguments adaptor case: Read the arguments length from the |
// adaptor frame. |
- __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset), eq); |
+ __ Ldr(x0, MemOperand(x12, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
- context()->Plug(r0); |
+ __ Bind(&exit); |
+ context()->Plug(x0); |
} |
void FullCodeGenerator::EmitClassOf(CallRuntime* expr) { |
+ ASM_LOCATION("FullCodeGenerator::EmitClassOf"); |
ZoneList<Expression*>* args = expr->arguments(); |
ASSERT(args->length() == 1); |
Label done, null, function, non_function_constructor; |
@@ -3314,56 +3048,58 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) { |
VisitForAccumulatorValue(args->at(0)); |
// If the object is a smi, we return null. |
- __ JumpIfSmi(r0, &null); |
+ __ JumpIfSmi(x0, &null); |
// Check that the object is a JS object but take special care of JS |
// functions to make sure they have 'Function' as their class. |
// Assume that there are only two callable types, and one of them is at |
// either end of the type range for JS object types. Saves extra comparisons. |
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); |
- __ CompareObjectType(r0, r0, r1, FIRST_SPEC_OBJECT_TYPE); |
- // Map is now in r0. |
- __ b(lt, &null); |
+ __ CompareObjectType(x0, x10, x11, FIRST_SPEC_OBJECT_TYPE); |
+ // x10: object's map. |
+ // x11: object's type. |
+ __ B(lt, &null); |
STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE == |
FIRST_SPEC_OBJECT_TYPE + 1); |
- __ b(eq, &function); |
+ __ B(eq, &function); |
- __ cmp(r1, Operand(LAST_SPEC_OBJECT_TYPE)); |
+ __ Cmp(x11, LAST_SPEC_OBJECT_TYPE); |
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == |
LAST_SPEC_OBJECT_TYPE - 1); |
- __ b(eq, &function); |
+ __ B(eq, &function); |
// Assume that there is no larger type. |
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1); |
// Check if the constructor in the map is a JS function. |
- __ ldr(r0, FieldMemOperand(r0, Map::kConstructorOffset)); |
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE); |
- __ b(ne, &non_function_constructor); |
+ __ Ldr(x12, FieldMemOperand(x10, Map::kConstructorOffset)); |
+ __ JumpIfNotObjectType(x12, x13, x14, JS_FUNCTION_TYPE, |
+ &non_function_constructor); |
- // r0 now contains the constructor function. Grab the |
+ // x12 now contains the constructor function. Grab the |
// instance class name from there. |
- __ ldr(r0, FieldMemOperand(r0, JSFunction::kSharedFunctionInfoOffset)); |
- __ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kInstanceClassNameOffset)); |
- __ b(&done); |
+ __ Ldr(x13, FieldMemOperand(x12, JSFunction::kSharedFunctionInfoOffset)); |
+ __ Ldr(x0, |
+ FieldMemOperand(x13, SharedFunctionInfo::kInstanceClassNameOffset)); |
+ __ B(&done); |
// Functions have class 'Function'. |
- __ bind(&function); |
- __ LoadRoot(r0, Heap::kfunction_class_stringRootIndex); |
- __ jmp(&done); |
+ __ Bind(&function); |
+ __ LoadRoot(x0, Heap::kfunction_class_stringRootIndex); |
+ __ B(&done); |
// Objects with a non-function constructor have class 'Object'. |
- __ bind(&non_function_constructor); |
- __ LoadRoot(r0, Heap::kObject_stringRootIndex); |
- __ jmp(&done); |
+ __ Bind(&non_function_constructor); |
+ __ LoadRoot(x0, Heap::kObject_stringRootIndex); |
+ __ B(&done); |
// Non-JS objects have class null. |
- __ bind(&null); |
- __ LoadRoot(r0, Heap::kNullValueRootIndex); |
+ __ Bind(&null); |
+ __ LoadRoot(x0, Heap::kNullValueRootIndex); |
// All done. |
- __ bind(&done); |
+ __ Bind(&done); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
@@ -3384,8 +3120,8 @@ void FullCodeGenerator::EmitLog(CallRuntime* expr) { |
} |
// Finally, we're expected to leave a value on the top of the stack. |
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); |
- context()->Plug(r0); |
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex); |
+ context()->Plug(x0); |
} |
@@ -3398,7 +3134,7 @@ void FullCodeGenerator::EmitSubString(CallRuntime* expr) { |
VisitForStackValue(args->at(1)); |
VisitForStackValue(args->at(2)); |
__ CallStub(&stub); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
@@ -3412,24 +3148,25 @@ void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) { |
VisitForStackValue(args->at(2)); |
VisitForStackValue(args->at(3)); |
__ CallStub(&stub); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { |
+ ASM_LOCATION("FullCodeGenerator::EmitValueOf"); |
ZoneList<Expression*>* args = expr->arguments(); |
ASSERT(args->length() == 1); |
VisitForAccumulatorValue(args->at(0)); // Load the object. |
Label done; |
// If the object is a smi return the object. |
- __ JumpIfSmi(r0, &done); |
+ __ JumpIfSmi(x0, &done); |
// If the object is not a value type, return the object. |
- __ CompareObjectType(r0, r1, r1, JS_VALUE_TYPE); |
- __ ldr(r0, FieldMemOperand(r0, JSValue::kValueOffset), eq); |
+ __ JumpIfNotObjectType(x0, x10, x11, JS_VALUE_TYPE, &done); |
+ __ Ldr(x0, FieldMemOperand(x0, JSValue::kValueOffset)); |
- __ bind(&done); |
- context()->Plug(r0); |
+ __ Bind(&done); |
+ context()->Plug(x0); |
} |
@@ -3442,41 +3179,40 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { |
VisitForAccumulatorValue(args->at(0)); // Load the object. |
Label runtime, done, not_date_object; |
- Register object = r0; |
- Register result = r0; |
- Register scratch0 = r9; |
- Register scratch1 = r1; |
+ Register object = x0; |
+ Register result = x0; |
+ Register stamp_addr = x10; |
+ Register stamp_cache = x11; |
__ JumpIfSmi(object, ¬_date_object); |
- __ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE); |
- __ b(ne, ¬_date_object); |
+ __ JumpIfNotObjectType(object, x10, x10, JS_DATE_TYPE, ¬_date_object); |
if (index->value() == 0) { |
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); |
- __ jmp(&done); |
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); |
+ __ B(&done); |
} else { |
if (index->value() < JSDate::kFirstUncachedField) { |
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
- __ mov(scratch1, Operand(stamp)); |
- __ ldr(scratch1, MemOperand(scratch1)); |
- __ ldr(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset)); |
- __ cmp(scratch1, scratch0); |
- __ b(ne, &runtime); |
- __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset + |
+ __ Mov(x10, Operand(stamp)); |
+ __ Ldr(stamp_addr, MemOperand(x10)); |
+ __ Ldr(stamp_cache, FieldMemOperand(object, JSDate::kCacheStampOffset)); |
+ __ Cmp(stamp_addr, stamp_cache); |
+ __ B(ne, &runtime); |
+ __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset + |
kPointerSize * index->value())); |
- __ jmp(&done); |
+ __ B(&done); |
} |
- __ bind(&runtime); |
- __ PrepareCallCFunction(2, scratch1); |
- __ mov(r1, Operand(index)); |
+ |
+ __ Bind(&runtime); |
+ __ Mov(x1, Operand(index)); |
__ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2); |
- __ jmp(&done); |
+ __ B(&done); |
} |
- __ bind(¬_date_object); |
+ __ Bind(¬_date_object); |
__ CallRuntime(Runtime::kThrowNotDateError, 0); |
- __ bind(&done); |
- context()->Plug(r0); |
+ __ Bind(&done); |
+ context()->Plug(x0); |
} |
@@ -3484,31 +3220,28 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) { |
ZoneList<Expression*>* args = expr->arguments(); |
ASSERT_EQ(3, args->length()); |
- Register string = r0; |
- Register index = r1; |
- Register value = r2; |
+ Register string = x0; |
+ Register index = x1; |
+ Register value = x2; |
+ Register scratch = x10; |
VisitForStackValue(args->at(1)); // index |
VisitForStackValue(args->at(2)); // value |
VisitForAccumulatorValue(args->at(0)); // string |
- __ Pop(index, value); |
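+ // The value was pushed last, so it is popped first; Pop assigns its
+ // first argument from the top of the stack.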
+ __ Pop(value, index); |
if (FLAG_debug_code) { |
- __ SmiTst(value); |
- __ Check(eq, kNonSmiValue); |
- __ SmiTst(index); |
- __ Check(eq, kNonSmiIndex); |
- __ SmiUntag(index, index); |
+ __ AssertSmi(value, kNonSmiValue); |
+ __ AssertSmi(index, kNonSmiIndex); |
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; |
- __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type); |
- __ SmiTag(index, index); |
+ __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch, |
+ one_byte_seq_type); |
} |
- __ SmiUntag(value, value); |
- __ add(ip, |
- string, |
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
- __ strb(value, MemOperand(ip, index, LSR, kSmiTagSize)); |
+ __ Add(scratch, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
+ __ SmiUntag(value); |
+ __ SmiUntag(index); |
+ __ Strb(value, MemOperand(scratch, index)); |
context()->Plug(string); |
} |
@@ -3517,46 +3250,41 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) { |
ZoneList<Expression*>* args = expr->arguments(); |
ASSERT_EQ(3, args->length()); |
- Register string = r0; |
- Register index = r1; |
- Register value = r2; |
+ Register string = x0; |
+ Register index = x1; |
+ Register value = x2; |
+ Register scratch = x10; |
VisitForStackValue(args->at(1)); // index |
VisitForStackValue(args->at(2)); // value |
VisitForAccumulatorValue(args->at(0)); // string |
- __ Pop(index, value); |
+ __ Pop(value, index); |
if (FLAG_debug_code) { |
- __ SmiTst(value); |
- __ Check(eq, kNonSmiValue); |
- __ SmiTst(index); |
- __ Check(eq, kNonSmiIndex); |
- __ SmiUntag(index, index); |
+ __ AssertSmi(value, kNonSmiValue); |
+ __ AssertSmi(index, kNonSmiIndex); |
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; |
- __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type); |
- __ SmiTag(index, index); |
+ __ EmitSeqStringSetCharCheck(string, index, kIndexIsSmi, scratch, |
+ two_byte_seq_type); |
} |
- __ SmiUntag(value, value); |
- __ add(ip, |
- string, |
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
- STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); |
- __ strh(value, MemOperand(ip, index)); |
+ __ Add(scratch, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag); |
+ __ SmiUntag(value); |
+ __ SmiUntag(index); |
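+ // Scale the untagged index by two (LSL #1) to address 16-bit characters.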
+ __ Strh(value, MemOperand(scratch, index, LSL, 1)); |
context()->Plug(string); |
} |
- |
void FullCodeGenerator::EmitMathPow(CallRuntime* expr) { |
- // Load the arguments on the stack and call the runtime function. |
+ // Load the arguments on the stack and call the MathPow stub. |
ZoneList<Expression*>* args = expr->arguments(); |
ASSERT(args->length() == 2); |
VisitForStackValue(args->at(0)); |
VisitForStackValue(args->at(1)); |
MathPowStub stub(MathPowStub::ON_STACK); |
__ CallStub(&stub); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
@@ -3565,70 +3293,77 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) { |
ASSERT(args->length() == 2); |
VisitForStackValue(args->at(0)); // Load the object. |
VisitForAccumulatorValue(args->at(1)); // Load the value. |
- __ pop(r1); // r0 = value. r1 = object. |
+ __ Pop(x1); |
+ // x0 = value. |
+ // x1 = object. |
Label done; |
// If the object is a smi, return the value. |
- __ JumpIfSmi(r1, &done); |
+ __ JumpIfSmi(x1, &done); |
// If the object is not a value type, return the value. |
- __ CompareObjectType(r1, r2, r2, JS_VALUE_TYPE); |
- __ b(ne, &done); |
+ __ JumpIfNotObjectType(x1, x10, x11, JS_VALUE_TYPE, &done); |
// Store the value. |
- __ str(r0, FieldMemOperand(r1, JSValue::kValueOffset)); |
- // Update the write barrier. Save the value as it will be |
+ __ Str(x0, FieldMemOperand(x1, JSValue::kValueOffset)); |
+ // Update the write barrier. Save the value as it will be |
// overwritten by the write barrier code and is needed afterward. |
- __ mov(r2, r0); |
+ __ Mov(x10, x0); |
__ RecordWriteField( |
- r1, JSValue::kValueOffset, r2, r3, kLRHasBeenSaved, kDontSaveFPRegs); |
+ x1, JSValue::kValueOffset, x10, x11, kLRHasBeenSaved, kDontSaveFPRegs); |
- __ bind(&done); |
- context()->Plug(r0); |
+ __ Bind(&done); |
+ context()->Plug(x0); |
} |
void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) { |
ZoneList<Expression*>* args = expr->arguments(); |
ASSERT_EQ(args->length(), 1); |
- // Load the argument into r0 and call the stub. |
+ |
+ // Load the argument into x0 and call the stub. |
VisitForAccumulatorValue(args->at(0)); |
NumberToStringStub stub; |
__ CallStub(&stub); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) { |
ZoneList<Expression*>* args = expr->arguments(); |
ASSERT(args->length() == 1); |
+ |
VisitForAccumulatorValue(args->at(0)); |
Label done; |
- StringCharFromCodeGenerator generator(r0, r1); |
+ Register code = x0; |
+ Register result = x1; |
+ |
+ StringCharFromCodeGenerator generator(code, result); |
generator.GenerateFast(masm_); |
- __ jmp(&done); |
+ __ B(&done); |
NopRuntimeCallHelper call_helper; |
generator.GenerateSlow(masm_, call_helper); |
- __ bind(&done); |
- context()->Plug(r1); |
+ __ Bind(&done); |
+ context()->Plug(result); |
} |
void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) { |
ZoneList<Expression*>* args = expr->arguments(); |
ASSERT(args->length() == 2); |
+ |
VisitForStackValue(args->at(0)); |
VisitForAccumulatorValue(args->at(1)); |
- Register object = r1; |
- Register index = r0; |
- Register result = r3; |
+ Register object = x1; |
+ Register index = x0; |
+ Register result = x3; |
- __ pop(object); |
+ __ Pop(object); |
Label need_conversion; |
Label index_out_of_range; |
@@ -3641,24 +3376,23 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) { |
&index_out_of_range, |
STRING_INDEX_IS_NUMBER); |
generator.GenerateFast(masm_); |
- __ jmp(&done); |
+ __ B(&done); |
- __ bind(&index_out_of_range); |
- // When the index is out of range, the spec requires us to return |
- // NaN. |
+ __ Bind(&index_out_of_range); |
+ // When the index is out of range, the spec requires us to return NaN. |
__ LoadRoot(result, Heap::kNanValueRootIndex); |
- __ jmp(&done); |
+ __ B(&done); |
- __ bind(&need_conversion); |
+ __ Bind(&need_conversion); |
// Load the undefined value into the result register, which will |
// trigger conversion. |
__ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
- __ jmp(&done); |
+ __ B(&done); |
NopRuntimeCallHelper call_helper; |
generator.GenerateSlow(masm_, call_helper); |
- __ bind(&done); |
+ __ Bind(&done); |
context()->Plug(result); |
} |
@@ -3666,60 +3400,62 @@ void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) { |
void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) { |
ZoneList<Expression*>* args = expr->arguments(); |
ASSERT(args->length() == 2); |
+ |
VisitForStackValue(args->at(0)); |
VisitForAccumulatorValue(args->at(1)); |
- Register object = r1; |
- Register index = r0; |
- Register scratch = r3; |
- Register result = r0; |
+ Register object = x1; |
+ Register index = x0; |
+ Register result = x0; |
- __ pop(object); |
+ __ Pop(object); |
Label need_conversion; |
Label index_out_of_range; |
Label done; |
StringCharAtGenerator generator(object, |
index, |
- scratch, |
+ x3, |
result, |
&need_conversion, |
&need_conversion, |
&index_out_of_range, |
STRING_INDEX_IS_NUMBER); |
generator.GenerateFast(masm_); |
- __ jmp(&done); |
+ __ B(&done); |
- __ bind(&index_out_of_range); |
+ __ Bind(&index_out_of_range); |
// When the index is out of range, the spec requires us to return |
// the empty string. |
__ LoadRoot(result, Heap::kempty_stringRootIndex); |
- __ jmp(&done); |
+ __ B(&done); |
- __ bind(&need_conversion); |
- // Move smi zero into the result register, which will trigger |
- // conversion. |
- __ mov(result, Operand(Smi::FromInt(0))); |
- __ jmp(&done); |
+ __ Bind(&need_conversion); |
+ // Move smi zero into the result register, which will trigger conversion. |
+ __ Mov(result, Operand(Smi::FromInt(0))); |
+ __ B(&done); |
NopRuntimeCallHelper call_helper; |
generator.GenerateSlow(masm_, call_helper); |
- __ bind(&done); |
+ __ Bind(&done); |
context()->Plug(result); |
} |
void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) { |
+ ASM_LOCATION("FullCodeGenerator::EmitStringAdd"); |
ZoneList<Expression*>* args = expr->arguments(); |
ASSERT_EQ(2, args->length()); |
+ |
VisitForStackValue(args->at(0)); |
VisitForAccumulatorValue(args->at(1)); |
- __ pop(r1); |
+ __ Pop(x1); |
StringAddStub stub(STRING_ADD_CHECK_BOTH, NOT_TENURED); |
__ CallStub(&stub); |
- context()->Plug(r0); |
+ |
+ context()->Plug(x0); |
} |
@@ -3731,7 +3467,7 @@ void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) { |
StringCompareStub stub; |
__ CallStub(&stub); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
@@ -3741,7 +3477,7 @@ void FullCodeGenerator::EmitMathLog(CallRuntime* expr) { |
ASSERT(args->length() == 1); |
VisitForStackValue(args->at(0)); |
__ CallRuntime(Runtime::kMath_log, 1); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
@@ -3751,11 +3487,12 @@ void FullCodeGenerator::EmitMathSqrt(CallRuntime* expr) { |
ASSERT(args->length() == 1); |
VisitForStackValue(args->at(0)); |
__ CallRuntime(Runtime::kMath_sqrt, 1); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) { |
+ ASM_LOCATION("FullCodeGenerator::EmitCallFunction"); |
ZoneList<Expression*>* args = expr->arguments(); |
ASSERT(args->length() >= 2); |
@@ -3767,23 +3504,22 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) { |
Label runtime, done; |
// Check for non-function argument (including proxy). |
- __ JumpIfSmi(r0, &runtime); |
- __ CompareObjectType(r0, r1, r1, JS_FUNCTION_TYPE); |
- __ b(ne, &runtime); |
+ __ JumpIfSmi(x0, &runtime); |
+ __ JumpIfNotObjectType(x0, x1, x1, JS_FUNCTION_TYPE, &runtime); |
- // InvokeFunction requires the function in r1. Move it in there. |
- __ mov(r1, result_register()); |
+ // InvokeFunction requires the function in x1. Move it in there. |
+ __ Mov(x1, x0); |
ParameterCount count(arg_count); |
- __ InvokeFunction(r1, count, CALL_FUNCTION, NullCallWrapper()); |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
- __ jmp(&done); |
+ __ InvokeFunction(x1, count, CALL_FUNCTION, NullCallWrapper()); |
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ B(&done); |
- __ bind(&runtime); |
- __ push(r0); |
+ __ Bind(&runtime); |
+ __ Push(x0); |
__ CallRuntime(Runtime::kCall, args->length()); |
- __ bind(&done); |
+ __ Bind(&done); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
@@ -3794,10 +3530,9 @@ void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) { |
VisitForStackValue(args->at(0)); |
VisitForStackValue(args->at(1)); |
VisitForAccumulatorValue(args->at(2)); |
- __ pop(r1); |
- __ pop(r2); |
+ __ Pop(x1, x2); |
__ CallStub(&stub); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
@@ -3811,42 +3546,41 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { |
isolate()->native_context()->jsfunction_result_caches()); |
if (jsfunction_result_caches->length() <= cache_id) { |
__ Abort(kAttemptToUseUndefinedCache); |
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); |
- context()->Plug(r0); |
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex); |
+ context()->Plug(x0); |
return; |
} |
VisitForAccumulatorValue(args->at(1)); |
- Register key = r0; |
- Register cache = r1; |
- __ ldr(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); |
- __ ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset)); |
- __ ldr(cache, ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX)); |
- __ ldr(cache, |
+ Register key = x0; |
+ Register cache = x1; |
+ __ Ldr(cache, GlobalObjectMemOperand()); |
+ __ Ldr(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset)); |
+ __ Ldr(cache, ContextMemOperand(cache, |
+ Context::JSFUNCTION_RESULT_CACHES_INDEX)); |
+ __ Ldr(cache, |
FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id))); |
+ Label done; |
+ __ Ldrsw(x2, UntagSmiFieldMemOperand(cache, |
+ JSFunctionResultCache::kFingerOffset)); |
+ __ Add(x3, cache, FixedArray::kHeaderSize - kHeapObjectTag); |
+ __ Add(x3, x3, Operand(x2, LSL, kPointerSizeLog2)); |
- Label done, not_found; |
- __ ldr(r2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset)); |
- // r2 now holds finger offset as a smi. |
- __ add(r3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
- // r3 now points to the start of fixed array elements. |
- __ ldr(r2, MemOperand::PointerAddressFromSmiKey(r3, r2, PreIndex)); |
- // Note side effect of PreIndex: r3 now points to the key of the pair. |
- __ cmp(key, r2); |
- __ b(ne, ¬_found); |
+ // Load the key and data from the cache. |
+ __ Ldp(x2, x3, MemOperand(x3)); |
- __ ldr(r0, MemOperand(r3, kPointerSize)); |
- __ b(&done); |
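+ // If the key matches, conditionally select (CmovX) the cached value into
+ // x0 and take the fast exit; otherwise fall through to the runtime call.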
+ __ Cmp(key, x2); |
+ __ CmovX(x0, x3, eq); |
+ __ B(eq, &done); |
- __ bind(¬_found); |
// Call runtime to perform the lookup. |
__ Push(cache, key); |
__ CallRuntime(Runtime::kGetFromCache, 2); |
- __ bind(&done); |
- context()->Plug(r0); |
+ __ Bind(&done); |
+ context()->Plug(x0); |
} |
@@ -3861,8 +3595,8 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) { |
context()->PrepareTest(&materialize_true, &materialize_false, |
&if_true, &if_false, &fall_through); |
- __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset)); |
- __ tst(r0, Operand(String::kContainsCachedArrayIndexMask)); |
+ __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset)); |
+ __ Tst(x10, String::kContainsCachedArrayIndexMask); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
Split(eq, if_true, if_false, fall_through); |
@@ -3875,190 +3609,177 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) { |
ASSERT(args->length() == 1); |
VisitForAccumulatorValue(args->at(0)); |
- __ AssertString(r0); |
+ __ AssertString(x0); |
- __ ldr(r0, FieldMemOperand(r0, String::kHashFieldOffset)); |
- __ IndexFromHash(r0, r0); |
+ __ Ldr(x10, FieldMemOperand(x0, String::kHashFieldOffset)); |
+ __ IndexFromHash(x10, x0); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { |
- Label bailout, done, one_char_separator, long_separator, non_trivial_array, |
- not_size_one_array, loop, empty_separator_loop, one_char_separator_loop, |
- one_char_separator_loop_entry, long_separator_loop; |
+ ASM_LOCATION("FullCodeGenerator::EmitFastAsciiArrayJoin"); |
+ |
ZoneList<Expression*>* args = expr->arguments(); |
ASSERT(args->length() == 2); |
VisitForStackValue(args->at(1)); |
VisitForAccumulatorValue(args->at(0)); |
- // All aliases of the same register have disjoint lifetimes. |
- Register array = r0; |
- Register elements = no_reg; // Will be r0. |
- Register result = no_reg; // Will be r0. |
- Register separator = r1; |
- Register array_length = r2; |
- Register result_pos = no_reg; // Will be r2 |
- Register string_length = r3; |
- Register string = r4; |
- Register element = r5; |
- Register elements_end = r6; |
- Register scratch = r9; |
- |
- // Separator operand is on the stack. |
- __ pop(separator); |
+ Register array = x0; |
+ Register result = x0; |
+ Register elements = x1; |
+ Register element = x2; |
+ Register separator = x3; |
+ Register array_length = x4; |
+ Register result_pos = x5; |
+ Register map = x6; |
+ Register string_length = x10; |
+ Register elements_end = x11; |
+ Register string = x12; |
+ Register scratch1 = x13; |
+ Register scratch2 = x14; |
+ Register scratch3 = x7; |
+ Register separator_length = x15; |
+ |
+ Label bailout, done, one_char_separator, long_separator, |
+ non_trivial_array, not_size_one_array, loop, |
+ empty_separator_loop, one_char_separator_loop, |
+ one_char_separator_loop_entry, long_separator_loop; |
+ |
+ // The separator operand is on the stack. |
+ __ Pop(separator); |
// Check that the array is a JSArray. |
__ JumpIfSmi(array, &bailout); |
- __ CompareObjectType(array, scratch, array_length, JS_ARRAY_TYPE); |
- __ b(ne, &bailout); |
+ __ JumpIfNotObjectType(array, map, scratch1, JS_ARRAY_TYPE, &bailout); |
// Check that the array has fast elements. |
- __ CheckFastElements(scratch, array_length, &bailout); |
+ __ CheckFastElements(map, scratch1, &bailout); |
// If the array has length zero, return the empty string. |
- __ ldr(array_length, FieldMemOperand(array, JSArray::kLengthOffset)); |
- __ SmiUntag(array_length, SetCC); |
- __ b(ne, &non_trivial_array); |
- __ LoadRoot(r0, Heap::kempty_stringRootIndex); |
- __ b(&done); |
- |
- __ bind(&non_trivial_array); |
+ // Load and untag the length of the array. |
+ // It is an unsigned value, so we can skip sign extension. |
+ // We assume little endianness. |
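+ // (On little endian, the 32-bit payload of a 64-bit smi occupies the high
+ // word of the field, so a 32-bit load from UntagSmiFieldMemOperand reads
+ // the value already untagged.)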
+ __ Ldrsw(array_length, |
+ UntagSmiFieldMemOperand(array, JSArray::kLengthOffset)); |
+ __ Cbnz(array_length, &non_trivial_array); |
+ __ LoadRoot(result, Heap::kempty_stringRootIndex); |
+ __ B(&done); |
+ __ Bind(&non_trivial_array); |
// Get the FixedArray containing array's elements. |
- elements = array; |
- __ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset)); |
- array = no_reg; // End of array's live range. |
+ __ Ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset)); |
// Check that all array elements are sequential ASCII strings, and |
- // accumulate the sum of their lengths, as a smi-encoded value. |
- __ mov(string_length, Operand::Zero()); |
- __ add(element, |
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
- __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2)); |
+ // accumulate the sum of their lengths. |
+ __ Mov(string_length, 0); |
+ __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag); |
+ __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2)); |
// Loop condition: while (element < elements_end). |
// Live values in registers: |
// elements: Fixed array of strings. |
// array_length: Length of the fixed array of strings (not smi) |
// separator: Separator string |
- // string_length: Accumulated sum of string lengths (smi). |
+ // string_length: Accumulated sum of string lengths (not smi). |
// element: Current array element. |
// elements_end: Array end. |
- if (generate_debug_code_) { |
- __ cmp(array_length, Operand::Zero()); |
+ if (FLAG_debug_code) { |
+ __ Cmp(array_length, 0);
__ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin); |
} |
- __ bind(&loop); |
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); |
+ __ Bind(&loop); |
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex)); |
__ JumpIfSmi(string, &bailout); |
- __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset)); |
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout); |
- __ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset)); |
- __ add(string_length, string_length, Operand(scratch), SetCC); |
- __ b(vs, &bailout); |
- __ cmp(element, elements_end); |
- __ b(lt, &loop); |
+ __ Ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset)); |
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); |
+ __ Ldrsw(scratch1, |
+ UntagSmiFieldMemOperand(string, SeqOneByteString::kLengthOffset)); |
+ __ Adds(string_length, string_length, scratch1); |
+ __ B(vs, &bailout); |
+ __ Cmp(element, elements_end); |
+ __ B(lt, &loop); |
// If array_length is 1, return elements[0], a string. |
- __ cmp(array_length, Operand(1)); |
- __ b(ne, ¬_size_one_array); |
- __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize)); |
- __ b(&done); |
+ __ Cmp(array_length, 1); |
+ __ B(ne, ¬_size_one_array); |
+ __ Ldr(result, FieldMemOperand(elements, FixedArray::kHeaderSize)); |
+ __ B(&done); |
- __ bind(¬_size_one_array); |
+ __ Bind(¬_size_one_array); |
// Live values in registers: |
// separator: Separator string |
- // array_length: Length of the array. |
- // string_length: Sum of string lengths (smi). |
+ // array_length: Length of the array (not smi). |
+ // string_length: Sum of string lengths (not smi). |
// elements: FixedArray of strings. |
// Check that the separator is a flat ASCII string. |
__ JumpIfSmi(separator, &bailout); |
- __ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset)); |
- __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
- __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout); |
+ __ Ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset)); |
+ __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
+ __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); |
// Add (separator length times array_length) - separator length to the |
- // string_length to get the length of the result string. array_length is not |
- // smi but the other values are, so the result is a smi |
- __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); |
- __ sub(string_length, string_length, Operand(scratch)); |
- __ smull(scratch, ip, array_length, scratch); |
- // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are |
- // zero. |
- __ cmp(ip, Operand::Zero()); |
- __ b(ne, &bailout); |
- __ tst(scratch, Operand(0x80000000)); |
- __ b(ne, &bailout); |
- __ add(string_length, string_length, Operand(scratch), SetCC); |
- __ b(vs, &bailout); |
- __ SmiUntag(string_length); |
- |
- // Get first element in the array to free up the elements register to be used |
- // for the result. |
- __ add(element, |
- elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
- result = elements; // End of live range for elements. |
- elements = no_reg; |
+ // string_length to get the length of the result string. |
+ // Load the separator length as untagged. |
+ // We assume little endianness, and that the length is positive. |
+ __ Ldrsw(separator_length, |
+ UntagSmiFieldMemOperand(separator, |
+ SeqOneByteString::kLengthOffset)); |
+ __ Sub(string_length, string_length, separator_length); |
+ __ Umaddl(string_length, array_length.W(), separator_length.W(), |
+ string_length); |
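+ // Umaddl is a 32x32->64-bit unsigned multiply-add, so the product of two
+ // 32-bit lengths cannot overflow the 64-bit accumulator; the explicit
+ // overflow checks from the ARM version are unnecessary here.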
+ |
+ // Get first element in the array. |
+ __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag); |
// Live values in registers: |
// element: First array element |
// separator: Separator string |
// string_length: Length of result string (not smi) |
- // array_length: Length of the array. |
- __ AllocateAsciiString(result, |
- string_length, |
- scratch, |
- string, // used as scratch |
- elements_end, // used as scratch |
+ // array_length: Length of the array (not smi). |
+ __ AllocateAsciiString(result, string_length, scratch1, scratch2, scratch3, |
&bailout); |
+ |
// Prepare for looping. Set up elements_end to end of the array. Set |
// result_pos to the position of the result where to write the first |
// character. |
- __ add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2)); |
- result_pos = array_length; // End of live range for array_length. |
- array_length = no_reg; |
- __ add(result_pos, |
- result, |
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
+ // TODO(all): useless unless AllocateAsciiString trashes the register. |
+ __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2)); |
+ __ Add(result_pos, result, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
// Check the length of the separator. |
- __ ldr(scratch, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); |
- __ cmp(scratch, Operand(Smi::FromInt(1))); |
- __ b(eq, &one_char_separator); |
- __ b(gt, &long_separator); |
+ __ Cmp(separator_length, 1); |
+ __ B(eq, &one_char_separator); |
+ __ B(gt, &long_separator); |
// Empty separator case |
- __ bind(&empty_separator_loop); |
+ __ Bind(&empty_separator_loop); |
// Live values in registers: |
// result_pos: the position to which we are currently copying characters. |
// element: Current array element. |
// elements_end: Array end. |
// Copy next array element to the result. |
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); |
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); |
- __ SmiUntag(string_length); |
- __ add(string, |
- string, |
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
- __ CopyBytes(string, result_pos, string_length, scratch); |
- __ cmp(element, elements_end); |
- __ b(lt, &empty_separator_loop); // End while (element < elements_end). |
- ASSERT(result.is(r0)); |
- __ b(&done); |
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex)); |
+ __ Ldrsw(string_length, |
+ UntagSmiFieldMemOperand(string, String::kLengthOffset)); |
+ __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
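+ // Note that CopyBytes takes (dst, src, ...) here, the reverse of the ARM
+ // helper's (src, dst, ...) argument order.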
+ __ CopyBytes(result_pos, string, string_length, scratch1); |
+ __ Cmp(element, elements_end); |
+ __ B(lt, &empty_separator_loop); // End while (element < elements_end). |
+ __ B(&done); |
// One-character separator case |
- __ bind(&one_char_separator); |
+ __ Bind(&one_char_separator); |
// Replace separator with its ASCII character value. |
- __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize)); |
+ __ Ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize)); |
// Jump into the loop after the code that copies the separator, so the first |
// element is not preceded by a separator |
- __ jmp(&one_char_separator_loop_entry); |
+ __ B(&one_char_separator_loop_entry); |
- __ bind(&one_char_separator_loop); |
+ __ Bind(&one_char_separator_loop); |
// Live values in registers: |
// result_pos: the position to which we are currently copying characters. |
// element: Current array element. |
@@ -4066,25 +3787,22 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { |
// separator: Single separator ASCII char (in lower byte). |
// Copy the separator character to the result. |
- __ strb(separator, MemOperand(result_pos, 1, PostIndex)); |
+ __ Strb(separator, MemOperand(result_pos, 1, PostIndex)); |
// Copy next array element to the result. |
- __ bind(&one_char_separator_loop_entry); |
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); |
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); |
- __ SmiUntag(string_length); |
- __ add(string, |
- string, |
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
- __ CopyBytes(string, result_pos, string_length, scratch); |
- __ cmp(element, elements_end); |
- __ b(lt, &one_char_separator_loop); // End while (element < elements_end). |
- ASSERT(result.is(r0)); |
- __ b(&done); |
+ __ Bind(&one_char_separator_loop_entry); |
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex)); |
+ __ Ldrsw(string_length, |
+ UntagSmiFieldMemOperand(string, String::kLengthOffset)); |
+ __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
+ __ CopyBytes(result_pos, string, string_length, scratch1); |
+ __ Cmp(element, elements_end); |
+ __ B(lt, &one_char_separator_loop); // End while (element < elements_end). |
+ __ B(&done); |
// Long separator case (separator is more than one character). Entry is at the |
// label long_separator below. |
- __ bind(&long_separator_loop); |
+ __ Bind(&long_separator_loop); |
// Live values in registers: |
// result_pos: the position to which we are currently copying characters. |
// element: Current array element. |
@@ -4092,30 +3810,27 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { |
// separator: Separator string. |
// Copy the separator to the result. |
- __ ldr(string_length, FieldMemOperand(separator, String::kLengthOffset)); |
- __ SmiUntag(string_length); |
- __ add(string, |
- separator, |
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
- __ CopyBytes(string, result_pos, string_length, scratch); |
- |
- __ bind(&long_separator); |
- __ ldr(string, MemOperand(element, kPointerSize, PostIndex)); |
- __ ldr(string_length, FieldMemOperand(string, String::kLengthOffset)); |
- __ SmiUntag(string_length); |
- __ add(string, |
- string, |
- Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
- __ CopyBytes(string, result_pos, string_length, scratch); |
- __ cmp(element, elements_end); |
- __ b(lt, &long_separator_loop); // End while (element < elements_end). |
- ASSERT(result.is(r0)); |
- __ b(&done); |
- |
- __ bind(&bailout); |
- __ LoadRoot(r0, Heap::kUndefinedValueRootIndex); |
- __ bind(&done); |
- context()->Plug(r0); |
+ // TODO(all): hoist next two instructions. |
+ __ Ldrsw(string_length, |
+ UntagSmiFieldMemOperand(separator, String::kLengthOffset)); |
+ __ Add(string, separator, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
+ __ CopyBytes(result_pos, string, string_length, scratch1); |
+ |
+ __ Bind(&long_separator); |
+ __ Ldr(string, MemOperand(element, kPointerSize, PostIndex)); |
+ __ Ldrsw(string_length, |
+ UntagSmiFieldMemOperand(string, String::kLengthOffset)); |
+ __ Add(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
+ __ CopyBytes(result_pos, string, string_length, scratch1); |
+ __ Cmp(element, elements_end); |
+ __ B(lt, &long_separator_loop); // End while (element < elements_end). |
+ __ B(&done); |
+ |
+ __ Bind(&bailout); |
+ // Returning undefined will force slower code to handle it. |
+ __ LoadRoot(result, Heap::kUndefinedValueRootIndex); |
+ __ Bind(&done); |
+ context()->Plug(result); |
} |
@@ -4127,26 +3842,24 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { |
return; |
} |
  Comment cmnt(masm_, "[ CallRuntime");
ZoneList<Expression*>* args = expr->arguments(); |
int arg_count = args->length(); |
if (expr->is_jsruntime()) { |
// Push the builtins object as the receiver. |
- __ ldr(r0, GlobalObjectOperand()); |
- __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset)); |
- __ push(r0); |
+ __ Ldr(x10, GlobalObjectMemOperand()); |
+ __ Ldr(x0, FieldMemOperand(x10, GlobalObject::kBuiltinsOffset)); |
+ __ Push(x0); |
// Load the function from the receiver. |
- __ mov(r2, Operand(expr->name())); |
+ __ Mov(x2, Operand(expr->name()));
CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId()); |
// Push the target function under the receiver. |
- __ ldr(ip, MemOperand(sp, 0)); |
- __ push(ip); |
- __ str(r0, MemOperand(sp, kPointerSize)); |
+ __ Pop(x10); |
+ __ Push(x0, x10); |
- // Push the arguments ("left-to-right"). |
int arg_count = args->length(); |
for (int i = 0; i < arg_count; i++) { |
VisitForStackValue(args->at(i)); |
@@ -4155,13 +3868,13 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { |
// Record source position of the IC call. |
SetSourcePosition(expr->position()); |
CallFunctionStub stub(arg_count, NO_CALL_FUNCTION_FLAGS); |
- __ ldr(r1, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
+ __ Peek(x1, (arg_count + 1) * kPointerSize); |
__ CallStub(&stub); |
// Restore context register. |
- __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
- context()->DropAndPlug(1, r0); |
+ context()->DropAndPlug(1, x0); |
} else { |
// Push the arguments ("left-to-right"). |
for (int i = 0; i < arg_count; i++) { |
@@ -4170,7 +3883,7 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { |
// Call the C runtime function. |
__ CallRuntime(expr->function(), arg_count); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
} |
@@ -4187,22 +3900,22 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { |
VisitForStackValue(property->key()); |
StrictModeFlag strict_mode_flag = (language_mode() == CLASSIC_MODE) |
? kNonStrictMode : kStrictMode; |
- __ mov(r1, Operand(Smi::FromInt(strict_mode_flag))); |
- __ push(r1); |
+ __ Mov(x10, Operand(Smi::FromInt(strict_mode_flag))); |
+ __ Push(x10); |
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} else if (proxy != NULL) { |
Variable* var = proxy->var(); |
// Delete of an unqualified identifier is disallowed in strict mode |
// but "delete this" is allowed. |
ASSERT(language_mode() == CLASSIC_MODE || var->is_this()); |
if (var->IsUnallocated()) { |
- __ ldr(r2, GlobalObjectOperand()); |
- __ mov(r1, Operand(var->name())); |
- __ mov(r0, Operand(Smi::FromInt(kNonStrictMode))); |
- __ Push(r2, r1, r0); |
+ __ Ldr(x12, GlobalObjectMemOperand()); |
+ __ Mov(x11, Operand(var->name())); |
+ __ Mov(x10, Operand(Smi::FromInt(kNonStrictMode))); |
+ __ Push(x12, x11, x10); |
__ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} else if (var->IsStackAllocated() || var->IsContextSlot()) { |
// Result of deleting non-global, non-dynamic variables is false. |
// The subexpression does not have side effects. |
@@ -4210,11 +3923,10 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { |
} else { |
// Non-global variable. Call the runtime to try to delete from the |
// context where the variable was introduced. |
- ASSERT(!context_register().is(r2)); |
- __ mov(r2, Operand(var->name())); |
- __ Push(context_register(), r2); |
+ __ Mov(x2, Operand(var->name())); |
+ __ Push(context_register(), x2); |
__ CallRuntime(Runtime::kDeleteContextSlot, 2); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
} else { |
// Result of deleting non-property, non-variable reference is true. |
@@ -4223,15 +3935,14 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { |
context()->Plug(true); |
} |
break; |
} |
- |
case Token::VOID: { |
Comment cmnt(masm_, "[ UnaryOperation (VOID)"); |
VisitForEffect(expr->expression()); |
context()->Plug(Heap::kUndefinedValueRootIndex); |
break; |
} |
- |
case Token::NOT: { |
Comment cmnt(masm_, "[ UnaryOperation (NOT)"); |
if (context()->IsEffect()) { |
@@ -4247,40 +3958,42 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { |
test->fall_through()); |
context()->Plug(test->true_label(), test->false_label()); |
} else { |
- // We handle value contexts explicitly rather than simply visiting |
- // for control and plugging the control flow into the context, |
- // because we need to prepare a pair of extra administrative AST ids |
- // for the optimizing compiler. |
ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue()); |
+ // TODO(jbramley): This could be much more efficient using (for |
+ // example) the CSEL instruction. |
Label materialize_true, materialize_false, done; |
VisitForControl(expr->expression(), |
&materialize_false, |
&materialize_true, |
&materialize_true); |
- __ bind(&materialize_true); |
+ |
+ __ Bind(&materialize_true); |
PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS); |
- __ LoadRoot(r0, Heap::kTrueValueRootIndex); |
- if (context()->IsStackValue()) __ push(r0); |
- __ jmp(&done); |
- __ bind(&materialize_false); |
+ __ LoadRoot(result_register(), Heap::kTrueValueRootIndex); |
+ __ B(&done); |
+ |
+ __ Bind(&materialize_false); |
PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS); |
- __ LoadRoot(r0, Heap::kFalseValueRootIndex); |
- if (context()->IsStackValue()) __ push(r0); |
- __ bind(&done); |
+ __ LoadRoot(result_register(), Heap::kFalseValueRootIndex); |
+ __ B(&done); |
+ |
+ __ Bind(&done); |
+ if (context()->IsStackValue()) { |
+ __ Push(result_register()); |
+ } |
} |
break; |
} |
- |
case Token::TYPEOF: { |
Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)"); |
- { StackValueContext context(this); |
+ { |
+ StackValueContext context(this); |
VisitForTypeofValue(expr->expression()); |
} |
__ CallRuntime(Runtime::kTypeof, 1); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
break; |
} |
- |
default: |
UNREACHABLE(); |
} |
@@ -4318,19 +4031,19 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { |
} else { |
// Reserve space for result of postfix operation. |
if (expr->is_postfix() && !context()->IsEffect()) { |
- __ mov(ip, Operand(Smi::FromInt(0))); |
- __ push(ip); |
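+      // xzr reads as zero, and since kSmiTag is zero this is also the Smi |
+      // encoding of 0, so it can be pushed directly as the placeholder. |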
+ __ Push(xzr); |
} |
if (assign_type == NAMED_PROPERTY) { |
// Put the object both on the stack and in the accumulator. |
VisitForAccumulatorValue(prop->obj()); |
- __ push(r0); |
+ __ Push(x0); |
EmitNamedPropertyLoad(prop); |
} else { |
+ // KEYED_PROPERTY |
VisitForStackValue(prop->obj()); |
VisitForAccumulatorValue(prop->key()); |
- __ ldr(r1, MemOperand(sp, 0)); |
- __ push(r0); |
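+      // Leave the receiver on the stack; copy it into x1 for the load IC and |
+      // push the key so that it survives the property load. |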
+ __ Peek(x1, 0); |
+ __ Push(x0); |
EmitKeyedPropertyLoad(prop); |
} |
} |
@@ -4350,34 +4063,34 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { |
int count_value = expr->op() == Token::INC ? 1 : -1; |
if (ShouldInlineSmiCase(expr->op())) { |
Label slow; |
- patch_site.EmitJumpIfNotSmi(r0, &slow); |
+ patch_site.EmitJumpIfNotSmi(x0, &slow); |
// Save result for postfix expressions. |
if (expr->is_postfix()) { |
if (!context()->IsEffect()) { |
- // Save the result on the stack. If we have a named or keyed property |
- // we store the result under the receiver that is currently on top |
- // of the stack. |
+ // Save the result on the stack. If we have a named or keyed property we |
+ // store the result under the receiver that is currently on top of the |
+ // stack. |
switch (assign_type) { |
case VARIABLE: |
- __ push(r0); |
+ __ Push(x0); |
break; |
case NAMED_PROPERTY: |
- __ str(r0, MemOperand(sp, kPointerSize)); |
+          __ Poke(x0, kXRegSizeInBytes); |
break; |
case KEYED_PROPERTY: |
- __ str(r0, MemOperand(sp, 2 * kPointerSize)); |
+          __ Poke(x0, 2 * kXRegSizeInBytes); |
break; |
} |
} |
} |
- __ add(r0, r0, Operand(Smi::FromInt(count_value)), SetCC); |
- __ b(vc, &done); |
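+    // Adds sets the condition flags, so vc (no overflow) means the smi |
+    // increment stayed within the smi range. |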
+ __ Adds(x0, x0, Operand(Smi::FromInt(count_value))); |
+ __ B(vc, &done); |
// Call stub. Undo operation first. |
- __ sub(r0, r0, Operand(Smi::FromInt(count_value))); |
- __ jmp(&stub_call); |
- __ bind(&slow); |
+ __ Sub(x0, x0, Operand(Smi::FromInt(count_value))); |
+ __ B(&stub_call); |
+ __ Bind(&slow); |
} |
ToNumberStub convert_stub; |
__ CallStub(&convert_stub); |
@@ -4390,32 +4103,34 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { |
// of the stack. |
switch (assign_type) { |
case VARIABLE: |
- __ push(r0); |
+ __ Push(x0); |
break; |
case NAMED_PROPERTY: |
- __ str(r0, MemOperand(sp, kPointerSize)); |
+ __ Poke(x0, kXRegSizeInBytes); |
break; |
case KEYED_PROPERTY: |
- __ str(r0, MemOperand(sp, 2 * kPointerSize)); |
+ __ Poke(x0, 2 * kXRegSizeInBytes); |
break; |
} |
} |
} |
- |
- __ bind(&stub_call); |
- __ mov(r1, r0); |
- __ mov(r0, Operand(Smi::FromInt(count_value))); |
+ __ Bind(&stub_call); |
+ __ Mov(x1, x0); |
+ __ Mov(x0, Operand(Smi::FromInt(count_value))); |
// Record position before stub call. |
SetSourcePosition(expr->position()); |
- BinaryOpICStub stub(Token::ADD, NO_OVERWRITE); |
- CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId()); |
- patch_site.EmitPatchInfo(); |
- __ bind(&done); |
+ { |
+ Assembler::BlockConstPoolScope scope(masm_); |
+ BinaryOpICStub stub(Token::ADD, NO_OVERWRITE); |
+ CallIC(stub.GetCode(isolate()), expr->CountBinOpFeedbackId()); |
+ patch_site.EmitPatchInfo(); |
+ } |
+ __ Bind(&done); |
- // Store the value returned in r0. |
+ // Store the value returned in x0. |
switch (assign_type) { |
case VARIABLE: |
if (expr->is_postfix()) { |
@@ -4423,7 +4138,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { |
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(), |
Token::ASSIGN); |
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); |
- context.Plug(r0); |
+ context.Plug(x0); |
} |
       // For all contexts except EffectContext, we have the result on |
       // top of the stack. |
@@ -4434,12 +4149,12 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { |
EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(), |
Token::ASSIGN); |
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
break; |
case NAMED_PROPERTY: { |
- __ mov(r2, Operand(prop->key()->AsLiteral()->value())); |
- __ pop(r1); |
+ __ Mov(x2, Operand(prop->key()->AsLiteral()->value())); |
+ __ Pop(x1); |
CallStoreIC(expr->CountStoreFeedbackId()); |
PrepareForBailoutForId(expr->AssignmentId(), TOS_REG); |
if (expr->is_postfix()) { |
@@ -4447,12 +4162,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { |
context()->PlugTOS(); |
} |
} else { |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
break; |
} |
case KEYED_PROPERTY: { |
- __ Pop(r2, r1); // r1 = key. r2 = receiver. |
+ __ Pop(x1); // Key. |
+ __ Pop(x2); // Receiver. |
Handle<Code> ic = is_classic_mode() |
? isolate()->builtins()->KeyedStoreIC_Initialize() |
: isolate()->builtins()->KeyedStoreIC_Initialize_Strict(); |
@@ -4463,7 +4179,7 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { |
context()->PlugTOS(); |
} |
} else { |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} |
break; |
} |
@@ -4477,13 +4193,13 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { |
VariableProxy* proxy = expr->AsVariableProxy(); |
if (proxy != NULL && proxy->var()->IsUnallocated()) { |
Comment cmnt(masm_, "Global variable"); |
- __ ldr(r0, GlobalObjectOperand()); |
- __ mov(r2, Operand(proxy->name())); |
+ __ Ldr(x0, GlobalObjectMemOperand()); |
+ __ Mov(x2, Operand(proxy->name())); |
// Use a regular load, not a contextual load, to avoid a reference |
// error. |
CallLoadIC(NOT_CONTEXTUAL); |
PrepareForBailout(expr, TOS_REG); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} else if (proxy != NULL && proxy->var()->IsLookupSlot()) { |
Label done, slow; |
@@ -4491,14 +4207,14 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { |
// by eval-introduced variables. |
EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done); |
- __ bind(&slow); |
- __ mov(r0, Operand(proxy->name())); |
- __ Push(cp, r0); |
+ __ Bind(&slow); |
+ __ Mov(x0, Operand(proxy->name())); |
+ __ Push(cp, x0); |
__ CallRuntime(Runtime::kLoadContextSlotNoReferenceError, 2); |
PrepareForBailout(expr, TOS_REG); |
- __ bind(&done); |
+ __ Bind(&done); |
- context()->Plug(r0); |
+ context()->Plug(x0); |
} else { |
// This expression cannot throw a reference error at the top level. |
VisitInDuplicateContext(expr); |
@@ -4509,6 +4225,8 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { |
void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, |
Expression* sub_expr, |
Handle<String> check) { |
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof"); |
+ Comment cmnt(masm_, "[ EmitLiteralCompareTypeof"); |
Label materialize_true, materialize_false; |
Label* if_true = NULL; |
Label* if_false = NULL; |
@@ -4522,66 +4240,73 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
if (check->Equals(isolate()->heap()->number_string())) { |
- __ JumpIfSmi(r0, if_true); |
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); |
- __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
- __ cmp(r0, ip); |
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof number_string"); |
+ __ JumpIfSmi(x0, if_true); |
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset)); |
+ __ CompareRoot(x0, Heap::kHeapNumberMapRootIndex); |
Split(eq, if_true, if_false, fall_through); |
} else if (check->Equals(isolate()->heap()->string_string())) { |
- __ JumpIfSmi(r0, if_false); |
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof string_string"); |
+ __ JumpIfSmi(x0, if_false); |
// Check for undetectable objects => false. |
- __ CompareObjectType(r0, r0, r1, FIRST_NONSTRING_TYPE); |
- __ b(ge, if_false); |
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); |
- __ tst(r1, Operand(1 << Map::kIsUndetectable)); |
- Split(eq, if_true, if_false, fall_through); |
+ __ JumpIfObjectType(x0, x0, x1, FIRST_NONSTRING_TYPE, if_false, ge); |
+ __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset)); |
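+    // TestAndSplit branches to the first label if all of the tested bits are |
+    // clear, and to the second if any of them is set. |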
+ __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_true, if_false, |
+ fall_through); |
} else if (check->Equals(isolate()->heap()->symbol_string())) { |
- __ JumpIfSmi(r0, if_false); |
- __ CompareObjectType(r0, r0, r1, SYMBOL_TYPE); |
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof symbol_string"); |
+ __ JumpIfSmi(x0, if_false); |
+ __ CompareObjectType(x0, x0, x1, SYMBOL_TYPE); |
Split(eq, if_true, if_false, fall_through); |
} else if (check->Equals(isolate()->heap()->boolean_string())) { |
- __ CompareRoot(r0, Heap::kTrueValueRootIndex); |
- __ b(eq, if_true); |
- __ CompareRoot(r0, Heap::kFalseValueRootIndex); |
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof boolean_string"); |
+ __ JumpIfRoot(x0, Heap::kTrueValueRootIndex, if_true); |
+ __ CompareRoot(x0, Heap::kFalseValueRootIndex); |
Split(eq, if_true, if_false, fall_through); |
} else if (FLAG_harmony_typeof && |
check->Equals(isolate()->heap()->null_string())) { |
- __ CompareRoot(r0, Heap::kNullValueRootIndex); |
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof null_string"); |
+ __ CompareRoot(x0, Heap::kNullValueRootIndex); |
Split(eq, if_true, if_false, fall_through); |
} else if (check->Equals(isolate()->heap()->undefined_string())) { |
- __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); |
- __ b(eq, if_true); |
- __ JumpIfSmi(r0, if_false); |
+ ASM_LOCATION( |
+ "FullCodeGenerator::EmitLiteralCompareTypeof undefined_string"); |
+ __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, if_true); |
+ __ JumpIfSmi(x0, if_false); |
// Check for undetectable objects => true. |
- __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); |
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); |
- __ tst(r1, Operand(1 << Map::kIsUndetectable)); |
- Split(ne, if_true, if_false, fall_through); |
- |
+ __ Ldr(x0, FieldMemOperand(x0, HeapObject::kMapOffset)); |
+ __ Ldrb(x1, FieldMemOperand(x0, Map::kBitFieldOffset)); |
+ __ TestAndSplit(x1, 1 << Map::kIsUndetectable, if_false, if_true, |
+ fall_through); |
} else if (check->Equals(isolate()->heap()->function_string())) { |
- __ JumpIfSmi(r0, if_false); |
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof function_string"); |
+ __ JumpIfSmi(x0, if_false); |
STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2); |
- __ CompareObjectType(r0, r0, r1, JS_FUNCTION_TYPE); |
- __ b(eq, if_true); |
- __ cmp(r1, Operand(JS_FUNCTION_PROXY_TYPE)); |
- Split(eq, if_true, if_false, fall_through); |
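+    // JumpIfObjectType loads the map into x10 and the instance type into x11, |
+    // so x11 can be reused for the proxy check below. |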
+ __ JumpIfObjectType(x0, x10, x11, JS_FUNCTION_TYPE, if_true); |
+ __ CompareAndSplit(x11, JS_FUNCTION_PROXY_TYPE, eq, if_true, if_false, |
+ fall_through); |
+ |
} else if (check->Equals(isolate()->heap()->object_string())) { |
- __ JumpIfSmi(r0, if_false); |
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string"); |
+ __ JumpIfSmi(x0, if_false); |
if (!FLAG_harmony_typeof) { |
- __ CompareRoot(r0, Heap::kNullValueRootIndex); |
- __ b(eq, if_true); |
+ __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true); |
} |
// Check for JS objects => true. |
- __ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE); |
- __ b(lt, if_false); |
- __ CompareInstanceType(r0, r1, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); |
- __ b(gt, if_false); |
+ Register map = x10; |
+ __ JumpIfObjectType(x0, map, x11, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, |
+ if_false, lt); |
+ __ CompareInstanceType(map, x11, LAST_NONCALLABLE_SPEC_OBJECT_TYPE); |
+ __ B(gt, if_false); |
// Check for undetectable objects => false. |
- __ ldrb(r1, FieldMemOperand(r0, Map::kBitFieldOffset)); |
- __ tst(r1, Operand(1 << Map::kIsUndetectable)); |
- Split(eq, if_true, if_false, fall_through); |
+ __ Ldrb(x10, FieldMemOperand(map, Map::kBitFieldOffset)); |
+ |
+ __ TestAndSplit(x10, 1 << Map::kIsUndetectable, if_true, if_false, |
+ fall_through); |
+ |
} else { |
- if (if_false != fall_through) __ jmp(if_false); |
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof other"); |
+ if (if_false != fall_through) __ B(if_false); |
} |
context()->Plug(if_true, if_false); |
} |
@@ -4591,13 +4316,16 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { |
Comment cmnt(masm_, "[ CompareOperation"); |
SetSourcePosition(expr->position()); |
- // First we try a fast inlined version of the compare when one of |
- // the operands is a literal. |
- if (TryLiteralCompare(expr)) return; |
+ // Try to generate an optimized comparison with a literal value. |
+ // TODO(jbramley): This only checks common values like NaN or undefined. |
+ // Should it also handle A64 immediate operands? |
+ if (TryLiteralCompare(expr)) { |
+ return; |
+ } |
- // Always perform the comparison for its control flow. Pack the result |
- // into the expression's context after the comparison is performed. |
- Label materialize_true, materialize_false; |
+ // Assign labels according to context()->PrepareTest. |
+ Label materialize_true; |
+ Label materialize_false; |
Label* if_true = NULL; |
Label* if_false = NULL; |
Label* fall_through = NULL; |
@@ -4611,8 +4339,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { |
VisitForStackValue(expr->right()); |
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION); |
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL); |
- __ LoadRoot(ip, Heap::kTrueValueRootIndex); |
- __ cmp(r0, ip); |
+ __ CompareRoot(x0, Heap::kTrueValueRootIndex); |
Split(eq, if_true, if_false, fall_through); |
break; |
@@ -4622,25 +4349,24 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { |
__ CallStub(&stub); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
// The stub returns 0 for true. |
- __ tst(r0, r0); |
- Split(eq, if_true, if_false, fall_through); |
+ __ CompareAndSplit(x0, 0, eq, if_true, if_false, fall_through); |
break; |
} |
default: { |
VisitForAccumulatorValue(expr->right()); |
Condition cond = CompareIC::ComputeCondition(op); |
- __ pop(r1); |
- bool inline_smi_code = ShouldInlineSmiCase(op); |
+      // Pop the left-hand operand into x1; the right-hand one is in x0. |
+ __ Pop(x1); |
+ |
JumpPatchSite patch_site(masm_); |
- if (inline_smi_code) { |
+ if (ShouldInlineSmiCase(op)) { |
Label slow_case; |
- __ orr(r2, r0, Operand(r1)); |
- patch_site.EmitJumpIfNotSmi(r2, &slow_case); |
- __ cmp(r1, r0); |
+ patch_site.EmitJumpIfEitherNotSmi(x0, x1, &slow_case); |
+ __ Cmp(x1, x0); |
Split(cond, if_true, if_false, NULL); |
- __ bind(&slow_case); |
+ __ Bind(&slow_case); |
} |
// Record position and call the compare IC. |
@@ -4649,8 +4375,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { |
CallIC(ic, expr->CompareOperationFeedbackId()); |
patch_site.EmitPatchInfo(); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
- __ cmp(r0, Operand::Zero()); |
- Split(cond, if_true, if_false, fall_through); |
+ __ CompareAndSplit(x0, 0, cond, if_true, if_false, fall_through); |
} |
} |
@@ -4663,6 +4388,7 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { |
void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, |
Expression* sub_expr, |
NilValue nil) { |
+ ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareNil"); |
Label materialize_true, materialize_false; |
Label* if_true = NULL; |
Label* if_false = NULL; |
@@ -4672,31 +4398,385 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, |
VisitForAccumulatorValue(sub_expr); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
+ |
if (expr->op() == Token::EQ_STRICT) { |
Heap::RootListIndex nil_value = nil == kNullValue ? |
Heap::kNullValueRootIndex : |
Heap::kUndefinedValueRootIndex; |
- __ LoadRoot(r1, nil_value); |
- __ cmp(r0, r1); |
+ __ CompareRoot(x0, nil_value); |
Split(eq, if_true, if_false, fall_through); |
} else { |
Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil); |
CallIC(ic, expr->CompareOperationFeedbackId()); |
- __ cmp(r0, Operand(0)); |
- Split(ne, if_true, if_false, fall_through); |
+ __ CompareAndSplit(x0, 0, ne, if_true, if_false, fall_through); |
} |
+ |
context()->Plug(if_true, if_false); |
} |
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) { |
- __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
- context()->Plug(r0); |
+ __ Ldr(x0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
+ context()->Plug(x0); |
+} |
+ |
+ |
+void FullCodeGenerator::VisitYield(Yield* expr) { |
+ Comment cmnt(masm_, "[ Yield"); |
+ // Evaluate yielded value first; the initial iterator definition depends on |
+ // this. It stays on the stack while we update the iterator. |
+ VisitForStackValue(expr->expression()); |
+ |
+ // TODO(jbramley): Tidy this up once the merge is done, using named registers |
+ // and suchlike. The implementation changes a little by bleeding_edge so I |
+ // don't want to spend too much time on it now. |
+ |
+ switch (expr->yield_kind()) { |
+ case Yield::SUSPEND: |
+ // Pop value from top-of-stack slot; box result into result register. |
+ EmitCreateIteratorResult(false); |
+ __ Push(result_register()); |
+ // Fall through. |
+ case Yield::INITIAL: { |
+ Label suspend, continuation, post_runtime, resume; |
+ |
+ __ B(&suspend); |
+ |
+ // TODO(jbramley): This label is bound here because the following code |
+ // looks at its pos(). Is it possible to do something more efficient here, |
+ // perhaps using Adr? |
+ __ Bind(&continuation); |
+ __ B(&resume); |
+ |
+ __ Bind(&suspend); |
+ VisitForAccumulatorValue(expr->generator_object()); |
+ ASSERT((continuation.pos() > 0) && Smi::IsValid(continuation.pos())); |
+ __ Mov(x1, Operand(Smi::FromInt(continuation.pos()))); |
+ __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset)); |
+ __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset)); |
+ __ Mov(x1, cp); |
+ __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2, |
+ kLRHasBeenSaved, kDontSaveFPRegs); |
+ __ Add(x1, fp, StandardFrameConstants::kExpressionsOffset); |
+ __ Cmp(__ StackPointer(), x1); |
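+      // A match means the frame has no expression stack contents, so the |
+      // suspend runtime call (which copies the operand stack) can be skipped. |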
+ __ B(eq, &post_runtime); |
+ __ Push(x0); // generator object |
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); |
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ Bind(&post_runtime); |
+ __ Pop(result_register()); |
+ EmitReturnSequence(); |
+ |
+ __ Bind(&resume); |
+ context()->Plug(result_register()); |
+ break; |
+ } |
+ |
+ case Yield::FINAL: { |
+ VisitForAccumulatorValue(expr->generator_object()); |
+ __ Mov(x1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed))); |
+ __ Str(x1, FieldMemOperand(result_register(), |
+ JSGeneratorObject::kContinuationOffset)); |
+ // Pop value from top-of-stack slot, box result into result register. |
+ EmitCreateIteratorResult(true); |
+ EmitUnwindBeforeReturn(); |
+ EmitReturnSequence(); |
+ break; |
+ } |
+ |
+ case Yield::DELEGATING: { |
+ VisitForStackValue(expr->generator_object()); |
+ |
+ // Initial stack layout is as follows: |
+ // [sp + 1 * kPointerSize] iter |
+ // [sp + 0 * kPointerSize] g |
+ |
+ Label l_catch, l_try, l_suspend, l_continuation, l_resume; |
+ Label l_next, l_call, l_loop; |
+ // Initial send value is undefined. |
+ __ LoadRoot(x0, Heap::kUndefinedValueRootIndex); |
+ __ B(&l_next); |
+ |
+ // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; } |
+ __ Bind(&l_catch); |
+ handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos())); |
+ __ LoadRoot(x2, Heap::kthrow_stringRootIndex); // "throw" |
+ __ Peek(x3, 1 * kPointerSize); // iter |
+ __ Push(x2, x3, x0); // "throw", iter, except |
+ __ B(&l_call); |
+ |
+ // try { received = %yield result } |
+ // Shuffle the received result above a try handler and yield it without |
+ // re-boxing. |
+ __ Bind(&l_try); |
+ __ Pop(x0); // result |
+ __ PushTryHandler(StackHandler::CATCH, expr->index()); |
+ const int handler_size = StackHandlerConstants::kSize; |
+ __ Push(x0); // result |
+ __ B(&l_suspend); |
+ |
+ // TODO(jbramley): This label is bound here because the following code |
+ // looks at its pos(). Is it possible to do something more efficient here, |
+ // perhaps using Adr? |
+ __ Bind(&l_continuation); |
+ __ B(&l_resume); |
+ |
+ __ Bind(&l_suspend); |
+ const int generator_object_depth = kPointerSize + handler_size; |
+ __ Peek(x0, generator_object_depth); |
+ __ Push(x0); // g |
+ ASSERT((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos())); |
+ __ Mov(x1, Operand(Smi::FromInt(l_continuation.pos()))); |
+ __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset)); |
+ __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset)); |
+ __ Mov(x1, cp); |
+ __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2, |
+ kLRHasBeenSaved, kDontSaveFPRegs); |
+ __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); |
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ Pop(x0); // result |
+ EmitReturnSequence(); |
+ __ Bind(&l_resume); // received in x0 |
+ __ PopTryHandler(); |
+ |
+ // receiver = iter; f = 'next'; arg = received; |
+ __ Bind(&l_next); |
+ __ LoadRoot(x2, Heap::knext_stringRootIndex); // "next" |
+ __ Peek(x3, 1 * kPointerSize); // iter |
+ __ Push(x2, x3, x0); // "next", iter, received |
+ |
+ // result = receiver[f](arg); |
+ __ Bind(&l_call); |
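+    // The stack holds the received value on top, then the iterator and the |
+    // method name; load the iterator into x1 and the name into x0 for the IC. |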
+ __ Peek(x1, 1 * kPointerSize); |
+ __ Peek(x0, 2 * kPointerSize); |
+ Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); |
+ CallIC(ic, TypeFeedbackId::None()); |
+ __ Mov(x1, x0); |
+ __ Poke(x1, 2 * kPointerSize); |
+ CallFunctionStub stub(1, CALL_AS_METHOD); |
+ __ CallStub(&stub); |
+ |
+ __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ Drop(1); // The function is still on the stack; drop it. |
+ |
+ // if (!result.done) goto l_try; |
+ __ Bind(&l_loop); |
+ __ Push(x0); // save result |
+ __ LoadRoot(x2, Heap::kdone_stringRootIndex); // "done" |
+ CallLoadIC(NOT_CONTEXTUAL); // result.done in x0 |
+ // The ToBooleanStub argument (result.done) is in x0. |
+ Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate()); |
+ CallIC(bool_ic); |
+ __ Cbz(x0, &l_try); |
+ |
+ // result.value |
+ __ Pop(x0); // result |
+ __ LoadRoot(x2, Heap::kvalue_stringRootIndex); // "value" |
+ CallLoadIC(NOT_CONTEXTUAL); // result.value in x0 |
+ context()->DropAndPlug(2, x0); // drop iter and g |
+ break; |
+ } |
+ } |
+} |
+ |
+ |
+void FullCodeGenerator::EmitGeneratorResume(Expression *generator, |
+ Expression *value, |
+ JSGeneratorObject::ResumeMode resume_mode) { |
+ ASM_LOCATION("FullCodeGenerator::EmitGeneratorResume"); |
+ Register value_reg = x0; |
+ Register generator_object = x1; |
+ Register the_hole = x2; |
+ Register operand_stack_size = w3; |
+ Register function = x4; |
+ |
+ // The value stays in x0, and is ultimately read by the resumed generator, as |
+ // if the CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it |
+  // is read to throw the value when the resumed generator is already closed. |
+  // x1 will hold the generator object until the activation has been resumed. |
+ VisitForStackValue(generator); |
+ VisitForAccumulatorValue(value); |
+ __ Pop(generator_object); |
+ |
+ // Check generator state. |
+ Label wrong_state, closed_state, done; |
+ __ Ldr(x10, FieldMemOperand(generator_object, |
+ JSGeneratorObject::kContinuationOffset)); |
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0); |
+ STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0); |
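+  // A zero continuation offset indicates a closed generator, a negative one a |
+  // generator that is already executing; positive offsets are suspend points. |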
+ __ CompareAndBranch(x10, Operand(Smi::FromInt(0)), eq, &closed_state); |
+ __ CompareAndBranch(x10, Operand(Smi::FromInt(0)), lt, &wrong_state); |
+ |
+ // Load suspended function and context. |
+ __ Ldr(cp, FieldMemOperand(generator_object, |
+ JSGeneratorObject::kContextOffset)); |
+ __ Ldr(function, FieldMemOperand(generator_object, |
+ JSGeneratorObject::kFunctionOffset)); |
+ |
+ // Load receiver and store as the first argument. |
+ __ Ldr(x10, FieldMemOperand(generator_object, |
+ JSGeneratorObject::kReceiverOffset)); |
+ __ Push(x10); |
+ |
+ // Push holes for the rest of the arguments to the generator function. |
+ __ Ldr(x10, FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); |
+ |
+  // The number of arguments is stored as an int32_t, and -1 is a marker |
+  // (SharedFunctionInfo::kDontAdaptArgumentsSentinel) that would require sign |
+  // extension if it were read into an X register. Since the code below |
+  // operates on 32-bit W registers, no extension is needed. |
+ __ Ldr(w10, FieldMemOperand(x10, |
+ SharedFunctionInfo::kFormalParameterCountOffset)); |
+ __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex); |
+ |
+ // TODO(jbramley): Write a variant of PushMultipleTimes which takes a register |
+ // instead of a constant count, and use it to replace this loop. |
+ Label push_argument_holes, push_frame; |
+ __ Bind(&push_argument_holes); |
+ __ Subs(w10, w10, 1); |
+ __ B(mi, &push_frame); |
+ __ Push(the_hole); |
+ __ B(&push_argument_holes); |
+ |
+ // Enter a new JavaScript frame, and initialize its slots as they were when |
+ // the generator was suspended. |
+ Label resume_frame; |
+ __ Bind(&push_frame); |
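+  // Bl sets lr to the address of the following B(&done), so that instruction |
+  // becomes the return address of the frame built at resume_frame. |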
+ __ Bl(&resume_frame); |
+ __ B(&done); |
+ |
+ __ Bind(&resume_frame); |
+ __ Push(lr, // Return address. |
+ fp, // Caller's frame pointer. |
+ cp, // Callee's context. |
+ function); // Callee's JS Function. |
+ __ Add(fp, __ StackPointer(), kPointerSize * 2); |
+ |
+ // Load and untag the operand stack size. |
+ __ Ldr(x10, FieldMemOperand(generator_object, |
+ JSGeneratorObject::kOperandStackOffset)); |
+ __ Ldr(operand_stack_size, |
+ UntagSmiFieldMemOperand(x10, FixedArray::kLengthOffset)); |
+ |
+ // If we are sending a value and there is no operand stack, we can jump back |
+ // in directly. |
+ if (resume_mode == JSGeneratorObject::NEXT) { |
+ Label slow_resume; |
+ __ Cbnz(operand_stack_size, &slow_resume); |
+ __ Ldr(x10, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); |
+ __ Ldrsw(x11, |
+ UntagSmiFieldMemOperand(generator_object, |
+ JSGeneratorObject::kContinuationOffset)); |
+ __ Add(x10, x10, x11); |
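+    // x10 is now the resume address: the code entry point plus the untagged |
+    // continuation offset. |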
+ __ Mov(x12, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))); |
+ __ Str(x12, FieldMemOperand(generator_object, |
+ JSGeneratorObject::kContinuationOffset)); |
+ __ Br(x10); |
+ |
+ __ Bind(&slow_resume); |
+ } |
+ |
+ // Otherwise, we push holes for the operand stack and call the runtime to fix |
+ // up the stack and the handlers. |
+ // TODO(jbramley): Write a variant of PushMultipleTimes which takes a register |
+ // instead of a constant count, and use it to replace this loop. |
+ Label push_operand_holes, call_resume; |
+ __ Bind(&push_operand_holes); |
+ __ Subs(operand_stack_size, operand_stack_size, 1); |
+ __ B(mi, &call_resume); |
+ __ Push(the_hole); |
+ __ B(&push_operand_holes); |
+ |
+ __ Bind(&call_resume); |
+ __ Mov(x10, Operand(Smi::FromInt(resume_mode))); |
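+  // Runtime::kResumeJSGeneratorObject takes the generator object, the sent |
+  // value and the resume mode as arguments. |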
+ __ Push(generator_object, result_register(), x10); |
+ __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3); |
+ // Not reached: the runtime call returns elsewhere. |
+ __ Unreachable(); |
+ |
+ // Reach here when generator is closed. |
+ __ Bind(&closed_state); |
+ if (resume_mode == JSGeneratorObject::NEXT) { |
+ // Return completed iterator result when generator is closed. |
+ __ LoadRoot(x10, Heap::kUndefinedValueRootIndex); |
+ __ Push(x10); |
+ // Pop value from top-of-stack slot; box result into result register. |
+ EmitCreateIteratorResult(true); |
+ } else { |
+ // Throw the provided value. |
+ __ Push(value_reg); |
+ __ CallRuntime(Runtime::kThrow, 1); |
+ } |
+ __ B(&done); |
+ |
+ // Throw error if we attempt to operate on a running generator. |
+ __ Bind(&wrong_state); |
+ __ Push(generator_object); |
+ __ CallRuntime(Runtime::kThrowGeneratorStateError, 1); |
+ |
+ __ Bind(&done); |
+ context()->Plug(result_register()); |
} |
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) { |
+ Label gc_required; |
+ Label allocated; |
+ |
+ Handle<Map> map(isolate()->native_context()->generator_result_map()); |
+ |
+ // Allocate and populate an object with this form: { value: VAL, done: DONE } |
+ |
+ Register result = x0; |
+ __ Allocate(map->instance_size(), result, x10, x11, &gc_required, TAG_OBJECT); |
+ __ B(&allocated); |
+ |
+ __ Bind(&gc_required); |
+ __ Push(Smi::FromInt(map->instance_size())); |
+ __ CallRuntime(Runtime::kAllocateInNewSpace, 1); |
+ __ Ldr(context_register(), |
+ MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ |
+ __ Bind(&allocated); |
+ Register map_reg = x1; |
+ Register result_value = x2; |
+ Register boolean_done = x3; |
+ Register empty_fixed_array = x4; |
+ __ Mov(map_reg, Operand(map)); |
+ __ Pop(result_value); |
+ __ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done))); |
+ __ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array())); |
+ ASSERT_EQ(map->instance_size(), 5 * kPointerSize); |
+ // TODO(jbramley): Use Stp if possible. |
+ __ Str(map_reg, FieldMemOperand(result, HeapObject::kMapOffset)); |
+ __ Str(empty_fixed_array, |
+ FieldMemOperand(result, JSObject::kPropertiesOffset)); |
+ __ Str(empty_fixed_array, FieldMemOperand(result, JSObject::kElementsOffset)); |
+ __ Str(result_value, |
+ FieldMemOperand(result, |
+ JSGeneratorObject::kResultValuePropertyOffset)); |
+ __ Str(boolean_done, |
+ FieldMemOperand(result, |
+ JSGeneratorObject::kResultDonePropertyOffset)); |
+ |
+ // Only the value field needs a write barrier, as the other values are in the |
+ // root set. |
+ __ RecordWriteField(result, JSGeneratorObject::kResultValuePropertyOffset, |
+ x10, x11, kLRHasBeenSaved, kDontSaveFPRegs); |
+} |
+ |
+ |
+// TODO(all): I don't like this method. |
+// It seems to me that in too many places x0 is used in place of this. |
+// Also, this function is not suitable for all places where x0 should be |
+// abstracted (eg. when used as an argument). But some places assume that the |
+// first argument register is x0, and use this function instead. |
+// Considering that most of the register allocation is hard-coded in the |
+// FullCodeGen, that it is unlikely we will need to change it extensively, and |
+// that abstracting the allocation through functions would not yield any |
+// performance benefit, I think the existence of this function is debatable. |
Register FullCodeGenerator::result_register() { |
- return r0; |
+ return x0; |
} |
@@ -4706,13 +4786,13 @@ Register FullCodeGenerator::context_register() { |
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) { |
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset); |
- __ str(value, MemOperand(fp, frame_offset)); |
+ ASSERT(POINTER_SIZE_ALIGN(frame_offset) == frame_offset); |
+ __ Str(value, MemOperand(fp, frame_offset)); |
} |
void FullCodeGenerator::LoadContextField(Register dst, int context_index) { |
- __ ldr(dst, ContextOperand(cp, context_index)); |
+ __ Ldr(dst, ContextMemOperand(cp, context_index)); |
} |
@@ -4724,161 +4804,151 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() { |
// as their closure, not the anonymous closure containing the global |
// code. Pass a smi sentinel and let the runtime look up the empty |
// function. |
- __ mov(ip, Operand(Smi::FromInt(0))); |
+ ASSERT(kSmiTag == 0); |
+ __ Push(xzr); |
} else if (declaration_scope->is_eval_scope()) { |
// Contexts created by a call to eval have the same closure as the |
// context calling eval, not the anonymous closure containing the eval |
// code. Fetch it from the context. |
- __ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX)); |
+ __ Ldr(x10, ContextMemOperand(cp, Context::CLOSURE_INDEX)); |
+ __ Push(x10); |
} else { |
ASSERT(declaration_scope->is_function_scope()); |
- __ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
+ __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
+ __ Push(x10); |
} |
- __ push(ip); |
} |
-// ---------------------------------------------------------------------------- |
-// Non-local control flow support. |
- |
void FullCodeGenerator::EnterFinallyBlock() { |
- ASSERT(!result_register().is(r1)); |
- // Store result register while executing finally block. |
- __ push(result_register()); |
- // Cook return address in link register to stack (smi encoded Code* delta) |
- __ sub(r1, lr, Operand(masm_->CodeObject())); |
- __ SmiTag(r1); |
- |
- // Store result register while executing finally block. |
- __ push(r1); |
+ ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock"); |
+ ASSERT(!result_register().is(x10)); |
+ // Preserve the result register while executing finally block. |
+ // Also cook the return address in lr to the stack (smi encoded Code* delta). |
+ __ Sub(x10, lr, Operand(masm_->CodeObject())); |
+ __ SmiTag(x10); |
+ __ Push(result_register(), x10); |
// Store pending message while executing finally block. |
ExternalReference pending_message_obj = |
ExternalReference::address_of_pending_message_obj(isolate()); |
- __ mov(ip, Operand(pending_message_obj)); |
- __ ldr(r1, MemOperand(ip)); |
- __ push(r1); |
+ __ Mov(x10, Operand(pending_message_obj)); |
+ __ Ldr(x10, MemOperand(x10)); |
ExternalReference has_pending_message = |
ExternalReference::address_of_has_pending_message(isolate()); |
- __ mov(ip, Operand(has_pending_message)); |
- __ ldr(r1, MemOperand(ip)); |
- __ SmiTag(r1); |
- __ push(r1); |
+ __ Mov(x11, Operand(has_pending_message)); |
+ __ Ldr(x11, MemOperand(x11)); |
+ __ SmiTag(x11); |
+ |
+ __ Push(x10, x11); |
ExternalReference pending_message_script = |
ExternalReference::address_of_pending_message_script(isolate()); |
- __ mov(ip, Operand(pending_message_script)); |
- __ ldr(r1, MemOperand(ip)); |
- __ push(r1); |
+ __ Mov(x10, Operand(pending_message_script)); |
+ __ Ldr(x10, MemOperand(x10)); |
+ __ Push(x10); |
} |
void FullCodeGenerator::ExitFinallyBlock() { |
- ASSERT(!result_register().is(r1)); |
+ ASM_LOCATION("FullCodeGenerator::ExitFinallyBlock"); |
+ ASSERT(!result_register().is(x10)); |
+ |
// Restore pending message from stack. |
- __ pop(r1); |
+ __ Pop(x10, x11, x12); |
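+  // x10: the pending message script, x11: whether a message is pending (as a |
+  // smi), x12: the pending message object; the reverse of the order in which |
+  // they were pushed in EnterFinallyBlock. |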
ExternalReference pending_message_script = |
ExternalReference::address_of_pending_message_script(isolate()); |
- __ mov(ip, Operand(pending_message_script)); |
- __ str(r1, MemOperand(ip)); |
+ __ Mov(x13, Operand(pending_message_script)); |
+ __ Str(x10, MemOperand(x13)); |
- __ pop(r1); |
- __ SmiUntag(r1); |
+ __ SmiUntag(x11); |
ExternalReference has_pending_message = |
ExternalReference::address_of_has_pending_message(isolate()); |
- __ mov(ip, Operand(has_pending_message)); |
- __ str(r1, MemOperand(ip)); |
+ __ Mov(x13, Operand(has_pending_message)); |
+ __ Str(x11, MemOperand(x13)); |
- __ pop(r1); |
ExternalReference pending_message_obj = |
ExternalReference::address_of_pending_message_obj(isolate()); |
- __ mov(ip, Operand(pending_message_obj)); |
- __ str(r1, MemOperand(ip)); |
- |
- // Restore result register from stack. |
- __ pop(r1); |
- |
- // Uncook return address and return. |
- __ pop(result_register()); |
- __ SmiUntag(r1); |
- __ add(pc, r1, Operand(masm_->CodeObject())); |
-} |
- |
- |
-#undef __ |
- |
-#define __ ACCESS_MASM(masm()) |
- |
-FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit( |
- int* stack_depth, |
- int* context_length) { |
- // The macros used here must preserve the result register. |
+ __ Mov(x13, Operand(pending_message_obj)); |
+ __ Str(x12, MemOperand(x13)); |
- // Because the handler block contains the context of the finally |
- // code, we can restore it directly from there for the finally code |
- // rather than iteratively unwinding contexts via their previous |
- // links. |
- __ Drop(*stack_depth); // Down to the handler block. |
- if (*context_length > 0) { |
- // Restore the context to its dedicated register and the stack. |
- __ ldr(cp, MemOperand(sp, StackHandlerConstants::kContextOffset)); |
- __ str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
- } |
- __ PopTryHandler(); |
- __ bl(finally_entry_); |
+ // Restore result register and cooked return address from the stack. |
+ __ Pop(x10, result_register()); |
- *stack_depth = 0; |
- *context_length = 0; |
- return previous_; |
+ // Uncook the return address (see EnterFinallyBlock). |
+ __ SmiUntag(x10); |
+ __ Add(x11, x10, Operand(masm_->CodeObject())); |
+ __ Br(x11); |
} |
#undef __ |
-static const int32_t kBranchBeforeInterrupt = 0x5a000004; |
- |
- |
void BackEdgeTable::PatchAt(Code* unoptimized_code, |
Address pc, |
BackEdgeState target_state, |
Code* replacement_code) { |
- static const int kInstrSize = Assembler::kInstrSize; |
- Address branch_address = pc - 3 * kInstrSize; |
- CodePatcher patcher(branch_address, 1); |
+  // Patch the jump or nop at the start of the back edge check sequence. |
+ Address branch_address = pc - 3 * kInstructionSize; |
+ PatchingAssembler patcher(branch_address, 1); |
switch (target_state) { |
case INTERRUPT: |
// <decrement profiling counter> |
- // 2a 00 00 01 bpl ok |
- // e5 9f c? ?? ldr ip, [pc, <interrupt stub address>] |
- // e1 2f ff 3c blx ip |
+ // .. .. .. .. b.pl ok |
+ // .. .. .. .. ldr x16, pc+<interrupt stub address> |
+ // .. .. .. .. blr x16 |
+ // ... more instructions. |
// ok-label |
- patcher.masm()->b(4 * kInstrSize, pl); // Jump offset is 4 instructions. |
- ASSERT_EQ(kBranchBeforeInterrupt, Memory::int32_at(branch_address)); |
+ // Jump offset is 6 instructions. |
+ ASSERT(Instruction::Cast(branch_address) |
+ ->IsNop(Assembler::INTERRUPT_CODE_NOP)); |
+ patcher.b(6, pl); |
break; |
case ON_STACK_REPLACEMENT: |
case OSR_AFTER_STACK_CHECK: |
// <decrement profiling counter> |
- // e1 a0 00 00 mov r0, r0 (NOP) |
- // e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>] |
- // e1 2f ff 3c blx ip |
- // ok-label |
- patcher.masm()->nop(); |
+ // .. .. .. .. mov x0, x0 (NOP) |
+ // .. .. .. .. ldr x16, pc+<on-stack replacement address> |
+ // .. .. .. .. blr x16 |
+ ASSERT(Instruction::Cast(branch_address)->IsCondBranchImm()); |
+ ASSERT(Instruction::Cast(branch_address)->ImmPCOffset() == |
+ 6 * kInstructionSize); |
+ patcher.nop(Assembler::INTERRUPT_CODE_NOP); |
break; |
} |
- Address pc_immediate_load_address = pc - 2 * kInstrSize; |
// Replace the call address. |
- uint32_t interrupt_address_offset = |
- Memory::uint16_at(pc_immediate_load_address) & 0xfff; |
- Address interrupt_address_pointer = pc + interrupt_address_offset; |
- Memory::uint32_at(interrupt_address_pointer) = |
- reinterpret_cast<uint32_t>(replacement_code->entry()); |
+ Instruction* load = Instruction::Cast(pc)->preceding(2); |
+ Address interrupt_address_pointer = |
+ reinterpret_cast<Address>(load) + load->ImmPCOffset(); |
+ ASSERT((Memory::uint64_at(interrupt_address_pointer) == |
+ reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate() |
+ ->builtins() |
+ ->OnStackReplacement() |
+ ->entry())) || |
+ (Memory::uint64_at(interrupt_address_pointer) == |
+ reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate() |
+ ->builtins() |
+ ->InterruptCheck() |
+ ->entry())) || |
+         (Memory::uint64_at(interrupt_address_pointer) == |
+          reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate() |
+                                         ->builtins() |
+                                         ->OsrAfterStackCheck() |
+                                         ->entry()))); |
+ Memory::uint64_at(interrupt_address_pointer) = |
+ reinterpret_cast<uint64_t>(replacement_code->entry()); |
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch( |
- unoptimized_code, pc_immediate_load_address, replacement_code); |
+ unoptimized_code, reinterpret_cast<Address>(load), replacement_code); |
} |
@@ -4886,41 +4956,61 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState( |
Isolate* isolate, |
Code* unoptimized_code, |
Address pc) { |
- static const int kInstrSize = Assembler::kInstrSize; |
- ASSERT(Memory::int32_at(pc - kInstrSize) == kBlxIp); |
- |
- Address branch_address = pc - 3 * kInstrSize; |
- Address pc_immediate_load_address = pc - 2 * kInstrSize; |
- uint32_t interrupt_address_offset = |
- Memory::uint16_at(pc_immediate_load_address) & 0xfff; |
- Address interrupt_address_pointer = pc + interrupt_address_offset; |
- |
- if (Memory::int32_at(branch_address) == kBranchBeforeInterrupt) { |
- ASSERT(Memory::uint32_at(interrupt_address_pointer) == |
- reinterpret_cast<uint32_t>( |
- isolate->builtins()->InterruptCheck()->entry())); |
- ASSERT(Assembler::IsLdrPcImmediateOffset( |
- Assembler::instr_at(pc_immediate_load_address))); |
- return INTERRUPT; |
+ // TODO(jbramley): There should be some extra assertions here (as in the ARM |
+ // back-end), but this function is gone in bleeding_edge so it might not |
+ // matter anyway. |
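+  // pc points just past the blr; three instructions back is the conditional |
+  // branch, or the nop that replaces it once the sequence has been patched. |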
+ Instruction* jump_or_nop = Instruction::Cast(pc)->preceding(3); |
+ |
+ if (jump_or_nop->IsNop(Assembler::INTERRUPT_CODE_NOP)) { |
+ Instruction* load = Instruction::Cast(pc)->preceding(2); |
+ uint64_t entry = Memory::uint64_at(reinterpret_cast<Address>(load) + |
+ load->ImmPCOffset()); |
+ if (entry == reinterpret_cast<uint64_t>( |
+ isolate->builtins()->OnStackReplacement()->entry())) { |
+ return ON_STACK_REPLACEMENT; |
+ } else if (entry == reinterpret_cast<uint64_t>( |
+ isolate->builtins()->OsrAfterStackCheck()->entry())) { |
+ return OSR_AFTER_STACK_CHECK; |
+ } else { |
+ UNREACHABLE(); |
+ } |
} |
- ASSERT(Assembler::IsNop(Assembler::instr_at(branch_address))); |
- ASSERT(Assembler::IsLdrPcImmediateOffset( |
- Assembler::instr_at(pc_immediate_load_address))); |
+ return INTERRUPT; |
+} |
+ |
+ |
+#define __ ACCESS_MASM(masm()) |
+ |
+ |
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit( |
+ int* stack_depth, |
+ int* context_length) { |
+ ASM_LOCATION("FullCodeGenerator::TryFinally::Exit"); |
+ // The macros used here must preserve the result register. |
- if (Memory::uint32_at(interrupt_address_pointer) == |
- reinterpret_cast<uint32_t>( |
- isolate->builtins()->OnStackReplacement()->entry())) { |
- return ON_STACK_REPLACEMENT; |
+ // Because the handler block contains the context of the finally |
+ // code, we can restore it directly from there for the finally code |
+ // rather than iteratively unwinding contexts via their previous |
+ // links. |
+ __ Drop(*stack_depth); // Down to the handler block. |
+ if (*context_length > 0) { |
+ // Restore the context to its dedicated register and the stack. |
+ __ Peek(cp, StackHandlerConstants::kContextOffset); |
+ __ Str(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
} |
+ __ PopTryHandler(); |
+ __ Bl(finally_entry_); |
- ASSERT(Memory::uint32_at(interrupt_address_pointer) == |
- reinterpret_cast<uint32_t>( |
- isolate->builtins()->OsrAfterStackCheck()->entry())); |
- return OSR_AFTER_STACK_CHECK; |
+ *stack_depth = 0; |
+ *context_length = 0; |
+ return previous_; |
} |
+#undef __ |
+ |
+ |
} } // namespace v8::internal |
-#endif // V8_TARGET_ARCH_ARM |
+#endif // V8_TARGET_ARCH_A64 |