Index: src/mips64/full-codegen-mips64.cc |
diff --git a/src/mips/full-codegen-mips.cc b/src/mips64/full-codegen-mips64.cc |
similarity index 89% |
copy from src/mips/full-codegen-mips.cc |
copy to src/mips64/full-codegen-mips64.cc |
index 11a67b7adb2a44bf756c0d9d26836905e440389b..5f036b63336a2ab099b5d5a29b1ee1dedee5581c 100644 |
--- a/src/mips/full-codegen-mips.cc |
+++ b/src/mips64/full-codegen-mips64.cc |
@@ -4,7 +4,7 @@ |
#include "src/v8.h" |
-#if V8_TARGET_ARCH_MIPS |
+#if V8_TARGET_ARCH_MIPS64 |
// Note on Mips implementation: |
// |
@@ -24,8 +24,8 @@ |
#include "src/scopes.h" |
#include "src/stub-cache.h" |
-#include "src/mips/code-stubs-mips.h" |
-#include "src/mips/macro-assembler-mips.h" |
+#include "src/mips64/code-stubs-mips64.h" |
+#include "src/mips64/macro-assembler-mips64.h" |
namespace v8 { |
namespace internal { |
@@ -136,23 +136,20 @@ void FullCodeGenerator::Generate() { |
if (info->strict_mode() == SLOPPY && !info->is_native()) { |
Label ok; |
int receiver_offset = info->scope()->num_parameters() * kPointerSize; |
- __ lw(at, MemOperand(sp, receiver_offset)); |
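+ // Pointer-sized stack and heap slots are 8 bytes on MIPS64, so the 32-bit lw/sw pairs become ld/sd throughout this port. |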
+ __ ld(at, MemOperand(sp, receiver_offset)); |
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex); |
__ Branch(&ok, ne, a2, Operand(at)); |
- __ lw(a2, GlobalObjectOperand()); |
- __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset)); |
- |
- __ sw(a2, MemOperand(sp, receiver_offset)); |
+ __ ld(a2, GlobalObjectOperand()); |
+ __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset)); |
+ __ sd(a2, MemOperand(sp, receiver_offset)); |
__ bind(&ok); |
} |
- |
// Open a frame scope to indicate that there is a frame on the stack. The |
// MANUAL indicates that the scope shouldn't actually generate code to set up |
// the frame (that is done below). |
FrameScope frame_scope(masm_, StackFrame::MANUAL); |
- |
info->set_prologue_offset(masm_->pc_offset()); |
__ Prologue(info->IsCodePreAgingActive()); |
info->AddNoFrameRange(0, masm_->pc_offset()); |
@@ -164,13 +161,13 @@ void FullCodeGenerator::Generate() { |
if (locals_count > 0) { |
if (locals_count >= 128) { |
Label ok; |
- __ Subu(t5, sp, Operand(locals_count * kPointerSize)); |
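+ // Register note: the n64 ABI maps O32's t4..t7 to t0..t3 (and O32's t0..t3 to a4..a7), hence t5 becomes t1 here. |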
+ __ Dsubu(t1, sp, Operand(locals_count * kPointerSize)); |
__ LoadRoot(a2, Heap::kRealStackLimitRootIndex); |
- __ Branch(&ok, hs, t5, Operand(a2)); |
+ __ Branch(&ok, hs, t1, Operand(a2)); |
__ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION); |
__ bind(&ok); |
} |
- __ LoadRoot(t5, Heap::kUndefinedValueRootIndex); |
+ __ LoadRoot(t1, Heap::kUndefinedValueRootIndex); |
int kMaxPushes = FLAG_optimize_for_size ? 4 : 32; |
if (locals_count >= kMaxPushes) { |
int loop_iterations = locals_count / kMaxPushes; |
@@ -178,19 +175,19 @@ void FullCodeGenerator::Generate() { |
Label loop_header; |
__ bind(&loop_header); |
// Do pushes. |
- __ Subu(sp, sp, Operand(kMaxPushes * kPointerSize)); |
+ __ Dsubu(sp, sp, Operand(kMaxPushes * kPointerSize)); |
for (int i = 0; i < kMaxPushes; i++) { |
- __ sw(t5, MemOperand(sp, i * kPointerSize)); |
+ __ sd(t1, MemOperand(sp, i * kPointerSize)); |
} |
// Continue loop if not done. |
- __ Subu(a2, a2, Operand(1)); |
+ __ Dsubu(a2, a2, Operand(1)); |
__ Branch(&loop_header, ne, a2, Operand(zero_reg)); |
} |
int remaining = locals_count % kMaxPushes; |
// Emit the remaining pushes. |
- __ Subu(sp, sp, Operand(remaining * kPointerSize)); |
+ __ Dsubu(sp, sp, Operand(remaining * kPointerSize)); |
for (int i = 0; i < remaining; i++) { |
- __ sw(t5, MemOperand(sp, i * kPointerSize)); |
+ __ sd(t1, MemOperand(sp, i * kPointerSize)); |
} |
} |
} |
@@ -220,7 +217,7 @@ void FullCodeGenerator::Generate() { |
// Context is returned in v0. It replaces the context passed to us. |
// It's saved in the stack and kept live in cp. |
__ mov(cp, v0); |
- __ sw(v0, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
// Copy any necessary parameters into the context. |
int num_parameters = info->scope()->num_parameters(); |
for (int i = 0; i < num_parameters; i++) { |
@@ -229,10 +226,10 @@ void FullCodeGenerator::Generate() { |
int parameter_offset = StandardFrameConstants::kCallerSPOffset + |
(num_parameters - 1 - i) * kPointerSize; |
// Load parameter from stack. |
- __ lw(a0, MemOperand(fp, parameter_offset)); |
+ __ ld(a0, MemOperand(fp, parameter_offset)); |
// Store it in the context. |
MemOperand target = ContextOperand(cp, var->index()); |
- __ sw(a0, target); |
+ __ sd(a0, target); |
// Update the write barrier. |
if (need_write_barrier) { |
@@ -247,21 +244,20 @@ void FullCodeGenerator::Generate() { |
} |
} |
} |
- |
Variable* arguments = scope()->arguments(); |
if (arguments != NULL) { |
// Function uses arguments object. |
Comment cmnt(masm_, "[ Allocate arguments object"); |
if (!function_in_register) { |
// Load this again, if it's used by the local context below. |
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
+ __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
} else { |
__ mov(a3, a1); |
} |
// Receiver is just before the parameters on the caller's stack. |
int num_parameters = info->scope()->num_parameters(); |
int offset = num_parameters * kPointerSize; |
- __ Addu(a2, fp, |
+ __ Daddu(a2, fp, |
Operand(StandardFrameConstants::kCallerSPOffset + offset)); |
__ li(a1, Operand(Smi::FromInt(num_parameters))); |
__ Push(a3, a2, a1); |
@@ -287,7 +283,6 @@ void FullCodeGenerator::Generate() { |
if (FLAG_trace) { |
__ CallRuntime(Runtime::kTraceEnter, 0); |
} |
- |
// Visit the declarations and body unless there is an illegal |
// redeclaration. |
if (scope()->HasIllegalRedeclaration()) { |
@@ -308,7 +303,6 @@ void FullCodeGenerator::Generate() { |
} |
VisitDeclarations(scope()->declarations()); |
} |
- |
{ Comment cmnt(masm_, "[ Stack check"); |
PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS); |
Label ok; |
@@ -323,7 +317,9 @@ void FullCodeGenerator::Generate() { |
{ Comment cmnt(masm_, "[ Body"); |
ASSERT(loop_depth() == 0); |
+ |
VisitStatements(function()->body()); |
+ |
ASSERT(loop_depth() == 0); |
} |
} |
@@ -345,9 +341,9 @@ void FullCodeGenerator::ClearAccumulator() { |
void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) { |
__ li(a2, Operand(profiling_counter_)); |
- __ lw(a3, FieldMemOperand(a2, Cell::kValueOffset)); |
- __ Subu(a3, a3, Operand(Smi::FromInt(delta))); |
- __ sw(a3, FieldMemOperand(a2, Cell::kValueOffset)); |
+ __ ld(a3, FieldMemOperand(a2, Cell::kValueOffset)); |
+ __ Dsubu(a3, a3, Operand(Smi::FromInt(delta))); |
+ __ sd(a3, FieldMemOperand(a2, Cell::kValueOffset)); |
} |
@@ -359,7 +355,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() { |
} |
__ li(a2, Operand(profiling_counter_)); |
__ li(a3, Operand(Smi::FromInt(reset_value))); |
- __ sw(a3, FieldMemOperand(a2, Cell::kValueOffset)); |
+ __ sd(a3, FieldMemOperand(a2, Cell::kValueOffset)); |
} |
@@ -444,7 +440,7 @@ void FullCodeGenerator::EmitReturnSequence() { |
masm_->mov(sp, fp); |
int no_frame_start = masm_->pc_offset(); |
masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit())); |
- masm_->Addu(sp, sp, Operand(sp_delta)); |
+ masm_->Daddu(sp, sp, Operand(sp_delta)); |
masm_->Jump(ra); |
info_->AddNoFrameRange(no_frame_start, masm_->pc_offset()); |
} |
@@ -587,7 +583,7 @@ void FullCodeGenerator::StackValueContext::DropAndPlug(int count, |
Register reg) const { |
ASSERT(count > 0); |
if (count > 1) __ Drop(count - 1); |
- __ sw(reg, MemOperand(sp, 0)); |
+ __ sd(reg, MemOperand(sp, 0)); |
} |
@@ -735,7 +731,7 @@ MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) { |
void FullCodeGenerator::GetVar(Register dest, Variable* var) { |
// Use destination as scratch. |
MemOperand location = VarOperand(var, dest); |
- __ lw(dest, location); |
+ __ ld(dest, location); |
} |
@@ -748,7 +744,7 @@ void FullCodeGenerator::SetVar(Variable* var, |
ASSERT(!scratch0.is(scratch1)); |
ASSERT(!scratch1.is(src)); |
MemOperand location = VarOperand(var, scratch0); |
- __ sw(src, location); |
+ __ sd(src, location); |
// Emit the write barrier code if the location is in the heap. |
if (var->IsContextSlot()) { |
__ RecordWriteContextSlot(scratch0, |
@@ -774,8 +770,8 @@ void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr, |
if (should_normalize) __ Branch(&skip); |
PrepareForBailout(expr, TOS_REG); |
if (should_normalize) { |
- __ LoadRoot(t0, Heap::kTrueValueRootIndex); |
- Split(eq, a0, Operand(t0), if_true, if_false, NULL); |
+ __ LoadRoot(a4, Heap::kTrueValueRootIndex); |
+ Split(eq, a0, Operand(a4), if_true, if_false, NULL); |
__ bind(&skip); |
} |
} |
@@ -787,13 +783,13 @@ void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) { |
ASSERT_EQ(0, scope()->ContextChainLength(variable->scope())); |
if (generate_debug_code_) { |
// Check that we're not inside a with or catch context. |
- __ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset)); |
- __ LoadRoot(t0, Heap::kWithContextMapRootIndex); |
+ __ ld(a1, FieldMemOperand(cp, HeapObject::kMapOffset)); |
+ __ LoadRoot(a4, Heap::kWithContextMapRootIndex); |
__ Check(ne, kDeclarationInWithContext, |
- a1, Operand(t0)); |
- __ LoadRoot(t0, Heap::kCatchContextMapRootIndex); |
+ a1, Operand(a4)); |
+ __ LoadRoot(a4, Heap::kCatchContextMapRootIndex); |
__ Check(ne, kDeclarationInCatchContext, |
- a1, Operand(t0)); |
+ a1, Operand(a4)); |
} |
} |
@@ -820,8 +816,8 @@ void FullCodeGenerator::VisitVariableDeclaration( |
case Variable::LOCAL: |
if (hole_init) { |
Comment cmnt(masm_, "[ VariableDeclaration"); |
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex); |
- __ sw(t0, StackOperand(variable)); |
+ __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); |
+ __ sd(a4, StackOperand(variable)); |
} |
break; |
@@ -830,7 +826,7 @@ void FullCodeGenerator::VisitVariableDeclaration( |
Comment cmnt(masm_, "[ VariableDeclaration"); |
EmitDebugCheckDeclarationContext(variable); |
__ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
- __ sw(at, ContextOperand(cp, variable->index())); |
+ __ sd(at, ContextOperand(cp, variable->index())); |
// No write barrier since the_hole_value is in old space. |
PrepareForBailoutForId(proxy->id(), NO_REGISTERS); |
} |
@@ -882,7 +878,7 @@ void FullCodeGenerator::VisitFunctionDeclaration( |
case Variable::LOCAL: { |
Comment cmnt(masm_, "[ FunctionDeclaration"); |
VisitForAccumulatorValue(declaration->fun()); |
- __ sw(result_register(), StackOperand(variable)); |
+ __ sd(result_register(), StackOperand(variable)); |
break; |
} |
@@ -890,7 +886,7 @@ void FullCodeGenerator::VisitFunctionDeclaration( |
Comment cmnt(masm_, "[ FunctionDeclaration"); |
EmitDebugCheckDeclarationContext(variable); |
VisitForAccumulatorValue(declaration->fun()); |
- __ sw(result_register(), ContextOperand(cp, variable->index())); |
+ __ sd(result_register(), ContextOperand(cp, variable->index())); |
int offset = Context::SlotOffset(variable->index()); |
// We know that we have written a function, which is not a smi. |
__ RecordWriteContextSlot(cp, |
@@ -923,17 +919,16 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) { |
Variable* variable = declaration->proxy()->var(); |
ASSERT(variable->location() == Variable::CONTEXT); |
ASSERT(variable->interface()->IsFrozen()); |
- |
Comment cmnt(masm_, "[ ModuleDeclaration"); |
EmitDebugCheckDeclarationContext(variable); |
// Load instance object. |
__ LoadContext(a1, scope_->ContextChainLength(scope_->GlobalScope())); |
- __ lw(a1, ContextOperand(a1, variable->interface()->Index())); |
- __ lw(a1, ContextOperand(a1, Context::EXTENSION_INDEX)); |
+ __ ld(a1, ContextOperand(a1, variable->interface()->Index())); |
+ __ ld(a1, ContextOperand(a1, Context::EXTENSION_INDEX)); |
// Assign it. |
- __ sw(a1, ContextOperand(cp, variable->index())); |
+ __ sd(a1, ContextOperand(cp, variable->index())); |
// We know that we have written a module, which is not a smi. |
__ RecordWriteContextSlot(cp, |
Context::SlotOffset(variable->index()), |
@@ -1030,7 +1025,7 @@ void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) { |
__ mov(a0, result_register()); // CompareStub requires args in a0, a1. |
// Perform the comparison as if via '==='. |
- __ lw(a1, MemOperand(sp, 0)); // Switch value. |
+ __ ld(a1, MemOperand(sp, 0)); // Switch value. |
bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT); |
JumpPatchSite patch_site(masm_); |
if (inline_smi_code) { |
@@ -1104,7 +1099,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { |
__ mov(a0, result_register()); // Result as param to InvokeBuiltin below. |
__ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
__ Branch(&exit, eq, a0, Operand(at)); |
- Register null_value = t1; |
+ Register null_value = a5; |
__ LoadRoot(null_value, Heap::kNullValueRootIndex); |
__ Branch(&exit, eq, a0, Operand(null_value)); |
PrepareForBailoutForId(stmt->PrepareId(), TOS_REG); |
@@ -1136,7 +1131,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { |
// The enum cache is valid. Load the map of the object being |
// iterated over and use the cache for the iteration. |
Label use_cache; |
- __ lw(v0, FieldMemOperand(a0, HeapObject::kMapOffset)); |
+ __ ld(v0, FieldMemOperand(a0, HeapObject::kMapOffset)); |
__ Branch(&use_cache); |
// Get the set of properties to enumerate. |
@@ -1148,7 +1143,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { |
// modification check. Otherwise, we got a fixed array, and we have |
// to do a slow check. |
Label fixed_array; |
- __ lw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); |
+ __ ld(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); |
__ LoadRoot(at, Heap::kMetaMapRootIndex); |
__ Branch(&fixed_array, ne, a2, Operand(at)); |
@@ -1160,8 +1155,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { |
__ Branch(&no_descriptors, eq, a1, Operand(Smi::FromInt(0))); |
__ LoadInstanceDescriptors(v0, a2); |
- __ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheOffset)); |
- __ lw(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset)); |
+ __ ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheOffset)); |
+ __ ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset)); |
// Set up the four remaining stack slots. |
__ li(a0, Operand(Smi::FromInt(0))); |
@@ -1179,17 +1174,17 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { |
__ li(a1, FeedbackVector()); |
__ li(a2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate()))); |
- __ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(slot))); |
+ __ sd(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(slot))); |
__ li(a1, Operand(Smi::FromInt(1))); // Smi indicates slow check |
- __ lw(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object |
+ __ ld(a2, MemOperand(sp, 0 * kPointerSize)); // Get enumerated object |
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
__ GetObjectType(a2, a3, a3); |
__ Branch(&non_proxy, gt, a3, Operand(LAST_JS_PROXY_TYPE)); |
__ li(a1, Operand(Smi::FromInt(0))); // Zero indicates proxy |
__ bind(&non_proxy); |
__ Push(a1, v0); // Smi and array |
- __ lw(a1, FieldMemOperand(v0, FixedArray::kLengthOffset)); |
+ __ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset)); |
__ li(a0, Operand(Smi::FromInt(0))); |
__ Push(a1, a0); // Fixed array length (as smi) and initial index. |
@@ -1197,27 +1192,27 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { |
PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS); |
__ bind(&loop); |
// Load the current count to a0, load the length to a1. |
- __ lw(a0, MemOperand(sp, 0 * kPointerSize)); |
- __ lw(a1, MemOperand(sp, 1 * kPointerSize)); |
+ __ ld(a0, MemOperand(sp, 0 * kPointerSize)); |
+ __ ld(a1, MemOperand(sp, 1 * kPointerSize)); |
__ Branch(loop_statement.break_label(), hs, a0, Operand(a1)); |
// Get the current entry of the array into register a3. |
- __ lw(a2, MemOperand(sp, 2 * kPointerSize)); |
- __ Addu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
- __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize); |
- __ addu(t0, a2, t0); // Array base + scaled (smi) index. |
- __ lw(a3, MemOperand(t0)); // Current entry. |
+ __ ld(a2, MemOperand(sp, 2 * kPointerSize)); |
+ __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
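+ // SmiScale converts the Smi index in a0 straight to a byte offset, replacing the 32-bit sll sequence above. |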
+ __ SmiScale(a4, a0, kPointerSizeLog2); |
+ __ daddu(a4, a2, a4); // Array base + scaled (smi) index. |
+ __ ld(a3, MemOperand(a4)); // Current entry. |
// Get the expected map from the stack or a smi in the |
// permanent slow case into register a2. |
- __ lw(a2, MemOperand(sp, 3 * kPointerSize)); |
+ __ ld(a2, MemOperand(sp, 3 * kPointerSize)); |
// Check if the expected map still matches that of the enumerable. |
// If not, we may have to filter the key. |
Label update_each; |
- __ lw(a1, MemOperand(sp, 4 * kPointerSize)); |
- __ lw(t0, FieldMemOperand(a1, HeapObject::kMapOffset)); |
- __ Branch(&update_each, eq, t0, Operand(a2)); |
+ __ ld(a1, MemOperand(sp, 4 * kPointerSize)); |
+ __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset)); |
+ __ Branch(&update_each, eq, a4, Operand(a2)); |
// For proxies, no filtering is done. |
// TODO(rossberg): What if only a prototype is a proxy? Not specified yet. |
@@ -1248,7 +1243,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) { |
// the index (smi) stored on top of the stack. |
__ bind(loop_statement.continue_label()); |
__ pop(a0); |
- __ Addu(a0, a0, Operand(Smi::FromInt(1))); |
+ __ Daddu(a0, a0, Operand(Smi::FromInt(1))); |
__ push(a0); |
EmitBackEdgeBookkeeping(stmt, &loop); |
@@ -1364,11 +1359,11 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var, |
if (s->num_heap_slots() > 0) { |
if (s->calls_sloppy_eval()) { |
// Check that extension is NULL. |
- __ lw(temp, ContextOperand(current, Context::EXTENSION_INDEX)); |
+ __ ld(temp, ContextOperand(current, Context::EXTENSION_INDEX)); |
__ Branch(slow, ne, temp, Operand(zero_reg)); |
} |
// Load next context in chain. |
- __ lw(next, ContextOperand(current, Context::PREVIOUS_INDEX)); |
+ __ ld(next, ContextOperand(current, Context::PREVIOUS_INDEX)); |
// Walk the rest of the chain without clobbering cp. |
current = next; |
} |
@@ -1385,19 +1380,19 @@ void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var, |
} |
__ bind(&loop); |
// Terminate at native context. |
- __ lw(temp, FieldMemOperand(next, HeapObject::kMapOffset)); |
- __ LoadRoot(t0, Heap::kNativeContextMapRootIndex); |
- __ Branch(&fast, eq, temp, Operand(t0)); |
+ __ ld(temp, FieldMemOperand(next, HeapObject::kMapOffset)); |
+ __ LoadRoot(a4, Heap::kNativeContextMapRootIndex); |
+ __ Branch(&fast, eq, temp, Operand(a4)); |
// Check that extension is NULL. |
- __ lw(temp, ContextOperand(next, Context::EXTENSION_INDEX)); |
+ __ ld(temp, ContextOperand(next, Context::EXTENSION_INDEX)); |
__ Branch(slow, ne, temp, Operand(zero_reg)); |
// Load next context in chain. |
- __ lw(next, ContextOperand(next, Context::PREVIOUS_INDEX)); |
+ __ ld(next, ContextOperand(next, Context::PREVIOUS_INDEX)); |
__ Branch(&loop); |
__ bind(&fast); |
} |
- __ lw(LoadIC::ReceiverRegister(), GlobalObjectOperand()); |
+ __ ld(LoadIC::ReceiverRegister(), GlobalObjectOperand()); |
__ li(LoadIC::NameRegister(), Operand(var->name())); |
ContextualMode mode = (typeof_state == INSIDE_TYPEOF) |
? NOT_CONTEXTUAL |
@@ -1411,22 +1406,22 @@ MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var, |
ASSERT(var->IsContextSlot()); |
Register context = cp; |
Register next = a3; |
- Register temp = t0; |
+ Register temp = a4; |
for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) { |
if (s->num_heap_slots() > 0) { |
if (s->calls_sloppy_eval()) { |
// Check that extension is NULL. |
- __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX)); |
+ __ ld(temp, ContextOperand(context, Context::EXTENSION_INDEX)); |
__ Branch(slow, ne, temp, Operand(zero_reg)); |
} |
- __ lw(next, ContextOperand(context, Context::PREVIOUS_INDEX)); |
+ __ ld(next, ContextOperand(context, Context::PREVIOUS_INDEX)); |
// Walk the rest of the chain without clobbering cp. |
context = next; |
} |
} |
// Check that last extension is NULL. |
- __ lw(temp, ContextOperand(context, Context::EXTENSION_INDEX)); |
+ __ ld(temp, ContextOperand(context, Context::EXTENSION_INDEX)); |
__ Branch(slow, ne, temp, Operand(zero_reg)); |
// This function is used only for loads, not stores, so it's safe to |
@@ -1450,11 +1445,11 @@ void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var, |
__ Branch(done); |
} else if (var->mode() == DYNAMIC_LOCAL) { |
Variable* local = var->local_if_not_shadowed(); |
- __ lw(v0, ContextSlotOperandCheckExtensions(local, slow)); |
+ __ ld(v0, ContextSlotOperandCheckExtensions(local, slow)); |
if (local->mode() == LET || local->mode() == CONST || |
local->mode() == CONST_LEGACY) { |
__ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
- __ subu(at, v0, at); // Sub as compare: at == 0 on eq. |
+ __ dsubu(at, v0, at); // Sub as compare: at == 0 on eq. |
if (local->mode() == CONST_LEGACY) { |
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex); |
__ Movz(v0, a0, at); // Conditional move: return Undefined if TheHole. |
@@ -1480,7 +1475,9 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { |
switch (var->location()) { |
case Variable::UNALLOCATED: { |
Comment cmnt(masm_, "[ Global variable"); |
- __ lw(LoadIC::ReceiverRegister(), GlobalObjectOperand()); |
+ // Use inline caching. Variable name is passed in a2 and the global |
+ // object (receiver) in a0. |
+ __ ld(LoadIC::ReceiverRegister(), GlobalObjectOperand()); |
__ li(LoadIC::NameRegister(), Operand(var->name())); |
CallLoadIC(CONTEXTUAL); |
context()->Plug(v0); |
@@ -1531,7 +1528,7 @@ void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) { |
// Let and const need a read barrier. |
GetVar(v0, var); |
__ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
- __ subu(at, v0, at); // Sub as compare: at == 0 on eq. |
+ __ dsubu(at, v0, at); // Sub as compare: at == 0 on eq. |
if (var->mode() == LET || var->mode() == CONST) { |
// Throw a reference error when using an uninitialized let/const |
// binding in harmony mode. |
@@ -1576,28 +1573,28 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { |
Comment cmnt(masm_, "[ RegExpLiteral"); |
Label materialized; |
// Registers will be used as follows: |
- // t1 = materialized value (RegExp literal) |
- // t0 = JS function, literals array |
+ // a5 = materialized value (RegExp literal) |
+ // a4 = JS function, literals array |
// a3 = literal index |
// a2 = RegExp pattern |
// a1 = RegExp flags |
// a0 = RegExp literal clone |
- __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
- __ lw(t0, FieldMemOperand(a0, JSFunction::kLiteralsOffset)); |
+ __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
+ __ ld(a4, FieldMemOperand(a0, JSFunction::kLiteralsOffset)); |
int literal_offset = |
FixedArray::kHeaderSize + expr->literal_index() * kPointerSize; |
- __ lw(t1, FieldMemOperand(t0, literal_offset)); |
+ __ ld(a5, FieldMemOperand(a4, literal_offset)); |
__ LoadRoot(at, Heap::kUndefinedValueRootIndex); |
- __ Branch(&materialized, ne, t1, Operand(at)); |
+ __ Branch(&materialized, ne, a5, Operand(at)); |
// Create regexp literal using runtime function. |
// Result will be in v0. |
__ li(a3, Operand(Smi::FromInt(expr->literal_index()))); |
__ li(a2, Operand(expr->pattern())); |
__ li(a1, Operand(expr->flags())); |
- __ Push(t0, a3, a2, a1); |
+ __ Push(a4, a3, a2, a1); |
__ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4); |
- __ mov(t1, v0); |
+ __ mov(a5, v0); |
__ bind(&materialized); |
int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize; |
@@ -1607,17 +1604,17 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) { |
__ bind(&runtime_allocate); |
__ li(a0, Operand(Smi::FromInt(size))); |
- __ Push(t1, a0); |
+ __ Push(a5, a0); |
__ CallRuntime(Runtime::kAllocateInNewSpace, 1); |
- __ pop(t1); |
+ __ pop(a5); |
__ bind(&allocated); |
// After this, registers are used as follows: |
// v0: Newly allocated regexp. |
- // t1: Materialized regexp. |
+ // a5: Materialized regexp. |
// a2: temp. |
- __ CopyFields(v0, t1, a2.bit(), size / kPointerSize); |
+ __ CopyFields(v0, a5, a2.bit(), size / kPointerSize); |
context()->Plug(v0); |
} |
@@ -1637,8 +1634,8 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { |
expr->BuildConstantProperties(isolate()); |
Handle<FixedArray> constant_properties = expr->constant_properties(); |
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
- __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset)); |
+ __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
+ __ ld(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset)); |
__ li(a2, Operand(Smi::FromInt(expr->literal_index()))); |
__ li(a1, Operand(constant_properties)); |
int flags = expr->fast_elements() |
@@ -1691,7 +1688,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { |
VisitForAccumulatorValue(value); |
__ mov(a0, result_register()); |
__ li(a2, Operand(key->value())); |
- __ lw(a1, MemOperand(sp)); |
+ __ ld(a1, MemOperand(sp)); |
CallStoreIC(key->LiteralFeedbackId()); |
PrepareForBailoutForId(key->id(), NO_REGISTERS); |
} else { |
@@ -1700,7 +1697,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { |
break; |
} |
// Duplicate receiver on stack. |
- __ lw(a0, MemOperand(sp)); |
+ __ ld(a0, MemOperand(sp)); |
__ push(a0); |
VisitForStackValue(key); |
VisitForStackValue(value); |
@@ -1714,7 +1711,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { |
break; |
case ObjectLiteral::Property::PROTOTYPE: |
// Duplicate receiver on stack. |
- __ lw(a0, MemOperand(sp)); |
+ __ ld(a0, MemOperand(sp)); |
__ push(a0); |
VisitForStackValue(value); |
if (property->emit_store()) { |
@@ -1737,7 +1734,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { |
for (AccessorTable::Iterator it = accessor_table.begin(); |
it != accessor_table.end(); |
++it) { |
- __ lw(a0, MemOperand(sp)); // Duplicate receiver. |
+ __ ld(a0, MemOperand(sp)); // Duplicate receiver. |
__ push(a0); |
VisitForStackValue(it->first); |
EmitAccessor(it->second->getter); |
@@ -1749,7 +1746,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) { |
if (expr->has_function()) { |
ASSERT(result_saved); |
- __ lw(a0, MemOperand(sp)); |
+ __ ld(a0, MemOperand(sp)); |
__ push(a0); |
__ CallRuntime(Runtime::kToFastProperties, 1); |
} |
@@ -1790,8 +1787,8 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { |
} |
__ mov(a0, result_register()); |
- __ lw(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
- __ lw(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset)); |
+ __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
+ __ ld(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset)); |
__ li(a2, Operand(Smi::FromInt(expr->literal_index()))); |
__ li(a1, Operand(constant_elements)); |
if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) { |
@@ -1823,9 +1820,9 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) { |
if (IsFastObjectElementsKind(constant_elements_kind)) { |
int offset = FixedArray::kHeaderSize + (i * kPointerSize); |
- __ lw(t2, MemOperand(sp, kPointerSize)); // Copy of array literal. |
- __ lw(a1, FieldMemOperand(t2, JSObject::kElementsOffset)); |
- __ sw(result_register(), FieldMemOperand(a1, offset)); |
+ __ ld(a6, MemOperand(sp, kPointerSize)); // Copy of array literal. |
+ __ ld(a1, FieldMemOperand(a6, JSObject::kElementsOffset)); |
+ __ sd(result_register(), FieldMemOperand(a1, offset)); |
// Update the write barrier for the array store. |
__ RecordWriteField(a1, offset, result_register(), a2, |
kRAHasBeenSaved, kDontSaveFPRegs, |
@@ -1873,7 +1870,7 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { |
if (expr->is_compound()) { |
// We need the receiver both on the stack and in the register. |
VisitForStackValue(property->obj()); |
- __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); |
+ __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); |
} else { |
VisitForStackValue(property->obj()); |
} |
@@ -1883,8 +1880,8 @@ void FullCodeGenerator::VisitAssignment(Assignment* expr) { |
if (expr->is_compound()) { |
VisitForStackValue(property->obj()); |
VisitForStackValue(property->key()); |
- __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize)); |
- __ lw(LoadIC::NameRegister(), MemOperand(sp, 0)); |
+ __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize)); |
+ __ ld(LoadIC::NameRegister(), MemOperand(sp, 0)); |
} else { |
VisitForStackValue(property->obj()); |
VisitForStackValue(property->key()); |
@@ -1982,16 +1979,16 @@ void FullCodeGenerator::VisitYield(Yield* expr) { |
VisitForAccumulatorValue(expr->generator_object()); |
ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos())); |
__ li(a1, Operand(Smi::FromInt(continuation.pos()))); |
- __ sw(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset)); |
- __ sw(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset)); |
+ __ sd(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset)); |
+ __ sd(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset)); |
__ mov(a1, cp); |
__ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2, |
kRAHasBeenSaved, kDontSaveFPRegs); |
- __ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset)); |
+ __ Daddu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset)); |
__ Branch(&post_runtime, eq, sp, Operand(a1)); |
__ push(v0); // generator object |
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); |
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
__ bind(&post_runtime); |
__ pop(result_register()); |
EmitReturnSequence(); |
@@ -2004,7 +2001,7 @@ void FullCodeGenerator::VisitYield(Yield* expr) { |
case Yield::FINAL: { |
VisitForAccumulatorValue(expr->generator_object()); |
__ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed))); |
- __ sw(a1, FieldMemOperand(result_register(), |
+ __ sd(a1, FieldMemOperand(result_register(), |
JSGeneratorObject::kContinuationOffset)); |
// Pop value from top-of-stack slot, box result into result register. |
EmitCreateIteratorResult(true); |
@@ -2024,7 +2021,6 @@ void FullCodeGenerator::VisitYield(Yield* expr) { |
Label l_next, l_call; |
Register load_receiver = LoadIC::ReceiverRegister(); |
Register load_name = LoadIC::NameRegister(); |
- |
// Initial send value is undefined. |
__ LoadRoot(a0, Heap::kUndefinedValueRootIndex); |
__ Branch(&l_next); |
@@ -2033,9 +2029,9 @@ void FullCodeGenerator::VisitYield(Yield* expr) { |
__ bind(&l_catch); |
__ mov(a0, v0); |
handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos())); |
- __ LoadRoot(load_name, Heap::kthrow_stringRootIndex); // "throw" |
- __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter |
- __ Push(load_name, a3, a0); // "throw", iter, except |
+ __ LoadRoot(a2, Heap::kthrow_stringRootIndex); // "throw" |
+ __ ld(a3, MemOperand(sp, 1 * kPointerSize)); // iter |
+ __ Push(a2, a3, a0); // "throw", iter, except |
__ jmp(&l_call); |
// try { received = %yield result } |
@@ -2052,17 +2048,17 @@ void FullCodeGenerator::VisitYield(Yield* expr) { |
__ jmp(&l_resume); |
__ bind(&l_suspend); |
const int generator_object_depth = kPointerSize + handler_size; |
- __ lw(a0, MemOperand(sp, generator_object_depth)); |
+ __ ld(a0, MemOperand(sp, generator_object_depth)); |
__ push(a0); // g |
ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos())); |
__ li(a1, Operand(Smi::FromInt(l_continuation.pos()))); |
- __ sw(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset)); |
- __ sw(cp, FieldMemOperand(a0, JSGeneratorObject::kContextOffset)); |
+ __ sd(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset)); |
+ __ sd(cp, FieldMemOperand(a0, JSGeneratorObject::kContextOffset)); |
__ mov(a1, cp); |
__ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2, |
kRAHasBeenSaved, kDontSaveFPRegs); |
__ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1); |
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
__ pop(v0); // result |
EmitReturnSequence(); |
__ mov(a0, v0); |
@@ -2071,30 +2067,29 @@ void FullCodeGenerator::VisitYield(Yield* expr) { |
// receiver = iter; f = 'next'; arg = received; |
__ bind(&l_next); |
- |
__ LoadRoot(load_name, Heap::knext_stringRootIndex); // "next" |
- __ lw(a3, MemOperand(sp, 1 * kPointerSize)); // iter |
+ __ ld(a3, MemOperand(sp, 1 * kPointerSize)); // iter |
__ Push(load_name, a3, a0); // "next", iter, received |
// result = receiver[f](arg); |
__ bind(&l_call); |
- __ lw(load_receiver, MemOperand(sp, kPointerSize)); |
- __ lw(load_name, MemOperand(sp, 2 * kPointerSize)); |
+ __ ld(load_receiver, MemOperand(sp, kPointerSize)); |
+ __ ld(load_name, MemOperand(sp, 2 * kPointerSize)); |
Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize(); |
CallIC(ic, TypeFeedbackId::None()); |
__ mov(a0, v0); |
__ mov(a1, a0); |
- __ sw(a1, MemOperand(sp, 2 * kPointerSize)); |
+ __ sd(a1, MemOperand(sp, 2 * kPointerSize)); |
CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD); |
__ CallStub(&stub); |
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
__ Drop(1); // The function is still on the stack; drop it. |
// if (!result.done) goto l_try; |
__ Move(load_receiver, v0); |
- __ push(load_receiver); // save result |
+ __ push(load_receiver); // save result |
__ LoadRoot(load_name, Heap::kdone_stringRootIndex); // "done" |
CallLoadIC(NOT_CONTEXTUAL); // v0=result.done |
__ mov(a0, v0); |
@@ -2126,28 +2121,30 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator, |
// Check generator state. |
Label wrong_state, closed_state, done; |
- __ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset)); |
+ __ ld(a3, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset)); |
STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0); |
STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0); |
__ Branch(&closed_state, eq, a3, Operand(zero_reg)); |
__ Branch(&wrong_state, lt, a3, Operand(zero_reg)); |
// Load suspended function and context. |
- __ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset)); |
- __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); |
+ __ ld(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset)); |
+ __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset)); |
// Load receiver and store as the first argument. |
- __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset)); |
+ __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset)); |
__ push(a2); |
// Push holes for the rest of the arguments to the generator function. |
- __ lw(a3, FieldMemOperand(t0, JSFunction::kSharedFunctionInfoOffset)); |
+ __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset)); |
+ // The argument count is stored as int32_t on 64-bit platforms. |
+ // TODO(plind): Smi on 32-bit platforms. |
__ lw(a3, |
FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset)); |
__ LoadRoot(a2, Heap::kTheHoleValueRootIndex); |
Label push_argument_holes, push_frame; |
__ bind(&push_argument_holes); |
- __ Subu(a3, a3, Operand(Smi::FromInt(1))); |
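+ // a3 was loaded above as a raw int32 parameter count, so decrement with an untagged 1 rather than Smi::FromInt(1). |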
+ __ Dsubu(a3, a3, Operand(1)); |
__ Branch(&push_frame, lt, a3, Operand(zero_reg)); |
__ push(a2); |
__ jmp(&push_argument_holes); |
@@ -2162,14 +2159,14 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator, |
// ra = return address. |
// fp = caller's frame pointer. |
// cp = callee's context, |
- // t0 = callee's JS function. |
- __ Push(ra, fp, cp, t0); |
+ // a4 = callee's JS function. |
+ __ Push(ra, fp, cp, a4); |
// Adjust FP to point to saved FP. |
- __ Addu(fp, sp, 2 * kPointerSize); |
+ __ Daddu(fp, sp, 2 * kPointerSize); |
// Load the operand stack size. |
- __ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset)); |
- __ lw(a3, FieldMemOperand(a3, FixedArray::kLengthOffset)); |
+ __ ld(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset)); |
+ __ ld(a3, FieldMemOperand(a3, FixedArray::kLengthOffset)); |
__ SmiUntag(a3); |
// If we are sending a value and there is no operand stack, we can jump back |
@@ -2177,12 +2174,12 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator, |
if (resume_mode == JSGeneratorObject::NEXT) { |
Label slow_resume; |
__ Branch(&slow_resume, ne, a3, Operand(zero_reg)); |
- __ lw(a3, FieldMemOperand(t0, JSFunction::kCodeEntryOffset)); |
- __ lw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset)); |
+ __ ld(a3, FieldMemOperand(a4, JSFunction::kCodeEntryOffset)); |
+ __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset)); |
__ SmiUntag(a2); |
- __ Addu(a3, a3, Operand(a2)); |
+ __ Daddu(a3, a3, Operand(a2)); |
__ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting))); |
- __ sw(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset)); |
+ __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset)); |
__ Jump(a3); |
__ bind(&slow_resume); |
} |
@@ -2191,7 +2188,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator, |
// up the stack and the handlers. |
Label push_operand_holes, call_resume; |
__ bind(&push_operand_holes); |
- __ Subu(a3, a3, Operand(1)); |
+ __ Dsubu(a3, a3, Operand(1)); |
__ Branch(&call_resume, lt, a3, Operand(zero_reg)); |
__ push(a2); |
__ Branch(&push_operand_holes); |
@@ -2240,21 +2237,21 @@ void FullCodeGenerator::EmitCreateIteratorResult(bool done) { |
__ bind(&gc_required); |
__ Push(Smi::FromInt(map->instance_size())); |
__ CallRuntime(Runtime::kAllocateInNewSpace, 1); |
- __ lw(context_register(), |
+ __ ld(context_register(), |
MemOperand(fp, StandardFrameConstants::kContextOffset)); |
__ bind(&allocated); |
__ li(a1, Operand(map)); |
__ pop(a2); |
__ li(a3, Operand(isolate()->factory()->ToBoolean(done))); |
- __ li(t0, Operand(isolate()->factory()->empty_fixed_array())); |
+ __ li(a4, Operand(isolate()->factory()->empty_fixed_array())); |
ASSERT_EQ(map->instance_size(), 5 * kPointerSize); |
- __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); |
- __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset)); |
- __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset)); |
- __ sw(a2, |
+ __ sd(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); |
+ __ sd(a4, FieldMemOperand(v0, JSObject::kPropertiesOffset)); |
+ __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset)); |
+ __ sd(a2, |
FieldMemOperand(v0, JSGeneratorObject::kResultValuePropertyOffset)); |
- __ sw(a3, |
+ __ sd(a3, |
FieldMemOperand(v0, JSGeneratorObject::kResultDonePropertyOffset)); |
// Only the value field needs a write barrier, as the other values are in the |
@@ -2315,23 +2312,21 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, |
switch (op) { |
case Token::SAR: |
__ GetLeastBitsFromSmi(scratch1, right, 5); |
- __ srav(right, left, scratch1); |
- __ And(v0, right, Operand(~kSmiTagMask)); |
+ __ dsrav(right, left, scratch1); |
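+ // MIPS64 Smis keep the payload in the upper 32 bits; masking off the low word retags the shifted value as a valid Smi. |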
+ __ And(v0, right, Operand(0xffffffff00000000L)); |
break; |
case Token::SHL: { |
__ SmiUntag(scratch1, left); |
__ GetLeastBitsFromSmi(scratch2, right, 5); |
- __ sllv(scratch1, scratch1, scratch2); |
- __ Addu(scratch2, scratch1, Operand(0x40000000)); |
- __ Branch(&stub_call, lt, scratch2, Operand(zero_reg)); |
+ __ dsllv(scratch1, scratch1, scratch2); |
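+ // Any int32 shift result fits in a 64-bit Smi, so the 32-bit port's overflow branch is no longer needed. |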
__ SmiTag(v0, scratch1); |
break; |
} |
case Token::SHR: { |
__ SmiUntag(scratch1, left); |
__ GetLeastBitsFromSmi(scratch2, right, 5); |
- __ srlv(scratch1, scratch1, scratch2); |
- __ And(scratch2, scratch1, 0xc0000000); |
+ __ dsrlv(scratch1, scratch1, scratch2); |
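+ // A logical shift can set bit 31, which would read back as a negative Smi; such results go to the stub instead. |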
+ __ And(scratch2, scratch1, 0x80000000); |
__ Branch(&stub_call, ne, scratch2, Operand(zero_reg)); |
__ SmiTag(v0, scratch1); |
break; |
@@ -2346,14 +2341,14 @@ void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr, |
break; |
case Token::MUL: { |
__ SmiUntag(scratch1, right); |
- __ Mult(left, scratch1); |
+ __ Dmult(left, scratch1); |
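+ // left is still tagged (payload << 32), so once the overflow check below passes, the low 64 bits of the product are already a correctly tagged Smi. |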
__ mflo(scratch1); |
__ mfhi(scratch2); |
- __ sra(scratch1, scratch1, 31); |
+ __ dsra32(scratch1, scratch1, 31); |
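+ // dsra32 by 31 shifts lo right by 63, leaving its sign extension; a mismatch with the hi word means the product overflowed 64 bits. |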
__ Branch(&stub_call, ne, scratch1, Operand(scratch2)); |
__ mflo(v0); |
__ Branch(&done, ne, v0, Operand(zero_reg)); |
- __ Addu(scratch2, right, left); |
+ __ Daddu(scratch2, right, left); |
__ Branch(&stub_call, lt, scratch2, Operand(zero_reg)); |
ASSERT(Smi::FromInt(0) == 0); |
__ mov(v0, zero_reg); |
@@ -2439,7 +2434,7 @@ void FullCodeGenerator::EmitAssignment(Expression* expr) { |
void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot( |
Variable* var, MemOperand location) { |
- __ sw(result_register(), location); |
+ __ sd(result_register(), location); |
if (var->IsContextSlot()) { |
// RecordWrite may destroy all its register arguments. |
__ Move(a3, result_register()); |
@@ -2464,9 +2459,8 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { |
// Global var, const, or let. |
__ mov(a0, result_register()); |
__ li(a2, Operand(var->name())); |
- __ lw(a1, GlobalObjectOperand()); |
+ __ ld(a1, GlobalObjectOperand()); |
CallStoreIC(); |
- |
} else if (op == Token::INIT_CONST_LEGACY) { |
// Const initializers need a write barrier. |
ASSERT(!var->IsParameter()); // No const parameters. |
@@ -2478,7 +2472,7 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { |
ASSERT(var->IsStackAllocated() || var->IsContextSlot()); |
Label skip; |
MemOperand location = VarOperand(var, a1); |
- __ lw(a2, location); |
+ __ ld(a2, location); |
__ LoadRoot(at, Heap::kTheHoleValueRootIndex); |
__ Branch(&skip, ne, a2, Operand(at)); |
EmitStoreToStackLocalOrContextSlot(var, location); |
@@ -2493,9 +2487,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { |
ASSERT(var->IsStackAllocated() || var->IsContextSlot()); |
Label assign; |
MemOperand location = VarOperand(var, a1); |
- __ lw(a3, location); |
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex); |
- __ Branch(&assign, ne, a3, Operand(t0)); |
+ __ ld(a3, location); |
+ __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); |
+ __ Branch(&assign, ne, a3, Operand(a4)); |
__ li(a3, Operand(var->name())); |
__ push(a3); |
__ CallRuntime(Runtime::kThrowReferenceError, 1); |
@@ -2514,9 +2508,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) { |
MemOperand location = VarOperand(var, a1); |
if (generate_debug_code_ && op == Token::INIT_LET) { |
// Check for an uninitialized let binding. |
- __ lw(a2, location); |
- __ LoadRoot(t0, Heap::kTheHoleValueRootIndex); |
- __ Check(eq, kLetBindingReInitialization, a2, Operand(t0)); |
+ __ ld(a2, location); |
+ __ LoadRoot(a4, Heap::kTheHoleValueRootIndex); |
+ __ Check(eq, kLetBindingReInitialization, a2, Operand(a4)); |
} |
EmitStoreToStackLocalOrContextSlot(var, location); |
} |
@@ -2615,13 +2609,13 @@ void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) { |
} else { |
// Load the function from the receiver. |
ASSERT(callee->IsProperty()); |
- __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); |
+ __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); |
EmitNamedPropertyLoad(callee->AsProperty()); |
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); |
// Push the target function under the receiver. |
- __ lw(at, MemOperand(sp, 0)); |
+ __ ld(at, MemOperand(sp, 0)); |
__ push(at); |
- __ sw(v0, MemOperand(sp, kPointerSize)); |
+ __ sd(v0, MemOperand(sp, kPointerSize)); |
} |
EmitCall(expr, call_type); |
@@ -2638,15 +2632,15 @@ void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, |
// Load the function from the receiver. |
ASSERT(callee->IsProperty()); |
- __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); |
+ __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); |
__ Move(LoadIC::NameRegister(), v0); |
EmitKeyedPropertyLoad(callee->AsProperty()); |
PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG); |
// Push the target function under the receiver. |
- __ lw(at, MemOperand(sp, 0)); |
+ __ ld(at, MemOperand(sp, 0)); |
__ push(at); |
- __ sw(v0, MemOperand(sp, kPointerSize)); |
+ __ sd(v0, MemOperand(sp, kPointerSize)); |
EmitCall(expr, CallIC::METHOD); |
} |
@@ -2667,38 +2661,37 @@ void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) { |
Handle<Code> ic = CallIC::initialize_stub( |
isolate(), arg_count, call_type); |
__ li(a3, Operand(Smi::FromInt(expr->CallFeedbackSlot()))); |
- __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
+ __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
// Don't assign a type feedback id to the IC, since type feedback is provided |
// by the vector above. |
CallIC(ic); |
- |
RecordJSReturnSite(expr); |
// Restore context register. |
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
context()->DropAndPlug(1, v0); |
} |
void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) { |
- // t2: copy of the first argument or undefined if it doesn't exist. |
+ // a6: copy of the first argument or undefined if it doesn't exist. |
if (arg_count > 0) { |
- __ lw(t2, MemOperand(sp, arg_count * kPointerSize)); |
+ __ ld(a6, MemOperand(sp, arg_count * kPointerSize)); |
} else { |
- __ LoadRoot(t2, Heap::kUndefinedValueRootIndex); |
+ __ LoadRoot(a6, Heap::kUndefinedValueRootIndex); |
} |
- // t1: the receiver of the enclosing function. |
+ // a5: the receiver of the enclosing function. |
int receiver_offset = 2 + info_->scope()->num_parameters(); |
- __ lw(t1, MemOperand(fp, receiver_offset * kPointerSize)); |
+ __ ld(a5, MemOperand(fp, receiver_offset * kPointerSize)); |
- // t0: the strict mode. |
- __ li(t0, Operand(Smi::FromInt(strict_mode()))); |
+ // a4: the strict mode. |
+ __ li(a4, Operand(Smi::FromInt(strict_mode()))); |
// a1: the start position of the scope the call resides in. |
__ li(a1, Operand(Smi::FromInt(scope()->start_position()))); |
// Do the runtime call. |
- __ Push(t2, t1, t0, a1); |
+ __ Push(a6, a5, a4, a1); |
__ CallRuntime(Runtime::kResolvePossiblyDirectEval, 5); |
} |
@@ -2734,23 +2727,23 @@ void FullCodeGenerator::VisitCall(Call* expr) { |
// Push a copy of the function (found below the arguments) and |
// resolve eval. |
- __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
+ __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
__ push(a1); |
EmitResolvePossiblyDirectEval(arg_count); |
// The runtime call returns a pair of values in v0 (function) and |
// v1 (receiver). Touch up the stack with the right values. |
- __ sw(v0, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
- __ sw(v1, MemOperand(sp, arg_count * kPointerSize)); |
+ __ sd(v0, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
+ __ sd(v1, MemOperand(sp, arg_count * kPointerSize)); |
} |
// Record source position for debugger. |
SetSourcePosition(expr->position()); |
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); |
- __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
+ __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
__ CallStub(&stub); |
RecordJSReturnSite(expr); |
// Restore context register. |
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
context()->DropAndPlug(1, v0); |
} else if (call_type == Call::GLOBAL_CALL) { |
EmitCallWithLoadIC(expr); |
@@ -2839,14 +2832,13 @@ void FullCodeGenerator::VisitCallNew(CallNew* expr) { |
for (int i = 0; i < arg_count; i++) { |
VisitForStackValue(args->at(i)); |
} |
- |
// Call the construct call builtin that handles allocation and |
// constructor invocation. |
SetSourcePosition(expr->position()); |
// Load function and argument count into a1 and a0. |
__ li(a0, Operand(arg_count)); |
- __ lw(a1, MemOperand(sp, arg_count * kPointerSize)); |
+ __ ld(a1, MemOperand(sp, arg_count * kPointerSize)); |
// Record call targets in unoptimized code. |
if (FLAG_pretenuring_call_new) { |
@@ -2879,8 +2871,8 @@ void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) { |
&if_true, &if_false, &fall_through); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
- __ SmiTst(v0, t0); |
- Split(eq, t0, Operand(zero_reg), if_true, if_false, fall_through); |
+ __ SmiTst(v0, a4); |
+ Split(eq, a4, Operand(zero_reg), if_true, if_false, fall_through); |
context()->Plug(if_true, if_false); |
} |
@@ -2923,7 +2915,7 @@ void FullCodeGenerator::EmitIsObject(CallRuntime* expr) { |
__ JumpIfSmi(v0, if_false); |
__ LoadRoot(at, Heap::kNullValueRootIndex); |
__ Branch(if_true, eq, v0, Operand(at)); |
- __ lw(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); |
+ __ ld(a2, FieldMemOperand(v0, HeapObject::kMapOffset)); |
// Undetectable objects behave like undefined when tested with typeof. |
__ lbu(a1, FieldMemOperand(a2, Map::kBitFieldOffset)); |
__ And(at, a1, Operand(1 << Map::kIsUndetectable)); |
@@ -2975,7 +2967,7 @@ void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) { |
&if_true, &if_false, &fall_through); |
__ JumpIfSmi(v0, if_false); |
- __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); |
+ __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); |
__ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset)); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
__ And(at, a1, Operand(1 << Map::kIsUndetectable)); |
@@ -3001,16 +2993,16 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( |
__ AssertNotSmi(v0); |
- __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); |
- __ lbu(t0, FieldMemOperand(a1, Map::kBitField2Offset)); |
- __ And(t0, t0, 1 << Map::kStringWrapperSafeForDefaultValueOf); |
- __ Branch(&skip_lookup, ne, t0, Operand(zero_reg)); |
+ __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset)); |
+ __ lbu(a4, FieldMemOperand(a1, Map::kBitField2Offset)); |
+ __ And(a4, a4, 1 << Map::kStringWrapperSafeForDefaultValueOf); |
+ __ Branch(&skip_lookup, ne, a4, Operand(zero_reg)); |
// Check for fast case object. Generate false result for slow case object. |
- __ lw(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset)); |
- __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset)); |
- __ LoadRoot(t0, Heap::kHashTableMapRootIndex); |
- __ Branch(if_false, eq, a2, Operand(t0)); |
+ __ ld(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset)); |
+ __ ld(a2, FieldMemOperand(a2, HeapObject::kMapOffset)); |
+ __ LoadRoot(a4, Heap::kHashTableMapRootIndex); |
+ __ Branch(if_false, eq, a2, Operand(a4)); |
// Look for valueOf name in the descriptor array, and indicate false if |
// found. Since we omit an enumeration index check, if it is added via a |
@@ -3021,33 +3013,34 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( |
__ NumberOfOwnDescriptors(a3, a1); |
__ Branch(&done, eq, a3, Operand(zero_reg)); |
- __ LoadInstanceDescriptors(a1, t0); |
- // t0: descriptor array. |
+ __ LoadInstanceDescriptors(a1, a4); |
+ // a4: descriptor array. |
// a3: valid entries in the descriptor array. |
STATIC_ASSERT(kSmiTag == 0); |
STATIC_ASSERT(kSmiTagSize == 1); |
- STATIC_ASSERT(kPointerSize == 4); |
+ // The 32-bit port asserts kPointerSize == 4 here; on MIPS64 kPointerSize is |
+ // 8, so that assertion is intentionally omitted. |
__ li(at, Operand(DescriptorArray::kDescriptorSize)); |
- __ Mul(a3, a3, at); |
+ __ Dmul(a3, a3, at); |
// Calculate location of the first key name. |
- __ Addu(t0, t0, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag)); |
+ __ Daddu(a4, a4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag)); |
// Calculate the end of the descriptor array. |
- __ mov(a2, t0); |
- __ sll(t1, a3, kPointerSizeLog2); |
- __ Addu(a2, a2, t1); |
+ __ mov(a2, a4); |
+ __ dsll(a5, a3, kPointerSizeLog2); |
+ __ Daddu(a2, a2, a5); |
// Loop through all the keys in the descriptor array. If one of these is the |
// string "valueOf" the result is false. |
- // The use of t2 to store the valueOf string assumes that it is not otherwise |
+ // The use of a6 to store the valueOf string assumes that it is not otherwise |
// used in the loop below. |
- __ li(t2, Operand(isolate()->factory()->value_of_string())); |
+ __ li(a6, Operand(isolate()->factory()->value_of_string())); |
__ jmp(&entry); |
__ bind(&loop); |
- __ lw(a3, MemOperand(t0, 0)); |
- __ Branch(if_false, eq, a3, Operand(t2)); |
- __ Addu(t0, t0, Operand(DescriptorArray::kDescriptorSize * kPointerSize)); |
+ __ ld(a3, MemOperand(a4, 0)); |
+ __ Branch(if_false, eq, a3, Operand(a6)); |
+ __ Daddu(a4, a4, Operand(DescriptorArray::kDescriptorSize * kPointerSize)); |
__ bind(&entry); |
- __ Branch(&loop, ne, t0, Operand(a2)); |
+ __ Branch(&loop, ne, a4, Operand(a2)); |
__ bind(&done); |
@@ -3060,12 +3053,12 @@ void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf( |
// If a valueOf property is not found on the object check that its |
// prototype is the un-modified String prototype. If not result is false. |
- __ lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset)); |
+ __ ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset)); |
__ JumpIfSmi(a2, if_false); |
- __ lw(a2, FieldMemOperand(a2, HeapObject::kMapOffset)); |
- __ lw(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); |
- __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset)); |
- __ lw(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX)); |
+ __ ld(a2, FieldMemOperand(a2, HeapObject::kMapOffset)); |
+ __ ld(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); |
+ __ ld(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset)); |
+ __ ld(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX)); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
Split(eq, a2, Operand(a3), if_true, if_false, fall_through); |
@@ -3110,17 +3103,17 @@ void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) { |
&if_true, &if_false, &fall_through); |
__ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK); |
- __ lw(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset)); |
- __ lw(a1, FieldMemOperand(v0, HeapNumber::kMantissaOffset)); |
- __ li(t0, 0x80000000); |
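+ // The exponent and mantissa are 32-bit words within the HeapNumber, so load them with zero-extending lwu rather than 64-bit ld. |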
+ __ lwu(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset)); |
+ __ lwu(a1, FieldMemOperand(v0, HeapNumber::kMantissaOffset)); |
+ __ li(a4, 0x80000000); |
Label not_nan; |
- __ Branch(&not_nan, ne, a2, Operand(t0)); |
- __ mov(t0, zero_reg); |
+ __ Branch(&not_nan, ne, a2, Operand(a4)); |
+ __ mov(a4, zero_reg); |
__ mov(a2, a1); |
__ bind(&not_nan); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
- Split(eq, a2, Operand(t0), if_true, if_false, fall_through); |
+ Split(eq, a2, Operand(a4), if_true, if_false, fall_through); |
context()->Plug(if_true, if_false); |
} |
@@ -3182,18 +3175,18 @@ void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) { |
&if_true, &if_false, &fall_through); |
// Get the frame pointer for the calling frame. |
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
+ __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
// Skip the arguments adaptor frame if it exists. |
Label check_frame_marker; |
- __ lw(a1, MemOperand(a2, StandardFrameConstants::kContextOffset)); |
+ __ ld(a1, MemOperand(a2, StandardFrameConstants::kContextOffset)); |
__ Branch(&check_frame_marker, ne, |
a1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
- __ lw(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset)); |
+ __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset)); |
// Check the marker in the calling frame. |
__ bind(&check_frame_marker); |
- __ lw(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset)); |
+ __ ld(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset)); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)), |
if_true, if_false, fall_through); |
@@ -3247,14 +3240,14 @@ void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) { |
__ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters()))); |
// Check if the calling frame is an arguments adaptor frame. |
- __ lw(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
- __ lw(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); |
+ __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
+ __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset)); |
__ Branch(&exit, ne, a3, |
Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
// Arguments adaptor case: Read the arguments length from the |
// adaptor frame. |
- __ lw(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
+ __ ld(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
__ bind(&exit); |
context()->Plug(v0); |
@@ -3290,14 +3283,14 @@ void FullCodeGenerator::EmitClassOf(CallRuntime* expr) { |
STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1); |
// Check if the constructor in the map is a JS function. |
- __ lw(v0, FieldMemOperand(v0, Map::kConstructorOffset)); |
+ __ ld(v0, FieldMemOperand(v0, Map::kConstructorOffset)); |
__ GetObjectType(v0, a1, a1); |
__ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE)); |
// v0 now contains the constructor function. Grab the |
// instance class name from there. |
- __ lw(v0, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset)); |
- __ lw(v0, FieldMemOperand(v0, SharedFunctionInfo::kInstanceClassNameOffset)); |
+ __ ld(v0, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset)); |
+ __ ld(v0, FieldMemOperand(v0, SharedFunctionInfo::kInstanceClassNameOffset)); |
__ Branch(&done); |
// Functions have class 'Function'. |
@@ -3361,7 +3354,7 @@ void FullCodeGenerator::EmitValueOf(CallRuntime* expr) { |
__ GetObjectType(v0, a1, a1); |
__ Branch(&done, ne, a1, Operand(JS_VALUE_TYPE)); |
- __ lw(v0, FieldMemOperand(v0, JSValue::kValueOffset)); |
+ __ ld(v0, FieldMemOperand(v0, JSValue::kValueOffset)); |
__ bind(&done); |
context()->Plug(v0); |
@@ -3379,7 +3372,7 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { |
Label runtime, done, not_date_object; |
Register object = v0; |
Register result = v0; |
- Register scratch0 = t5; |
+ Register scratch0 = t1; |
Register scratch1 = a1; |
__ JumpIfSmi(object, &not_date_object); |
@@ -3387,16 +3380,16 @@ void FullCodeGenerator::EmitDateField(CallRuntime* expr) { |
__ Branch(&not_date_object, ne, scratch1, Operand(JS_DATE_TYPE)); |
if (index->value() == 0) { |
- __ lw(result, FieldMemOperand(object, JSDate::kValueOffset)); |
+ __ ld(result, FieldMemOperand(object, JSDate::kValueOffset)); |
__ jmp(&done); |
} else { |
if (index->value() < JSDate::kFirstUncachedField) { |
ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
__ li(scratch1, Operand(stamp)); |
- __ lw(scratch1, MemOperand(scratch1)); |
- __ lw(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset)); |
+ __ ld(scratch1, MemOperand(scratch1)); |
+ __ ld(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset)); |
__ Branch(&runtime, ne, scratch1, Operand(scratch0)); |
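+ // Cached date fields hold tagged values, so each slot is kPointerSize |
+ // (8 bytes) wide here. |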
- __ lw(result, FieldMemOperand(object, JSDate::kValueOffset + |
+ __ ld(result, FieldMemOperand(object, JSDate::kValueOffset + |
kPointerSize * index->value())); |
__ jmp(&done); |
} |
@@ -3435,18 +3428,18 @@ void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) { |
__ Check(eq, kNonSmiIndex, at, Operand(zero_reg)); |
__ SmiUntag(index, index); |
static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag; |
- Register scratch = t5; |
+ Register scratch = t1; |
__ EmitSeqStringSetCharCheck( |
string, index, value, scratch, one_byte_seq_type); |
__ SmiTag(index, index); |
} |
__ SmiUntag(value, value); |
- __ Addu(at, |
+ __ Daddu(at, |
string, |
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
__ SmiUntag(index); |
- __ Addu(at, at, index); |
+ __ Daddu(at, at, index); |
__ sb(value, MemOperand(at)); |
context()->Plug(string); |
} |
@@ -3472,17 +3465,18 @@ void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) { |
__ Check(eq, kNonSmiIndex, at, Operand(zero_reg)); |
__ SmiUntag(index, index); |
static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag; |
- Register scratch = t5; |
+ Register scratch = t1; |
__ EmitSeqStringSetCharCheck( |
string, index, value, scratch, two_byte_seq_type); |
__ SmiTag(index, index); |
} |
__ SmiUntag(value, value); |
- __ Addu(at, |
+ __ Daddu(at, |
string, |
Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); |
- __ Addu(at, at, index); |
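+ // Smis keep their value in the upper 32 bits on mips64, so an arithmetic |
+ // shift right by 31 untags the index and doubles it for two-byte |
+ // characters in a single instruction. |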
+ __ dsra(index, index, 32 - 1); |
+ __ Daddu(at, at, index); |
STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); |
__ sh(value, MemOperand(at)); |
context()->Plug(string); |
@@ -3518,7 +3512,7 @@ void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) { |
__ Branch(&done, ne, a2, Operand(JS_VALUE_TYPE)); |
// Store the value. |
- __ sw(v0, FieldMemOperand(a1, JSValue::kValueOffset)); |
+ __ sd(v0, FieldMemOperand(a1, JSValue::kValueOffset)); |
// Update the write barrier. Save the value as it will be |
// overwritten by the write barrier code and is needed afterward. |
__ mov(a2, v0); |
@@ -3706,7 +3700,7 @@ void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) { |
__ mov(a1, result_register()); |
ParameterCount count(arg_count); |
__ InvokeFunction(a1, count, CALL_FUNCTION, NullCallWrapper()); |
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
__ jmp(&done); |
__ bind(&runtime); |
@@ -3753,28 +3747,28 @@ void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) { |
Register key = v0; |
Register cache = a1; |
- __ lw(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); |
- __ lw(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset)); |
- __ lw(cache, |
+ __ ld(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); |
+ __ ld(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset)); |
+ __ ld(cache, |
ContextOperand( |
cache, Context::JSFUNCTION_RESULT_CACHES_INDEX)); |
- __ lw(cache, |
+ __ ld(cache, |
FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id))); |
Label done, not_found; |
STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1); |
- __ lw(a2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset)); |
+ __ ld(a2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset)); |
// a2 now holds finger offset as a smi. |
- __ Addu(a3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
+ __ Daddu(a3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
// a3 now points to the start of fixed array elements. |
- __ sll(at, a2, kPointerSizeLog2 - kSmiTagSize); |
- __ addu(a3, a3, at); |
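+ // SmiScale turns the smi finger directly into a byte offset |
+ // (value << kPointerSizeLog2) with one shift under the 64-bit smi layout. |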
+ __ SmiScale(at, a2, kPointerSizeLog2); |
+ __ daddu(a3, a3, at); |
// a3 now points to key of indexed element of cache. |
- __ lw(a2, MemOperand(a3)); |
+ __ ld(a2, MemOperand(a3)); |
__ Branch(&not_found, ne, key, Operand(a2)); |
- __ lw(v0, MemOperand(a3, kPointerSize)); |
+ __ ld(v0, MemOperand(a3, kPointerSize)); |
__ Branch(&done); |
__ bind(&not_found); |
@@ -3798,7 +3792,7 @@ void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) { |
context()->PrepareTest(&materialize_true, &materialize_false, |
&if_true, &if_false, &fall_through); |
- __ lw(a0, FieldMemOperand(v0, String::kHashFieldOffset)); |
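+ // The hash field is 32 bits wide; lwu zero-extends it. |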
+ __ lwu(a0, FieldMemOperand(v0, String::kHashFieldOffset)); |
__ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask)); |
PrepareForBailoutBeforeSplit(expr, true, if_true, if_false); |
@@ -3815,7 +3809,7 @@ void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) { |
__ AssertString(v0); |
- __ lw(v0, FieldMemOperand(v0, String::kHashFieldOffset)); |
+ __ lwu(v0, FieldMemOperand(v0, String::kHashFieldOffset)); |
__ IndexFromHash(v0, v0); |
context()->Plug(v0); |
@@ -3840,12 +3834,12 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { |
Register array_length = a2; |
Register result_pos = no_reg; // Will be a2. |
Register string_length = a3; |
- Register string = t0; |
- Register element = t1; |
- Register elements_end = t2; |
- Register scratch1 = t3; |
- Register scratch2 = t5; |
- Register scratch3 = t4; |
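+ // Same physical registers as before: the n64 ABI names $8-$11 a4-a7 |
+ // (extra argument registers) instead of t0-t3, and $12-$13 t0-t1 |
+ // instead of t4-t5. |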
+ Register string = a4; |
+ Register element = a5; |
+ Register elements_end = a6; |
+ Register scratch1 = a7; |
+ Register scratch2 = t1; |
+ Register scratch3 = t0; |
// Separator operand is on the stack. |
__ pop(separator); |
@@ -3859,7 +3853,7 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { |
__ CheckFastElements(scratch1, scratch2, &bailout); |
// If the array has length zero, return the empty string. |
- __ lw(array_length, FieldMemOperand(array, JSArray::kLengthOffset)); |
+ __ ld(array_length, FieldMemOperand(array, JSArray::kLengthOffset)); |
__ SmiUntag(array_length); |
__ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg)); |
__ LoadRoot(v0, Heap::kempty_stringRootIndex); |
@@ -3869,16 +3863,16 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { |
// Get the FixedArray containing array's elements. |
elements = array; |
- __ lw(elements, FieldMemOperand(array, JSArray::kElementsOffset)); |
+ __ ld(elements, FieldMemOperand(array, JSArray::kElementsOffset)); |
array = no_reg; // End of array's live range. |
// Check that all array elements are sequential ASCII strings, and |
// accumulate the sum of their lengths, as a smi-encoded value. |
__ mov(string_length, zero_reg); |
- __ Addu(element, |
+ __ Daddu(element, |
elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
- __ sll(elements_end, array_length, kPointerSizeLog2); |
- __ Addu(elements_end, element, elements_end); |
+ __ dsll(elements_end, array_length, kPointerSizeLog2); |
+ __ Daddu(elements_end, element, elements_end); |
// Loop condition: while (element < elements_end). |
// Live values in registers: |
// elements: Fixed array of strings. |
@@ -3892,20 +3886,20 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { |
array_length, Operand(zero_reg)); |
} |
__ bind(&loop); |
- __ lw(string, MemOperand(element)); |
- __ Addu(element, element, kPointerSize); |
+ __ ld(string, MemOperand(element)); |
+ __ Daddu(element, element, kPointerSize); |
__ JumpIfSmi(string, &bailout); |
- __ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset)); |
+ __ ld(scratch1, FieldMemOperand(string, HeapObject::kMapOffset)); |
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); |
- __ lw(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset)); |
+ __ ld(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset)); |
__ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3); |
__ BranchOnOverflow(&bailout, scratch3); |
__ Branch(&loop, lt, element, Operand(elements_end)); |
// If array_length is 1, return elements[0], a string. |
__ Branch(&not_size_one_array, ne, array_length, Operand(1)); |
- __ lw(v0, FieldMemOperand(elements, FixedArray::kHeaderSize)); |
+ __ ld(v0, FieldMemOperand(elements, FixedArray::kHeaderSize)); |
__ Branch(&done); |
__ bind(&not_size_one_array); |
@@ -3918,30 +3912,29 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { |
// Check that the separator is a flat ASCII string. |
__ JumpIfSmi(separator, &bailout); |
- __ lw(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset)); |
+ __ ld(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset)); |
__ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
__ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout); |
// Add (separator length times array_length) - separator length to the |
// string_length to get the length of the result string. array_length is not |
// smi but the other values are, so the result is a smi. |
- __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); |
- __ Subu(string_length, string_length, Operand(scratch1)); |
- __ Mult(array_length, scratch1); |
+ __ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); |
+ __ Dsubu(string_length, string_length, Operand(scratch1)); |
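+ // array_length was untagged earlier; untag the separator length too so |
+ // Dmult multiplies raw values. |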
+ __ SmiUntag(scratch1); |
+ __ Dmult(array_length, scratch1); |
- // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are |
- // zero. |
+ // Check for smi overflow. No overflow if the upper 64 bits of the 128-bit |
+ // product (read from hi below) are zero. |
__ mfhi(scratch2); |
__ Branch(&bailout, ne, scratch2, Operand(zero_reg)); |
__ mflo(scratch2); |
- __ And(scratch3, scratch2, Operand(0x80000000)); |
- __ Branch(&bailout, ne, scratch3, Operand(zero_reg)); |
+ __ SmiUntag(string_length); |
__ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3); |
__ BranchOnOverflow(&bailout, scratch3); |
- __ SmiUntag(string_length); |
// Get first element in the array to free up the elements register to be used |
// for the result. |
- __ Addu(element, |
+ __ Daddu(element, |
elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
result = elements; // End of live range for elements. |
elements = no_reg; |
@@ -3959,16 +3952,16 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { |
// Prepare for looping. Set up elements_end to end of the array. Set |
// result_pos to the position of the result where to write the first |
// character. |
- __ sll(elements_end, array_length, kPointerSizeLog2); |
- __ Addu(elements_end, element, elements_end); |
+ __ dsll(elements_end, array_length, kPointerSizeLog2); |
+ __ Daddu(elements_end, element, elements_end); |
result_pos = array_length; // End of live range for array_length. |
array_length = no_reg; |
- __ Addu(result_pos, |
+ __ Daddu(result_pos, |
result, |
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
// Check the length of the separator. |
- __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); |
+ __ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset)); |
__ li(at, Operand(Smi::FromInt(1))); |
__ Branch(&one_char_separator, eq, scratch1, Operand(at)); |
__ Branch(&long_separator, gt, scratch1, Operand(at)); |
@@ -3981,11 +3974,11 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { |
// elements_end: Array end. |
// Copy next array element to the result. |
- __ lw(string, MemOperand(element)); |
- __ Addu(element, element, kPointerSize); |
- __ lw(string_length, FieldMemOperand(string, String::kLengthOffset)); |
+ __ ld(string, MemOperand(element)); |
+ __ Daddu(element, element, kPointerSize); |
+ __ ld(string_length, FieldMemOperand(string, String::kLengthOffset)); |
__ SmiUntag(string_length); |
- __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
+ __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
__ CopyBytes(string, result_pos, string_length, scratch1); |
// End while (element < elements_end). |
__ Branch(&empty_separator_loop, lt, element, Operand(elements_end)); |
@@ -4009,15 +4002,15 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { |
// Copy the separator character to the result. |
__ sb(separator, MemOperand(result_pos)); |
- __ Addu(result_pos, result_pos, 1); |
+ __ Daddu(result_pos, result_pos, 1); |
// Copy next array element to the result. |
__ bind(&one_char_separator_loop_entry); |
- __ lw(string, MemOperand(element)); |
- __ Addu(element, element, kPointerSize); |
- __ lw(string_length, FieldMemOperand(string, String::kLengthOffset)); |
+ __ ld(string, MemOperand(element)); |
+ __ Daddu(element, element, kPointerSize); |
+ __ ld(string_length, FieldMemOperand(string, String::kLengthOffset)); |
__ SmiUntag(string_length); |
- __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
+ __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
__ CopyBytes(string, result_pos, string_length, scratch1); |
// End while (element < elements_end). |
__ Branch(&one_char_separator_loop, lt, element, Operand(elements_end)); |
@@ -4034,19 +4027,19 @@ void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) { |
// separator: Separator string. |
// Copy the separator to the result. |
- __ lw(string_length, FieldMemOperand(separator, String::kLengthOffset)); |
+ __ ld(string_length, FieldMemOperand(separator, String::kLengthOffset)); |
__ SmiUntag(string_length); |
- __ Addu(string, |
+ __ Daddu(string, |
separator, |
Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); |
__ CopyBytes(string, result_pos, string_length, scratch1); |
__ bind(&long_separator); |
- __ lw(string, MemOperand(element)); |
- __ Addu(element, element, kPointerSize); |
- __ lw(string_length, FieldMemOperand(string, String::kLengthOffset)); |
+ __ ld(string, MemOperand(element)); |
+ __ Daddu(element, element, kPointerSize); |
+ __ ld(string_length, FieldMemOperand(string, String::kLengthOffset)); |
__ SmiUntag(string_length); |
- __ Addu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
+ __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
__ CopyBytes(string, result_pos, string_length, scratch1); |
// End while (element < elements_end). |
__ Branch(&long_separator_loop, lt, element, Operand(elements_end)); |
@@ -4065,7 +4058,7 @@ void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) { |
ExternalReference debug_is_active = |
ExternalReference::debug_is_active_address(isolate()); |
__ li(at, Operand(debug_is_active)); |
- __ lb(v0, MemOperand(at)); |
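+ // The flag is a single byte; lbu zero-extends it so the SmiTag below |
+ // sees a clean value. |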
+ __ lbu(v0, MemOperand(at)); |
__ SmiTag(v0); |
context()->Plug(v0); |
} |
@@ -4086,8 +4079,8 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { |
if (expr->is_jsruntime()) { |
// Push the builtins object as the receiver. |
Register receiver = LoadIC::ReceiverRegister(); |
- __ lw(receiver, GlobalObjectOperand()); |
- __ lw(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset)); |
+ __ ld(receiver, GlobalObjectOperand()); |
+ __ ld(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset)); |
__ push(receiver); |
// Load the function from the receiver. |
@@ -4095,9 +4088,9 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { |
CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId()); |
// Push the target function under the receiver. |
- __ lw(at, MemOperand(sp, 0)); |
+ __ ld(at, MemOperand(sp, 0)); |
__ push(at); |
- __ sw(v0, MemOperand(sp, kPointerSize)); |
+ __ sd(v0, MemOperand(sp, kPointerSize)); |
// Push the arguments ("left-to-right"). |
int arg_count = args->length(); |
@@ -4108,11 +4101,11 @@ void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) { |
// Record source position of the IC call. |
SetSourcePosition(expr->position()); |
CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS); |
- __ lw(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
+ __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize)); |
__ CallStub(&stub); |
// Restore context register. |
- __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
context()->DropAndPlug(1, v0); |
} else { |
@@ -4148,7 +4141,7 @@ void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) { |
// but "delete this" is allowed. |
ASSERT(strict_mode() == SLOPPY || var->is_this()); |
if (var->IsUnallocated()) { |
- __ lw(a2, GlobalObjectOperand()); |
+ __ ld(a2, GlobalObjectOperand()); |
__ li(a1, Operand(var->name())); |
__ li(a0, Operand(Smi::FromInt(SLOPPY))); |
__ Push(a2, a1, a0); |
@@ -4270,13 +4263,13 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { |
if (assign_type == NAMED_PROPERTY) { |
// Put the object both on the stack and in the register. |
VisitForStackValue(prop->obj()); |
- __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); |
+ __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 0)); |
EmitNamedPropertyLoad(prop); |
} else { |
VisitForStackValue(prop->obj()); |
VisitForStackValue(prop->key()); |
- __ lw(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize)); |
- __ lw(LoadIC::NameRegister(), MemOperand(sp, 0)); |
+ __ ld(LoadIC::ReceiverRegister(), MemOperand(sp, 1 * kPointerSize)); |
+ __ ld(LoadIC::NameRegister(), MemOperand(sp, 0)); |
EmitKeyedPropertyLoad(prop); |
} |
} |
@@ -4310,17 +4303,17 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { |
__ push(v0); |
break; |
case NAMED_PROPERTY: |
- __ sw(v0, MemOperand(sp, kPointerSize)); |
+ __ sd(v0, MemOperand(sp, kPointerSize)); |
break; |
case KEYED_PROPERTY: |
- __ sw(v0, MemOperand(sp, 2 * kPointerSize)); |
+ __ sd(v0, MemOperand(sp, 2 * kPointerSize)); |
break; |
} |
} |
} |
Register scratch1 = a1; |
- Register scratch2 = t0; |
+ Register scratch2 = a4; |
__ li(scratch1, Operand(Smi::FromInt(count_value))); |
__ AdduAndCheckForOverflow(v0, v0, scratch1, scratch2); |
__ BranchOnNoOverflow(&done, scratch2); |
@@ -4343,10 +4336,10 @@ void FullCodeGenerator::VisitCountOperation(CountOperation* expr) { |
__ push(v0); |
break; |
case NAMED_PROPERTY: |
- __ sw(v0, MemOperand(sp, kPointerSize)); |
+ __ sd(v0, MemOperand(sp, kPointerSize)); |
break; |
case KEYED_PROPERTY: |
- __ sw(v0, MemOperand(sp, 2 * kPointerSize)); |
+ __ sd(v0, MemOperand(sp, 2 * kPointerSize)); |
break; |
} |
} |
@@ -4428,7 +4421,7 @@ void FullCodeGenerator::VisitForTypeofValue(Expression* expr) { |
VariableProxy* proxy = expr->AsVariableProxy(); |
if (proxy != NULL && proxy->var()->IsUnallocated()) { |
Comment cmnt(masm_, "[ Global variable"); |
- __ lw(LoadIC::ReceiverRegister(), GlobalObjectOperand()); |
+ __ ld(LoadIC::ReceiverRegister(), GlobalObjectOperand()); |
__ li(LoadIC::NameRegister(), Operand(proxy->name())); |
// Use a regular load, not a contextual load, to avoid a reference |
// error. |
@@ -4475,7 +4468,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, |
Factory* factory = isolate()->factory(); |
if (String::Equals(check, factory->number_string())) { |
__ JumpIfSmi(v0, if_true); |
- __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset)); |
+ __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset)); |
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |
Split(eq, v0, Operand(at), if_true, if_false, fall_through); |
} else if (String::Equals(check, factory->string_string())) { |
@@ -4505,7 +4498,7 @@ void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr, |
__ Branch(if_true, eq, v0, Operand(at)); |
__ JumpIfSmi(v0, if_false); |
// Check for undetectable objects => true. |
- __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset)); |
+ __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset)); |
__ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset)); |
__ And(a1, a1, Operand(1 << Map::kIsUndetectable)); |
Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through); |
@@ -4562,8 +4555,8 @@ void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) { |
VisitForStackValue(expr->right()); |
__ InvokeBuiltin(Builtins::IN, CALL_FUNCTION); |
PrepareForBailoutBeforeSplit(expr, false, NULL, NULL); |
- __ LoadRoot(t0, Heap::kTrueValueRootIndex); |
- Split(eq, v0, Operand(t0), if_true, if_false, fall_through); |
+ __ LoadRoot(a4, Heap::kTrueValueRootIndex); |
+ Split(eq, v0, Operand(a4), if_true, if_false, fall_through); |
break; |
case Token::INSTANCEOF: { |
@@ -4636,7 +4629,7 @@ void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr, |
void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) { |
- __ lw(v0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
+ __ ld(v0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
context()->Plug(v0); |
} |
@@ -4652,13 +4645,15 @@ Register FullCodeGenerator::context_register() { |
void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) { |
- ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset); |
- __ sw(value, MemOperand(fp, frame_offset)); |
+ ASSERT(IsAligned(frame_offset, kPointerSize)); |
+ __ sd(value, MemOperand(fp, frame_offset)); |
} |
void FullCodeGenerator::LoadContextField(Register dst, int context_index) { |
- __ lw(dst, ContextOperand(cp, context_index)); |
+ __ ld(dst, ContextOperand(cp, context_index)); |
} |
@@ -4675,10 +4670,10 @@ void FullCodeGenerator::PushFunctionArgumentForContextAllocation() { |
// Contexts created by a call to eval have the same closure as the |
// context calling eval, not the anonymous closure containing the eval |
// code. Fetch it from the context. |
- __ lw(at, ContextOperand(cp, Context::CLOSURE_INDEX)); |
+ __ ld(at, ContextOperand(cp, Context::CLOSURE_INDEX)); |
} else { |
ASSERT(declaration_scope->is_function_scope()); |
- __ lw(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
+ __ ld(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
} |
__ push(at); |
} |
@@ -4692,10 +4687,8 @@ void FullCodeGenerator::EnterFinallyBlock() { |
// Store result register while executing finally block. |
__ push(result_register()); |
// Cook return address in link register to stack (smi encoded Code* delta). |
- __ Subu(a1, ra, Operand(masm_->CodeObject())); |
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); |
- STATIC_ASSERT(0 == kSmiTag); |
- __ Addu(a1, a1, Operand(a1)); // Convert to smi. |
+ __ Dsubu(a1, ra, Operand(masm_->CodeObject())); |
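+ // SmiTag is a single 32-bit left shift here, replacing the add-to-self |
+ // tagging used on mips32. |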
+ __ SmiTag(a1); |
// Store result register while executing finally block. |
__ push(a1); |
@@ -4704,20 +4697,20 @@ void FullCodeGenerator::EnterFinallyBlock() { |
ExternalReference pending_message_obj = |
ExternalReference::address_of_pending_message_obj(isolate()); |
__ li(at, Operand(pending_message_obj)); |
- __ lw(a1, MemOperand(at)); |
+ __ ld(a1, MemOperand(at)); |
__ push(a1); |
ExternalReference has_pending_message = |
ExternalReference::address_of_has_pending_message(isolate()); |
__ li(at, Operand(has_pending_message)); |
- __ lw(a1, MemOperand(at)); |
+ __ ld(a1, MemOperand(at)); |
__ SmiTag(a1); |
__ push(a1); |
ExternalReference pending_message_script = |
ExternalReference::address_of_pending_message_script(isolate()); |
__ li(at, Operand(pending_message_script)); |
- __ lw(a1, MemOperand(at)); |
+ __ ld(a1, MemOperand(at)); |
__ push(a1); |
} |
@@ -4729,29 +4722,29 @@ void FullCodeGenerator::ExitFinallyBlock() { |
ExternalReference pending_message_script = |
ExternalReference::address_of_pending_message_script(isolate()); |
__ li(at, Operand(pending_message_script)); |
- __ sw(a1, MemOperand(at)); |
+ __ sd(a1, MemOperand(at)); |
__ pop(a1); |
__ SmiUntag(a1); |
ExternalReference has_pending_message = |
ExternalReference::address_of_has_pending_message(isolate()); |
__ li(at, Operand(has_pending_message)); |
- __ sw(a1, MemOperand(at)); |
+ __ sd(a1, MemOperand(at)); |
__ pop(a1); |
ExternalReference pending_message_obj = |
ExternalReference::address_of_pending_message_obj(isolate()); |
__ li(at, Operand(pending_message_obj)); |
- __ sw(a1, MemOperand(at)); |
+ __ sd(a1, MemOperand(at)); |
// Restore result register from stack. |
__ pop(a1); |
// Uncook return address and return. |
__ pop(result_register()); |
- ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize); |
- __ sra(a1, a1, 1); // Un-smi-tag value. |
- __ Addu(at, a1, Operand(masm_->CodeObject())); |
+ __ SmiUntag(a1); |
+ __ Daddu(at, a1, Operand(masm_->CodeObject())); |
__ Jump(at); |
} |
@@ -4772,8 +4765,8 @@ FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit( |
__ Drop(*stack_depth); // Down to the handler block. |
if (*context_length > 0) { |
// Restore the context to its dedicated register and the stack. |
- __ lw(cp, MemOperand(sp, StackHandlerConstants::kContextOffset)); |
- __ sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
+ __ ld(cp, MemOperand(sp, StackHandlerConstants::kContextOffset)); |
+ __ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); |
} |
__ PopTryHandler(); |
__ Call(finally_entry_); |
@@ -4792,15 +4785,17 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code, |
BackEdgeState target_state, |
Code* replacement_code) { |
static const int kInstrSize = Assembler::kInstrSize; |
- Address branch_address = pc - 6 * kInstrSize; |
+ Address branch_address = pc - 8 * kInstrSize; |
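+ // The call-target load is two instructions longer than on mips32 (extra |
+ // ori/dsll, see the listings below), moving the branch to eight |
+ // instructions before the ok-label. |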
CodePatcher patcher(branch_address, 1); |
switch (target_state) { |
case INTERRUPT: |
- // slt at, a3, zero_reg (in case of count based interrupts) |
- // beq at, zero_reg, ok |
- // lui t9, <interrupt stub address> upper |
- // ori t9, <interrupt stub address> lower |
+ // slt at, a3, zero_reg (in case of count based interrupts) |
+ // beq at, zero_reg, ok |
+ // lui t9, <interrupt stub address> upper |
+ // ori t9, <interrupt stub address> middle |
+ // dsll t9, t9, 16 |
+ // ori t9, <interrupt stub address> lower |
// jalr t9 |
// nop |
// ok-label ----- pc_after points here |
@@ -4809,17 +4804,19 @@ void BackEdgeTable::PatchAt(Code* unoptimized_code, |
case ON_STACK_REPLACEMENT: |
case OSR_AFTER_STACK_CHECK: |
// addiu at, zero_reg, 1 |
- // beq at, zero_reg, ok ;; Not changed |
- // lui t9, <on-stack replacement address> upper |
- // ori t9, <on-stack replacement address> lower |
+ // beq at, zero_reg, ok ;; Not changed |
+ // lui t9, <on-stack replacement address> upper |
+ // ori t9, <on-stack replacement address> middle |
+ // dsll t9, t9, 16 |
+ // ori t9, <on-stack replacement address> lower |
// jalr t9 ;; Not changed |
// nop ;; Not changed |
// ok-label ----- pc_after points here |
- patcher.masm()->addiu(at, zero_reg, 1); |
+ patcher.masm()->daddiu(at, zero_reg, 1); |
break; |
} |
- Address pc_immediate_load_address = pc - 4 * kInstrSize; |
- // Replace the stack check address in the load-immediate (lui/ori pair) |
+ Address pc_immediate_load_address = pc - 6 * kInstrSize; |
+ // Replace the stack check address in the load-immediate (lui/ori/dsll/ori) |
// with the entry address of the replacement code. |
Assembler::set_target_address_at(pc_immediate_load_address, |
replacement_code->entry()); |
@@ -4834,30 +4831,30 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState( |
Code* unoptimized_code, |
Address pc) { |
static const int kInstrSize = Assembler::kInstrSize; |
- Address branch_address = pc - 6 * kInstrSize; |
- Address pc_immediate_load_address = pc - 4 * kInstrSize; |
+ Address branch_address = pc - 8 * kInstrSize; |
+ Address pc_immediate_load_address = pc - 6 * kInstrSize; |
- ASSERT(Assembler::IsBeq(Assembler::instr_at(pc - 5 * kInstrSize))); |
+ ASSERT(Assembler::IsBeq(Assembler::instr_at(pc - 7 * kInstrSize))); |
if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) { |
- ASSERT(reinterpret_cast<uint32_t>( |
+ ASSERT(reinterpret_cast<uint64_t>( |
Assembler::target_address_at(pc_immediate_load_address)) == |
- reinterpret_cast<uint32_t>( |
+ reinterpret_cast<uint64_t>( |
isolate->builtins()->InterruptCheck()->entry())); |
return INTERRUPT; |
} |
ASSERT(Assembler::IsAddImmediate(Assembler::instr_at(branch_address))); |
- if (reinterpret_cast<uint32_t>( |
+ if (reinterpret_cast<uint64_t>( |
Assembler::target_address_at(pc_immediate_load_address)) == |
- reinterpret_cast<uint32_t>( |
+ reinterpret_cast<uint64_t>( |
isolate->builtins()->OnStackReplacement()->entry())) { |
return ON_STACK_REPLACEMENT; |
} |
- ASSERT(reinterpret_cast<uint32_t>( |
+ ASSERT(reinterpret_cast<uint64_t>( |
Assembler::target_address_at(pc_immediate_load_address)) == |
- reinterpret_cast<uint32_t>( |
+ reinterpret_cast<uint64_t>( |
isolate->builtins()->OsrAfterStackCheck()->entry())); |
return OSR_AFTER_STACK_CHECK; |
} |
@@ -4865,4 +4862,4 @@ BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState( |
} } // namespace v8::internal |
-#endif // V8_TARGET_ARCH_MIPS |
+#endif // V8_TARGET_ARCH_MIPS64 |