Index: src/mips64/builtins-mips64.cc
diff --git a/src/mips/builtins-mips.cc b/src/mips64/builtins-mips64.cc
similarity index 73%
copy from src/mips/builtins-mips.cc
copy to src/mips64/builtins-mips64.cc
index 386d9b8b8679dbcb8434c7ed89bf9862e5293125..4d2ac19684f5a72c654833b23ceafbf4a65b4114 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips64/builtins-mips64.cc
@@ -6,7 +6,7 @@
 #include "src/v8.h"
-#if V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_MIPS64
 #include "src/codegen.h"
 #include "src/debug.h"
@@ -32,8 +32,8 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
   // -- cp : context
   // -- sp[0] : last argument
   // -- ...
-  // -- sp[4 * (argc - 1)] : first argument
-  // -- sp[4 * agrc] : receiver
+  // -- sp[8 * (argc - 1)] : first argument
+  // -- sp[8 * argc] : receiver
   // -----------------------------------
   // Insert extra arguments.
@@ -47,9 +47,9 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
   // JumpToExternalReference expects s0 to contain the number of arguments
   // including the receiver and the extra arguments.
-  __ Addu(s0, a0, num_extra_args + 1);
-  __ sll(s1, s0, kPointerSizeLog2);
-  __ Subu(s1, s1, kPointerSize);
+  __ Daddu(s0, a0, num_extra_args + 1);
+  __ dsll(s1, s0, kPointerSizeLog2);
+  __ Dsubu(s1, s1, kPointerSize);
   __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
 }
@@ -59,12 +59,12 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
                                               Register result) {
   // Load the native context.
-  __ lw(result,
+  __ ld(result,
         MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ lw(result,
+  __ ld(result,
         FieldMemOperand(result, GlobalObject::kNativeContextOffset));
   // Load the InternalArray function from the native context.
-  __ lw(result,
+  __ ld(result,
         MemOperand(result,
                    Context::SlotOffset(
                        Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
@@ -75,12 +75,12 @@ static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
 static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
   // Load the native context.
-  __ lw(result,
+  __ ld(result,
         MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ lw(result,
+  __ ld(result,
         FieldMemOperand(result, GlobalObject::kNativeContextOffset));
   // Load the Array function from the native context.
-  __ lw(result,
+  __ ld(result,
         MemOperand(result,
                    Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
 }
@@ -99,13 +99,13 @@ void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
   if (FLAG_debug_code) {
     // Initial map for the builtin InternalArray functions should be maps.
-    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ SmiTst(a2, t0);
+    __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ SmiTst(a2, a4);
     __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction,
-              t0, Operand(zero_reg));
-    __ GetObjectType(a2, a3, t0);
+              a4, Operand(zero_reg));
+    __ GetObjectType(a2, a3, a4);
     __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction,
-              t0, Operand(MAP_TYPE));
+              a4, Operand(MAP_TYPE));
   }
   // Run the native code for the InternalArray function called as a normal
@@ -129,13 +129,13 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
   if (FLAG_debug_code) {
     // Initial map for the builtin Array functions should be maps.
-    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
-    __ SmiTst(a2, t0);
+    __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ SmiTst(a2, a4);
     __ Assert(ne, kUnexpectedInitialMapForArrayFunction1,
-              t0, Operand(zero_reg));
-    __ GetObjectType(a2, a3, t0);
+              a4, Operand(zero_reg));
+    __ GetObjectType(a2, a3, a4);
     __ Assert(eq, kUnexpectedInitialMapForArrayFunction2,
-              t0, Operand(MAP_TYPE));
+              a4, Operand(MAP_TYPE));
   }
   // Run the native code for the Array function called as a normal function.
@@ -151,8 +151,8 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   // -- a0 : number of arguments
   // -- a1 : constructor function
   // -- ra : return address
-  // -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
-  // -- sp[argc * 4] : receiver
+  // -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+  // -- sp[argc * 8] : receiver
   // -----------------------------------
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3);
@@ -166,11 +166,11 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   // Load the first arguments in a0 and get rid of the rest.
   Label no_arguments;
   __ Branch(&no_arguments, eq, a0, Operand(zero_reg));
-  // First args = sp[(argc - 1) * 4].
-  __ Subu(a0, a0, Operand(1));
-  __ sll(a0, a0, kPointerSizeLog2);
-  __ Addu(sp, a0, sp);
-  __ lw(a0, MemOperand(sp));
+  // First arg = sp[(argc - 1) * 8].
+  __ Dsubu(a0, a0, Operand(1));
+  __ dsll(a0, a0, kPointerSizeLog2);
+  __ Daddu(sp, a0, sp);
+  __ ld(a0, MemOperand(sp));
   // sp now point to args[0], drop args[0] + receiver.
   __ Drop(2);
@@ -179,10 +179,10 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   __ LookupNumberStringCache(a0,        // Input.
                              argument,  // Result.
                              a3,        // Scratch.
-                             t0,        // Scratch.
-                             t1,        // Scratch.
+                             a4,        // Scratch.
+                             a5,        // Scratch.
                              &not_cached);
-  __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, t0);
+  __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, a4);
   __ bind(&argument_is_string);
   // ----------- S t a t e -------------
@@ -195,28 +195,28 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   __ Allocate(JSValue::kSize,
               v0,  // Result.
               a3,  // Scratch.
-              t0,  // Scratch.
+              a4,  // Scratch.
               &gc_required,
               TAG_OBJECT);
   // Initialising the String Object.
   Register map = a3;
-  __ LoadGlobalFunctionInitialMap(function, map, t0);
+  __ LoadGlobalFunctionInitialMap(function, map, a4);
   if (FLAG_debug_code) {
-    __ lbu(t0, FieldMemOperand(map, Map::kInstanceSizeOffset));
+    __ lbu(a4, FieldMemOperand(map, Map::kInstanceSizeOffset));
     __ Assert(eq, kUnexpectedStringWrapperInstanceSize,
-              t0, Operand(JSValue::kSize >> kPointerSizeLog2));
-    __ lbu(t0, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+              a4, Operand(JSValue::kSize >> kPointerSizeLog2));
+    __ lbu(a4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
     __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper,
-              t0, Operand(zero_reg));
+              a4, Operand(zero_reg));
   }
-  __ sw(map, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ sd(map, FieldMemOperand(v0, HeapObject::kMapOffset));
   __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
-  __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
-  __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+  __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
-  __ sw(argument, FieldMemOperand(v0, JSValue::kValueOffset));
+  __ sd(argument, FieldMemOperand(v0, JSValue::kValueOffset));
   // Ensure the object is fully initialized.
   STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
@@ -230,19 +230,19 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   __ JumpIfSmi(a0, &convert_argument);
   // Is it a String?
-  __ lw(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+  __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
   __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
   STATIC_ASSERT(kNotStringTag != 0);
-  __ And(t0, a3, Operand(kIsNotStringMask));
-  __ Branch(&convert_argument, ne, t0, Operand(zero_reg));
+  __ And(a4, a3, Operand(kIsNotStringMask));
+  __ Branch(&convert_argument, ne, a4, Operand(zero_reg));
   __ mov(argument, a0);
-  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
+  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, a4);
   __ Branch(&argument_is_string);
   // Invoke the conversion builtin and put the result into a2.
   __ bind(&convert_argument);
   __ push(function);  // Preserve the function.
-  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, t0);
+  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, a4);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ push(a0);
@@ -262,7 +262,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   // At this point the argument is already a string. Call runtime to
   // create a string wrapper.
   __ bind(&gc_required);
-  __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, t0);
+  __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, a4);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ push(argument);
@@ -286,15 +286,15 @@ static void CallRuntimePassFunction(
 static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
-  __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-  __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
-  __ Addu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+  __ Daddu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(at);
 }
 static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
-  __ Addu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
   __ Jump(at);
 }
@@ -306,8 +306,8 @@ void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
   // would be quite expensive. A good compromise is to first check against
   // stack limit as a cue for an interrupt signal.
   Label ok;
-  __ LoadRoot(t0, Heap::kStackLimitRootIndex);
-  __ Branch(&ok, hs, sp, Operand(t0));
+  __ LoadRoot(a4, Heap::kStackLimitRootIndex);
+  __ Branch(&ok, hs, sp, Operand(a4));
   CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
   GenerateTailCallToReturnedCode(masm);
@@ -350,7 +350,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
   }
   // Preserve the two incoming parameters on the stack.
-  __ sll(a0, a0, kSmiTagSize);  // Tag arguments count.
+  // Tag arguments count.
+  __ dsll32(a0, a0, 0);
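+  // A 64-bit Smi keeps its payload in the upper 32 bits, so tagging
+  // is a 32-bit left shift.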
   __ MultiPushReversed(a0.bit() | a1.bit());
   Label rt_call, allocated;
@@ -361,15 +362,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     ExternalReference debug_step_in_fp =
         ExternalReference::debug_step_in_fp_address(isolate);
     __ li(a2, Operand(debug_step_in_fp));
-    __ lw(a2, MemOperand(a2));
+    __ ld(a2, MemOperand(a2));
     __ Branch(&rt_call, ne, a2, Operand(zero_reg));
     // Load the initial map and verify that it is in fact a map.
     // a1: constructor function
-    __ lw(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
     __ JumpIfSmi(a2, &rt_call);
-    __ GetObjectType(a2, a3, t4);
-    __ Branch(&rt_call, ne, t4, Operand(MAP_TYPE));
+    __ GetObjectType(a2, a3, t0);
+    __ Branch(&rt_call, ne, t0, Operand(MAP_TYPE));
     // Check that the constructor is not constructing a JSFunction (see
     // comments in Runtime_NewObject in runtime.cc). In which case the
@@ -383,14 +384,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       Label allocate;
       MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
       // Check if slack tracking is enabled.
-      __ lw(t0, bit_field3);
-      __ DecodeField<Map::ConstructionCount>(t2, t0);
-      __ Branch(&allocate, eq, t2, Operand(JSFunction::kNoSlackTracking));
+      __ lwu(a4, bit_field3);
+      __ DecodeField<Map::ConstructionCount>(a6, a4);
+      __ Branch(&allocate,
+                eq,
+                a6,
+                Operand(static_cast<int64_t>(JSFunction::kNoSlackTracking)));
       // Decrease generous allocation count.
-      __ Subu(t0, t0, Operand(1 << Map::ConstructionCount::kShift));
+      __ Dsubu(a4, a4, Operand(1 << Map::ConstructionCount::kShift));
       __ Branch(USE_DELAY_SLOT,
-                &allocate, ne, t2, Operand(JSFunction::kFinishSlackTracking));
-      __ sw(t0, bit_field3);  // In delay slot.
+                &allocate, ne, a6, Operand(JSFunction::kFinishSlackTracking));
+      __ sw(a4, bit_field3);  // In delay slot.
       __ Push(a1, a2, a1);  // a1 = Constructor.
       __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
@@ -398,7 +402,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       __ Pop(a1, a2);
       // Slack tracking counter is kNoSlackTracking after runtime call.
      ASSERT(JSFunction::kNoSlackTracking == 0);
-      __ mov(t2, zero_reg);
+      __ mov(a6, zero_reg);
      __ bind(&allocate);
     }
@@ -408,23 +412,23 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     // a2: initial map
     __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
     if (create_memento) {
-      __ Addu(a3, a3, Operand(AllocationMemento::kSize / kPointerSize));
+      __ Daddu(a3, a3, Operand(AllocationMemento::kSize / kPointerSize));
     }
-    __ Allocate(a3, t4, t5, t6, &rt_call, SIZE_IN_WORDS);
+    __ Allocate(a3, t0, t1, t2, &rt_call, SIZE_IN_WORDS);
     // Allocated the JSObject, now initialize the fields. Map is set to
     // initial map and properties and elements are set to empty fixed array.
     // a1: constructor function
     // a2: initial map
     // a3: object size (not including memento if create_memento)
-    // t4: JSObject (not tagged)
-    __ LoadRoot(t6, Heap::kEmptyFixedArrayRootIndex);
-    __ mov(t5, t4);
-    __ sw(a2, MemOperand(t5, JSObject::kMapOffset));
-    __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
-    __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
-    __ Addu(t5, t5, Operand(3*kPointerSize));
+    // t0: JSObject (not tagged)
+    __ LoadRoot(t2, Heap::kEmptyFixedArrayRootIndex);
+    __ mov(t1, t0);
+    __ sd(a2, MemOperand(t1, JSObject::kMapOffset));
+    __ sd(t2, MemOperand(t1, JSObject::kPropertiesOffset));
+    __ sd(t2, MemOperand(t1, JSObject::kElementsOffset));
+    __ Daddu(t1, t1, Operand(3*kPointerSize));
     ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
     ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
     ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
@@ -433,81 +437,88 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
     // a1: constructor function
     // a2: initial map
     // a3: object size (in words, including memento if create_memento)
-    // t4: JSObject (not tagged)
-    // t5: First in-object property of JSObject (not tagged)
-    // t2: slack tracking counter (non-API function case)
+    // t0: JSObject (not tagged)
+    // t1: First in-object property of JSObject (not tagged)
+    // a6: slack tracking counter (non-API function case)
     ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
-    // Use t7 to hold undefined, which is used in several places below.
-    __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+    // Use t3 to hold undefined, which is used in several places below.
+    __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
     if (!is_api_function) {
       Label no_inobject_slack_tracking;
       // Check if slack tracking is enabled.
       __ Branch(&no_inobject_slack_tracking,
-                eq, t2, Operand(JSFunction::kNoSlackTracking));
+                eq,
+                a6,
+                Operand(static_cast<int64_t>(JSFunction::kNoSlackTracking)));
       // Allocate object with a slack.
-      __ lbu(a0, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset));
-      __ sll(at, a0, kPointerSizeLog2);
-      __ addu(a0, t5, at);
+      __ lwu(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+      __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+             kBitsPerByte);
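+      // Ext pulls the one-byte PreAllocatedPropertyFields field out of
+      // the packed instance-sizes word.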
+      __ dsll(at, a0, kPointerSizeLog2);
+      __ daddu(a0, t1, at);
      // a0: offset of first field after pre-allocated fields
      if (FLAG_debug_code) {
-        __ sll(at, a3, kPointerSizeLog2);
-        __ Addu(t6, t4, Operand(at));  // End of object.
+        __ dsll(at, a3, kPointerSizeLog2);
+        __ Daddu(t2, t0, Operand(at));  // End of object.
        __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields,
-                  a0, Operand(t6));
+                  a0, Operand(t2));
      }
-      __ InitializeFieldsWithFiller(t5, a0, t7);
+      __ InitializeFieldsWithFiller(t1, a0, t3);
      // To allow for truncation.
-      __ LoadRoot(t7, Heap::kOnePointerFillerMapRootIndex);
+      __ LoadRoot(t3, Heap::kOnePointerFillerMapRootIndex);
      // Fill the remaining fields with one pointer filler map.
      __ bind(&no_inobject_slack_tracking);
    }
    if (create_memento) {
-      __ Subu(a0, a3, Operand(AllocationMemento::kSize / kPointerSize));
-      __ sll(a0, a0, kPointerSizeLog2);
-      __ Addu(a0, t4, Operand(a0));  // End of object.
-      __ InitializeFieldsWithFiller(t5, a0, t7);
+      __ Dsubu(a0, a3, Operand(AllocationMemento::kSize / kPointerSize));
+      __ dsll(a0, a0, kPointerSizeLog2);
+      __ Daddu(a0, t0, Operand(a0));  // End of object.
+      __ InitializeFieldsWithFiller(t1, a0, t3);
      // Fill in memento fields.
-      // t5: points to the allocated but uninitialized memento.
-      __ LoadRoot(t7, Heap::kAllocationMementoMapRootIndex);
+      // t1: points to the allocated but uninitialized memento.
+      __ LoadRoot(t3, Heap::kAllocationMementoMapRootIndex);
      ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
-      __ sw(t7, MemOperand(t5));
-      __ Addu(t5, t5, kPointerSize);
+      __ sd(t3, MemOperand(t1));
+      __ Daddu(t1, t1, kPointerSize);
      // Load the AllocationSite.
-      __ lw(t7, MemOperand(sp, 2 * kPointerSize));
+      __ ld(t3, MemOperand(sp, 2 * kPointerSize));
      ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
-      __ sw(t7, MemOperand(t5));
-      __ Addu(t5, t5, kPointerSize);
+      __ sd(t3, MemOperand(t1));
+      __ Daddu(t1, t1, kPointerSize);
    } else {
-      __ sll(at, a3, kPointerSizeLog2);
-      __ Addu(a0, t4, Operand(at));  // End of object.
-      __ InitializeFieldsWithFiller(t5, a0, t7);
+      __ dsll(at, a3, kPointerSizeLog2);
+      __ Daddu(a0, t0, Operand(at));  // End of object.
+      __ InitializeFieldsWithFiller(t1, a0, t3);
    }
    // Add the object tag to make the JSObject real, so that we can continue
    // and jump into the continuation code at any time from now on. Any
    // failures need to undo the allocation, so that the heap is in a
    // consistent state and verifiable.
-    __ Addu(t4, t4, Operand(kHeapObjectTag));
+    __ Daddu(t0, t0, Operand(kHeapObjectTag));
    // Check if a non-empty properties array is needed. Continue with
    // allocated object if not fall through to runtime call if it is.
    // a1: constructor function
-    // t4: JSObject
-    // t5: start of next object (not tagged)
+    // t0: JSObject
+    // t1: start of next object (not tagged)
    __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
    // The field instance sizes contains both pre-allocated property fields
    // and in-object properties.
-    __ lbu(t6, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset));
-    __ Addu(a3, a3, Operand(t6));
-    __ lbu(t6, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
-    __ subu(a3, a3, t6);
+    __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+    __ Ext(t2, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+           kBitsPerByte);
+    __ Daddu(a3, a3, Operand(t2));
+    __ Ext(t2, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
+           kBitsPerByte);
+    __ dsubu(a3, a3, t2);
    // Done if no extra properties are to be allocated.
    __ Branch(&allocated, eq, a3, Operand(zero_reg));
@@ -518,13 +529,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
    // FixedArrays to the start of the next object calculation from above.
    // a1: constructor
    // a3: number of elements in properties array
-    // t4: JSObject
-    // t5: start of next object
-    __ Addu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+    // t0: JSObject
+    // t1: start of next object
+    __ Daddu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
    __ Allocate(
        a0,
-        t5,
-        t6,
+        t1,
+        t2,
        a2,
        &undo_allocation,
        static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
@@ -532,14 +543,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
    // Initialize the FixedArray.
    // a1: constructor
    // a3: number of elements in properties array (untagged)
-    // t4: JSObject
-    // t5: start of next object
-    __ LoadRoot(t6, Heap::kFixedArrayMapRootIndex);
-    __ mov(a2, t5);
-    __ sw(t6, MemOperand(a2, JSObject::kMapOffset));
-    __ sll(a0, a3, kSmiTagSize);
-    __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
-    __ Addu(a2, a2, Operand(2 * kPointerSize));
+    // t0: JSObject
+    // t1: start of next object
+    __ LoadRoot(t2, Heap::kFixedArrayMapRootIndex);
+    __ mov(a2, t1);
+    __ sd(t2, MemOperand(a2, JSObject::kMapOffset));
+    // Tag number of elements.
+    __ dsll32(a0, a3, 0);
+    __ sd(a0, MemOperand(a2, FixedArray::kLengthOffset));
+    __ Daddu(a2, a2, Operand(2 * kPointerSize));
    ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
    ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
@@ -548,33 +560,33 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
    // a1: constructor
    // a2: First element of FixedArray (not tagged)
    // a3: number of elements in properties array
-    // t4: JSObject
-    // t5: FixedArray (not tagged)
-    __ sll(t3, a3, kPointerSizeLog2);
-    __ addu(t6, a2, t3);  // End of object.
+    // t0: JSObject
+    // t1: FixedArray (not tagged)
+    __ dsll(a7, a3, kPointerSizeLog2);
+    __ daddu(t2, a2, a7);  // End of object.
    ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
    { Label loop, entry;
      if (!is_api_function || create_memento) {
-        __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
+        __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
      } else if (FLAG_debug_code) {
-        __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
-        __ Assert(eq, kUndefinedValueNotLoaded, t7, Operand(t2));
+        __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
+        __ Assert(eq, kUndefinedValueNotLoaded, t3, Operand(a6));
      }
      __ jmp(&entry);
      __ bind(&loop);
-      __ sw(t7, MemOperand(a2));
-      __ addiu(a2, a2, kPointerSize);
+      __ sd(t3, MemOperand(a2));
+      __ daddiu(a2, a2, kPointerSize);
      __ bind(&entry);
-      __ Branch(&loop, less, a2, Operand(t6));
+      __ Branch(&loop, less, a2, Operand(t2));
    }
    // Store the initialized FixedArray into the properties field of
    // the JSObject.
    // a1: constructor function
-    // t4: JSObject
-    // t5: FixedArray (not tagged)
-    __ Addu(t5, t5, Operand(kHeapObjectTag));  // Add the heap tag.
-    __ sw(t5, FieldMemOperand(t4, JSObject::kPropertiesOffset));
+    // t0: JSObject
+    // t1: FixedArray (not tagged)
+    __ Daddu(t1, t1, Operand(kHeapObjectTag));  // Add the heap tag.
+    __ sd(t1, FieldMemOperand(t0, JSObject::kPropertiesOffset));
    // Continue with JSObject being successfully allocated.
    // a1: constructor function
@@ -584,9 +596,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
    // Undo the setting of the new top so that the heap is verifiable. For
    // example, the map's unused properties potentially do not match the
    // allocated objects unused properties.
-    // t4: JSObject (previous new top)
    __ bind(&undo_allocation);
-    __ UndoAllocationInNewSpace(t4, t5);
+    // t0: JSObject (previous new top)
+    __ UndoAllocationInNewSpace(t0, t1);
  }
  // Allocate the new receiver object using the runtime call.
@@ -594,7 +606,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
  __ bind(&rt_call);
  if (create_memento) {
    // Get the cell or allocation site.
-    __ lw(a2, MemOperand(sp, 2 * kPointerSize));
+    __ ld(a2, MemOperand(sp, 2 * kPointerSize));
    __ push(a2);
  }
@@ -604,7 +616,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
  } else {
    __ CallRuntime(Runtime::kNewObject, 1);
  }
-  __ mov(t4, v0);
+  __ mov(t0, v0);
  // If we ended up using the runtime, and we want a memento, then the
  // runtime call made it for us, and we shouldn't do create count
@@ -615,38 +627,38 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
  }
  // Receiver for constructor call allocated.
-  // t4: JSObject
+  // t0: JSObject
  __ bind(&allocated);
  if (create_memento) {
-    __ lw(a2, MemOperand(sp, kPointerSize * 2));
-    __ LoadRoot(t5, Heap::kUndefinedValueRootIndex);
-    __ Branch(&count_incremented, eq, a2, Operand(t5));
+    __ ld(a2, MemOperand(sp, kPointerSize * 2));
+    __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+    __ Branch(&count_incremented, eq, a2, Operand(t1));
    // a2 is an AllocationSite. We are creating a memento from it, so we
    // need to increment the memento create count.
-    __ lw(a3, FieldMemOperand(a2,
+    __ ld(a3, FieldMemOperand(a2,
                              AllocationSite::kPretenureCreateCountOffset));
-    __ Addu(a3, a3, Operand(Smi::FromInt(1)));
-    __ sw(a3, FieldMemOperand(a2,
+    __ Daddu(a3, a3, Operand(Smi::FromInt(1)));
+    __ sd(a3, FieldMemOperand(a2,
                              AllocationSite::kPretenureCreateCountOffset));
    __ bind(&count_incremented);
  }
-  __ Push(t4, t4);
+  __ Push(t0, t0);
  // Reload the number of arguments from the stack.
  // sp[0]: receiver
  // sp[1]: receiver
  // sp[2]: constructor function
  // sp[3]: number of arguments (smi-tagged)
-  __ lw(a1, MemOperand(sp, 2 * kPointerSize));
-  __ lw(a3, MemOperand(sp, 3 * kPointerSize));
+  __ ld(a1, MemOperand(sp, 2 * kPointerSize));
+  __ ld(a3, MemOperand(sp, 3 * kPointerSize));
  // Set up pointer to last argument.
-  __ Addu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+  __ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
  // Set up number of arguments for function call below.
-  __ srl(a0, a3, kSmiTagSize);
+  __ SmiUntag(a0, a3);
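+  // SmiUntag undoes the 32-bit Smi shift to give the raw count.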
  // Copy arguments and receiver to the expression stack.
  // a0: number of arguments
@@ -658,21 +670,22 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
  // sp[2]: constructor function
  // sp[3]: number of arguments (smi-tagged)
  Label loop, entry;
+  __ SmiUntag(a3);
  __ jmp(&entry);
  __ bind(&loop);
-  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(t0, a2, Operand(t0));
-  __ lw(t1, MemOperand(t0));
-  __ push(t1);
+  __ dsll(a4, a3, kPointerSizeLog2);
+  __ Daddu(a4, a2, Operand(a4));
+  __ ld(a5, MemOperand(a4));
+  __ push(a5);
  __ bind(&entry);
-  __ Addu(a3, a3, Operand(-2));
+  __ Daddu(a3, a3, Operand(-1));
  __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
  // Call the function.
  // a0: number of arguments
  // a1: constructor function
  if (is_api_function) {
-    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
    Handle<Code> code =
        masm->isolate()->builtins()->HandleApiCallConstruct();
    __ Call(code, RelocInfo::CODE_TARGET);
@@ -687,7 +700,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
  }
  // Restore context from the frame.
-  __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
  // If the result is an object (in the ECMA sense), we should get rid
  // of the receiver and use the result; see ECMA-262 section 13.2.2-7
@@ -709,7 +722,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
  // Throw away the result of the constructor invocation and use the
  // on-stack receiver as the result.
  __ bind(&use_receiver);
-  __ lw(v0, MemOperand(sp));
+  __ ld(v0, MemOperand(sp));
  // Remove receiver from the stack, remove caller arguments, and
  // return.
@@ -718,14 +731,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
  // sp[0]: receiver (newly allocated object)
  // sp[1]: constructor function
  // sp[2]: number of arguments (smi-tagged)
-  __ lw(a1, MemOperand(sp, 2 * kPointerSize));
+  __ ld(a1, MemOperand(sp, 2 * kPointerSize));
  // Leave construct frame.
 }
-  __ sll(t0, a1, kPointerSizeLog2 - 1);
-  __ Addu(sp, sp, t0);
-  __ Addu(sp, sp, kPointerSize);
+  __ SmiScale(a4, a1, kPointerSizeLog2);
+  __ Daddu(sp, sp, a4);
+  __ Daddu(sp, sp, kPointerSize);
  __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
  __ Ret();
}
@@ -753,7 +766,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
  // -- s0: argv
  // -----------------------------------
  ProfileEntryHookStub::MaybeCallEntryHook(masm);
-
  // Clear the context before we push it when entering the JS frame.
  __ mov(cp, zero_reg);
@@ -762,7 +774,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
    FrameScope scope(masm, StackFrame::INTERNAL);
    // Set up the context from the function argument.
-    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
    // Push the function and the receiver onto the stack.
    __ Push(a1, a2);
@@ -771,27 +783,32 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
    // a3: argc
    // s0: argv, i.e. points to first arg
    Label loop, entry;
-    __ sll(t0, a3, kPointerSizeLog2);
-    __ addu(t2, s0, t0);
+    // TODO(plind): At least on simulator, argc in a3 is an int32_t with junk
+    // in upper bits. Should fix the root cause, rather than use below
+    // workaround to clear upper bits.
+    __ dsll32(a3, a3, 0);  // int32_t -> int64_t.
+    __ dsrl32(a3, a3, 0);
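+    // Together the dsll32/dsrl32 pair zero-extends argc to 64 bits.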
+    __ dsll(a4, a3, kPointerSizeLog2);
+    __ daddu(a6, s0, a4);
    __ b(&entry);
    __ nop();  // Branch delay slot nop.
-    // t2 points past last arg.
+    // a6 points past last arg.
    __ bind(&loop);
-    __ lw(t0, MemOperand(s0));  // Read next parameter.
-    __ addiu(s0, s0, kPointerSize);
-    __ lw(t0, MemOperand(t0));  // Dereference handle.
-    __ push(t0);  // Push parameter.
+    __ ld(a4, MemOperand(s0));  // Read next parameter.
+    __ daddiu(s0, s0, kPointerSize);
+    __ ld(a4, MemOperand(a4));  // Dereference handle.
+    __ push(a4);  // Push parameter.
    __ bind(&entry);
-    __ Branch(&loop, ne, s0, Operand(t2));
+    __ Branch(&loop, ne, s0, Operand(a6));
    // Initialize all JavaScript callee-saved registers, since they will be seen
    // by the garbage collector as part of handlers.
-    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
-    __ mov(s1, t0);
-    __ mov(s2, t0);
-    __ mov(s3, t0);
-    __ mov(s4, t0);
-    __ mov(s5, t0);
+    __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
+    __ mov(s1, a4);
+    __ mov(s2, a4);
+    __ mov(s3, a4);
+    __ mov(s4, a4);
+    __ mov(s5, a4);
    // s6 holds the root address. Do not clobber.
    // s7 is cp. Do not init.
@@ -809,7 +826,6 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
    // Leave internal frame.
  }
-
  __ Jump(ra);
}
@@ -856,7 +872,6 @@ void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
}
-
static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
  // For now, we are relying on the fact that make_code_young doesn't do any
  // garbage collection which allows us to save/restore the registers without
@@ -865,7 +880,7 @@ static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
  // crawls in MakeCodeYoung. This seems a bit fragile.
  // Set a0 to point to the head of the PlatformCodeAge sequence.
-  __ Subu(a0, a0,
+  __ Dsubu(a0, a0,
      Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
  // The following registers must be saved and restored when calling through to
@@ -904,7 +919,7 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
  // pointers.
  // Set a0 to point to the head of the PlatformCodeAge sequence.
-  __ Subu(a0, a0,
+  __ Dsubu(a0, a0,
      Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
  // The following registers must be saved and restored when calling through to
@@ -924,10 +939,10 @@ void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
  // Perform prologue operations usually performed by the young code stub.
  __ Push(ra, fp, cp, a1);
-  __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+  __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
  // Jump to point after the code-age stub.
-  __ Addu(a0, a0, Operand(kNoCodeAgeSequenceLength));
+  __ Daddu(a0, a0, Operand(kNoCodeAgeSequenceLength));
  __ Jump(a0);
}
@@ -951,7 +966,7 @@ static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
    __ MultiPop(kJSCallerSaved | kCalleeSaved);
  }
-  __ Addu(sp, sp, Operand(kPointerSize));  // Ignore state
+  __ Daddu(sp, sp, Operand(kPointerSize));  // Ignore state
  __ Jump(ra);  // Jump to miss handler
}
@@ -976,24 +991,24 @@ static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
  }
-  // Get the full codegen state from the stack and untag it -> t2.
-  __ lw(t2, MemOperand(sp, 0 * kPointerSize));
-  __ SmiUntag(t2);
+  // Get the full codegen state from the stack and untag it -> a6.
+  __ ld(a6, MemOperand(sp, 0 * kPointerSize));
+  __ SmiUntag(a6);
  // Switch on the state.
  Label with_tos_register, unknown_state;
  __ Branch(&with_tos_register,
-            ne, t2, Operand(FullCodeGenerator::NO_REGISTERS));
+            ne, a6, Operand(FullCodeGenerator::NO_REGISTERS));
  __ Ret(USE_DELAY_SLOT);
  // Safe to fill delay slot Addu will emit one instruction.
-  __ Addu(sp, sp, Operand(1 * kPointerSize));  // Remove state.
+  __ Daddu(sp, sp, Operand(1 * kPointerSize));  // Remove state.
  __ bind(&with_tos_register);
-  __ lw(v0, MemOperand(sp, 1 * kPointerSize));
-  __ Branch(&unknown_state, ne, t2, Operand(FullCodeGenerator::TOS_REG));
+  __ ld(v0, MemOperand(sp, 1 * kPointerSize));
+  __ Branch(&unknown_state, ne, a6, Operand(FullCodeGenerator::TOS_REG));
  __ Ret(USE_DELAY_SLOT);
  // Safe to fill delay slot Addu will emit one instruction.
-  __ Addu(sp, sp, Operand(2 * kPointerSize));  // Remove state.
+  __ Daddu(sp, sp, Operand(2 * kPointerSize));  // Remove state.
  __ bind(&unknown_state);
  __ stop("no cases left");
@@ -1017,7 +1032,7 @@ void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
  // Lookup the function in the JavaScript frame.
-  __ lw(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    // Pass function as argument.
@@ -1030,18 +1045,18 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
  // Load deoptimization data from the code object.
  // <deopt_data> = <code>[#deoptimization_data_offset]
-  __ lw(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+  __ Uld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
  // Load the OSR entrypoint offset from the deoptimization data.
  // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
-  __ lw(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+  __ ld(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
      DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
  __ SmiUntag(a1);
  // Compute the target address = code_obj + header_size + osr_offset
  // <entry_addr> = <code_obj> + #header_size + <osr_offset>
-  __ addu(v0, v0, a1);
-  __ addiu(ra, v0, Code::kHeaderSize - kHeapObjectTag);
+  __ daddu(v0, v0, a1);
+  __ daddiu(ra, v0, Code::kHeaderSize - kHeapObjectTag);
  // And "return" to the OSR entry point of the function.
  __ Ret();
@@ -1070,9 +1085,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
  // a0: actual number of arguments
  { Label done;
    __ Branch(&done, ne, a0, Operand(zero_reg));
-    __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
-    __ push(t2);
-    __ Addu(a0, a0, Operand(1));
+    __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
+    __ push(a6);
+    __ Daddu(a0, a0, Operand(1));
    __ bind(&done);
  }
@@ -1080,9 +1095,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
  // if it is a function.
  // a0: actual number of arguments
  Label slow, non_function;
-  __ sll(at, a0, kPointerSizeLog2);
-  __ addu(at, sp, at);
-  __ lw(a1, MemOperand(at));
+  __ dsll(at, a0, kPointerSizeLog2);
+  __ daddu(at, sp, at);
+  __ ld(a1, MemOperand(at));
  __ JumpIfSmi(a1, &non_function);
  __ GetObjectType(a1, a2, a2);
  __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
@@ -1091,31 +1106,31 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
  // a0: actual number of arguments
  // a1: function
  Label shift_arguments;
-  __ li(t0, Operand(0, RelocInfo::NONE32));  // Indicate regular JS_FUNCTION.
+  __ li(a4, Operand(0, RelocInfo::NONE32));  // Indicate regular JS_FUNCTION.
  { Label convert_to_object, use_global_proxy, patch_receiver;
    // Change context eagerly in case we need the global receiver.
-    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
    // Do not transform the receiver for strict mode functions.
-    __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
-    __ lw(a3, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
-    __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
-                                 kSmiTagSize)));
-    __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
+    __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+    __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kStrictModeByteOffset));
+    __ And(a7, a3, Operand(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+    __ Branch(&shift_arguments, ne, a7, Operand(zero_reg));
    // Do not transform the receiver for native (Compilerhints already in a3).
-    __ And(t3, a3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-    __ Branch(&shift_arguments, ne, t3, Operand(zero_reg));
+    __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
+    __ And(a7, a3, Operand(1 << SharedFunctionInfo::kNativeBitWithinByte));
+    __ Branch(&shift_arguments, ne, a7, Operand(zero_reg));
    // Compute the receiver in sloppy mode.
    // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
-    __ sll(at, a0, kPointerSizeLog2);
-    __ addu(a2, sp, at);
-    __ lw(a2, MemOperand(a2, -kPointerSize));
+    __ dsll(at, a0, kPointerSizeLog2);
+    __ daddu(a2, sp, at);
+    __ ld(a2, MemOperand(a2, -kPointerSize));
    // a0: actual number of arguments
    // a1: function
    // a2: first argument
-    __ JumpIfSmi(a2, &convert_to_object, t2);
+    __ JumpIfSmi(a2, &convert_to_object, a6);
    __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
    __ Branch(&use_global_proxy, eq, a2, Operand(a3));
@@ -1130,42 +1145,41 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
    // Enter an internal frame in order to preserve argument count.
    {
      FrameScope scope(masm, StackFrame::INTERNAL);
-      __ sll(a0, a0, kSmiTagSize);  // Smi tagged.
+      __ SmiTag(a0);
      __ Push(a0, a2);
      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
      __ mov(a2, v0);
      __ pop(a0);
-      __ sra(a0, a0, kSmiTagSize);  // Un-tag.
+      __ SmiUntag(a0);
      // Leave internal frame.
    }
-
-    // Restore the function to a1, and the flag to t0.
-    __ sll(at, a0, kPointerSizeLog2);
-    __ addu(at, sp, at);
-    __ lw(a1, MemOperand(at));
+    // Restore the function to a1, and the flag to a4.
+    __ dsll(at, a0, kPointerSizeLog2);
+    __ daddu(at, sp, at);
+    __ ld(a1, MemOperand(at));
    __ Branch(USE_DELAY_SLOT, &patch_receiver);
-    __ li(t0, Operand(0, RelocInfo::NONE32));  // In delay slot.
+    __ li(a4, Operand(0, RelocInfo::NONE32));  // In delay slot.
    __ bind(&use_global_proxy);
-    __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
-    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
+    __ ld(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+    __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
    __ bind(&patch_receiver);
-    __ sll(at, a0, kPointerSizeLog2);
-    __ addu(a3, sp, at);
-    __ sw(a2, MemOperand(a3, -kPointerSize));
+    __ dsll(at, a0, kPointerSizeLog2);
+    __ daddu(a3, sp, at);
+    __ sd(a2, MemOperand(a3, -kPointerSize));
    __ Branch(&shift_arguments);
  }
  // 3b. Check for function proxy.
  __ bind(&slow);
-  __ li(t0, Operand(1, RelocInfo::NONE32));  // Indicate function proxy.
+  __ li(a4, Operand(1, RelocInfo::NONE32));  // Indicate function proxy.
  __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
  __ bind(&non_function);
-  __ li(t0, Operand(2, RelocInfo::NONE32));  // Indicate non-function.
+  __ li(a4, Operand(2, RelocInfo::NONE32));  // Indicate non-function.
  // 3c. Patch the first argument when calling a non-function. The
  // CALL_NON_FUNCTION builtin expects the non-function callee as
@@ -1173,31 +1187,31 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
  // become the receiver.
  // a0: actual number of arguments
  // a1: function
-  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
-  __ sll(at, a0, kPointerSizeLog2);
-  __ addu(a2, sp, at);
-  __ sw(a1, MemOperand(a2, -kPointerSize));
+  // a4: call type (0: JS function, 1: function proxy, 2: non-function)
+  __ dsll(at, a0, kPointerSizeLog2);
+  __ daddu(a2, sp, at);
+  __ sd(a1, MemOperand(a2, -kPointerSize));
  // 4. Shift arguments and return address one slot down on the stack
  // (overwriting the original receiver). Adjust argument count to make
  // the original first argument the new receiver.
  // a0: actual number of arguments
  // a1: function
-  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
+  // a4: call type (0: JS function, 1: function proxy, 2: non-function)
  __ bind(&shift_arguments);
  { Label loop;
    // Calculate the copy start address (destination). Copy end address is sp.
-    __ sll(at, a0, kPointerSizeLog2);
-    __ addu(a2, sp, at);
+    __ dsll(at, a0, kPointerSizeLog2);
+    __ daddu(a2, sp, at);
    __ bind(&loop);
-    __ lw(at, MemOperand(a2, -kPointerSize));
-    __ sw(at, MemOperand(a2));
-    __ Subu(a2, a2, Operand(kPointerSize));
+    __ ld(at, MemOperand(a2, -kPointerSize));
+    __ sd(at, MemOperand(a2));
+    __ Dsubu(a2, a2, Operand(kPointerSize));
    __ Branch(&loop, ne, a2, Operand(sp));
    // Adjust the actual number of arguments and remove the top element
    // (which is a copy of the last argument).
-    __ Subu(a0, a0, Operand(1));
+    __ Dsubu(a0, a0, Operand(1));
    __ Pop();
  }
@@ -1205,15 +1219,15 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
  // or a function proxy via CALL_FUNCTION_PROXY.
  // a0: actual number of arguments
  // a1: function
-  // t0: call type (0: JS function, 1: function proxy, 2: non-function)
+  // a4: call type (0: JS function, 1: function proxy, 2: non-function)
  { Label function, non_proxy;
-    __ Branch(&function, eq, t0, Operand(zero_reg));
+    __ Branch(&function, eq, a4, Operand(zero_reg));
    // Expected number of arguments is 0 for CALL_NON_FUNCTION.
    __ mov(a2, zero_reg);
-    __ Branch(&non_proxy, ne, t0, Operand(1));
+    __ Branch(&non_proxy, ne, a4, Operand(1));
    __ push(a1);  // Re-add proxy object as additional argument.
-    __ Addu(a0, a0, Operand(1));
+    __ Daddu(a0, a0, Operand(1));
    __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
            RelocInfo::CODE_TARGET);
@@ -1230,15 +1244,16 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
  // (tail-call) to the code in register edx without checking arguments.
  // a0: actual number of arguments
  // a1: function
-  __ lw(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  // The argument count is stored as int32_t on 64-bit platforms.
+  // TODO(plind): Smi on 32-bit platforms.
  __ lw(a2,
        FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
-  __ sra(a2, a2, kSmiTagSize);
  // Check formal and actual parameter counts.
  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
          RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
-  __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
  ParameterCount expected(0);
  __ InvokeCode(a3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
}
@@ -1255,9 +1270,9 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
  {
    FrameScope frame_scope(masm, StackFrame::INTERNAL);
-    __ lw(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
+    __ ld(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
    __ push(a0);
-    __ lw(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
+    __ ld(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
    __ push(a0);
    // Returns (in v0) number of arguments to copy to stack as Smi.
    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
@@ -1269,13 +1284,13 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
    __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
    // Make a2 the space we have left. The stack might already be overflowed
    // here which will cause a2 to become negative.
-    __ subu(a2, sp, a2);
+    __ dsubu(a2, sp, a2);
    // Check if the arguments will overflow the stack.
-    __ sll(t3, v0, kPointerSizeLog2 - kSmiTagSize);
-    __ Branch(&okay, gt, a2, Operand(t3));  // Signed comparison.
+    __ SmiScale(a7, v0, kPointerSizeLog2);
+    __ Branch(&okay, gt, a2, Operand(a7));  // Signed comparison.
    // Out of stack space.
-    __ lw(a1, MemOperand(fp, kFunctionOffset));
+    __ ld(a1, MemOperand(fp, kFunctionOffset));
    __ Push(a1, v0);
    __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
    // End of stack check.
@@ -1286,30 +1301,30 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
    __ Push(v0, a1);  // Limit and initial index.
    // Get the receiver.
-    __ lw(a0, MemOperand(fp, kRecvOffset));
+    __ ld(a0, MemOperand(fp, kRecvOffset));
    // Check that the function is a JS function (otherwise it must be a proxy).
    Label push_receiver;
-    __ lw(a1, MemOperand(fp, kFunctionOffset));
+    __ ld(a1, MemOperand(fp, kFunctionOffset));
    __ GetObjectType(a1, a2, a2);
    __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE));
    // Change context eagerly to get the right global object if necessary.
-    __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
    // Load the shared function info while the function is still in a1.
-    __ lw(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+    __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
    // Compute the receiver.
    // Do not transform the receiver for strict mode functions.
    Label call_to_object, use_global_proxy;
-    __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
-    __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
-                                 kSmiTagSize)));
-    __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
+    __ lbu(a7, FieldMemOperand(a2, SharedFunctionInfo::kStrictModeByteOffset));
+    __ And(a7, a7, Operand(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+    __ Branch(&push_receiver, ne, a7, Operand(zero_reg));
    // Do not transform the receiver for native (Compilerhints already in a2).
-    __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-    __ Branch(&push_receiver, ne, t3, Operand(zero_reg));
+    __ lbu(a7, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
+    __ And(a7, a7, Operand(1 << SharedFunctionInfo::kNativeBitWithinByte));
+    __ Branch(&push_receiver, ne, a7, Operand(zero_reg));
    // Compute the receiver in sloppy mode.
    __ JumpIfSmi(a0, &call_to_object);
@@ -1333,8 +1348,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
    __ Branch(&push_receiver);
    __ bind(&use_global_proxy);
-    __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
-    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalProxyOffset));
+    __ ld(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+    __ ld(a0, FieldMemOperand(a0, GlobalObject::kGlobalProxyOffset));
    // Push the receiver.
    // a0: receiver
@@ -1343,14 +1358,14 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
    // Copy all arguments from the array to the stack.
    Label entry, loop;
-    __ lw(a0, MemOperand(fp, kIndexOffset));
+    __ ld(a0, MemOperand(fp, kIndexOffset));
    __ Branch(&entry);
    // Load the current argument from the arguments array and push it to the
    // stack.
    // a0: current argument index
    __ bind(&loop);
-    __ lw(a1, MemOperand(fp, kArgsOffset));
+    __ ld(a1, MemOperand(fp, kArgsOffset));
    __ Push(a1, a0);
    // Call the runtime to access the property in the arguments array.
@@ -1358,21 +1373,21 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
    __ push(v0);
    // Use inline caching to access the arguments.
-    __ lw(a0, MemOperand(fp, kIndexOffset));
-    __ Addu(a0, a0, Operand(1 << kSmiTagSize));
-    __ sw(a0, MemOperand(fp, kIndexOffset));
+    __ ld(a0, MemOperand(fp, kIndexOffset));
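+    // Adding Smi::FromInt(1) bumps the tagged index without untagging.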
+    __ Daddu(a0, a0, Operand(Smi::FromInt(1)));
+    __ sd(a0, MemOperand(fp, kIndexOffset));
    // Test if the copy loop has finished copying all the elements from the
    // arguments object.
    __ bind(&entry);
-    __ lw(a1, MemOperand(fp, kLimitOffset));
+    __ ld(a1, MemOperand(fp, kLimitOffset));
    __ Branch(&loop, ne, a0, Operand(a1));
    // Call the function.
    Label call_proxy;
    ParameterCount actual(a0);
-    __ sra(a0, a0, kSmiTagSize);
-    __ lw(a1, MemOperand(fp, kFunctionOffset));
+    __ SmiUntag(a0);
+    __ ld(a1, MemOperand(fp, kFunctionOffset));
    __ GetObjectType(a1, a2, a2);
    __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
@@ -1380,12 +1395,12 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
    frame_scope.GenerateLeaveFrame();
    __ Ret(USE_DELAY_SLOT);
-    __ Addu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
+    __ Daddu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
    // Call the function proxy.
    __ bind(&call_proxy);
    __ push(a1);  // Add function proxy as last argument.
-    __ Addu(a0, a0, Operand(1));
+    __ Daddu(a0, a0, Operand(1));
    __ li(a2, Operand(0, RelocInfo::NONE32));
    __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
    __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
@@ -1394,7 +1409,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
  }
  __ Ret(USE_DELAY_SLOT);
-  __ Addu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
+  __ Daddu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
}
@@ -1408,22 +1423,23 @@ static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
  // Check the stack for overflow. We are not trying to catch
  // interruptions (e.g. debug break and preemption) here, so the "real stack
  // limit" is checked.
-  __ LoadRoot(t1, Heap::kRealStackLimitRootIndex);
-  // Make t1 the space we have left. The stack might already be overflowed
-  // here which will cause t1 to become negative.
-  __ subu(t1, sp, t1);
+  __ LoadRoot(a5, Heap::kRealStackLimitRootIndex);
+  // Make a5 the space we have left. The stack might already be overflowed
+  // here which will cause a5 to become negative.
+  __ dsubu(a5, sp, a5);
  // Check if the arguments will overflow the stack.
-  __ sll(at, a2, kPointerSizeLog2);
+  __ dsll(at, a2, kPointerSizeLog2);
  // Signed comparison.
-  __ Branch(stack_overflow, le, t1, Operand(at));
+  __ Branch(stack_overflow, le, a5, Operand(at));
}
static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
-  __ sll(a0, a0, kSmiTagSize);
-  __ li(t0, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
-  __ MultiPush(a0.bit() | a1.bit() | t0.bit() | fp.bit() | ra.bit());
-  __ Addu(fp, sp,
+  // Smi-tag the argument count.
+  __ dsll32(a0, a0, 0);
+  __ li(a4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
+  __ Daddu(fp, sp,
      Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
}
@@ -1434,14 +1450,14 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
  // -----------------------------------
  // Get the number of arguments passed (as a smi), tear down the frame and
  // then tear down the parameters.
-  __ lw(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+  __ ld(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
                             kPointerSize)));
  __ mov(sp, fp);
  __ MultiPop(fp.bit() | ra.bit());
-  __ sll(t0, a1, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(sp, sp, t0);
+  __ SmiScale(a4, a1, kPointerSizeLog2);
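+  // SmiScale turns the Smi count into a byte offset in one shift.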
+  __ Daddu(sp, sp, a4);
  // Adjust for the receiver.
-  __ Addu(sp, sp, Operand(kPointerSize));
+  __ Daddu(sp, sp, Operand(kPointerSize));
}
@@ -1458,7 +1474,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
  Label invoke, dont_adapt_arguments;
  Label enough, too_few;
-  __ lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
  __ Branch(&dont_adapt_arguments, eq,
            a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
  // We use Uless as the number of argument should always be greater than 0.
@@ -1473,13 +1489,13 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
    EnterArgumentsAdaptorFrame(masm);
    // Calculate copy start address into a0 and copy end address into a2.
-    __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
-    __ Addu(a0, fp, a0);
+    __ SmiScale(a0, a0, kPointerSizeLog2);
+    __ Daddu(a0, fp, a0);
    // Adjust for return address and receiver.
-    __ Addu(a0, a0, Operand(2 * kPointerSize));
+    __ Daddu(a0, a0, Operand(2 * kPointerSize));
    // Compute copy end address.
-    __ sll(a2, a2, kPointerSizeLog2);
-    __ subu(a2, a0, a2);
+    __ dsll(a2, a2, kPointerSizeLog2);
+    __ dsubu(a2, a0, a2);
    // Copy the arguments (including the receiver) to the new stack frame.
    // a0: copy start address
@@ -1489,10 +1505,10 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
    Label copy;
    __ bind(&copy);
-    __ lw(t0, MemOperand(a0));
-    __ push(t0);
+    __ ld(a4, MemOperand(a0));
+    __ push(a4);
    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a2));
-    __ addiu(a0, a0, -kPointerSize);  // In delay slot.
+    __ daddiu(a0, a0, -kPointerSize);  // In delay slot.
    __ jmp(&invoke);
  }
@@ -1506,43 +1522,43 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
    // a1: function
    // a2: expected number of arguments
    // a3: code entry to call
-    __ sll(a0, a0, kPointerSizeLog2 - kSmiTagSize);
-    __ Addu(a0, fp, a0);
+    __ SmiScale(a0, a0, kPointerSizeLog2);
+    __ Daddu(a0, fp, a0);
    // Adjust for return address and receiver.
-    __ Addu(a0, a0, Operand(2 * kPointerSize));
+    __ Daddu(a0, a0, Operand(2 * kPointerSize));
    // Compute copy end address. Also adjust for return address.
-    __ Addu(t3, fp, kPointerSize);
+    __ Daddu(a7, fp, kPointerSize);
    // Copy the arguments (including the receiver) to the new stack frame.
    // a0: copy start address
    // a1: function
    // a2: expected number of arguments
    // a3: code entry to call
-    // t3: copy end address
+    // a7: copy end address
    Label copy;
    __ bind(&copy);
-    __ lw(t0, MemOperand(a0));  // Adjusted above for return addr and receiver.
-    __ Subu(sp, sp, kPointerSize);
-    __ Subu(a0, a0, kPointerSize);
-    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(t3));
-    __ sw(t0, MemOperand(sp));  // In the delay slot.
+    __ ld(a4, MemOperand(a0));  // Adjusted above for return addr and receiver.
+    __ Dsubu(sp, sp, kPointerSize);
+    __ Dsubu(a0, a0, kPointerSize);
+    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a7));
+    __ sd(a4, MemOperand(sp));  // In the delay slot.
    // Fill the remaining expected arguments with undefined.
    // a1: function
    // a2: expected number of arguments
    // a3: code entry to call
-    __ LoadRoot(t0, Heap::kUndefinedValueRootIndex);
-    __ sll(t2, a2, kPointerSizeLog2);
-    __ Subu(a2, fp, Operand(t2));
+    __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
+    __ dsll(a6, a2, kPointerSizeLog2);
+    __ Dsubu(a2, fp, Operand(a6));
    // Adjust for frame.
-    __ Subu(a2, a2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+    __ Dsubu(a2, a2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
                            2 * kPointerSize));
    Label fill;
    __ bind(&fill);
-    __ Subu(sp, sp, kPointerSize);
+    __ Dsubu(sp, sp, kPointerSize);
    __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a2));
-    __ sw(t0, MemOperand(sp));
+    __ sd(a4, MemOperand(sp));
  }
  // Call the entry point.
@@ -1578,4 +1594,4 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
} }  // namespace v8::internal
-#endif  // V8_TARGET_ARCH_MIPS
+#endif  // V8_TARGET_ARCH_MIPS64