Chromium Code Reviews
Unified Diff: src/mips64/code-stubs-mips64.cc

Issue 2829073002: MIPS64: Move load/store instructions to macro-assembler. (Closed)
Patch Set: Created 3 years, 8 months ago
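
Note on the change: the patch below mechanically replaces raw assembler load/store mnemonics (ld, sd, lw, lwu, lbu, lb, sw, ldc1) at call sites with their capitalized MacroAssembler counterparts (Ld, Sd, Lw, Lwu, Lbu, Lb, Sw, Ldc1). The sketch below illustrates the wrapper pattern, assuming the macro-assembler variant falls back to a scratch register when the offset does not fit a signed 16-bit immediate; the helper names (is_int16, LoadRegPlusOffsetToAt) are illustrative and not taken from this patch.

    // Sketch only: why call sites can switch from sd() to Sd().
    // The raw sd() instruction requires a 16-bit offset; the wrapper
    // accepts any offset by materializing the address first.
    void MacroAssembler::Sd(Register rd, const MemOperand& rs) {
      if (is_int16(rs.offset())) {
        sd(rd, rs);                 // Offset fits: emit the plain instruction.
      } else {
        LoadRegPlusOffsetToAt(rs);  // Illustrative helper: base + offset -> at.
        sd(rd, MemOperand(at, 0));  // Store through the scratch register.
      }
    }
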
Index: src/mips64/code-stubs-mips64.cc
diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc
index 1738ef432e9a2d344dd298253295aa45865b3521..7ea89d2ba9c08074937c248942ba2b60e59c9aa6 100644
--- a/src/mips64/code-stubs-mips64.cc
+++ b/src/mips64/code-stubs-mips64.cc
@@ -25,7 +25,7 @@ namespace internal {
void ArrayNArgumentsConstructorStub::Generate(MacroAssembler* masm) {
__ dsll(t9, a0, kPointerSizeLog2);
__ Daddu(t9, sp, t9);
- __ sd(a1, MemOperand(t9, 0));
+ __ Sd(a1, MemOperand(t9, 0));
__ Push(a1);
__ Push(a2);
__ Daddu(a0, a0, 3);
@@ -61,7 +61,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
__ Dsubu(sp, sp, Operand(param_count * kPointerSize));
for (int i = 0; i < param_count; ++i) {
// Store argument to stack.
- __ sd(descriptor.GetRegisterParameter(i),
+ __ Sd(descriptor.GetRegisterParameter(i),
MemOperand(sp, (param_count - 1 - i) * kPointerSize));
}
__ CallExternalReference(miss, param_count);
@@ -91,7 +91,7 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
__ Push(scratch, scratch2, scratch3);
if (!skip_fastpath()) {
// Load double input.
- __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
+ __ Ldc1(double_scratch, MemOperand(input_reg, double_offset));
// Clear cumulative exception flags and save the FCSR.
__ cfc1(scratch2, FCSR);
@@ -123,9 +123,9 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
Register input_high = scratch2;
Register input_low = scratch3;
- __ lw(input_low,
+ __ Lw(input_low,
MemOperand(input_reg, double_offset + Register::kMantissaOffset));
- __ lw(input_high,
+ __ Lw(input_high,
MemOperand(input_reg, double_offset + Register::kExponentOffset));
Label normal_exponent, restore_sign;
@@ -281,7 +281,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// The representation of NaN values has all exponent bits (52..62) set,
// and not all mantissa bits (0..51) clear.
// Read top bits of double representation (second word of value).
- __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+ __ Lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
// Test that exponent bits are all set.
__ And(a7, a6, Operand(exp_mask_reg));
// If all bits not set (ne cond), then not a NaN, objects are equal.
@@ -290,7 +290,7 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
// Shift out flag and all exponent bits, retaining only mantissa.
__ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord);
// Or with all low-bits of mantissa.
- __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+ __ Lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
__ Or(v0, a7, Operand(a6));
// For equal we already have the right value in v0: Return zero (equal)
// if all bits in mantissa are zero (it's an Infinity) and non-zero if
@@ -343,7 +343,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ SmiUntag(at, rhs);
__ mtc1(at, f14);
__ cvt_d_w(f14, f14);
- __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ Ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
// We now have both loaded as doubles.
__ jmp(both_loaded_as_doubles);
@@ -367,7 +367,7 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
__ SmiUntag(at, lhs);
__ mtc1(at, f12);
__ cvt_d_w(f12, f12);
- __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ Ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
// Fall through to both_loaded_as_doubles.
}
@@ -418,14 +418,14 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
Label* slow) {
__ GetObjectType(lhs, a3, a2);
__ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
- __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
// If first was a heap number & second wasn't, go to slow case.
__ Branch(slow, ne, a3, Operand(a2));
// Both are heap numbers. Load them up then jump to the code we have
// for that.
- __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
- __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+ __ Ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+ __ Ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
__ jmp(both_loaded_as_doubles);
}
@@ -458,10 +458,10 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
__ mov(v0, a0); // In delay slot.
__ bind(&object_test);
- __ ld(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
- __ ld(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
- __ lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
- __ lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
+ __ Ld(a2, FieldMemOperand(lhs, HeapObject::kMapOffset));
+ __ Ld(a3, FieldMemOperand(rhs, HeapObject::kMapOffset));
+ __ Lbu(t0, FieldMemOperand(a2, Map::kBitFieldOffset));
+ __ Lbu(t1, FieldMemOperand(a3, Map::kBitFieldOffset));
__ And(at, t0, Operand(1 << Map::kIsUndetectable));
__ Branch(&undetectable, ne, at, Operand(zero_reg));
__ And(at, t1, Operand(1 << Map::kIsUndetectable));
@@ -760,7 +760,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
// Base is already in double_base.
__ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
- __ ldc1(double_exponent,
+ __ Ldc1(double_exponent,
FieldMemOperand(exponent, HeapNumber::kValueOffset));
}
@@ -996,7 +996,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
__ bind(&find_ra);
// This spot was reserved in EnterExitFrame.
- __ sd(ra, MemOperand(sp, result_stack_size));
+ __ Sd(ra, MemOperand(sp, result_stack_size));
// Stack space reservation moved to the branch delay slot below.
// Stack is still aligned.
@@ -1012,9 +1012,9 @@ void CEntryStub::Generate(MacroAssembler* masm) {
if (result_size() > 2) {
DCHECK_EQ(3, result_size());
// Read result values stored on stack.
- __ ld(a0, MemOperand(v0, 2 * kPointerSize));
- __ ld(v1, MemOperand(v0, 1 * kPointerSize));
- __ ld(v0, MemOperand(v0, 0 * kPointerSize));
+ __ Ld(a0, MemOperand(v0, 2 * kPointerSize));
+ __ Ld(v1, MemOperand(v0, 1 * kPointerSize));
+ __ Ld(v0, MemOperand(v0, 0 * kPointerSize));
}
// Result returned in v0, v1:v0 or a0:v1:v0 - do not destroy these registers!
@@ -1030,7 +1030,7 @@ void CEntryStub::Generate(MacroAssembler* masm) {
ExternalReference pending_exception_address(
Isolate::kPendingExceptionAddress, isolate());
__ li(a2, Operand(pending_exception_address));
- __ ld(a2, MemOperand(a2));
+ __ Ld(a2, MemOperand(a2));
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
// Cannot use check here as it attempts to generate call into runtime.
__ Branch(&okay, eq, a4, Operand(a2));
@@ -1081,24 +1081,24 @@ void CEntryStub::Generate(MacroAssembler* masm) {
// Retrieve the handler context, SP and FP.
__ li(cp, Operand(pending_handler_context_address));
- __ ld(cp, MemOperand(cp));
+ __ Ld(cp, MemOperand(cp));
__ li(sp, Operand(pending_handler_sp_address));
- __ ld(sp, MemOperand(sp));
+ __ Ld(sp, MemOperand(sp));
__ li(fp, Operand(pending_handler_fp_address));
- __ ld(fp, MemOperand(fp));
+ __ Ld(fp, MemOperand(fp));
// If the handler is a JS frame, restore the context to the frame. Note that
// the context will be set to (cp == 0) for non-JS frames.
Label zero;
__ Branch(&zero, eq, cp, Operand(zero_reg));
- __ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ bind(&zero);
// Compute the handler entry address and jump to it.
__ li(a1, Operand(pending_handler_code_address));
- __ ld(a1, MemOperand(a1));
+ __ Ld(a1, MemOperand(a1));
__ li(a2, Operand(pending_handler_offset_address));
- __ ld(a2, MemOperand(a2));
+ __ Ld(a2, MemOperand(a2));
__ Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Daddu(t9, a1, a2);
__ Jump(t9);
@@ -1143,7 +1143,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ li(a5, Operand(StackFrame::TypeToMarker(marker)));
ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
__ li(a4, Operand(c_entry_fp));
- __ ld(a4, MemOperand(a4));
+ __ Ld(a4, MemOperand(a4));
__ Push(a7, a6, a5, a4);
// Set up frame pointer for the frame to be pushed.
__ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
@@ -1168,9 +1168,9 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
Label non_outermost_js;
ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
__ li(a5, Operand(ExternalReference(js_entry_sp)));
- __ ld(a6, MemOperand(a5));
+ __ Ld(a6, MemOperand(a5));
__ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
- __ sd(fp, MemOperand(a5));
+ __ Sd(fp, MemOperand(a5));
__ li(a4, Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
Label cont;
__ b(&cont);
@@ -1191,7 +1191,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// signal the existence of the JSEntry frame.
__ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
isolate)));
- __ sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0.
+ __ Sd(v0, MemOperand(a4)); // We come back from 'invoke'. result is in v0.
__ LoadRoot(v0, Heap::kExceptionRootIndex);
__ b(&exit); // b exposes branch delay slot.
__ nop(); // Branch delay slot nop.
@@ -1230,7 +1230,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
__ li(a4, Operand(entry));
}
- __ ld(t9, MemOperand(a4)); // Deref address.
+ __ Ld(t9, MemOperand(a4)); // Deref address.
// Call JSEntryTrampoline.
__ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
__ Call(t9);
@@ -1245,14 +1245,14 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
__ Branch(&non_outermost_js_2, ne, a5,
Operand(StackFrame::OUTERMOST_JSENTRY_FRAME));
__ li(a5, Operand(ExternalReference(js_entry_sp)));
- __ sd(zero_reg, MemOperand(a5));
+ __ Sd(zero_reg, MemOperand(a5));
__ bind(&non_outermost_js_2);
// Restore the top frame descriptors from the stack.
__ pop(a5);
__ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress,
isolate)));
- __ sd(a5, MemOperand(a4));
+ __ Sd(a5, MemOperand(a4));
// Reset the stack to the callee saved registers.
__ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
@@ -1296,7 +1296,7 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
// Argument 9: Pass current isolate address.
__ li(t1, Operand(ExternalReference::isolate_address(isolate())));
- __ sd(t1, MemOperand(sp, 1 * kPointerSize));
+ __ Sd(t1, MemOperand(sp, 1 * kPointerSize));
// Argument 8: Indicate that this is a direct call from JavaScript.
__ li(a7, Operand(1));
@@ -1307,9 +1307,9 @@ void RegExpExecStub::Generate(MacroAssembler* masm) {
ExternalReference address_of_regexp_stack_memory_size =
ExternalReference::address_of_regexp_stack_memory_size(isolate());
__ li(t1, Operand(address_of_regexp_stack_memory_address));
- __ ld(t1, MemOperand(t1, 0));
+ __ Ld(t1, MemOperand(t1, 0));
__ li(t2, Operand(address_of_regexp_stack_memory_size));
- __ ld(t2, MemOperand(t2, 0));
+ __ Ld(t2, MemOperand(t2, 0));
__ daddu(a6, t1, t2);
// Argument 6: Set the number of capture registers to zero to force global
@@ -1388,7 +1388,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Load the cache state into a5.
__ dsrl(a5, a3, 32 - kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a5));
- __ ld(a5, FieldMemOperand(a5, FixedArray::kHeaderSize));
+ __ Ld(a5, FieldMemOperand(a5, FixedArray::kHeaderSize));
// A monomorphic cache hit or an already megamorphic state: invoke the
// function without changing the state.
@@ -1397,11 +1397,11 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
Label check_allocation_site;
Register feedback_map = a6;
Register weak_value = t0;
- __ ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
+ __ Ld(weak_value, FieldMemOperand(a5, WeakCell::kValueOffset));
__ Branch(&done, eq, a1, Operand(weak_value));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
__ Branch(&done, eq, a5, Operand(at));
- __ ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
+ __ Ld(feedback_map, FieldMemOperand(a5, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kWeakCellMapRootIndex);
__ Branch(&check_allocation_site, ne, feedback_map, Operand(at));
@@ -1434,7 +1434,7 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
__ dsrl(a5, a3, 32 - kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a5));
__ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
- __ sd(at, FieldMemOperand(a5, FixedArray::kHeaderSize));
+ __ Sd(at, FieldMemOperand(a5, FixedArray::kHeaderSize));
__ jmp(&done);
// An uninitialized cache is patched with the function.
@@ -1460,9 +1460,9 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) {
// Increment the call count for all function calls.
__ SmiScale(a4, a3, kPointerSizeLog2);
__ Daddu(a5, a2, Operand(a4));
- __ ld(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
+ __ Ld(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
__ Daddu(a4, a4, Operand(Smi::FromInt(1)));
- __ sd(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
+ __ Sd(a4, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
}
@@ -1485,8 +1485,8 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
__ Daddu(a5, a2, at);
Label feedback_register_initialized;
// Put the AllocationSite from the feedback vector into a2, or undefined.
- __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
- __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
+ __ Ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
+ __ Ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Branch(&feedback_register_initialized, eq, a5, Operand(at));
__ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
@@ -1499,8 +1499,8 @@ void CallConstructStub::Generate(MacroAssembler* masm) {
// Tail call to the function-specific construct stub (still in the caller
// context at this point).
- __ ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
- __ ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
+ __ Ld(a4, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(a4, FieldMemOperand(a4, SharedFunctionInfo::kConstructStubOffset));
__ Daddu(at, a4, Operand(Code::kHeaderSize - kHeapObjectTag));
__ Jump(at);
@@ -1521,8 +1521,8 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ JumpIfSmi(object_, receiver_not_string_);
// Fetch the instance type of the receiver into result register.
- __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ __ Ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
// If the receiver is not a string trigger the non-string case.
__ And(a4, result_, Operand(kIsNotStringMask));
__ Branch(receiver_not_string_, ne, a4, Operand(zero_reg));
@@ -1534,7 +1534,7 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
__ bind(&got_smi_index_);
// Check for index out of range.
- __ ld(a4, FieldMemOperand(object_, String::kLengthOffset));
+ __ Ld(a4, FieldMemOperand(object_, String::kLengthOffset));
__ Branch(index_out_of_range_, ls, a4, Operand(index_));
__ SmiUntag(index_);
@@ -1583,8 +1583,8 @@ void StringCharCodeAtGenerator::GenerateSlow(
__ pop(object_);
}
// Reload the instance type.
- __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
- __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+ __ Ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+ __ Lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
call_helper.AfterCall(masm);
// If index is still not a smi, it must be out of range.
__ JumpIfNotSmi(index_, index_out_of_range_);
@@ -1615,8 +1615,8 @@ void StringHelper::GenerateFlatOneByteStringEquals(
// Compare lengths.
Label strings_not_equal, check_zero_length;
- __ ld(length, FieldMemOperand(left, String::kLengthOffset));
- __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Ld(length, FieldMemOperand(left, String::kLengthOffset));
+ __ Ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
__ Branch(&check_zero_length, eq, length, Operand(scratch2));
__ bind(&strings_not_equal);
// Can not put li in delayslot, it has multi instructions.
@@ -1649,8 +1649,8 @@ void StringHelper::GenerateCompareFlatOneByteStrings(
Register scratch2, Register scratch3, Register scratch4) {
Label result_not_equal, compare_lengths;
// Find minimum length and length difference.
- __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
- __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
+ __ Ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
+ __ Ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
__ Dsubu(scratch3, scratch1, Operand(scratch2));
Register length_delta = scratch3;
__ slt(scratch4, scratch2, scratch1);
@@ -1704,9 +1704,9 @@ void StringHelper::GenerateOneByteCharsCompareLoop(
Label loop;
__ bind(&loop);
__ Daddu(scratch3, left, index);
- __ lbu(scratch1, MemOperand(scratch3));
+ __ Lbu(scratch1, MemOperand(scratch3));
__ Daddu(scratch3, right, index);
- __ lbu(scratch2, MemOperand(scratch3));
+ __ Lbu(scratch2, MemOperand(scratch3));
__ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
__ Daddu(index, index, 1);
__ Branch(&loop, ne, index, Operand(zero_reg));
@@ -1729,7 +1729,7 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
if (FLAG_debug_code) {
__ And(at, a2, Operand(kSmiTagMask));
__ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
- __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
+ __ Ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, kExpectedAllocationSite, a4, Operand(at));
}
@@ -1748,9 +1748,9 @@ void CompareICStub::GenerateBooleans(MacroAssembler* masm) {
__ CheckMap(a1, a2, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
__ CheckMap(a0, a3, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK);
if (!Token::IsEqualityOp(op())) {
- __ ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
+ __ Ld(a1, FieldMemOperand(a1, Oddball::kToNumberOffset));
__ AssertSmi(a1);
- __ ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+ __ Ld(a0, FieldMemOperand(a0, Oddball::kToNumberOffset));
__ AssertSmi(a0);
}
__ Ret(USE_DELAY_SLOT);
@@ -1806,7 +1806,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
DONT_DO_SMI_CHECK);
__ Dsubu(a2, a0, Operand(kHeapObjectTag));
- __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
__ Branch(&left);
__ bind(&right_smi);
__ SmiUntag(a2, a0); // Can't clobber a0 yet.
@@ -1819,7 +1819,7 @@ void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
__ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
DONT_DO_SMI_CHECK);
__ Dsubu(a2, a1, Operand(kHeapObjectTag));
- __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+ __ Ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
__ Branch(&done);
__ bind(&left_smi);
__ SmiUntag(a2, a1); // Can't clobber a1 yet.
@@ -1891,10 +1891,10 @@ void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
__ JumpIfEitherSmi(left, right, &miss);
// Check that both operands are internalized strings.
- __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ __ Ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ Ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ Lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ Lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
__ Or(tmp1, tmp1, Operand(tmp2));
__ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
@@ -1933,10 +1933,10 @@ void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
// Check that both operands are unique names. This leaves the instance
// types loaded in tmp1 and tmp2.
- __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ __ Ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ Ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ Lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ Lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
__ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
@@ -1981,10 +1981,10 @@ void CompareICStub::GenerateStrings(MacroAssembler* masm) {
// Check that both operands are strings. This leaves the instance
// types loaded in tmp1 and tmp2.
- __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
- __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
- __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
- __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+ __ Ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+ __ Ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+ __ Lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+ __ Lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
STATIC_ASSERT(kNotStringTag != 0);
__ Or(tmp3, tmp1, tmp2);
__ And(tmp5, tmp3, Operand(kIsNotStringMask));
@@ -2081,8 +2081,8 @@ void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) {
__ And(a2, a1, a0);
__ JumpIfSmi(a2, &miss);
__ GetWeakValue(a4, cell);
- __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
- __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+ __ Ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+ __ Ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
__ Branch(&miss, ne, a2, Operand(a4));
__ Branch(&miss, ne, a3, Operand(a4));
@@ -2114,7 +2114,7 @@ void CompareICStub::GenerateMiss(MacroAssembler* masm) {
__ daddiu(sp, sp, -kPointerSize);
__ CallRuntime(Runtime::kCompareIC_Miss, 3, kDontSaveFPRegs,
USE_DELAY_SLOT);
- __ sd(a4, MemOperand(sp)); // In the delay slot.
+ __ Sd(a4, MemOperand(sp)); // In the delay slot.
// Compute the entry point of the rewritten stub.
__ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore registers.
@@ -2133,9 +2133,9 @@ void DirectCEntryStub::Generate(MacroAssembler* masm) {
__ daddiu(sp, sp, -kCArgsSlotsSize);
// Place the return address on the stack, making the call
// GC safe. The RegExp backend also relies on this.
- __ sd(ra, MemOperand(sp, kCArgsSlotsSize));
+ __ Sd(ra, MemOperand(sp, kCArgsSlotsSize));
__ Call(t9); // Call the C++ function.
- __ ld(t9, MemOperand(sp, kCArgsSlotsSize));
+ __ Ld(t9, MemOperand(sp, kCArgsSlotsSize));
if (FLAG_debug_code && FLAG_enable_slow_asserts) {
// In case of an error the return address may point to a memory area
@@ -2192,7 +2192,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
Register tmp = properties;
__ Dlsa(tmp, properties, index, kPointerSizeLog2);
- __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+ __ Ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
DCHECK(!tmp.is(entity_name));
__ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
@@ -2208,15 +2208,13 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
__ Branch(&good, eq, entity_name, Operand(tmp));
// Check if the entry name is not a unique name.
- __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
- __ lbu(entity_name,
- FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+ __ Ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+ __ Lbu(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(entity_name, miss);
__ bind(&good);
// Restore the properties.
- __ ld(properties,
- FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
}
const int spill_mask =
@@ -2224,7 +2222,7 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
a2.bit() | a1.bit() | a0.bit() | v0.bit());
__ MultiPush(spill_mask);
- __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+ __ Ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
__ li(a1, Operand(Handle<Name>(name)));
NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
__ CallStub(&stub);
@@ -2258,11 +2256,11 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
- __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
+ __ Ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
__ SmiUntag(mask);
__ Dsubu(mask, mask, Operand(1));
- __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+ __ Lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
__ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
@@ -2290,7 +2288,7 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
STATIC_ASSERT(kSmiTagSize == 1);
__ Dlsa(index, dictionary, index, kPointerSizeLog2);
- __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
+ __ Ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
// Having undefined at this place means the name is not contained.
__ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
@@ -2300,9 +2298,8 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
// Check if the entry name is not a unique name.
- __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
- __ lbu(entry_key,
- FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+ __ Ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+ __ Lbu(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
__ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
}
}
@@ -2384,7 +2381,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
if (remembered_set_action() == EMIT_REMEMBERED_SET) {
Label dont_need_remembered_set;
- __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
+ __ Ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
__ JumpIfNotInNewSpace(regs_.scratch0(), // Value.
regs_.scratch0(),
&dont_need_remembered_set);
@@ -2462,7 +2459,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
__ bind(&on_black);
// Get the value from the slot.
- __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
+ __ Ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
if (mode == INCREMENTAL_COMPACTION) {
Label ensure_not_white;
@@ -2517,7 +2514,7 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
__ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrameConstants::kArgumentsLengthOffset;
- __ ld(a1, MemOperand(fp, parameter_count_offset));
+ __ Ld(a1, MemOperand(fp, parameter_count_offset));
if (function_mode() == JS_FUNCTION_STUB_MODE) {
__ Daddu(a1, a1, Operand(1));
}
@@ -2645,7 +2642,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ Branch(&normal_sequence, ne, at, Operand(zero_reg));
}
// look at the first argument
- __ ld(a5, MemOperand(sp, 0));
+ __ Ld(a5, MemOperand(sp, 0));
__ Branch(&normal_sequence, eq, a5, Operand(zero_reg));
if (mode == DISABLE_ALLOCATION_SITES) {
@@ -2668,7 +2665,7 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
__ Daddu(a3, a3, Operand(1));
if (FLAG_debug_code) {
- __ ld(a5, FieldMemOperand(a2, 0));
+ __ Ld(a5, FieldMemOperand(a2, 0));
__ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
__ Assert(eq, kExpectedAllocationSite, a5, Operand(at));
}
@@ -2677,10 +2674,9 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
// in the AllocationSite::transition_info field because elements kind is
// restricted to a portion of the field...upper bits need to be left alone.
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
- __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+ __ Ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
- __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
-
+ __ Sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ bind(&normal_sequence);
int last_index = GetSequenceIndexFromFastElementsKind(
@@ -2764,7 +2760,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ SmiTst(a4, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
@@ -2778,7 +2774,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
}
// Enter the context of the Array function.
- __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+ __ Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
Label subclassing;
__ Branch(&subclassing, ne, a1, Operand(a3));
@@ -2788,7 +2784,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
__ LoadRoot(at, Heap::kUndefinedValueRootIndex);
__ Branch(&no_info, eq, a2, Operand(at));
- __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+ __ Ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
__ SmiUntag(a3);
STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
__ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
@@ -2800,7 +2796,7 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) {
// Subclassing.
__ bind(&subclassing);
__ Dlsa(at, sp, a0, kPointerSizeLog2);
- __ sd(a1, MemOperand(at));
+ __ Sd(a1, MemOperand(at));
__ li(at, Operand(3));
__ Daddu(a0, a0, at);
__ Push(a3, a2);
@@ -2820,7 +2816,7 @@ void InternalArrayConstructorStub::GenerateCase(
if (IsFastPackedElementsKind(kind)) {
// We might need to create a holey array
// look at the first argument.
- __ ld(at, MemOperand(sp, 0));
+ __ Ld(at, MemOperand(sp, 0));
InternalArraySingleArgumentConstructorStub
stub1_holey(isolate(), GetHoleyElementsKind(kind));
@@ -2845,7 +2841,7 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
// builtin Array functions which always have maps.
// Initial map for the builtin Array function should be a map.
- __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Will both indicate a NULL and a Smi.
__ SmiTst(a3, at);
__ Assert(ne, kUnexpectedInitialMapForArrayFunction,
@@ -2856,11 +2852,11 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
}
// Figure out the right elements kind.
- __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
// Load the map's "bit field 2" into a3. We only need the first byte,
// but the following bit field extraction takes care of that anyway.
- __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
+ __ Lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ DecodeField<Map::ElementsKindBits>(a3);
@@ -2910,7 +2906,7 @@ static void CallApiFunctionAndReturn(
Label profiler_disabled;
Label end_profiler_check;
__ li(t9, Operand(ExternalReference::is_profiling_address(isolate)));
- __ lb(t9, MemOperand(t9, 0));
+ __ Lb(t9, MemOperand(t9, 0));
__ Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
// Additional parameter is the address of the actual callback.
@@ -2923,11 +2919,11 @@ static void CallApiFunctionAndReturn(
// Allocate HandleScope in callee-save registers.
__ li(s3, Operand(next_address));
- __ ld(s0, MemOperand(s3, kNextOffset));
- __ ld(s1, MemOperand(s3, kLimitOffset));
- __ lw(s2, MemOperand(s3, kLevelOffset));
+ __ Ld(s0, MemOperand(s3, kNextOffset));
+ __ Ld(s1, MemOperand(s3, kLimitOffset));
+ __ Lw(s2, MemOperand(s3, kLevelOffset));
__ Addu(s2, s2, Operand(1));
- __ sw(s2, MemOperand(s3, kLevelOffset));
+ __ Sw(s2, MemOperand(s3, kLevelOffset));
if (FLAG_log_timer_events) {
FrameScope frame(masm, StackFrame::MANUAL);
@@ -2961,19 +2957,19 @@ static void CallApiFunctionAndReturn(
Label return_value_loaded;
// Load value from ReturnValue.
- __ ld(v0, return_value_operand);
+ __ Ld(v0, return_value_operand);
__ bind(&return_value_loaded);
// No more valid handles (the result handle was the last one). Restore
// previous handle scope.
- __ sd(s0, MemOperand(s3, kNextOffset));
+ __ Sd(s0, MemOperand(s3, kNextOffset));
if (__ emit_debug_code()) {
- __ lw(a1, MemOperand(s3, kLevelOffset));
+ __ Lw(a1, MemOperand(s3, kLevelOffset));
__ Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
}
__ Subu(s2, s2, Operand(1));
- __ sw(s2, MemOperand(s3, kLevelOffset));
- __ ld(at, MemOperand(s3, kLimitOffset));
+ __ Sw(s2, MemOperand(s3, kLevelOffset));
+ __ Ld(at, MemOperand(s3, kLimitOffset));
__ Branch(&delete_allocated_handles, ne, s1, Operand(at));
// Leave the API exit frame.
@@ -2981,11 +2977,11 @@ static void CallApiFunctionAndReturn(
bool restore_context = context_restore_operand != NULL;
if (restore_context) {
- __ ld(cp, *context_restore_operand);
+ __ Ld(cp, *context_restore_operand);
}
if (stack_space_offset != kInvalidStackOffset) {
DCHECK(kCArgsSlotsSize == 0);
- __ ld(s0, MemOperand(sp, stack_space_offset));
+ __ Ld(s0, MemOperand(sp, stack_space_offset));
} else {
__ li(s0, Operand(stack_space));
}
@@ -2995,7 +2991,7 @@ static void CallApiFunctionAndReturn(
// Check if the function scheduled an exception.
__ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
__ li(at, Operand(ExternalReference::scheduled_exception_address(isolate)));
- __ ld(a5, MemOperand(at));
+ __ Ld(a5, MemOperand(at));
__ Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
__ Ret();
@@ -3006,7 +3002,7 @@ static void CallApiFunctionAndReturn(
// HandleScope limit has changed. Delete allocated extensions.
__ bind(&delete_allocated_handles);
- __ sd(s1, MemOperand(s3, kLimitOffset));
+ __ Sd(s1, MemOperand(s3, kLimitOffset));
__ mov(s0, v0);
__ mov(a0, v0);
__ PrepareCallCFunction(1, s1);
@@ -3056,7 +3052,7 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
__ Push(context, callee, call_data);
if (!is_lazy()) {
// Load context from callee.
- __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+ __ Ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
}
Register scratch = call_data;
@@ -3084,16 +3080,16 @@ void CallApiCallbackStub::Generate(MacroAssembler* masm) {
// Arguments is after the return address.
__ Daddu(a0, sp, Operand(1 * kPointerSize));
// FunctionCallbackInfo::implicit_args_
- __ sd(scratch, MemOperand(a0, 0 * kPointerSize));
+ __ Sd(scratch, MemOperand(a0, 0 * kPointerSize));
// FunctionCallbackInfo::values_
__ Daddu(at, scratch,
Operand((FCA::kArgsLength - 1 + argc()) * kPointerSize));
- __ sd(at, MemOperand(a0, 1 * kPointerSize));
+ __ Sd(at, MemOperand(a0, 1 * kPointerSize));
// FunctionCallbackInfo::length_ = argc
// Stored as int field, 32-bit integers within struct on stack always left
// justified by n64 ABI.
__ li(at, Operand(argc()));
- __ sw(at, MemOperand(a0, 2 * kPointerSize));
+ __ Sw(at, MemOperand(a0, 2 * kPointerSize));
ExternalReference thunk_ref =
ExternalReference::invoke_function_callback(masm->isolate());
@@ -3143,22 +3139,22 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Here and below +1 is for name() pushed after the args_ array.
typedef PropertyCallbackArguments PCA;
__ Dsubu(sp, sp, (PCA::kArgsLength + 1) * kPointerSize);
- __ sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
- __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
- __ sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
+ __ Sd(receiver, MemOperand(sp, (PCA::kThisIndex + 1) * kPointerSize));
+ __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kDataOffset));
+ __ Sd(scratch, MemOperand(sp, (PCA::kDataIndex + 1) * kPointerSize));
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
- __ sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
- __ sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
+ __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueOffset + 1) * kPointerSize));
+ __ Sd(scratch, MemOperand(sp, (PCA::kReturnValueDefaultValueIndex + 1) *
kPointerSize));
__ li(scratch, Operand(ExternalReference::isolate_address(isolate())));
- __ sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
- __ sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
+ __ Sd(scratch, MemOperand(sp, (PCA::kIsolateIndex + 1) * kPointerSize));
+ __ Sd(holder, MemOperand(sp, (PCA::kHolderIndex + 1) * kPointerSize));
// should_throw_on_error -> false
DCHECK(Smi::kZero == nullptr);
- __ sd(zero_reg,
+ __ Sd(zero_reg,
MemOperand(sp, (PCA::kShouldThrowOnErrorIndex + 1) * kPointerSize));
- __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
- __ sd(scratch, MemOperand(sp, 0 * kPointerSize));
+ __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kNameOffset));
+ __ Sd(scratch, MemOperand(sp, 0 * kPointerSize));
// v8::PropertyCallbackInfo::args_ array and name handle.
const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
@@ -3173,15 +3169,15 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) {
// Create v8::PropertyCallbackInfo object on the stack and initialize
// it's args_ field.
- __ sd(a1, MemOperand(sp, 1 * kPointerSize));
+ __ Sd(a1, MemOperand(sp, 1 * kPointerSize));
__ Daddu(a1, sp, Operand(1 * kPointerSize));
// a1 = v8::PropertyCallbackInfo&
ExternalReference thunk_ref =
ExternalReference::invoke_accessor_getter_callback(isolate());
- __ ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
- __ ld(api_function_address,
+ __ Ld(scratch, FieldMemOperand(callback, AccessorInfo::kJsGetterOffset));
+ __ Ld(api_function_address,
FieldMemOperand(scratch, Foreign::kForeignAddressOffset));
// +3 is to skip prolog, return address and name handle.