Chromium Code Reviews

Unified Diff: src/crankshaft/mips64/lithium-codegen-mips64.cc

Issue 2829073002: MIPS64: Move load/store instructions to macro-assembler. (Closed)
Patch Set: Created 3 years, 8 months ago
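
This patch is a mechanical rename of the raw MIPS64 load/store emitters (ld, sd, lbu, lhu, lwc1, ldc1, ...) to their capitalized MacroAssembler counterparts (Ld, Sd, Lbu, Lhu, Lwc1, Ldc1, ...), so call sites go through one wrapper layer instead of emitting raw instructions directly. As a rough illustration only, not the actual V8 implementation (which lives in macro-assembler-mips64.cc; the helpers is_int16(), rm(), and offset() are assumed here), a wrapper of this kind typically handles offsets that do not fit the raw instruction's signed 16-bit immediate field:

  // Hedged sketch of a MacroAssembler load wrapper. The real Ld()
  // additionally coordinates scratch-register use; this shows only
  // the core idea of falling back when the offset is out of range.
  void MacroAssembler::Ld(Register rd, const MemOperand& rs) {
    if (is_int16(rs.offset())) {
      ld(rd, rs);  // Offset fits the signed 16-bit field: emit directly.
    } else {
      // Materialize the full address in the scratch register 'at',
      // then load with a zero offset.
      li(at, Operand(rs.offset()));
      daddu(at, at, rs.rm());
      ld(rd, MemOperand(at, 0));
    }
  }

Centralizing this in the macro-assembler gives every call site below the large-offset handling for free instead of each one open-coding it.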
Index: src/crankshaft/mips64/lithium-codegen-mips64.cc
diff --git a/src/crankshaft/mips64/lithium-codegen-mips64.cc b/src/crankshaft/mips64/lithium-codegen-mips64.cc
index 9cfa19a15d8eab90702b2624c003cfa45bbdbf7a..9424c4622d1c901ebaad87cf1968c4eec9e0d402 100644
--- a/src/crankshaft/mips64/lithium-codegen-mips64.cc
+++ b/src/crankshaft/mips64/lithium-codegen-mips64.cc
@@ -91,7 +91,7 @@ void LCodeGen::SaveCallerDoubles() {
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
while (!save_iterator.Done()) {
- __ sdc1(DoubleRegister::from_code(save_iterator.Current()),
+ __ Sdc1(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -107,7 +107,7 @@ void LCodeGen::RestoreCallerDoubles() {
BitVector::Iterator save_iterator(doubles);
int count = 0;
while (!save_iterator.Done()) {
- __ ldc1(DoubleRegister::from_code(save_iterator.Current()),
+ __ Ldc1(DoubleRegister::from_code(save_iterator.Current()),
MemOperand(sp, count * kDoubleSize));
save_iterator.Advance();
count++;
@@ -148,7 +148,7 @@ bool LCodeGen::GeneratePrologue() {
Label loop;
__ bind(&loop);
__ Dsubu(a0, a0, Operand(kPointerSize));
- __ sd(a1, MemOperand(a0, 2 * kPointerSize));
+ __ Sd(a1, MemOperand(a0, 2 * kPointerSize));
__ Branch(&loop, ne, a0, Operand(sp));
__ Pop(a0, a1);
} else {
@@ -198,7 +198,7 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
// Context is returned in v0. It replaces the context passed to us.
// It's saved on the stack and kept live in cp.
__ mov(cp, v0);
- __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
// Copy any necessary parameters into the context.
int num_parameters = info()->scope()->num_parameters();
int first_parameter = info()->scope()->has_this_declaration() ? -1 : 0;
@@ -209,10 +209,10 @@ void LCodeGen::DoPrologue(LPrologue* instr) {
int parameter_offset = StandardFrameConstants::kCallerSPOffset +
(num_parameters - 1 - i) * kPointerSize;
// Load parameter from stack.
- __ ld(a0, MemOperand(fp, parameter_offset));
+ __ Ld(a0, MemOperand(fp, parameter_offset));
// Store it in the context.
MemOperand target = ContextMemOperand(cp, var->index());
- __ sd(a0, target);
+ __ Sd(a0, target);
// Update the write barrier. This clobbers a3 and a0.
if (need_write_barrier) {
__ RecordWriteContextSlot(
@@ -417,7 +417,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
}
return scratch;
} else if (op->IsStackSlot()) {
- __ ld(scratch, ToMemOperand(op));
+ __ Ld(scratch, ToMemOperand(op));
return scratch;
}
UNREACHABLE();
@@ -454,7 +454,7 @@ DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
}
} else if (op->IsStackSlot()) {
MemOperand mem_op = ToMemOperand(op);
- __ ldc1(dbl_scratch, mem_op);
+ __ Ldc1(dbl_scratch, mem_op);
return dbl_scratch;
}
UNREACHABLE();
@@ -697,7 +697,7 @@ void LCodeGen::LoadContextFromDeferred(LOperand* context) {
if (context->IsRegister()) {
__ Move(cp, ToRegister(context));
} else if (context->IsStackSlot()) {
- __ ld(cp, ToMemOperand(context));
+ __ Ld(cp, ToMemOperand(context));
} else if (context->IsConstantOperand()) {
HConstant* constant =
chunk_->LookupConstant(LConstantOperand::cast(context));
@@ -776,16 +776,16 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
Label no_deopt;
__ Push(a1, scratch);
__ li(scratch, Operand(count));
- __ lw(a1, MemOperand(scratch));
+ __ Lw(a1, MemOperand(scratch));
__ Subu(a1, a1, Operand(1));
__ Branch(&no_deopt, ne, a1, Operand(zero_reg));
__ li(a1, Operand(FLAG_deopt_every_n_times));
- __ sw(a1, MemOperand(scratch));
+ __ Sw(a1, MemOperand(scratch));
__ Pop(a1, scratch);
__ Call(entry, RelocInfo::RUNTIME_ENTRY);
__ bind(&no_deopt);
- __ sw(a1, MemOperand(scratch));
+ __ Sw(a1, MemOperand(scratch));
__ Pop(a1, scratch);
}
@@ -1765,8 +1765,8 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
if (FLAG_debug_code) {
Register scratch = scratch0();
- __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+ __ Ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+ __ Lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ And(scratch, scratch,
Operand(kStringRepresentationMask | kStringEncodingMask));
@@ -1779,9 +1779,9 @@ void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
if (encoding == String::ONE_BYTE_ENCODING) {
- __ lbu(result, operand);
+ __ Lbu(result, operand);
} else {
- __ lhu(result, operand);
+ __ Lhu(result, operand);
}
}
@@ -1804,9 +1804,9 @@ void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
if (encoding == String::ONE_BYTE_ENCODING) {
- __ sb(value, operand);
+ __ Sb(value, operand);
} else {
- __ sh(value, operand);
+ __ Sh(value, operand);
}
}
@@ -2069,12 +2069,12 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (type.IsHeapNumber()) {
DCHECK(!info()->IsStub());
DoubleRegister dbl_scratch = double_scratch0();
- __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ __ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
// Test the double value. Zero and NaN are false.
EmitBranchF(instr, ogl, dbl_scratch, kDoubleRegZero);
} else if (type.IsString()) {
DCHECK(!info()->IsStub());
- __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
+ __ Ld(at, FieldMemOperand(reg, String::kLengthOffset));
EmitBranch(instr, ne, at, Operand(zero_reg));
} else {
ToBooleanHints expected = instr->hydrogen()->expected_input_types();
@@ -2111,10 +2111,10 @@ void LCodeGen::DoBranch(LBranch* instr) {
const Register map = scratch0();
if (expected & ToBooleanHint::kNeedsMap) {
- __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ Ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
if (expected & ToBooleanHint::kCanBeUndetectable) {
// Undetectable -> false.
- __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+ __ Lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
__ And(at, at, Operand(1 << Map::kIsUndetectable));
__ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
}
@@ -2122,7 +2122,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected & ToBooleanHint::kReceiver) {
// spec object -> true.
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_),
ge, at, Operand(FIRST_JS_RECEIVER_TYPE));
}
@@ -2130,9 +2130,9 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected & ToBooleanHint::kString) {
// String value -> false iff empty.
Label not_string;
- __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
- __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
+ __ Ld(at, FieldMemOperand(reg, String::kLengthOffset));
__ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
__ Branch(instr->FalseLabel(chunk_));
__ bind(&not_string);
@@ -2141,7 +2141,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (expected & ToBooleanHint::kSymbol) {
// Symbol value -> true.
const Register scratch = scratch1();
- __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+ __ Lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
__ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
}
@@ -2151,7 +2151,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
Label not_heap_number;
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
__ Branch(&not_heap_number, ne, map, Operand(at));
- __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ __ Ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
__ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
ne, dbl_scratch, kDoubleRegZero);
// Falls through if dbl_scratch == 0.
@@ -2345,8 +2345,8 @@ void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
__ JumpIfSmi(input, instr->FalseLabel(chunk_));
}
- __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+ __ Ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
__ And(at, temp, Operand(1 << Map::kIsUndetectable));
EmitBranch(instr, ne, at, Operand(zero_reg));
}
@@ -2453,8 +2453,8 @@ void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
// temp now contains the constructor function. Grab the
// instance class name from there.
- __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
- __ ld(temp,
+ __ Ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(temp,
FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
// The class name we are testing against is internalized since it's a literal.
// The name in the constructor is internalized because of the way the context
@@ -2483,7 +2483,7 @@ void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
Register reg = ToRegister(instr->value());
Register temp = ToRegister(instr->temp());
- __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ Ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
EmitBranch(instr, eq, temp, Operand(instr->map()));
}
@@ -2505,28 +2505,28 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
}
// Loop through the {object}'s prototype chain looking for the {prototype}.
- __ ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Ld(object_map, FieldMemOperand(object, HeapObject::kMapOffset));
Label loop;
__ bind(&loop);
// Deoptimize if the object needs to be access checked.
- __ lbu(object_instance_type,
+ __ Lbu(object_instance_type,
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ And(object_instance_type, object_instance_type,
Operand(1 << Map::kIsAccessCheckNeeded));
DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck, object_instance_type,
Operand(zero_reg));
- __ lbu(object_instance_type,
+ __ Lbu(object_instance_type,
FieldMemOperand(object_map, Map::kInstanceTypeOffset));
DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy, object_instance_type,
Operand(JS_PROXY_TYPE));
- __ ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
+ __ Ld(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
__ LoadRoot(at, Heap::kNullValueRootIndex);
EmitFalseBranch(instr, eq, object_prototype, Operand(at));
EmitTrueBranch(instr, eq, object_prototype, Operand(prototype));
__ Branch(&loop, USE_DELAY_SLOT);
- __ ld(object_map, FieldMemOperand(object_prototype,
+ __ Ld(object_map, FieldMemOperand(object_prototype,
HeapObject::kMapOffset)); // In delay slot.
}
@@ -2560,7 +2560,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
// managed by the register allocator and tearing down the frame, it's
// safe to write to the context register.
__ push(v0);
- __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
__ CallRuntime(Runtime::kTraceExit);
}
if (info()->saves_caller_doubles()) {
@@ -2592,7 +2592,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
Register context = ToRegister(instr->context());
Register result = ToRegister(instr->result());
- __ ld(result, ContextMemOperand(context, instr->slot_index()));
+ __ Ld(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -2617,7 +2617,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
Label skip_assignment;
if (instr->hydrogen()->RequiresHoleCheck()) {
- __ ld(scratch, target);
+ __ Ld(scratch, target);
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
if (instr->hydrogen()->DeoptimizesOnHole()) {
@@ -2627,7 +2627,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
}
}
- __ sd(value, target);
+ __ Sd(value, target);
if (instr->hydrogen()->NeedsWriteBarrier()) {
SmiCheck check_needed =
instr->hydrogen()->value()->type().IsHeapObject()
@@ -2659,13 +2659,13 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
if (instr->hydrogen()->representation().IsDouble()) {
DoubleRegister result = ToDoubleRegister(instr->result());
- __ ldc1(result, FieldMemOperand(object, offset));
+ __ Ldc1(result, FieldMemOperand(object, offset));
return;
}
Register result = ToRegister(instr->result());
if (!access.IsInobject()) {
- __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ Ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
object = result;
}
@@ -2695,8 +2695,8 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
Register result = ToRegister(instr->result());
// Get the prototype or initial map from the function.
- __ ld(result,
- FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+ __ Ld(result,
+ FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
__ LoadRoot(at, Heap::kTheHoleValueRootIndex);
@@ -2708,7 +2708,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
__ Branch(&done, ne, scratch, Operand(MAP_TYPE));
// Get the prototype from the initial map.
- __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
+ __ Ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
// All done.
__ bind(&done);
@@ -2731,13 +2731,13 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
if (instr->index()->IsConstantOperand()) {
int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
int index = (const_length - const_index) + 1;
- __ ld(result, MemOperand(arguments, index * kPointerSize));
+ __ Ld(result, MemOperand(arguments, index * kPointerSize));
} else {
Register index = ToRegister(instr->index());
__ li(at, Operand(const_length + 1));
__ Dsubu(result, at, index);
__ Dlsa(at, arguments, result, kPointerSizeLog2);
- __ ld(result, MemOperand(at));
+ __ Ld(result, MemOperand(at));
}
} else if (instr->index()->IsConstantOperand()) {
Register length = ToRegister(instr->length());
@@ -2746,10 +2746,10 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
if (loc != 0) {
__ Dsubu(result, length, Operand(loc));
__ Dlsa(at, arguments, result, kPointerSizeLog2);
- __ ld(result, MemOperand(at));
+ __ Ld(result, MemOperand(at));
} else {
__ Dlsa(at, arguments, length, kPointerSizeLog2);
- __ ld(result, MemOperand(at));
+ __ Ld(result, MemOperand(at));
}
} else {
Register length = ToRegister(instr->length());
@@ -2757,7 +2757,7 @@ void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
__ Dsubu(result, length, index);
__ Daddu(result, result, 1);
__ Dlsa(at, arguments, result, kPointerSizeLog2);
- __ ld(result, MemOperand(at));
+ __ Ld(result, MemOperand(at));
}
}
@@ -2800,10 +2800,10 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ Daddu(scratch0(), scratch0(), external_pointer);
}
if (elements_kind == FLOAT32_ELEMENTS) {
- __ lwc1(result, MemOperand(scratch0(), base_offset));
+ __ Lwc1(result, MemOperand(scratch0(), base_offset));
__ cvt_d_s(result, result);
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
- __ ldc1(result, MemOperand(scratch0(), base_offset));
+ __ Ldc1(result, MemOperand(scratch0(), base_offset));
}
} else {
Register result = ToRegister(instr->result());
@@ -2812,23 +2812,23 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
element_size_shift, shift_size, base_offset);
switch (elements_kind) {
case INT8_ELEMENTS:
- __ lb(result, mem_operand);
+ __ Lb(result, mem_operand);
break;
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
- __ lbu(result, mem_operand);
+ __ Lbu(result, mem_operand);
break;
case INT16_ELEMENTS:
- __ lh(result, mem_operand);
+ __ Lh(result, mem_operand);
break;
case UINT16_ELEMENTS:
- __ lhu(result, mem_operand);
+ __ Lhu(result, mem_operand);
break;
case INT32_ELEMENTS:
- __ lw(result, mem_operand);
+ __ Lw(result, mem_operand);
break;
case UINT32_ELEMENTS:
- __ lw(result, mem_operand);
+ __ Lw(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
DeoptimizeIf(Ugreater_equal, instr, DeoptimizeReason::kNegativeValue,
result, Operand(0x80000000));
@@ -2889,7 +2889,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
__ Daddu(scratch, scratch, at);
}
- __ ldc1(result, MemOperand(scratch));
+ __ Ldc1(result, MemOperand(scratch));
if (instr->hydrogen()->RequiresHoleCheck()) {
__ FmoveHigh(scratch, result);
@@ -2965,7 +2965,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
// it needs to bail out.
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
// The comparison only needs LS bits of value, which is a smi.
- __ ld(result, FieldMemOperand(result, PropertyCell::kValueOffset));
+ __ Ld(result, FieldMemOperand(result, PropertyCell::kValueOffset));
DeoptimizeIf(ne, instr, DeoptimizeReason::kHole, result,
Operand(Smi::FromInt(Isolate::kProtectorValid)));
}
@@ -3039,8 +3039,8 @@ void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
} else if (instr->hydrogen()->arguments_adaptor()) {
// Check if the calling frame is an arguments adaptor frame.
Label done, adapted;
- __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(result,
+ __ Ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ld(result,
MemOperand(scratch, CommonFrameConstants::kContextOrFrameTypeOffset));
__ Xor(temp, result,
Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
@@ -3066,8 +3066,8 @@ void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
__ Branch(&done, eq, fp, Operand(elem));
// Arguments adaptor frame present. Get argument length from there.
- __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(result,
+ __ Ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ld(result,
MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(result);
@@ -3089,20 +3089,19 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
if (!instr->hydrogen()->known_function()) {
// Do not transform the receiver to object for strict mode functions.
- __ ld(scratch,
- FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+ __ Ld(scratch,
+ FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
// Do not transform the receiver to object for builtins.
int32_t strict_mode_function_mask =
1 << SharedFunctionInfo::kStrictModeBitWithinByte;
int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
- __ lbu(at,
+ __ Lbu(at,
FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset));
__ And(at, at, Operand(strict_mode_function_mask));
__ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
- __ lbu(at,
- FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
+ __ Lbu(at, FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
__ And(at, at, Operand(native_mask));
__ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
}
@@ -3123,9 +3122,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
__ Branch(&result_in_receiver);
__ bind(&global_object);
- __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
- __ ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
- __ ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
+ __ Ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
+ __ Ld(result, ContextMemOperand(result, Context::NATIVE_CONTEXT_INDEX));
+ __ Ld(result, ContextMemOperand(result, Context::GLOBAL_PROXY_INDEX));
if (result.is(receiver)) {
__ bind(&result_in_receiver);
@@ -3170,7 +3169,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
__ dsll(scratch, length, kPointerSizeLog2);
__ bind(&loop);
__ Daddu(scratch, elements, scratch);
- __ ld(scratch, MemOperand(scratch));
+ __ Ld(scratch, MemOperand(scratch));
__ push(scratch);
__ Dsubu(length, length, Operand(1));
__ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
@@ -3217,7 +3216,7 @@ void LCodeGen::DoDrop(LDrop* instr) {
void LCodeGen::DoThisFunction(LThisFunction* instr) {
Register result = ToRegister(instr->result());
- __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+ __ Ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
}
@@ -3225,7 +3224,7 @@ void LCodeGen::DoContext(LContext* instr) {
// If there is a non-return use, the context must be moved to a register.
Register result = ToRegister(instr->result());
if (info()->IsOptimizing()) {
- __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+ __ Ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
} else {
// If there is no frame, the context must be in cp.
DCHECK(result.is(cp));
@@ -3256,7 +3255,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
if (can_invoke_directly) {
// Change context.
- __ ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
+ __ Ld(cp, FieldMemOperand(function_reg, JSFunction::kContextOffset));
// Always initialize new target and number of actual arguments.
__ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
@@ -3273,7 +3272,7 @@ void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
__ Call(self, RelocInfo::CODE_TARGET);
}
} else {
- __ ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
+ __ Ld(at, FieldMemOperand(function_reg, JSFunction::kCodeEntryOffset));
if (is_tail_call) {
__ Jump(at);
} else {
@@ -3303,7 +3302,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register scratch = scratch0();
// Deoptimize if not a heap number.
- __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, scratch,
Operand(at));
@@ -3311,7 +3310,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Label done;
Register exponent = scratch0();
scratch = no_reg;
- __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+ __ Lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
// Check the sign of the argument. If the argument is positive, just
// return it.
__ Move(result, input);
@@ -3347,15 +3346,15 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
__ mov(tmp1, v0);
// Restore input_reg after call to runtime.
__ LoadFromSafepointRegisterSlot(input, input);
- __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+ __ Lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
__ bind(&allocated);
// exponent: floating point exponent value.
// tmp1: allocated heap number.
__ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
- __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
- __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
- __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+ __ Sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+ __ Lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+ __ Sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
__ StoreToSafepointRegisterSlot(tmp1, result);
}
@@ -3601,7 +3600,7 @@ void LCodeGen::DoPower(LPower* instr) {
Label no_deopt;
__ JumpIfSmi(tagged_exponent, &no_deopt);
DCHECK(!a7.is(tagged_exponent));
- __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
+ __ Lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber, a7, Operand(at));
__ bind(&no_deopt);
@@ -3676,14 +3675,14 @@ void LCodeGen::PrepareForTailCall(const ParameterCount& actual,
// Check if next frame is an arguments adaptor frame.
Register caller_args_count_reg = scratch1;
Label no_arguments_adaptor, formal_parameter_count_loaded;
- __ ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
- __ ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
+ __ Ld(scratch2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+ __ Ld(scratch3, MemOperand(scratch2, StandardFrameConstants::kContextOffset));
__ Branch(&no_arguments_adaptor, ne, scratch3,
Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
// Drop current frame and load arguments count from arguments adaptor frame.
__ mov(fp, scratch2);
- __ ld(caller_args_count_reg,
+ __ Ld(caller_args_count_reg,
MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(caller_args_count_reg);
__ Branch(&formal_parameter_count_loaded);
@@ -3788,7 +3787,7 @@ void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
Label packed_case;
// We might need a change here,
// look at the first argument.
- __ ld(a5, MemOperand(sp, 0));
+ __ Ld(a5, MemOperand(sp, 0));
__ Branch(&packed_case, eq, a5, Operand(zero_reg));
ElementsKind holey_kind = GetHoleyElementsKind(kind);
@@ -3820,8 +3819,7 @@ void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
Register code_object = ToRegister(instr->code_object());
__ Daddu(code_object, code_object,
Operand(Code::kHeaderSize - kHeapObjectTag));
- __ sd(code_object,
- FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+ __ Sd(code_object, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
}
@@ -3864,7 +3862,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
DCHECK(!instr->hydrogen()->has_transition());
DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
DoubleRegister value = ToDoubleRegister(instr->value());
- __ sdc1(value, FieldMemOperand(object, offset));
+ __ Sdc1(value, FieldMemOperand(object, offset));
return;
}
@@ -3872,7 +3870,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Handle<Map> transition = instr->hydrogen()->transition_map();
AddDeprecationDependency(transition);
__ li(scratch1, Operand(transition));
- __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
Register temp = ToRegister(instr->temp());
// Update the write barrier for the map field.
@@ -3888,7 +3886,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
Register destination = object;
if (!access.IsInobject()) {
destination = scratch1;
- __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ Ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
}
if (representation.IsSmi() && SmiValuesAre32Bits() &&
@@ -3907,7 +3905,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
if (FLAG_unbox_double_fields && representation.IsDouble()) {
DCHECK(access.IsInobject());
DoubleRegister value = ToDoubleRegister(instr->value());
- __ sdc1(value, operand);
+ __ Sdc1(value, operand);
} else {
DCHECK(instr->value()->IsRegister());
Register value = ToRegister(instr->value());
@@ -3998,9 +3996,9 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == FLOAT32_ELEMENTS) {
__ cvt_s_d(double_scratch0(), value);
- __ swc1(double_scratch0(), MemOperand(address, base_offset));
+ __ Swc1(double_scratch0(), MemOperand(address, base_offset));
} else { // Storing doubles, not floats.
- __ sdc1(value, MemOperand(address, base_offset));
+ __ Sdc1(value, MemOperand(address, base_offset));
}
} else {
Register value(ToRegister(instr->value()));
@@ -4012,15 +4010,15 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
case UINT8_ELEMENTS:
case UINT8_CLAMPED_ELEMENTS:
case INT8_ELEMENTS:
- __ sb(value, mem_operand);
+ __ Sb(value, mem_operand);
break;
case INT16_ELEMENTS:
case UINT16_ELEMENTS:
- __ sh(value, mem_operand);
+ __ Sh(value, mem_operand);
break;
case INT32_ELEMENTS:
case UINT32_ELEMENTS:
- __ sw(value, mem_operand);
+ __ Sw(value, mem_operand);
break;
case FLOAT32_ELEMENTS:
case FLOAT64_ELEMENTS:
@@ -4078,9 +4076,9 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
if (instr->NeedsCanonicalization()) {
__ FPUCanonicalizeNaN(double_scratch, value);
- __ sdc1(double_scratch, MemOperand(scratch, 0));
+ __ Sdc1(double_scratch, MemOperand(scratch, 0));
} else {
- __ sdc1(value, MemOperand(scratch, 0));
+ __ Sdc1(value, MemOperand(scratch, 0));
}
}
@@ -4211,7 +4209,7 @@ void LCodeGen::DoMaybeGrowElements(LMaybeGrowElements* instr) {
if (instr->elements()->IsRegister()) {
__ mov(result, ToRegister(instr->elements()));
} else {
- __ ld(result, ToMemOperand(instr->elements()));
+ __ Ld(result, ToMemOperand(instr->elements()));
}
__ bind(deferred->exit());
@@ -4231,7 +4229,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
if (instr->object()->IsRegister()) {
__ mov(result, ToRegister(instr->object()));
} else {
- __ ld(result, ToMemOperand(instr->object()));
+ __ Ld(result, ToMemOperand(instr->object()));
}
LOperand* key = instr->key();
@@ -4266,13 +4264,13 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
ElementsKind to_kind = instr->to_kind();
Label not_applicable;
- __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ __ Ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
__ Branch(&not_applicable, ne, scratch, Operand(from_map));
if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
Register new_map_reg = ToRegister(instr->new_map_temp());
__ li(new_map_reg, Operand(to_map));
- __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+ __ Sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
// Write barrier.
__ RecordWriteForMap(object_reg,
new_map_reg,
@@ -4395,7 +4393,7 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
char_code, Operand(String::kMaxOneByteCharCode));
__ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
__ Dlsa(result, result, char_code, kPointerSizeLog2);
- __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+ __ Ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
__ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
__ Branch(deferred->entry(), eq, result, Operand(scratch));
__ bind(deferred->exit());
@@ -4428,7 +4426,7 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
FPURegister single_scratch = double_scratch0().low();
if (input->IsStackSlot()) {
Register scratch = scratch0();
- __ ld(scratch, ToMemOperand(input));
+ __ Ld(scratch, ToMemOperand(input));
__ mtc1(scratch, single_scratch);
} else {
__ mtc1(ToRegister(input), single_scratch);
@@ -4531,7 +4529,7 @@ void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
- __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+ __ Sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
}
@@ -4562,7 +4560,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ Branch(deferred->entry());
}
__ bind(deferred->exit());
- __ sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+ __ Sdc1(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
}
@@ -4633,7 +4631,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Smi check.
__ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
// Heap number map check.
- __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ Ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
if (can_convert_undefined_to_nan) {
__ Branch(&convert, ne, scratch, Operand(at));
@@ -4642,7 +4640,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
Operand(at));
}
// Load heap number.
- __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+ __ Ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
if (deoptimize_on_minus_zero) {
__ mfc1(at, result_reg);
__ Branch(&done, ne, at, Operand(zero_reg));
@@ -4658,7 +4656,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined,
input_reg, Operand(at));
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
- __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+ __ Ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ Branch(&done);
}
} else {
@@ -4688,7 +4686,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
// The input is a tagged HeapObject.
// Heap number map check.
- __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ Ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
// This 'at' value and scratch1 map value are used for tests in both clauses
// of the if.
@@ -4697,7 +4695,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Label truncate;
__ Branch(USE_DELAY_SLOT, &truncate, eq, scratch1, Operand(at));
__ mov(scratch2, input_reg); // In delay slot.
- __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+ __ Lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
DeoptimizeIf(ne, instr, DeoptimizeReason::kNotANumberOrOddball, scratch1,
Operand(ODDBALL_TYPE));
__ bind(&truncate);
@@ -4707,7 +4705,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
Operand(at));
// Load the double value.
- __ ldc1(double_scratch,
+ __ Ldc1(double_scratch,
FieldMemOperand(input_reg, HeapNumber::kValueOffset));
Register except_flag = scratch2;
@@ -4877,8 +4875,8 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
Register view = ToRegister(instr->view());
Register scratch = scratch0();
- __ ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
- __ lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
+ __ Ld(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
+ __ Lw(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ And(at, scratch, 1 << JSArrayBuffer::WasNeutered::kShift);
DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds, at,
Operand(zero_reg));
@@ -4936,7 +4934,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
Register reg = ToRegister(instr->value());
Handle<Cell> cell = isolate()->factory()->NewCell(object);
__ li(at, Operand(cell));
- __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
+ __ Ld(at, FieldMemOperand(at, Cell::kValueOffset));
DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg, Operand(at));
} else {
DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch, reg,
@@ -4948,8 +4946,8 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
Label deopt, done;
// If the map is not deprecated the migration attempt does not make sense.
- __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
- __ lwu(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
+ __ Ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Lwu(scratch0(), FieldMemOperand(scratch0(), Map::kBitField3Offset));
__ And(at, scratch0(), Operand(Map::Deprecated::kMask));
__ Branch(&deopt, eq, at, Operand(zero_reg));
@@ -5006,7 +5004,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
LOperand* input = instr->value();
DCHECK(input->IsRegister());
Register reg = ToRegister(input);
- __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+ __ Ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
DeferredCheckMaps* deferred = NULL;
if (instr->hydrogen()->HasMigrationTarget()) {
@@ -5058,7 +5056,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
__ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
// Check for heap number
- __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+ __ Ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
__ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
// Check for undefined. Undefined is converted to zero for clamping
@@ -5070,8 +5068,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Heap number
__ bind(&heap_number);
- __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
- HeapNumber::kValueOffset));
+ __ Ldc1(double_scratch0(),
+ FieldMemOperand(input_reg, HeapNumber::kValueOffset));
__ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
__ jmp(&done);
@@ -5140,7 +5138,7 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
__ bind(&loop);
__ Dsubu(scratch, scratch, Operand(kPointerSize));
__ Daddu(at, result, Operand(scratch));
- __ sd(scratch2, MemOperand(at));
+ __ Sd(scratch2, MemOperand(at));
__ Branch(&loop, ge, scratch, Operand(zero_reg));
}
}
@@ -5201,7 +5199,7 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
Register top_address = scratch0();
__ Dsubu(v0, v0, Operand(kHeapObjectTag));
__ li(top_address, Operand(allocation_top));
- __ sd(v0, MemOperand(top_address));
+ __ Sd(v0, MemOperand(top_address));
__ Daddu(v0, v0, Operand(kHeapObjectTag));
}
}
@@ -5283,7 +5281,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
Factory* factory = isolate()->factory();
if (String::Equals(type_name, factory->number_string())) {
__ JumpIfSmi(input, true_label);
- __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
*cmp1 = input;
*cmp2 = Operand(at);
@@ -5318,8 +5316,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
// slot.
__ JumpIfSmi(input, false_label);
// Check for undetectable objects => true.
- __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+ __ Ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
__ And(at, at, 1 << Map::kIsUndetectable);
*cmp1 = at;
*cmp2 = Operand(zero_reg);
@@ -5327,8 +5325,8 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
} else if (String::Equals(type_name, factory->function_string())) {
__ JumpIfSmi(input, false_label);
- __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
- __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ Ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+ __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ And(scratch, scratch,
Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
*cmp1 = scratch;
@@ -5343,7 +5341,7 @@ Condition LCodeGen::EmitTypeofIs(Label* true_label,
__ GetObjectType(input, scratch, scratch1());
__ Branch(false_label, lt, scratch1(), Operand(FIRST_JS_RECEIVER_TYPE));
// Check for callable or undetectable objects => false.
- __ lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+ __ Lbu(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
__ And(at, scratch,
Operand((1 << Map::kIsCallable) | (1 << Map::kIsUndetectable)));
*cmp1 = at;
@@ -5492,7 +5490,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
DCHECK(object.is(a0));
__ CheckEnumCache(&call_runtime);
- __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
__ Branch(&use_cache);
// Get the set of properties to enumerate.
@@ -5514,10 +5512,8 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ bind(&load_cache);
__ LoadInstanceDescriptors(map, result);
- __ ld(result,
- FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
- __ ld(result,
- FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+ __ Ld(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+ __ Ld(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache, result,
Operand(zero_reg));
@@ -5528,7 +5524,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register object = ToRegister(instr->value());
Register map = ToRegister(instr->map());
- __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+ __ Ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap, map,
Operand(scratch0()));
}
@@ -5592,16 +5588,16 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
__ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
__ SmiScale(scratch, index, kPointerSizeLog2); // In delay slot.
__ Daddu(scratch, object, scratch);
- __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+ __ Ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
__ Branch(&done);
__ bind(&out_of_object);
- __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+ __ Ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
// Index is equal to negated out of object property index plus 1.
__ Dsubu(scratch, result, scratch);
- __ ld(result, FieldMemOperand(scratch,
- FixedArray::kHeaderSize - kPointerSize));
+ __ Ld(result,
+ FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
__ bind(deferred->exit());
__ bind(&done);
}