Index: src/compiler/mips64/code-generator-mips64.cc
diff --git a/src/compiler/mips64/code-generator-mips64.cc b/src/compiler/mips64/code-generator-mips64.cc
index 8359d6fb20779c7554929fc4785dbf714de48bf8..dc24eb1b3cec094e667b60f61319640aaeac7caa 100644
--- a/src/compiler/mips64/code-generator-mips64.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -583,8 +583,8 @@ void CodeGenerator::AssembleDeconstructFrame() {
 void CodeGenerator::AssemblePrepareTailCall() {
   if (frame_access_state()->has_frame()) {
-    __ ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
-    __ ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+    __ Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   }
   frame_access_state()->SetFrameAccessToSP();
 }
@@ -597,14 +597,14 @@ void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
   Label done;
   // Check if current frame is an arguments adaptor frame.
-  __ ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ Ld(scratch3, MemOperand(fp, StandardFrameConstants::kContextOffset));
   __ Branch(&done, ne, scratch3,
             Operand(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
   // Load arguments count from current arguments adaptor frame (note, it
   // does not include receiver).
   Register caller_args_count_reg = scratch1;
-  __ ld(caller_args_count_reg,
+  __ Ld(caller_args_count_reg,
         MemOperand(fp, ArgumentsAdaptorFrameConstants::kLengthOffset));
   __ SmiUntag(caller_args_count_reg);
@@ -696,10 +696,10 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
-        __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
         __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
       }
-      __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Call(at);
       RecordCallPosition(instr);
       frame_access_state()->ClearSPDelta();
@@ -709,13 +709,13 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       Register func = i.InputRegister(0);
       if (FLAG_debug_code) {
         // Check the function's context matches the context argument.
-        __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ Ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
         __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
       }
       AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
                                       i.TempRegister(0), i.TempRegister(1),
                                       i.TempRegister(2));
-      __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
       __ Jump(at);
       frame_access_state()->ClearSPDelta();
       frame_access_state()->SetFrameAccessToDefault();
@@ -784,7 +784,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     case kArchParentFramePointer:
       if (frame_access_state()->has_frame()) {
-        __ ld(i.OutputRegister(), MemOperand(fp, 0));
+        __ Ld(i.OutputRegister(), MemOperand(fp, 0));
       } else {
         __ mov(i.OutputRegister(), fp);
       }
@@ -803,7 +803,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       auto ool = new (zone()) OutOfLineRecordWrite(this, object, index, value,
                                                    scratch0, scratch1, mode);
       __ Daddu(at, object, index);
-      __ sd(value, MemOperand(at));
+      __ Sd(value, MemOperand(at));
       __ CheckPageFlag(object, scratch0,
                        MemoryChunk::kPointersFromHereAreInterestingMask, ne,
                        ool->entry());
@@ -1712,64 +1712,64 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       __ seh(i.OutputRegister(), i.InputRegister(0));
       break;
     case kMips64Lbu:
-      __ lbu(i.OutputRegister(), i.MemoryOperand());
+      __ Lbu(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMips64Lb:
-      __ lb(i.OutputRegister(), i.MemoryOperand());
+      __ Lb(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMips64Sb:
-      __ sb(i.InputOrZeroRegister(2), i.MemoryOperand());
+      __ Sb(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMips64Lhu:
-      __ lhu(i.OutputRegister(), i.MemoryOperand());
+      __ Lhu(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMips64Ulhu:
       __ Ulhu(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMips64Lh:
-      __ lh(i.OutputRegister(), i.MemoryOperand());
+      __ Lh(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMips64Ulh:
       __ Ulh(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMips64Sh:
-      __ sh(i.InputOrZeroRegister(2), i.MemoryOperand());
+      __ Sh(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMips64Ush:
       __ Ush(i.InputOrZeroRegister(2), i.MemoryOperand(), kScratchReg);
       break;
     case kMips64Lw:
-      __ lw(i.OutputRegister(), i.MemoryOperand());
+      __ Lw(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMips64Ulw:
       __ Ulw(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMips64Lwu:
-      __ lwu(i.OutputRegister(), i.MemoryOperand());
+      __ Lwu(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMips64Ulwu:
       __ Ulwu(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMips64Ld:
-      __ ld(i.OutputRegister(), i.MemoryOperand());
+      __ Ld(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMips64Uld:
       __ Uld(i.OutputRegister(), i.MemoryOperand());
       break;
     case kMips64Sw:
-      __ sw(i.InputOrZeroRegister(2), i.MemoryOperand());
+      __ Sw(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMips64Usw:
       __ Usw(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMips64Sd:
-      __ sd(i.InputOrZeroRegister(2), i.MemoryOperand());
+      __ Sd(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMips64Usd:
       __ Usd(i.InputOrZeroRegister(2), i.MemoryOperand());
       break;
     case kMips64Lwc1: {
-      __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
+      __ Lwc1(i.OutputSingleRegister(), i.MemoryOperand());
       break;
     }
     case kMips64Ulwc1: {
@@ -1783,7 +1783,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
         __ Move(kDoubleRegZero, 0.0);
       }
-      __ swc1(ft, operand);
+      __ Swc1(ft, operand);
       break;
     }
     case kMips64Uswc1: {
@@ -1797,7 +1797,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       break;
     }
     case kMips64Ldc1:
-      __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
+      __ Ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
       break;
     case kMips64Uldc1:
       __ Uldc1(i.OutputDoubleRegister(), i.MemoryOperand(), kScratchReg);
@@ -1807,7 +1807,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
       if (ft.is(kDoubleRegZero) && !__ IsDoubleZeroRegSet()) {
         __ Move(kDoubleRegZero, 0.0);
       }
-      __ sdc1(ft, i.MemoryOperand());
+      __ Sdc1(ft, i.MemoryOperand());
       break;
     }
     case kMips64Usdc1: {
@@ -1820,7 +1820,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kMips64Push:
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
+        __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, -kDoubleSize));
         __ Subu(sp, sp, Operand(kDoubleSize));
         frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
       } else {
@@ -1835,9 +1835,9 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
     }
     case kMips64StoreToStackSlot: {
       if (instr->InputAt(0)->IsFPRegister()) {
-        __ sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
+        __ Sdc1(i.InputDoubleRegister(0), MemOperand(sp, i.InputInt32(1)));
       } else {
-        __ sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
+        __ Sd(i.InputRegister(0), MemOperand(sp, i.InputInt32(1)));
       }
       break;
     }
@@ -2764,17 +2764,17 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
     if (destination->IsRegister()) {
       __ mov(g.ToRegister(destination), src);
     } else {
-      __ sd(src, g.ToMemOperand(destination));
+      __ Sd(src, g.ToMemOperand(destination));
     }
   } else if (source->IsStackSlot()) {
     DCHECK(destination->IsRegister() || destination->IsStackSlot());
     MemOperand src = g.ToMemOperand(source);
     if (destination->IsRegister()) {
-      __ ld(g.ToRegister(destination), src);
+      __ Ld(g.ToRegister(destination), src);
     } else {
       Register temp = kScratchReg;
-      __ ld(temp, src);
-      __ sd(temp, g.ToMemOperand(destination));
+      __ Ld(temp, src);
+      __ Sd(temp, g.ToMemOperand(destination));
     }
   } else if (source->IsConstant()) {
     Constant src = g.ToConstant(source);
@@ -2820,15 +2820,15 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
           UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips64.
           break;
       }
-      if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
+      if (destination->IsStackSlot()) __ Sd(dst, g.ToMemOperand(destination));
     } else if (src.type() == Constant::kFloat32) {
       if (destination->IsFPStackSlot()) {
         MemOperand dst = g.ToMemOperand(destination);
         if (bit_cast<int32_t>(src.ToFloat32()) == 0) {
-          __ sw(zero_reg, dst);
+          __ Sw(zero_reg, dst);
         } else {
           __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
-          __ sw(at, dst);
+          __ Sw(at, dst);
         }
       } else {
         DCHECK(destination->IsFPRegister());
@@ -2842,7 +2842,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
                               : kScratchDoubleReg;
       __ Move(dst, src.ToFloat64());
       if (destination->IsFPStackSlot()) {
-        __ sdc1(dst, g.ToMemOperand(destination));
+        __ Sdc1(dst, g.ToMemOperand(destination));
       }
     }
   } else if (source->IsFPRegister()) {
@@ -2852,17 +2852,17 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
       __ Move(dst, src);
     } else {
       DCHECK(destination->IsFPStackSlot());
-      __ sdc1(src, g.ToMemOperand(destination));
+      __ Sdc1(src, g.ToMemOperand(destination));
     }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPRegister() || destination->IsFPStackSlot());
     MemOperand src = g.ToMemOperand(source);
     if (destination->IsFPRegister()) {
-      __ ldc1(g.ToDoubleRegister(destination), src);
+      __ Ldc1(g.ToDoubleRegister(destination), src);
     } else {
       FPURegister temp = kScratchDoubleReg;
-      __ ldc1(temp, src);
-      __ sdc1(temp, g.ToMemOperand(destination));
+      __ Ldc1(temp, src);
+      __ Sdc1(temp, g.ToMemOperand(destination));
     }
   } else {
     UNREACHABLE();
@@ -2888,8 +2888,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
       DCHECK(destination->IsStackSlot());
       MemOperand dst = g.ToMemOperand(destination);
       __ mov(temp, src);
-      __ ld(src, dst);
-      __ sd(temp, dst);
+      __ Ld(src, dst);
+      __ Sd(temp, dst);
     }
   } else if (source->IsStackSlot()) {
     DCHECK(destination->IsStackSlot());
@@ -2897,10 +2897,10 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
     Register temp_1 = kScratchReg2;
     MemOperand src = g.ToMemOperand(source);
     MemOperand dst = g.ToMemOperand(destination);
-    __ ld(temp_0, src);
-    __ ld(temp_1, dst);
-    __ sd(temp_0, dst);
-    __ sd(temp_1, src);
+    __ Ld(temp_0, src);
+    __ Ld(temp_1, dst);
+    __ Sd(temp_0, dst);
+    __ Sd(temp_1, src);
   } else if (source->IsFPRegister()) {
     FPURegister temp = kScratchDoubleReg;
     FPURegister src = g.ToDoubleRegister(source);
@@ -2913,8 +2913,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
       DCHECK(destination->IsFPStackSlot());
       MemOperand dst = g.ToMemOperand(destination);
       __ Move(temp, src);
-      __ ldc1(src, dst);
-      __ sdc1(temp, dst);
+      __ Ldc1(src, dst);
+      __ Sdc1(temp, dst);
     }
   } else if (source->IsFPStackSlot()) {
     DCHECK(destination->IsFPStackSlot());
@@ -2924,12 +2924,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
     MemOperand src1(src0.rm(), src0.offset() + kIntSize);
     MemOperand dst0 = g.ToMemOperand(destination);
     MemOperand dst1(dst0.rm(), dst0.offset() + kIntSize);
-    __ ldc1(temp_1, dst0);  // Save destination in temp_1.
-    __ lw(temp_0, src0);  // Then use temp_0 to copy source to destination.
-    __ sw(temp_0, dst0);
-    __ lw(temp_0, src1);
-    __ sw(temp_0, dst1);
-    __ sdc1(temp_1, src0);
+    __ Ldc1(temp_1, dst0);  // Save destination in temp_1.
+    __ Lw(temp_0, src0);  // Then use temp_0 to copy source to destination.
+    __ Sw(temp_0, dst0);
+    __ Lw(temp_0, src1);
+    __ Sw(temp_0, dst1);
+    __ Sdc1(temp_1, src0);
   } else {
     // No other combinations are possible.
     UNREACHABLE();