Chromium Code Reviews

Unified Diff: src/compiler/mips64/code-generator-mips64.cc

Issue 732403002: MIPS64: Add turbofan support for mips64. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Addressed comments and code cleanup. Created 6 years, 1 month ago
Index: src/compiler/mips64/code-generator-mips64.cc
diff --git a/src/compiler/mips/code-generator-mips.cc b/src/compiler/mips64/code-generator-mips64.cc
similarity index 68%
copy from src/compiler/mips/code-generator-mips.cc
copy to src/compiler/mips64/code-generator-mips64.cc
index 3904a71947b850f1a69bb67865feab9dd7921c46..32ec9206d894514e5d51023ceae006982b02ff47 100644
--- a/src/compiler/mips/code-generator-mips.cc
+++ b/src/compiler/mips64/code-generator-mips64.cc
@@ -19,7 +19,7 @@ namespace compiler {
// TODO(plind): Possibly avoid using these lithium names.
#define kScratchReg kLithiumScratchReg
-#define kCompareReg kLithiumScratchReg2
+#define kScratchReg2 kLithiumScratchReg2
#define kScratchDoubleReg kLithiumScratchDouble
@@ -58,13 +58,14 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
switch (constant.type()) {
case Constant::kInt32:
return Operand(constant.ToInt32());
+ case Constant::kInt64:
+ return Operand(constant.ToInt64());
case Constant::kFloat32:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
case Constant::kFloat64:
return Operand(
isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
- case Constant::kInt64:
case Constant::kExternalReference:
case Constant::kHeapObject:
// TODO(plind): Maybe we should handle ExtRef & HeapObj here?
@@ -133,7 +134,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ Call(Handle<Code>::cast(i.InputHeapObject(0)),
RelocInfo::CODE_TARGET);
} else {
- __ addiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
+ __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
__ Call(at);
}
AddSafepointAndDeopt(instr);
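
The addiu-to-daddiu change above is a 64-bit-pointer fix: the call target is a tagged Code object pointer, and execution starts at a fixed offset past the object header, so the address arithmetic must be done in 64 bits on MIPS64. A minimal sketch of the computation, where kCodeHeaderSize is an illustrative stand-in for Code::kHeaderSize (the real value comes from V8's object layout):

    #include <cstdint>

    constexpr intptr_t kHeapObjectTag = 1;    // V8 tags heap pointers with 1
    constexpr intptr_t kCodeHeaderSize = 64;  // placeholder, not the real value

    intptr_t CodeEntryAddress(intptr_t tagged_code_pointer) {
      // Untag the pointer and skip the header to reach the first
      // instruction; this is the daddiu emitted above.
      return tagged_code_pointer + kCodeHeaderSize - kHeapObjectTag;
    }
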
@@ -144,11 +145,11 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
- __ lw(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+ __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
__ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
}
- __ lw(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+ __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
__ Call(at);
AddSafepointAndDeopt(instr);
break;
@@ -168,51 +169,66 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
case kArchTruncateDoubleToI:
__ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
break;
- case kMipsAdd:
+ case kMips64Add:
__ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kMipsAddOvf:
- __ AdduAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), kCompareReg, kScratchReg);
+ case kMips64Dadd:
+ __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kMipsSub:
+ case kMips64Sub:
__ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kMipsSubOvf:
- __ SubuAndCheckForOverflow(i.OutputRegister(), i.InputRegister(0),
- i.InputOperand(1), kCompareReg, kScratchReg);
+ case kMips64Dsub:
+ __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kMipsMul:
+ case kMips64Mul:
__ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kMipsMulHigh:
+ case kMips64MulHigh:
__ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kMipsMulHighU:
+ case kMips64MulHighU:
__ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kMipsDiv:
+ case kMips64Div:
__ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kMipsDivU:
+ case kMips64DivU:
__ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kMipsMod:
+ case kMips64Mod:
__ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kMipsModU:
+ case kMips64ModU:
__ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kMipsAnd:
+ case kMips64Dmul:
+ __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Ddiv:
+ __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64DdivU:
+ __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Dmod:
+ __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64DmodU:
+ __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64And:
__ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kMipsOr:
+ case kMips64Or:
__ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kMipsXor:
+ case kMips64Xor:
__ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kMipsShl:
+ case kMips64Shl:
if (instr->InputAt(1)->IsRegister()) {
__ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
@@ -220,7 +236,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sll(i.OutputRegister(), i.InputRegister(0), imm);
}
break;
- case kMipsShr:
+ case kMips64Shr:
if (instr->InputAt(1)->IsRegister()) {
__ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
@@ -228,7 +244,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ srl(i.OutputRegister(), i.InputRegister(0), imm);
}
break;
- case kMipsSar:
+ case kMips64Sar:
if (instr->InputAt(1)->IsRegister()) {
__ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
} else {
@@ -236,16 +252,65 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ sra(i.OutputRegister(), i.InputRegister(0), imm);
}
break;
- case kMipsRor:
+ case kMips64Ext:
+ __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ break;
+ case kMips64Dext:
+ __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+ i.InputInt8(2));
+ break;
+ case kMips64Dshl:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int32_t imm = i.InputOperand(1).immediate();
+ if (imm < 32) {
+ __ dsll(i.OutputRegister(), i.InputRegister(0), imm);
+ } else {
+ __ dsll32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+ }
+ }
+ break;
+ case kMips64Dshr:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int32_t imm = i.InputOperand(1).immediate();
+ if (imm < 32) {
+ __ dsrl(i.OutputRegister(), i.InputRegister(0), imm);
+ } else {
+ __ dsrl32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+ }
+ }
+ break;
+ case kMips64Dsar:
+ if (instr->InputAt(1)->IsRegister()) {
+ __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+ } else {
+ int32_t imm = i.InputOperand(1).immediate();
+ if (imm < 32) {
+ __ dsra(i.OutputRegister(), i.InputRegister(0), imm);
+ } else {
+ __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+ }
+ }
+ break;
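
The imm < 32 branches in the three cases above exist because MIPS64 encodes shift amounts in a 5-bit field; amounts of 32 through 63 need the separate dsll32/dsrl32/dsra32 forms, which add 32 to the encoded amount. A value-level sketch of what the two left-shift forms compute (illustrative helpers, not V8 API):

    #include <cstdint>

    // dsll encodes shift amounts 0..31 directly.
    uint64_t EmulateDsll(uint64_t rs, int sa) { return rs << sa; }

    // dsll32 encodes (amount - 32), so it shifts by sa + 32 in total.
    uint64_t EmulateDsll32(uint64_t rs, int sa) { return rs << (sa + 32); }

So for an immediate shift imm >= 32, the generator emits the *32 form with imm - 32, and the two paths together cover the full 0..63 range.
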
+ case kMips64Ror:
__ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
break;
- case kMipsTst:
- // Pseudo-instruction used for tst/branch. No opcode emitted here.
+ case kMips64Dror:
+ __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+ break;
+ case kMips64Tst:
+ case kMips64Tst32:
+ // Pseudo-instruction used for cmp/branch. No opcode emitted here.
break;
- case kMipsCmp:
+ case kMips64Cmp:
+ case kMips64Cmp32:
// Pseudo-instruction used for cmp/branch. No opcode emitted here.
break;
- case kMipsMov:
+ case kMips64Mov:
// TODO(plind): Should we combine mov/li like this, or use separate instr?
// - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
if (HasRegisterInput(instr, 0)) {
@@ -255,28 +320,28 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
break;
- case kMipsCmpD:
+ case kMips64CmpD:
// Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
break;
- case kMipsAddD:
+ case kMips64AddD:
// TODO(plind): add special case: combine mult & add.
__ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
- case kMipsSubD:
+ case kMips64SubD:
__ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
- case kMipsMulD:
+ case kMips64MulD:
// TODO(plind): add special case: right op is -1.0, see arm port.
__ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
- case kMipsDivD:
+ case kMips64DivD:
__ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputDoubleRegister(1));
break;
- case kMipsModD: {
+ case kMips64ModD: {
// TODO(bmeurer): We should really get rid of this special instruction,
// and generate a CallAddress instruction instead.
FrameScope scope(masm(), StackFrame::MANUAL);
@@ -289,37 +354,52 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
__ MovFromFloatResult(i.OutputDoubleRegister());
break;
}
- case kMipsSqrtD: {
+ case kMips64FloorD: {
+ __ floor_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ break;
+ }
+ case kMips64CeilD: {
+ __ ceil_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ break;
+ }
+ case kMips64RoundTruncateD: {
+ __ trunc_l_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+ __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+ break;
+ }
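
The three rounding cases above share one pattern: round to a 64-bit integer with the FPU rounding instruction, then convert back to double. A sketch of kMips64FloorD's effect, assuming the input is finite and within int64 range (the hardware instructions additionally define results for out-of-range and NaN inputs):

    #include <cmath>
    #include <cstdint>

    double FloorD(double x) {
      int64_t i = static_cast<int64_t>(std::floor(x));  // floor_l_d: round toward -infinity
      return static_cast<double>(i);                    // cvt_d_l: convert back to double
    }
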
+ case kMips64SqrtD: {
__ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
}
- case kMipsCvtSD: {
+ case kMips64CvtSD: {
__ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
break;
}
- case kMipsCvtDS: {
+ case kMips64CvtDS: {
__ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
break;
}
- case kMipsCvtDW: {
+ case kMips64CvtDW: {
FPURegister scratch = kScratchDoubleReg;
__ mtc1(i.InputRegister(0), scratch);
__ cvt_d_w(i.OutputDoubleRegister(), scratch);
break;
}
- case kMipsCvtDUw: {
+ case kMips64CvtDUw: {
FPURegister scratch = kScratchDoubleReg;
__ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
break;
}
- case kMipsTruncWD: {
+ case kMips64TruncWD: {
FPURegister scratch = kScratchDoubleReg;
// Other arches use round to zero here, so we follow.
__ trunc_w_d(scratch, i.InputDoubleRegister(0));
__ mfc1(i.OutputRegister(), scratch);
break;
}
- case kMipsTruncUwD: {
+ case kMips64TruncUwD: {
FPURegister scratch = kScratchDoubleReg;
// TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
__ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
@@ -327,55 +407,61 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
}
// ... more basic instructions ...
- case kMipsLbu:
+ case kMips64Lbu:
__ lbu(i.OutputRegister(), i.MemoryOperand());
break;
- case kMipsLb:
+ case kMips64Lb:
__ lb(i.OutputRegister(), i.MemoryOperand());
break;
- case kMipsSb:
+ case kMips64Sb:
__ sb(i.InputRegister(2), i.MemoryOperand());
break;
- case kMipsLhu:
+ case kMips64Lhu:
__ lhu(i.OutputRegister(), i.MemoryOperand());
break;
- case kMipsLh:
+ case kMips64Lh:
__ lh(i.OutputRegister(), i.MemoryOperand());
break;
- case kMipsSh:
+ case kMips64Sh:
__ sh(i.InputRegister(2), i.MemoryOperand());
break;
- case kMipsLw:
+ case kMips64Lw:
__ lw(i.OutputRegister(), i.MemoryOperand());
break;
- case kMipsSw:
+ case kMips64Ld:
+ __ ld(i.OutputRegister(), i.MemoryOperand());
+ break;
+ case kMips64Sw:
__ sw(i.InputRegister(2), i.MemoryOperand());
break;
- case kMipsLwc1: {
+ case kMips64Sd:
+ __ sd(i.InputRegister(2), i.MemoryOperand());
+ break;
+ case kMips64Lwc1: {
__ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
break;
}
- case kMipsSwc1: {
+ case kMips64Swc1: {
int index = 0;
MemOperand operand = i.MemoryOperand(&index);
__ swc1(i.InputSingleRegister(index), operand);
break;
}
- case kMipsLdc1:
+ case kMips64Ldc1:
__ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
break;
- case kMipsSdc1:
+ case kMips64Sdc1:
__ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
break;
- case kMipsPush:
+ case kMips64Push:
__ Push(i.InputRegister(0));
break;
- case kMipsStoreWriteBarrier:
+ case kMips64StoreWriteBarrier:
Register object = i.InputRegister(0);
Register index = i.InputRegister(1);
Register value = i.InputRegister(2);
- __ addu(index, object, index);
- __ sw(value, MemOperand(index));
+ __ daddu(index, object, index);
+ __ sd(value, MemOperand(index));
SaveFPRegsMode mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
RAStatus ra_status = kRAHasNotBeenSaved;
@@ -416,7 +502,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
// TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
// not separated by other instructions.
- if (instr->arch_opcode() == kMipsTst) {
+ if (instr->arch_opcode() == kMips64Tst) {
switch (condition) {
case kNotEqual:
cc = ne;
@@ -425,29 +511,49 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
cc = eq;
break;
default:
- UNSUPPORTED_COND(kMipsTst, condition);
+ UNSUPPORTED_COND(kMips64Tst, condition);
break;
}
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(tlabel, cc, at, Operand(zero_reg));
-
- } else if (instr->arch_opcode() == kMipsAddOvf ||
- instr->arch_opcode() == kMipsSubOvf) {
- // kMipsAddOvf, SubOvf emit negative result to 'kCompareReg' on overflow.
+ } else if (instr->arch_opcode() == kMips64Tst32) {
+ switch (condition) {
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kEqual:
+ cc = eq;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Tst32, condition);
+ break;
+ }
+ // Zero-extend the 32-bit result: MIPS64 provides branch and compare
+ // instructions only for 64-bit operands, which puts 32-bit operations
+ // at a disadvantage here. Globally preferring the Word64 representation
+ // over Word32 in the front-end would avoid this on MIPS64.
+ __ And(at, i.InputRegister(0), i.InputOperand(1));
+ __ Dext(at, at, 0, 32);
+ __ Branch(tlabel, cc, at, Operand(zero_reg));
+ } else if (instr->arch_opcode() == kMips64Dadd ||
+ instr->arch_opcode() == kMips64Dsub) {
switch (condition) {
case kOverflow:
- cc = lt;
+ cc = ne;
break;
case kNotOverflow:
- cc = ge;
+ cc = eq;
break;
default:
- UNSUPPORTED_COND(kMipsAddOvf, condition);
+ UNSUPPORTED_COND(kMips64Dadd, condition);
break;
}
- __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
- } else if (instr->arch_opcode() == kMipsCmp) {
+ __ dsra32(kScratchReg, i.OutputRegister(), 0);
+ __ sra(at, i.OutputRegister(), 31);
+ __ Branch(tlabel, cc, at, Operand(kScratchReg));
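
Since MIPS64 exposes no overflow flag, the kMips64Dadd/Dsub overflow check above is purely arithmetic: the add is done in full 64 bits, and the upper word of the result (dsra32) is compared against the sign-extension of bit 31 (sra ..., 31); they disagree exactly when the value no longer fits in 32 bits. The same test in C++, assuming two's-complement wrapping as on MIPS64:

    #include <cstdint>

    bool Int32AddOverflows(int32_t a, int32_t b) {
      int64_t wide = static_cast<int64_t>(a) + b;        // the Daddu result
      int32_t upper = static_cast<int32_t>(wide >> 32);  // dsra32 kScratchReg, out, 0
      int32_t sign = static_cast<int32_t>(wide) >> 31;   // sra at, out, 31
      return upper != sign;                              // cc == ne means overflow
    }
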
+ } else if (instr->arch_opcode() == kMips64Cmp) {
switch (condition) {
case kEqual:
cc = eq;
@@ -480,7 +586,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
cc = hi;
break;
default:
- UNSUPPORTED_COND(kMipsCmp, condition);
+ UNSUPPORTED_COND(kMips64Cmp, condition);
break;
}
__ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
@@ -488,7 +594,77 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
__ bind(&done);
- } else if (instr->arch_opcode() == kMipsCmpD) {
+ } else if (instr->arch_opcode() == kMips64Cmp32) {
+ switch (condition) {
+ case kEqual:
+ cc = eq;
+ break;
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kSignedLessThan:
+ cc = lt;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = ge;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = le;
+ break;
+ case kSignedGreaterThan:
+ cc = gt;
+ break;
+ case kUnsignedLessThan:
+ cc = lo;
+ break;
+ case kUnsignedGreaterThanOrEqual:
+ cc = hs;
+ break;
+ case kUnsignedLessThanOrEqual:
+ cc = ls;
+ break;
+ case kUnsignedGreaterThan:
+ cc = hi;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Cmp32, condition);
+ break;
+ }
+
+ switch (condition) {
+ case kEqual:
+ case kNotEqual:
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kSignedLessThanOrEqual:
+ case kSignedGreaterThan:
+ // Sign-extend the operands: MIPS64 provides branch and compare
+ // instructions only for 64-bit operands.
+ __ sll(i.InputRegister(0), i.InputRegister(0), 0);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll(i.InputRegister(1), i.InputRegister(1), 0);
+ }
+ break;
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ case kUnsignedGreaterThan:
+ // Zero-extend the operands: MIPS64 provides branch and compare
+ // instructions only for 64-bit operands.
+ __ Dext(i.InputRegister(0), i.InputRegister(0), 0, 32);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
+ }
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Cmp32, condition);
+ break;
+ }
+ __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+
+ if (!fallthru) __ Branch(flabel); // no fallthru to flabel.
+ __ bind(&done);
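
The extension step above matters because the 64-bit compare sees the whole register: two registers holding equal 32-bit values can still differ in bits 63..32. On MIPS64, sll with shift amount 0 is a 32-bit operation and therefore sign-extends its result, while Dext ..., 0, 32 extracts the low word and clears the rest. In effect:

    #include <cstdint>

    // sll reg, reg, 0: keep bits 31..0, replicate bit 31 upward.
    int64_t SignExtend32(int64_t reg) { return static_cast<int32_t>(reg); }

    // Dext reg, reg, 0, 32: keep bits 31..0, clear bits 63..32.
    int64_t ZeroExtend32(int64_t reg) { return reg & 0xFFFFFFFFLL; }
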
+ } else if (instr->arch_opcode() == kMips64CmpD) {
// TODO(dusmil) optimize unordered checks to use fewer instructions
// even if we have to unfold BranchF macro.
Label* nan = flabel;
@@ -515,7 +691,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
nan = tlabel;
break;
default:
- UNSUPPORTED_COND(kMipsCmpD, condition);
+ UNSUPPORTED_COND(kMips64CmpD, condition);
break;
}
__ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
@@ -547,15 +723,13 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
// MIPS does not have condition code flags, so compare and branch are
// implemented differently than on the other arch's. The compare operations
- // emit mips psuedo-instructions, which are checked and handled here.
+ // emit mips pseudo-instructions, which are checked and handled here.
// For materializations, we use delay slot to set the result true, and
- // in the false case, where we fall thru the branch, we reset the result
+ // in the false case, where we fall through the branch, we reset the result
// false.
- // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
- // not separated by other instructions.
- if (instr->arch_opcode() == kMipsTst) {
+ if (instr->arch_opcode() == kMips64Tst) {
switch (condition) {
case kNotEqual:
cc = ne;
@@ -564,32 +738,48 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
cc = eq;
break;
default:
- UNSUPPORTED_COND(kMipsTst, condition);
+ UNSUPPORTED_COND(kMips64Tst, condition);
break;
}
__ And(at, i.InputRegister(0), i.InputOperand(1));
__ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
__ li(result, Operand(1)); // In delay slot.
-
- } else if (instr->arch_opcode() == kMipsAddOvf ||
- instr->arch_opcode() == kMipsSubOvf) {
- // kMipsAddOvf, SubOvf emits negative result to 'kCompareReg' on overflow.
+ } else if (instr->arch_opcode() == kMips64Tst32) {
+ switch (condition) {
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kEqual:
+ cc = eq;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Tst32, condition);
+ break;
+ }
+ // Zero-extend the 32-bit result: MIPS64 provides branch and compare
+ // instructions only for 64-bit operands.
+ __ And(at, i.InputRegister(0), i.InputOperand(1));
+ __ Dext(at, at, 0, 32);
+ __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
+ __ li(result, Operand(1)); // In delay slot.
+ } else if (instr->arch_opcode() == kMips64Dadd ||
+ instr->arch_opcode() == kMips64Dsub) {
switch (condition) {
case kOverflow:
- cc = lt;
+ cc = ne;
break;
case kNotOverflow:
- cc = ge;
+ cc = eq;
break;
default:
- UNSUPPORTED_COND(kMipsAddOvf, condition);
+ UNSUPPORTED_COND(kMips64Dadd, condition);
break;
}
- __ Branch(USE_DELAY_SLOT, &done, cc, kCompareReg, Operand(zero_reg));
+ __ dsra32(kScratchReg, i.OutputRegister(), 0);
+ __ sra(at, i.OutputRegister(), 31);
+ __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(kScratchReg));
__ li(result, Operand(1)); // In delay slot.
-
-
- } else if (instr->arch_opcode() == kMipsCmp) {
+ } else if (instr->arch_opcode() == kMips64Cmp) {
Register left = i.InputRegister(0);
Operand right = i.InputOperand(1);
switch (condition) {
@@ -624,13 +814,82 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
cc = hi;
break;
default:
- UNSUPPORTED_COND(kMipsCmp, condition);
+ UNSUPPORTED_COND(kMips64Cmp, condition);
break;
}
__ Branch(USE_DELAY_SLOT, &done, cc, left, right);
__ li(result, Operand(1)); // In delay slot.
+ } else if (instr->arch_opcode() == kMips64Cmp32) {
+ Register left = i.InputRegister(0);
+ Operand right = i.InputOperand(1);
+ switch (condition) {
+ case kEqual:
+ cc = eq;
+ break;
+ case kNotEqual:
+ cc = ne;
+ break;
+ case kSignedLessThan:
+ cc = lt;
+ break;
+ case kSignedGreaterThanOrEqual:
+ cc = ge;
+ break;
+ case kSignedLessThanOrEqual:
+ cc = le;
+ break;
+ case kSignedGreaterThan:
+ cc = gt;
+ break;
+ case kUnsignedLessThan:
+ cc = lo;
+ break;
+ case kUnsignedGreaterThanOrEqual:
+ cc = hs;
+ break;
+ case kUnsignedLessThanOrEqual:
+ cc = ls;
+ break;
+ case kUnsignedGreaterThan:
+ cc = hi;
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Cmp32, condition);
+ break;
+ }
- } else if (instr->arch_opcode() == kMipsCmpD) {
+ switch (condition) {
+ case kEqual:
+ case kNotEqual:
+ case kSignedLessThan:
+ case kSignedGreaterThanOrEqual:
+ case kSignedLessThanOrEqual:
+ case kSignedGreaterThan:
+ // Sign-extend the operands: MIPS64 provides branch and compare
+ // instructions only for 64-bit operands.
+ __ sll(left, left, 0);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ sll(i.InputRegister(1), i.InputRegister(1), 0);
+ }
+ break;
+ case kUnsignedLessThan:
+ case kUnsignedGreaterThanOrEqual:
+ case kUnsignedLessThanOrEqual:
+ case kUnsignedGreaterThan:
+ // Zero-extend the operands: MIPS64 provides branch and compare
+ // instructions only for 64-bit operands.
+ __ Dext(left, left, 0, 32);
+ if (instr->InputAt(1)->IsRegister()) {
+ __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
+ }
+ break;
+ default:
+ UNSUPPORTED_COND(kMips64Cmp32, condition);
+ break;
+ }
+ __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
+ __ li(result, Operand(1)); // In delay slot.
+ } else if (instr->arch_opcode() == kMips64CmpD) {
FPURegister left = i.InputDoubleRegister(0);
FPURegister right = i.InputDoubleRegister(1);
// TODO(plind): Provide NaN-testing macro-asm function without need for
@@ -667,7 +926,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
cc = gt;
break;
default:
- UNSUPPORTED_COND(kMipsCmp, condition);
+ UNSUPPORTED_COND(kMips64CmpD, condition);
break;
}
__ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
@@ -682,7 +941,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
}
// Fallthru case is the false materialization.
__ bind(&false_value);
- __ li(result, Operand(0));
+ __ li(result, Operand(static_cast<int64_t>(0)));
__ bind(&done);
}
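
The materialization pattern in this function leans on the MIPS branch delay slot: the li that sets the result to 1 sits in the delay slot and executes whether or not the branch is taken, so only the fallthrough path overwrites the result with 0. The control flow, sketched in C++:

    int Materialize(bool condition_holds) {
      int result = 1;   // li(result, 1) in the delay slot: always runs
      if (condition_holds) {
        goto done;      // Branch(USE_DELAY_SLOT, &done, ...) taken
      }
      result = 0;       // false_value: fallthrough resets the result
    done:
      return result;
    }
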
@@ -715,6 +974,24 @@ void CodeGenerator::AssemblePrologue() {
__ Prologue(info->IsCodePreAgingActive());
frame()->SetRegisterSaveAreaSize(
StandardFrameConstants::kFixedFrameSizeFromFp);
+
+ // Sloppy mode functions and builtins need to replace the receiver with the
+ // global proxy when called as functions (without an explicit receiver
+ // object).
+ // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+ if (info->strict_mode() == SLOPPY && !info->is_native()) {
+ Label ok;
+ // +2 for return address and saved frame pointer.
+ int receiver_slot = info->scope()->num_parameters() + 2;
+ __ ld(a2, MemOperand(fp, receiver_slot * kPointerSize));
+ __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+ __ Branch(&ok, ne, a2, Operand(at));
+
+ __ ld(a2, GlobalObjectOperand());
+ __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
+ __ sd(a2, MemOperand(fp, receiver_slot * kPointerSize));
+ __ bind(&ok);
+ }
} else {
__ StubPrologue();
frame()->SetRegisterSaveAreaSize(
@@ -722,7 +999,7 @@ void CodeGenerator::AssemblePrologue() {
}
int stack_slots = frame()->GetSpillSlotCount();
if (stack_slots > 0) {
- __ Subu(sp, sp, Operand(stack_slots * kPointerSize));
+ __ Dsubu(sp, sp, Operand(stack_slots * kPointerSize));
}
}
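
In the sloppy-mode receiver check added above, the receiver is addressed relative to fp: the parameters sit above the saved frame pointer and the return address, hence the +2 slots. A sketch of the offset computation (illustrative helper; kPointerSize is 8 on MIPS64, which is also why the accesses are ld/sd):

    #include <cstdint>

    int64_t ReceiverOffsetFromFp(int num_parameters) {
      const int kPointerSize = 8;              // 64-bit pointers on MIPS64
      int receiver_slot = num_parameters + 2;  // +2: return address, saved fp
      return static_cast<int64_t>(receiver_slot) * kPointerSize;
    }
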
@@ -734,7 +1011,7 @@ void CodeGenerator::AssembleReturn() {
// Remove this frame's spill slots first.
int stack_slots = frame()->GetSpillSlotCount();
if (stack_slots > 0) {
- __ Addu(sp, sp, Operand(stack_slots * kPointerSize));
+ __ Daddu(sp, sp, Operand(stack_slots * kPointerSize));
}
// Restore registers.
const RegList saves = descriptor->CalleeSavedRegisters();
@@ -767,17 +1044,17 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
if (destination->IsRegister()) {
__ mov(g.ToRegister(destination), src);
} else {
- __ sw(src, g.ToMemOperand(destination));
+ __ sd(src, g.ToMemOperand(destination));
}
} else if (source->IsStackSlot()) {
DCHECK(destination->IsRegister() || destination->IsStackSlot());
MemOperand src = g.ToMemOperand(source);
if (destination->IsRegister()) {
- __ lw(g.ToRegister(destination), src);
+ __ ld(g.ToRegister(destination), src);
} else {
Register temp = kScratchReg;
- __ lw(temp, src);
- __ sw(temp, g.ToMemOperand(destination));
+ __ ld(temp, src);
+ __ sd(temp, g.ToMemOperand(destination));
}
} else if (source->IsConstant()) {
Constant src = g.ToConstant(source);
@@ -792,7 +1069,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
break;
case Constant::kInt64:
- UNREACHABLE();
+ __ li(dst, Operand(src.ToInt64()));
break;
case Constant::kFloat64:
__ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
@@ -804,7 +1081,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ li(dst, src.ToHeapObject());
break;
}
- if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
+ if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
} else if (src.type() == Constant::kFloat32) {
FPURegister dst = destination->IsDoubleRegister()
? g.ToDoubleRegister(destination)
@@ -868,19 +1145,19 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
DCHECK(destination->IsStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ mov(temp, src);
- __ lw(src, dst);
- __ sw(temp, dst);
+ __ ld(src, dst);
+ __ sd(temp, dst);
}
} else if (source->IsStackSlot()) {
DCHECK(destination->IsStackSlot());
Register temp_0 = kScratchReg;
- Register temp_1 = kCompareReg;
+ Register temp_1 = kScratchReg2;
MemOperand src = g.ToMemOperand(source);
MemOperand dst = g.ToMemOperand(destination);
- __ lw(temp_0, src);
- __ lw(temp_1, dst);
- __ sw(temp_0, dst);
- __ sw(temp_1, src);
+ __ ld(temp_0, src);
+ __ ld(temp_1, dst);
+ __ sd(temp_0, dst);
+ __ sd(temp_1, src);
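
The slot-to-slot case above needs two scratch registers because neither operand lives in a register, and with 64-bit stack slots the accesses become ld/sd. The same exchange in C++ terms:

    #include <cstdint>

    void SwapStackSlots(int64_t& src, int64_t& dst) {
      int64_t temp_0 = src;  // ld temp_0, src
      int64_t temp_1 = dst;  // ld temp_1, dst
      dst = temp_0;          // sd temp_0, dst
      src = temp_1;          // sd temp_1, src
    }
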
} else if (source->IsDoubleRegister()) {
FPURegister temp = kScratchDoubleReg;
FPURegister src = g.ToDoubleRegister(source);
