Chromium Code Reviews
| Index: src/mips64/macro-assembler-mips64.cc |
| diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc |
| index 16b4ba9d5346a4399774bc024cfd6d1826ffcfc3..c26c43f6f75cbe4eff0d38c970ce7308ca7c6fe4 100644 |
| --- a/src/mips64/macro-assembler-mips64.cc |
| +++ b/src/mips64/macro-assembler-mips64.cc |
| @@ -695,11 +695,35 @@ void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { |
| } |
| +void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) { |
| + if (rt.is_reg()) { |
| + if (kArchVariant != kMips64r6) { |
| + mult(rs, rt.rm()); |
| + mfhi(rd); |
| + } else { |
| + muh(rd, rs, rt.rm()); |
| + } |
| + } else { |
| + // li handles the relocation. |
| + ASSERT(!rs.is(at)); |
| + li(at, rt); |
| + if (kArchVariant != kMips64r6) { |
| + mult(rs, at); |
| + mfhi(rd); |
| + } else { |
| + muh(rd, rs, at); |
| + } |
| + } |
| +} |
| + |
| + |
| void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| if (kArchVariant == kLoongson) { |
| dmult(rs, rt.rm()); |
| mflo(rd); |
| + } else if (kArchVariant == kMips64r6) { |
| + dmul(rd, rs, rt.rm()); |
| } else { |
| // TODO(yuyin): |
| // dmul(rd, rs, rt.rm()); |
| @@ -713,6 +737,8 @@ void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) { |
| if (kArchVariant == kLoongson) { |
| dmult(rs, at); |
| mflo(rd); |
| + } else if (kArchVariant == kMips64r6) { |
| + dmul(rd, rs, at); |
| } else { |
| // TODO(yuyin): |
| // dmul(rd, rs, at); |
| @@ -723,6 +749,38 @@ void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) { |
| } |
| +void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) { |
| + if (rt.is_reg()) { |
| + if (kArchVariant == kLoongson) { |
| + dmult(rs, rt.rm()); |
| + mfhi(rd); |
| + } else if (kArchVariant == kMips64r6) { |
| + dmuh(rd, rs, rt.rm()); |
| + } else { |
| + // TODO(yuyin): |
| + // dmul(rd, rs, rt.rm()); |
|
paul.l...
2014/07/29 14:58:04
I think we can now remove this TODO, and the one just below.
dusmil.imgtec
2014/07/29 17:39:12
Done.
|
| + dmult(rs, rt.rm()); |
| + mfhi(rd); |
| + } |
| + } else { |
| + // li handles the relocation. |
| + ASSERT(!rs.is(at)); |
| + li(at, rt); |
| + if (kArchVariant == kLoongson) { |
| + dmult(rs, at); |
| + mfhi(rd); |
| + } else if (kArchVariant == kMips64r6) { |
| + dmuh(rd, rs, at); |
| + } else { |
| + // TODO(yuyin): |
| + // dmul(rd, rs, at); |
| + dmult(rs, at); |
| + mfhi(rd); |
| + } |
| + } |
| +} |
| + |
| + |
| void MacroAssembler::Mult(Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| mult(rs, rt.rm()); |
| @@ -795,6 +853,31 @@ void MacroAssembler::Ddiv(Register rs, const Operand& rt) { |
| } |
| +void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) { |
| + if (kArchVariant != kMips64r6) { |
| + if (rt.is_reg()) { |
| + ddiv(rs, rt.rm()); |
| + mflo(rd); |
| + } else { |
| + // li handles the relocation. |
| + ASSERT(!rs.is(at)); |
| + li(at, rt); |
| + ddiv(rs, at); |
| + mflo(rd); |
| + } |
| + } else { |
| + if (rt.is_reg()) { |
| + ddiv(rd, rs, rt.rm()); |
| + } else { |
| + // li handles the relocation. |
| + ASSERT(!rs.is(at)); |
| + li(at, rt); |
| + ddiv(rd, rs, at); |
| + } |
| + } |
| +} |
| + |
| + |
| void MacroAssembler::Divu(Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| divu(rs, rt.rm()); |
| @@ -819,6 +902,31 @@ void MacroAssembler::Ddivu(Register rs, const Operand& rt) { |
| } |
| +void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) { |
| + if (kArchVariant != kMips64r6) { |
| + if (rt.is_reg()) { |
| + ddiv(rs, rt.rm()); |
| + mfhi(rd); |
| + } else { |
| + // li handles the relocation. |
| + ASSERT(!rs.is(at)); |
| + li(at, rt); |
| + ddiv(rs, at); |
| + mfhi(rd); |
| + } |
| + } else { |
| + if (rt.is_reg()) { |
| + dmod(rd, rs, rt.rm()); |
| + } else { |
| + // li handles the relocation. |
| + ASSERT(!rs.is(at)); |
| + li(at, rt); |
| + dmod(rd, rs, at); |
| + } |
| + } |
| +} |
| + |
| + |
| void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { |
| if (rt.is_reg()) { |
| and_(rd, rs, rt.rm()); |
| @@ -1384,49 +1492,102 @@ void MacroAssembler::BranchF(Label* target, |
| ASSERT(nan || target); |
| // Check for unordered (NaN) cases. |
| if (nan) { |
| - c(UN, D, cmp1, cmp2); |
| - bc1t(nan); |
| - } |
| - |
| - if (target) { |
| - // Here NaN cases were either handled by this function or are assumed to |
| - // have been handled by the caller. |
| - // Unsigned conditions are treated as their signed counterpart. |
| - switch (cc) { |
| - case lt: |
| - c(OLT, D, cmp1, cmp2); |
| - bc1t(target); |
| - break; |
| - case gt: |
| - c(ULE, D, cmp1, cmp2); |
| - bc1f(target); |
| - break; |
| - case ge: |
| - c(ULT, D, cmp1, cmp2); |
| - bc1f(target); |
| - break; |
| - case le: |
| - c(OLE, D, cmp1, cmp2); |
| - bc1t(target); |
| - break; |
| - case eq: |
| - c(EQ, D, cmp1, cmp2); |
| - bc1t(target); |
| - break; |
| - case ueq: |
| - c(UEQ, D, cmp1, cmp2); |
| - bc1t(target); |
| - break; |
| - case ne: |
| - c(EQ, D, cmp1, cmp2); |
| - bc1f(target); |
| - break; |
| - case nue: |
| - c(UEQ, D, cmp1, cmp2); |
| - bc1f(target); |
| - break; |
| - default: |
| - CHECK(0); |
| + if (kArchVariant != kMips64r6) { |
| + c(UN, D, cmp1, cmp2); |
| + bc1t(nan); |
| + } else { |
| + // Use f31 for comparison result. It has to be unavailable to lithium |
| + // register allocator. |
| + ASSERT(!cmp1.is(f31) && !cmp2.is(f31)); |
| + cmp(UN, L, f31, cmp1, cmp2); |
| + bc1nez(nan, f31); |
| + } |
| + } |
| + |
| + if (kArchVariant != kMips64r6) { |
| + if (target) { |
| + // Here NaN cases were either handled by this function or are assumed to |
| + // have been handled by the caller. |
| + switch (cc) { |
| + case lt: |
| + c(OLT, D, cmp1, cmp2); |
| + bc1t(target); |
| + break; |
| + case gt: |
| + c(ULE, D, cmp1, cmp2); |
| + bc1f(target); |
| + break; |
| + case ge: |
| + c(ULT, D, cmp1, cmp2); |
| + bc1f(target); |
| + break; |
| + case le: |
| + c(OLE, D, cmp1, cmp2); |
| + bc1t(target); |
| + break; |
| + case eq: |
| + c(EQ, D, cmp1, cmp2); |
| + bc1t(target); |
| + break; |
| + case ueq: |
| + c(UEQ, D, cmp1, cmp2); |
| + bc1t(target); |
| + break; |
| + case ne: |
| + c(EQ, D, cmp1, cmp2); |
| + bc1f(target); |
| + break; |
| + case nue: |
| + c(UEQ, D, cmp1, cmp2); |
| + bc1f(target); |
| + break; |
| + default: |
| + CHECK(0); |
| + } |
| + } |
| + } else { |
| + if (target) { |
| + // Here NaN cases were either handled by this function or are assumed to |
| + // have been handled by the caller. |
| + // Unsigned conditions are treated as their signed counterpart. |
| + // Use f31 for comparison result, it is valid in fp64 (FR = 1) mode. |
| + ASSERT(!cmp1.is(f31) && !cmp2.is(f31)); |
| + switch (cc) { |
| + case lt: |
| + cmp(OLT, L, f31, cmp1, cmp2); |
| + bc1nez(target, f31); |
| + break; |
| + case gt: |
| + cmp(ULE, L, f31, cmp1, cmp2); |
| + bc1eqz(target, f31); |
| + break; |
| + case ge: |
| + cmp(ULT, L, f31, cmp1, cmp2); |
| + bc1eqz(target, f31); |
| + break; |
| + case le: |
| + cmp(OLE, L, f31, cmp1, cmp2); |
| + bc1nez(target, f31); |
| + break; |
| + case eq: |
| + cmp(EQ, L, f31, cmp1, cmp2); |
| + bc1nez(target, f31); |
| + break; |
| + case ueq: |
| + cmp(UEQ, L, f31, cmp1, cmp2); |
| + bc1nez(target, f31); |
| + break; |
| + case ne: |
| + cmp(EQ, L, f31, cmp1, cmp2); |
| + bc1eqz(target, f31); |
| + break; |
| + case nue: |
| + cmp(UEQ, L, f31, cmp1, cmp2); |
| + bc1eqz(target, f31); |
| + break; |
| + default: |
| + CHECK(0); |
| + } |
| } |
| } |
| @@ -1470,7 +1631,7 @@ void MacroAssembler::Move(FPURegister dst, double imm) { |
| void MacroAssembler::Movz(Register rd, Register rs, Register rt) { |
| - if (kArchVariant == kLoongson) { |
| + if (kArchVariant == kLoongson || kArchVariant == kMips64r6) { |
| Label done; |
| Branch(&done, ne, rt, Operand(zero_reg)); |
| mov(rd, rs); |
| @@ -1482,7 +1643,7 @@ void MacroAssembler::Movz(Register rd, Register rs, Register rt) { |
| void MacroAssembler::Movn(Register rd, Register rs, Register rt) { |
| - if (kArchVariant == kLoongson) { |
| + if (kArchVariant == kLoongson || kArchVariant == kMips64r6) { |
| Label done; |
| Branch(&done, eq, rt, Operand(zero_reg)); |
| mov(rd, rs); |
| @@ -2371,48 +2532,64 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond, |
| // Signed comparison. |
| case greater: |
| + // rs > rt |
| slt(scratch, r2, rs); |
| - daddiu(scratch, scratch, -1); |
| - bgezal(scratch, offset); |
| + beq(scratch, zero_reg, 2); |
| + nop(); |
| + bal(offset); |
| break; |
| case greater_equal: |
| + // rs >= rt |
| slt(scratch, rs, r2); |
| - daddiu(scratch, scratch, -1); |
| - bltzal(scratch, offset); |
| + bne(scratch, zero_reg, 2); |
| + nop(); |
| + bal(offset); |
| break; |
| case less: |
| + // rs < r2 |
| slt(scratch, rs, r2); |
| - daddiu(scratch, scratch, -1); |
| - bgezal(scratch, offset); |
| + bne(scratch, zero_reg, 2); |
| + nop(); |
| + bal(offset); |
| break; |
| case less_equal: |
| + // rs <= r2 |
| slt(scratch, r2, rs); |
| - daddiu(scratch, scratch, -1); |
| - bltzal(scratch, offset); |
| + bne(scratch, zero_reg, 2); |
| + nop(); |
| + bal(offset); |
| break; |
| + |
| // Unsigned comparison. |
| case Ugreater: |
| + // rs > rt |
| sltu(scratch, r2, rs); |
| - daddiu(scratch, scratch, -1); |
| - bgezal(scratch, offset); |
| + beq(scratch, zero_reg, 2); |
| + nop(); |
| + bal(offset); |
| break; |
| case Ugreater_equal: |
| + // rs >= rt |
| sltu(scratch, rs, r2); |
| - daddiu(scratch, scratch, -1); |
| - bltzal(scratch, offset); |
| + bne(scratch, zero_reg, 2); |
| + nop(); |
| + bal(offset); |
| break; |
| case Uless: |
| + // rs < r2 |
| sltu(scratch, rs, r2); |
| - daddiu(scratch, scratch, -1); |
| - bgezal(scratch, offset); |
| + bne(scratch, zero_reg, 2); |
| + nop(); |
| + bal(offset); |
| break; |
| case Uless_equal: |
| + // rs <= r2 |
| sltu(scratch, r2, rs); |
| - daddiu(scratch, scratch, -1); |
| - bltzal(scratch, offset); |
| + bne(scratch, zero_reg, 2); |
| + nop(); |
| + bal(offset); |
| break; |
| - |
| default: |
| UNREACHABLE(); |
| } |
| @@ -2469,54 +2646,71 @@ void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs, |
| // Signed comparison. |
| case greater: |
| + // rs > rt |
| slt(scratch, r2, rs); |
| - daddiu(scratch, scratch, -1); |
| + beq(scratch, zero_reg, 2); |
| + nop(); |
| offset = shifted_branch_offset(L, false); |
| - bgezal(scratch, offset); |
| + bal(offset); |
| break; |
| case greater_equal: |
| + // rs >= rt |
| slt(scratch, rs, r2); |
| - daddiu(scratch, scratch, -1); |
| + bne(scratch, zero_reg, 2); |
| + nop(); |
| offset = shifted_branch_offset(L, false); |
| - bltzal(scratch, offset); |
| + bal(offset); |
| break; |
| case less: |
| + // rs < r2 |
| slt(scratch, rs, r2); |
| - daddiu(scratch, scratch, -1); |
| + bne(scratch, zero_reg, 2); |
| + nop(); |
| offset = shifted_branch_offset(L, false); |
| - bgezal(scratch, offset); |
| + bal(offset); |
| break; |
| case less_equal: |
| + // rs <= r2 |
| slt(scratch, r2, rs); |
| - daddiu(scratch, scratch, -1); |
| + bne(scratch, zero_reg, 2); |
| + nop(); |
| offset = shifted_branch_offset(L, false); |
| - bltzal(scratch, offset); |
| + bal(offset); |
| break; |
| + |
| // Unsigned comparison. |
| case Ugreater: |
| + // rs > rt |
| sltu(scratch, r2, rs); |
| - daddiu(scratch, scratch, -1); |
| + beq(scratch, zero_reg, 2); |
| + nop(); |
| offset = shifted_branch_offset(L, false); |
| - bgezal(scratch, offset); |
| + bal(offset); |
| break; |
| case Ugreater_equal: |
| + // rs >= rt |
| sltu(scratch, rs, r2); |
| - daddiu(scratch, scratch, -1); |
| + bne(scratch, zero_reg, 2); |
| + nop(); |
| offset = shifted_branch_offset(L, false); |
| - bltzal(scratch, offset); |
| + bal(offset); |
| break; |
| case Uless: |
| + // rs < r2 |
| sltu(scratch, rs, r2); |
| - daddiu(scratch, scratch, -1); |
| + bne(scratch, zero_reg, 2); |
| + nop(); |
| offset = shifted_branch_offset(L, false); |
| - bgezal(scratch, offset); |
| + bal(offset); |
| break; |
| case Uless_equal: |
| + // rs <= r2 |
| sltu(scratch, r2, rs); |
| - daddiu(scratch, scratch, -1); |
| + bne(scratch, zero_reg, 2); |
| + nop(); |
| offset = shifted_branch_offset(L, false); |
| - bltzal(scratch, offset); |
| + bal(offset); |
| break; |
| default: |
| @@ -5455,10 +5649,7 @@ void MacroAssembler::CheckPageFlag( |
| int mask, |
| Condition cc, |
| Label* condition_met) { |
| - // TODO(plind): Fix li() so we can use constant embedded inside And(). |
| - // And(scratch, object, Operand(~Page::kPageAlignmentMask)); |
| - li(at, Operand(~Page::kPageAlignmentMask), CONSTANT_SIZE); // plind HACK |
| - And(scratch, object, at); |
| + And(scratch, object, Operand(~Page::kPageAlignmentMask)); |
| ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); |
| And(scratch, scratch, Operand(mask)); |
| Branch(condition_met, cc, scratch, Operand(zero_reg)); |
| @@ -5932,8 +6123,7 @@ void MacroAssembler::TruncatingDiv(Register result, |
| ASSERT(!result.is(at)); |
| MultiplierAndShift ms(divisor); |
| li(at, Operand(ms.multiplier())); |
| - Mult(dividend, Operand(at)); |
| - mfhi(result); |
| + Mulh(result, dividend, Operand(at)); |
| if (divisor > 0 && ms.multiplier() < 0) { |
| Addu(result, result, Operand(dividend)); |
| } |