Index: src/mips/macro-assembler-mips.cc
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 604293bb3106642b9729f1da96a091c7cf0f34b0..4335df6cf55b9593f852c8779933c9c7505d3fdc 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -790,6 +790,28 @@ void MacroAssembler::Div(Register rem, Register res,
 }
+void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (!IsMipsArchVariant(kMips32r6)) {
+      div(rs, rt.rm());
+      mflo(res);
+    } else {
+      div(res, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (!IsMipsArchVariant(kMips32r6)) {
+      div(rs, at);
+      mflo(res);
+    } else {
+      div(res, rs, at);
+    }
+  }
+}
+
+
 void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     if (!IsMipsArchVariant(kMips32r6)) {
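
Note on the new Div overload above: on pre-r6 MIPS32, div writes the
quotient to the LO special register and the remainder to HI, so the
quotient must be fetched with mflo; MIPS32r6 removes HI/LO and provides a
three-operand div that writes the quotient directly. A usage sketch
(the register choices and the masm pointer are illustrative, not part of
this patch):

    // Signed quotient a0 / a1 into v0.
    // pre-r6 expansion: div a0, a1; mflo v0
    // r6 expansion:     div v0, a0, a1
    masm->Div(v0, a0, Operand(a1));
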
@@ -812,6 +834,28 @@ void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
 }
+void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (!IsMipsArchVariant(kMips32r6)) {
+      divu(rs, rt.rm());
+      mfhi(rd);
+    } else {
+      modu(rd, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (!IsMipsArchVariant(kMips32r6)) {
+      divu(rs, at);
+      mfhi(rd);
+    } else {
+      modu(rd, rs, at);
+    }
+  }
+}
+
+
 void MacroAssembler::Divu(Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     divu(rs, rt.rm());
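
Note on Modu: it is the unsigned counterpart of Mod above and reads the
remainder from HI (mfhi) on pre-r6 cores. For an immediate operand the
macro first materializes the value in at via li, which is why rs must not
alias at (hence the DCHECK). A sketch with illustrative registers:

    // Unsigned remainder a0 % 42 into v0.
    // pre-r6 expansion: li at, 42; divu a0, at; mfhi v0
    // r6 expansion:     li at, 42; modu v0, a0, at
    masm->Modu(v0, a0, Operand(42));
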
@@ -824,6 +868,28 @@ void MacroAssembler::Divu(Register rs, const Operand& rt) {
 }
+void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (!IsMipsArchVariant(kMips32r6)) {
+      divu(rs, rt.rm());
+      mflo(res);
+    } else {
+      divu(res, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (!IsMipsArchVariant(kMips32r6)) {
+      divu(rs, at);
+      mflo(res);
+    } else {
+      divu(res, rs, at);
+    }
+  }
+}
+
+
 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     and_(rd, rs, rt.rm());
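
Note on the three-register Divu overload: it mirrors Div but uses the
unsigned divu, reading the quotient from LO on pre-r6 and using the
three-operand r6 form otherwise. As with Div and Modu, an immediate
operand is loaded into at first, so rs must not be at. An illustrative
call (not from this patch):

    // Unsigned quotient a0 / 1000 into v0; expands to
    // li at, 1000; divu a0, at; mflo v0 (pre-r6) or divu v0, a0, at (r6).
    masm->Divu(v0, a0, Operand(1000));
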
@@ -1904,7 +1970,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
       // Unsigned comparison.
       case Ugreater:
         if (r2.is(zero_reg)) {
-          bgtz(rs, offset);
+          bne(rs, zero_reg, offset);
         } else {
           sltu(scratch, r2, rs);
           bne(scratch, zero_reg, offset);
@@ -1912,7 +1978,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
         break;
       case Ugreater_equal:
         if (r2.is(zero_reg)) {
-          bgez(rs, offset);
+          b(offset);
         } else {
           sltu(scratch, rs, r2);
           beq(scratch, zero_reg, offset);
@@ -1929,7 +1995,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
         break;
       case Uless_equal:
         if (r2.is(zero_reg)) {
-          b(offset);
+          beq(rs, zero_reg, offset);
         } else {
           sltu(scratch, r2, rs);
           beq(scratch, zero_reg, offset);
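
Why the branch substitutions above are correct: bgtz and bgez test the
signed value of rs, which is wrong for the unsigned conditions. Against
zero, unsigned "greater" is simply "nonzero" (bne), unsigned
"greater-or-equal" always holds (unconditional b), and unsigned
"less-or-equal" is "equal to zero" (beq). A host-side C++ check of the
input the old code mishandled (a sketch, not V8 code):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t rs = 0x80000000u;
      assert(rs > 0u);  // Ugreater vs. zero: the branch must be taken.
      // But bgtz looks at the signed value, which is negative here,
      // so the old code fell through on exactly this input.
      assert(static_cast<int32_t>(rs) <= 0);
      return 0;
    }
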
@@ -2011,7 +2077,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
       // Unsigned comparison.
       case Ugreater:
         if (rt.imm32_ == 0) {
-          bgtz(rs, offset);
+          bne(rs, zero_reg, offset);
         } else {
           r2 = scratch;
           li(r2, rt);
@@ -2021,7 +2087,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
         break;
       case Ugreater_equal:
         if (rt.imm32_ == 0) {
-          bgez(rs, offset);
+          b(offset);
         } else if (is_int16(rt.imm32_)) {
           sltiu(scratch, rs, rt.imm32_);
           beq(scratch, zero_reg, offset);
@@ -2048,7 +2114,7 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
         break;
       case Uless_equal:
         if (rt.imm32_ == 0) {
-          b(offset);
+          beq(rs, zero_reg, offset);
         } else {
           r2 = scratch;
           li(r2, rt);
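
The same reasoning applies to the immediate-operand cases above; when
rt.imm32_ is zero, the three unsigned conditions reduce as follows:

    Condition           Meaning vs. zero (unsigned)   Branch emitted
    Ugreater            rs != 0                       bne rs, zero_reg
    Ugreater_equal      always true                   b (unconditional)
    Uless_equal         rs == 0                       beq rs, zero_reg
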
@@ -2150,7 +2216,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
       case Ugreater:
         if (r2.is(zero_reg)) {
           offset = shifted_branch_offset(L, false);
-          bgtz(rs, offset);
+          bne(rs, zero_reg, offset);
         } else {
           sltu(scratch, r2, rs);
           offset = shifted_branch_offset(L, false);
@@ -2160,7 +2226,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
       case Ugreater_equal:
         if (r2.is(zero_reg)) {
           offset = shifted_branch_offset(L, false);
-          bgez(rs, offset);
+          b(offset);
         } else {
           sltu(scratch, rs, r2);
           offset = shifted_branch_offset(L, false);
@@ -2180,7 +2246,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
       case Uless_equal:
         if (r2.is(zero_reg)) {
           offset = shifted_branch_offset(L, false);
-          b(offset);
+          beq(rs, zero_reg, offset);
         } else {
           sltu(scratch, r2, rs);
           offset = shifted_branch_offset(L, false);
@@ -2292,7 +2358,7 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
       case Ugreater_equal:
         if (rt.imm32_ == 0) {
           offset = shifted_branch_offset(L, false);
-          bgez(rs, offset);
+          b(offset);
         } else if (is_int16(rt.imm32_)) {
           sltiu(scratch, rs, rt.imm32_);
           offset = shifted_branch_offset(L, false);
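
The hunks above apply the identical fix to the Label-taking BranchShort
overloads, which compute the offset from the label just before emitting
the branch. A hypothetical call site affected by the Uless_equal change
(names are illustrative):

    // Before the fix this branched unconditionally; now it is taken
    // only when a0 == 0, as unsigned a0 <= 0 requires.
    masm->Branch(&done, Uless_equal, a0, Operand(zero_reg));
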
@@ -4477,8 +4543,34 @@ void MacroAssembler::SmiToDoubleFPURegister(Register smi,
 }
-void MacroAssembler::AdduAndCheckForOverflow(Register dst,
-                                             Register left,
+void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
+                                             const Operand& right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  if (right.is_reg()) {
+    AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+  } else {
+    if (dst.is(left)) {
+      mov(scratch, left);  // Preserve left.
+      addiu(dst, left, right.immediate());  // Left is overwritten.
+      xor_(scratch, dst, scratch);  // scratch = dst ^ original left.
+      // Load right since xori takes uint16 as immediate.
+      addiu(t9, zero_reg, right.immediate());
+      xor_(overflow_dst, dst, t9);
+      and_(overflow_dst, overflow_dst, scratch);
+    } else {
+      addiu(dst, left, right.immediate());
+      xor_(overflow_dst, dst, left);
+      // Load right since xori takes uint16 as immediate.
+      addiu(t9, zero_reg, right.immediate());
+      xor_(scratch, dst, t9);
+      and_(overflow_dst, scratch, overflow_dst);
+    }
+  }
+}
+
+
+void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
                                              Register right,
                                              Register overflow_dst,
                                              Register scratch) {
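
The xor/and sequence in the new AdduAndCheckForOverflow overload is the
standard two's-complement test: signed addition overflows exactly when
both operands have the sign opposite to the result's, i.e. when
(dst ^ left) & (dst ^ right) has its sign bit set, which is what
overflow_dst ends up holding. Note that addiu encodes only a signed
16-bit immediate, so the immediate path relies on callers passing values
in that range; t9 serves as a second scratch because xori would
zero-extend the constant. A host-side sketch of the bit trick (not V8
code):

    #include <cassert>
    #include <cstdint>

    // True iff left + right overflows 32-bit signed arithmetic.
    static bool AddOverflows(int32_t left, int32_t right) {
      int32_t dst = static_cast<int32_t>(static_cast<uint32_t>(left) +
                                         static_cast<uint32_t>(right));
      return ((dst ^ left) & (dst ^ right)) < 0;  // sign bit set => overflow
    }

    int main() {
      assert(AddOverflows(INT32_MAX, 1));  // wraps to INT32_MIN
      assert(!AddOverflows(-1, 1));        // sums to 0, no overflow
      return 0;
    }
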
@@ -4519,8 +4611,34 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst,
 }
-void MacroAssembler::SubuAndCheckForOverflow(Register dst,
-                                             Register left,
+void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
+                                             const Operand& right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  if (right.is_reg()) {
+    SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+  } else {
+    if (dst.is(left)) {
+      mov(scratch, left);  // Preserve left.
+      addiu(dst, left, -(right.immediate()));  // Left is overwritten.
+      xor_(overflow_dst, dst, scratch);  // scratch is original left.
+      // Load right since xori takes uint16 as immediate.
+      addiu(t9, zero_reg, right.immediate());
+      xor_(scratch, scratch, t9);  // scratch = original left ^ right.
+      and_(overflow_dst, scratch, overflow_dst);
+    } else {
+      addiu(dst, left, -(right.immediate()));
+      xor_(overflow_dst, dst, left);
+      // Load right since xori takes uint16 as immediate.
+      addiu(t9, zero_reg, right.immediate());
+      xor_(scratch, left, t9);
+      and_(overflow_dst, scratch, overflow_dst);
+    }
+  }
+}
+
+
+void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
                                              Register right,
                                              Register overflow_dst,
                                              Register scratch) {
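
SubuAndCheckForOverflow uses the subtraction variant of the same test:
overflow occurs exactly when the operands have different signs and the
result's sign differs from left's, i.e. when (dst ^ left) & (left ^ right)
is negative. The immediate path subtracts by adding the negated constant
(addiu with -right.immediate()). A host-side sketch (not V8 code):

    #include <cassert>
    #include <cstdint>

    // True iff left - right overflows 32-bit signed arithmetic.
    static bool SubOverflows(int32_t left, int32_t right) {
      int32_t dst = static_cast<int32_t>(static_cast<uint32_t>(left) -
                                         static_cast<uint32_t>(right));
      return ((dst ^ left) & (left ^ right)) < 0;
    }

    int main() {
      assert(SubOverflows(INT32_MIN, 1));  // wraps to INT32_MAX
      assert(!SubOverflows(3, 5));         // -2 is representable
      return 0;
    }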