Index: src/mips64/macro-assembler-mips64.cc |
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc |
index 709db51dc81237f902104ebefd219c77de47ea33..1dbd69eb1a4c1aba14b0083c73560371905ef307 100644 |
--- a/src/mips64/macro-assembler-mips64.cc |
+++ b/src/mips64/macro-assembler-mips64.cc |
@@ -1855,7 +1855,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, |
c(UN, sizeField, cmp1, cmp2); |
bc1f(&skip); |
nop(); |
- J(nan, bd); |
+ BranchLong(nan, bd); |
bind(&skip); |
} else { |
c(UN, sizeField, cmp1, cmp2); |
@@ -1874,7 +1874,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, |
cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2); |
bc1eqz(&skip, kDoubleCompareReg); |
nop(); |
- J(nan, bd); |
+ BranchLong(nan, bd); |
bind(&skip); |
} else { |
cmp(UN, sizeField, kDoubleCompareReg, cmp1, cmp2); |
@@ -1893,7 +1893,7 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target, |
Label skip; |
Condition neg_cond = NegateFpuCondition(cond); |
BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd); |
- J(target, bd); |
+ BranchLong(target, bd); |
bind(&skip); |
} else { |
BranchShortF(sizeField, target, cond, cmp1, cmp2, bd); |
@@ -2312,28 +2312,30 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst, |
(cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg)))) |
-void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) { |
+void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) { |
+ DCHECK(kArchVariant == kMips64r6 ? is_int26(offset) : is_int16(offset)); |
BranchShort(offset, bdslot); |
} |
-void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs, |
- const Operand& rt, |
- BranchDelaySlot bdslot) { |
- BranchShort(offset, cond, rs, rt, bdslot); |
+void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs, |
+ const Operand& rt, BranchDelaySlot bdslot) { |
+ bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot); |
+ DCHECK(is_near); |
+ USE(is_near); |
} |
void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) { |
if (L->is_bound()) { |
- if (is_near(L)) { |
+ if (is_near_branch(L)) { |
BranchShort(L, bdslot); |
} else { |
- J(L, bdslot); |
+ BranchLong(L, bdslot); |
} |
} else { |
if (is_trampoline_emitted()) { |
- J(L, bdslot); |
+ BranchLong(L, bdslot); |
} else { |
BranchShort(L, bdslot); |
} |
@@ -2345,17 +2347,15 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs, |
const Operand& rt, |
BranchDelaySlot bdslot) { |
if (L->is_bound()) { |
- if (is_near(L)) { |
- BranchShort(L, cond, rs, rt, bdslot); |
- } else { |
+ if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) { |
if (cond != cc_always) { |
Label skip; |
Condition neg_cond = NegateCondition(cond); |
BranchShort(&skip, neg_cond, rs, rt); |
- J(L, bdslot); |
+ BranchLong(L, bdslot); |
bind(&skip); |
} else { |
- J(L, bdslot); |
+ BranchLong(L, bdslot); |
} |
} |
} else { |
@@ -2364,10 +2364,10 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs, |
Label skip; |
Condition neg_cond = NegateCondition(cond); |
BranchShort(&skip, neg_cond, rs, rt); |
- J(L, bdslot); |
+ BranchLong(L, bdslot); |
bind(&skip); |
} else { |
- J(L, bdslot); |
+ BranchLong(L, bdslot); |
} |
} else { |
BranchShort(L, cond, rs, rt, bdslot); |
@@ -2386,7 +2386,10 @@ void MacroAssembler::Branch(Label* L, |
} |
-void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) { |
+void MacroAssembler::BranchShortHelper(int16_t offset, Label* L, |
+ BranchDelaySlot bdslot) { |
+ DCHECK(L == nullptr || offset == 0); |
+ offset = GetOffset(offset, L, OffsetSize::kOffset16); |
b(offset); |
// Emit a nop in the branch delay slot if required. |
@@ -2395,549 +2398,544 @@ void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) { |
} |
-void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs, |
- const Operand& rt, |
- BranchDelaySlot bdslot) { |
- BRANCH_ARGS_CHECK(cond, rs, rt); |
- DCHECK(!rs.is(zero_reg)); |
- Register r2 = no_reg; |
- Register scratch = at; |
+void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) { |
+ DCHECK(L == nullptr || offset == 0); |
+ offset = GetOffset(offset, L, OffsetSize::kOffset26); |
+ bc(offset); |
+} |
- if (rt.is_reg()) { |
- // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or |
- // rt. |
- BlockTrampolinePoolScope block_trampoline_pool(this); |
- r2 = rt.rm_; |
- switch (cond) { |
- case cc_always: |
- b(offset); |
- break; |
- case eq: |
- beq(rs, r2, offset); |
- break; |
- case ne: |
- bne(rs, r2, offset); |
- break; |
- // Signed comparison. |
- case greater: |
- if (r2.is(zero_reg)) { |
- bgtz(rs, offset); |
- } else { |
- slt(scratch, r2, rs); |
- bne(scratch, zero_reg, offset); |
- } |
- break; |
- case greater_equal: |
- if (r2.is(zero_reg)) { |
- bgez(rs, offset); |
- } else { |
- slt(scratch, rs, r2); |
- beq(scratch, zero_reg, offset); |
- } |
- break; |
- case less: |
- if (r2.is(zero_reg)) { |
- bltz(rs, offset); |
- } else { |
- slt(scratch, rs, r2); |
- bne(scratch, zero_reg, offset); |
- } |
- break; |
- case less_equal: |
- if (r2.is(zero_reg)) { |
- blez(rs, offset); |
- } else { |
- slt(scratch, r2, rs); |
- beq(scratch, zero_reg, offset); |
- } |
- break; |
- // Unsigned comparison. |
- case Ugreater: |
- if (r2.is(zero_reg)) { |
- bne(rs, zero_reg, offset); |
- } else { |
- sltu(scratch, r2, rs); |
- bne(scratch, zero_reg, offset); |
- } |
- break; |
- case Ugreater_equal: |
- if (r2.is(zero_reg)) { |
- b(offset); |
- } else { |
- sltu(scratch, rs, r2); |
- beq(scratch, zero_reg, offset); |
- } |
- break; |
- case Uless: |
- if (r2.is(zero_reg)) { |
- // No code needs to be emitted. |
- return; |
- } else { |
- sltu(scratch, rs, r2); |
- bne(scratch, zero_reg, offset); |
- } |
- break; |
- case Uless_equal: |
- if (r2.is(zero_reg)) { |
- beq(rs, zero_reg, offset); |
- } else { |
- sltu(scratch, r2, rs); |
- beq(scratch, zero_reg, offset); |
- } |
- break; |
- default: |
- UNREACHABLE(); |
- } |
+ |
+void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) { |
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) { |
+ DCHECK(is_int26(offset)); |
+ BranchShortHelperR6(offset, nullptr); |
} else { |
- // Be careful to always use shifted_branch_offset only just before the |
- // branch instruction, as the location will be remember for patching the |
- // target. |
- BlockTrampolinePoolScope block_trampoline_pool(this); |
- switch (cond) { |
- case cc_always: |
- b(offset); |
- break; |
- case eq: |
- if (rt.imm64_ == 0) { |
- beq(rs, zero_reg, offset); |
- } else { |
- // We don't want any other register but scratch clobbered. |
- DCHECK(!scratch.is(rs)); |
- r2 = scratch; |
- li(r2, rt); |
- beq(rs, r2, offset); |
- } |
- break; |
- case ne: |
- if (rt.imm64_ == 0) { |
- bne(rs, zero_reg, offset); |
- } else { |
- // We don't want any other register but scratch clobbered. |
- DCHECK(!scratch.is(rs)); |
- r2 = scratch; |
- li(r2, rt); |
- bne(rs, r2, offset); |
- } |
- break; |
- // Signed comparison. |
- case greater: |
- if (rt.imm64_ == 0) { |
- bgtz(rs, offset); |
- } else { |
- r2 = scratch; |
- li(r2, rt); |
- slt(scratch, r2, rs); |
- bne(scratch, zero_reg, offset); |
- } |
- break; |
- case greater_equal: |
- if (rt.imm64_ == 0) { |
- bgez(rs, offset); |
- } else if (is_int16(rt.imm64_)) { |
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
- beq(scratch, zero_reg, offset); |
- } else { |
- r2 = scratch; |
- li(r2, rt); |
- slt(scratch, rs, r2); |
- beq(scratch, zero_reg, offset); |
- } |
- break; |
- case less: |
- if (rt.imm64_ == 0) { |
- bltz(rs, offset); |
- } else if (is_int16(rt.imm64_)) { |
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
- bne(scratch, zero_reg, offset); |
- } else { |
- r2 = scratch; |
- li(r2, rt); |
- slt(scratch, rs, r2); |
- bne(scratch, zero_reg, offset); |
- } |
- break; |
- case less_equal: |
- if (rt.imm64_ == 0) { |
- blez(rs, offset); |
- } else { |
- r2 = scratch; |
- li(r2, rt); |
- slt(scratch, r2, rs); |
- beq(scratch, zero_reg, offset); |
- } |
- break; |
- // Unsigned comparison. |
- case Ugreater: |
- if (rt.imm64_ == 0) { |
- bne(rs, zero_reg, offset); |
- } else { |
- r2 = scratch; |
- li(r2, rt); |
- sltu(scratch, r2, rs); |
- bne(scratch, zero_reg, offset); |
- } |
- break; |
- case Ugreater_equal: |
- if (rt.imm64_ == 0) { |
- b(offset); |
- } else if (is_int16(rt.imm64_)) { |
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
- beq(scratch, zero_reg, offset); |
- } else { |
- r2 = scratch; |
- li(r2, rt); |
- sltu(scratch, rs, r2); |
- beq(scratch, zero_reg, offset); |
- } |
- break; |
- case Uless: |
- if (rt.imm64_ == 0) { |
- // No code needs to be emitted. |
- return; |
- } else if (is_int16(rt.imm64_)) { |
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
- bne(scratch, zero_reg, offset); |
- } else { |
- r2 = scratch; |
- li(r2, rt); |
- sltu(scratch, rs, r2); |
- bne(scratch, zero_reg, offset); |
- } |
- break; |
- case Uless_equal: |
- if (rt.imm64_ == 0) { |
- beq(rs, zero_reg, offset); |
- } else { |
- r2 = scratch; |
- li(r2, rt); |
- sltu(scratch, r2, rs); |
- beq(scratch, zero_reg, offset); |
- } |
- break; |
- default: |
- UNREACHABLE(); |
- } |
+ DCHECK(is_int16(offset)); |
+ BranchShortHelper(offset, nullptr, bdslot); |
} |
- // Emit a nop in the branch delay slot if required. |
- if (bdslot == PROTECT) |
- nop(); |
} |
void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) { |
- // We use branch_offset as an argument for the branch instructions to be sure |
- // it is called just before generating the branch instruction, as needed. |
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) { |
+ BranchShortHelperR6(0, L); |
+ } else { |
+ BranchShortHelper(0, L, bdslot); |
+ } |
+} |
- b(shifted_branch_offset(L, false)); |
- // Emit a nop in the branch delay slot if required. |
- if (bdslot == PROTECT) |
- nop(); |
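+// Returns true if the operand is the zero register or the immediate zero. |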
+static inline bool IsZero(const Operand& rt) { |
+ if (rt.is_reg()) { |
+ return rt.rm().is(zero_reg); |
+ } else { |
+ return rt.immediate() == 0; |
+ } |
} |
-void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, |
- const Operand& rt, |
- BranchDelaySlot bdslot) { |
- BRANCH_ARGS_CHECK(cond, rs, rt); |
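+// Returns the offset to encode in a branch: when a label is given it is |
+// resolved with branch_offset_helper and shifted into instruction units, |
+// otherwise the caller-supplied offset is checked to fit in |bits|. |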
+int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) { |
+ if (L) { |
+ offset = branch_offset_helper(L, bits) >> 2; |
+ } else { |
+ DCHECK(is_intn(offset, bits)); |
+ } |
+ return offset; |
+} |
+ |
- int32_t offset = 0; |
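+// Returns rt's register when the operand already is one; otherwise loads the |
+// immediate into |scratch| and returns that. |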
+Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt, |
+ Register scratch) { |
Register r2 = no_reg; |
- Register scratch = at; |
if (rt.is_reg()) { |
- BlockTrampolinePoolScope block_trampoline_pool(this); |
r2 = rt.rm_; |
- // Be careful to always use shifted_branch_offset only just before the |
- // branch instruction, as the location will be remember for patching the |
- // target. |
+ } else { |
+ r2 = scratch; |
+ li(r2, rt); |
+ } |
+ |
+ return r2; |
+} |
+ |
+ |
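+// Emits an r6 compact branch for |cond|, picking the widest offset encoding |
+// available (26-bit bc, 21-bit beqzc/bnezc, or 16-bit compare-and-branch). |
+// Returns false when the target is out of range for the chosen encoding so |
+// the caller can fall back to a long branch. |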
+bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L, |
+ Condition cond, Register rs, |
+ const Operand& rt) { |
+ DCHECK(L == nullptr || offset == 0); |
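+  // Use t8 as scratch if rs already occupies at, so that materializing an |
+  // immediate rt below cannot clobber the register being compared. |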
+ Register scratch = rs.is(at) ? t8 : at; |
+ OffsetSize bits = OffsetSize::kOffset16; |
+ |
+ // Be careful to always use shifted_branch_offset only just before the |
+  // branch instruction, as the location will be remembered for patching the |
+ // target. |
+ { |
+ BlockTrampolinePoolScope block_trampoline_pool(this); |
switch (cond) { |
case cc_always: |
- offset = shifted_branch_offset(L, false); |
- b(offset); |
+ bits = OffsetSize::kOffset26; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ bc(offset); |
break; |
case eq: |
- offset = shifted_branch_offset(L, false); |
- beq(rs, r2, offset); |
+ if (rs.code() == rt.rm_.reg_code) { |
+        // Pre R6 beq is used here to make the code patchable. Otherwise bc |
+        // should be used, which has no condition field and so is not patchable. |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ beq(rs, scratch, offset); |
+ nop(); |
+ } else if (IsZero(rt)) { |
+ bits = OffsetSize::kOffset21; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ beqzc(rs, offset); |
+ } else { |
+ // We don't want any other register but scratch clobbered. |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ beqc(rs, scratch, offset); |
+ } |
break; |
case ne: |
- offset = shifted_branch_offset(L, false); |
- bne(rs, r2, offset); |
+ if (rs.code() == rt.rm_.reg_code) { |
+ // Pre R6 bne is used here to make the code patchable. Otherwise we |
+ // should not generate any instruction. |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ bne(rs, scratch, offset); |
+ nop(); |
+ } else if (IsZero(rt)) { |
+ bits = OffsetSize::kOffset21; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ bnezc(rs, offset); |
+ } else { |
+ // We don't want any other register but scratch clobbered. |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ bnec(rs, scratch, offset); |
+ } |
break; |
+ |
// Signed comparison. |
case greater: |
- if (r2.is(zero_reg)) { |
- offset = shifted_branch_offset(L, false); |
- bgtz(rs, offset); |
+ // rs > rt |
+ if (rs.code() == rt.rm_.reg_code) { |
+ break; // No code needs to be emitted. |
+ } else if (rs.is(zero_reg)) { |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ bltzc(scratch, offset); |
+ } else if (IsZero(rt)) { |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ bgtzc(rs, offset); |
} else { |
- slt(scratch, r2, rs); |
- offset = shifted_branch_offset(L, false); |
- bne(scratch, zero_reg, offset); |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ DCHECK(!rs.is(scratch)); |
+ offset = GetOffset(offset, L, bits); |
+ bltc(scratch, rs, offset); |
} |
break; |
case greater_equal: |
- if (r2.is(zero_reg)) { |
- offset = shifted_branch_offset(L, false); |
- bgez(rs, offset); |
+ // rs >= rt |
+ if (rs.code() == rt.rm_.reg_code) { |
+ bits = OffsetSize::kOffset26; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ bc(offset); |
+ } else if (rs.is(zero_reg)) { |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ blezc(scratch, offset); |
+ } else if (IsZero(rt)) { |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ bgezc(rs, offset); |
} else { |
- slt(scratch, rs, r2); |
- offset = shifted_branch_offset(L, false); |
- beq(scratch, zero_reg, offset); |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ DCHECK(!rs.is(scratch)); |
+ offset = GetOffset(offset, L, bits); |
+ bgec(rs, scratch, offset); |
} |
break; |
case less: |
- if (r2.is(zero_reg)) { |
- offset = shifted_branch_offset(L, false); |
- bltz(rs, offset); |
+ // rs < rt |
+ if (rs.code() == rt.rm_.reg_code) { |
+ break; // No code needs to be emitted. |
+ } else if (rs.is(zero_reg)) { |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ bgtzc(scratch, offset); |
+ } else if (IsZero(rt)) { |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ bltzc(rs, offset); |
} else { |
- slt(scratch, rs, r2); |
- offset = shifted_branch_offset(L, false); |
- bne(scratch, zero_reg, offset); |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ DCHECK(!rs.is(scratch)); |
+ offset = GetOffset(offset, L, bits); |
+ bltc(rs, scratch, offset); |
} |
break; |
case less_equal: |
- if (r2.is(zero_reg)) { |
- offset = shifted_branch_offset(L, false); |
- blez(rs, offset); |
+ // rs <= rt |
+ if (rs.code() == rt.rm_.reg_code) { |
+ bits = OffsetSize::kOffset26; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ bc(offset); |
+ } else if (rs.is(zero_reg)) { |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ bgezc(scratch, offset); |
+ } else if (IsZero(rt)) { |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ blezc(rs, offset); |
} else { |
- slt(scratch, r2, rs); |
- offset = shifted_branch_offset(L, false); |
- beq(scratch, zero_reg, offset); |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ DCHECK(!rs.is(scratch)); |
+ offset = GetOffset(offset, L, bits); |
+ bgec(scratch, rs, offset); |
} |
break; |
+ |
// Unsigned comparison. |
case Ugreater: |
- if (r2.is(zero_reg)) { |
- offset = shifted_branch_offset(L, false); |
- bne(rs, zero_reg, offset); |
+ // rs > rt |
+ if (rs.code() == rt.rm_.reg_code) { |
+ break; // No code needs to be emitted. |
+ } else if (rs.is(zero_reg)) { |
+ bits = OffsetSize::kOffset21; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ bnezc(scratch, offset); |
+ } else if (IsZero(rt)) { |
+ bits = OffsetSize::kOffset21; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ bnezc(rs, offset); |
} else { |
- sltu(scratch, r2, rs); |
- offset = shifted_branch_offset(L, false); |
- bne(scratch, zero_reg, offset); |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ DCHECK(!rs.is(scratch)); |
+ offset = GetOffset(offset, L, bits); |
+ bltuc(scratch, rs, offset); |
} |
break; |
case Ugreater_equal: |
- if (r2.is(zero_reg)) { |
- offset = shifted_branch_offset(L, false); |
- b(offset); |
+ // rs >= rt |
+ if (rs.code() == rt.rm_.reg_code) { |
+ bits = OffsetSize::kOffset26; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ bc(offset); |
+ } else if (rs.is(zero_reg)) { |
+ bits = OffsetSize::kOffset21; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ beqzc(scratch, offset); |
+ } else if (IsZero(rt)) { |
+ bits = OffsetSize::kOffset26; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ bc(offset); |
} else { |
- sltu(scratch, rs, r2); |
- offset = shifted_branch_offset(L, false); |
- beq(scratch, zero_reg, offset); |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ DCHECK(!rs.is(scratch)); |
+ offset = GetOffset(offset, L, bits); |
+ bgeuc(rs, scratch, offset); |
} |
break; |
case Uless: |
- if (r2.is(zero_reg)) { |
- // No code needs to be emitted. |
- return; |
+ // rs < rt |
+ if (rs.code() == rt.rm_.reg_code) { |
+ break; // No code needs to be emitted. |
+ } else if (rs.is(zero_reg)) { |
+ bits = OffsetSize::kOffset21; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ bnezc(scratch, offset); |
+ } else if (IsZero(rt)) { |
+ break; // No code needs to be emitted. |
} else { |
- sltu(scratch, rs, r2); |
- offset = shifted_branch_offset(L, false); |
- bne(scratch, zero_reg, offset); |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ DCHECK(!rs.is(scratch)); |
+ offset = GetOffset(offset, L, bits); |
+ bltuc(rs, scratch, offset); |
} |
break; |
case Uless_equal: |
- if (r2.is(zero_reg)) { |
- offset = shifted_branch_offset(L, false); |
- beq(rs, zero_reg, offset); |
+ // rs <= rt |
+ if (rs.code() == rt.rm_.reg_code) { |
+ bits = OffsetSize::kOffset26; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ bc(offset); |
+ } else if (rs.is(zero_reg)) { |
+ bits = OffsetSize::kOffset26; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ bc(offset); |
+ } else if (IsZero(rt)) { |
+ bits = OffsetSize::kOffset21; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ beqzc(rs, offset); |
} else { |
- sltu(scratch, r2, rs); |
- offset = shifted_branch_offset(L, false); |
- beq(scratch, zero_reg, offset); |
+ bits = OffsetSize::kOffset16; |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ DCHECK(!rs.is(scratch)); |
+ offset = GetOffset(offset, L, bits); |
+ bgeuc(scratch, rs, offset); |
} |
break; |
default: |
UNREACHABLE(); |
} |
- } else { |
- // Be careful to always use shifted_branch_offset only just before the |
- // branch instruction, as the location will be remember for patching the |
- // target. |
+ } |
+ CheckTrampolinePoolQuick(1); |
+ return true; |
+} |
+ |
+ |
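+// Pre-r6 variant: emits a classic branch (with delay slot handling) and |
+// returns false when the target is outside the signed 16-bit branch range. |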
+bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond, |
+ Register rs, const Operand& rt, |
+ BranchDelaySlot bdslot) { |
+ DCHECK(L == nullptr || offset == 0); |
+ if (!is_near(L, OffsetSize::kOffset16)) return false; |
+ |
+ Register scratch = at; |
+ int32_t offset32; |
+ |
+ // Be careful to always use shifted_branch_offset only just before the |
+  // branch instruction, as the location will be remembered for patching the |
+ // target. |
+ { |
BlockTrampolinePoolScope block_trampoline_pool(this); |
switch (cond) { |
case cc_always: |
- offset = shifted_branch_offset(L, false); |
- b(offset); |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ b(offset32); |
break; |
case eq: |
- if (rt.imm64_ == 0) { |
- offset = shifted_branch_offset(L, false); |
- beq(rs, zero_reg, offset); |
+ if (IsZero(rt)) { |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ beq(rs, zero_reg, offset32); |
} else { |
- DCHECK(!scratch.is(rs)); |
- r2 = scratch; |
- li(r2, rt); |
- offset = shifted_branch_offset(L, false); |
- beq(rs, r2, offset); |
+ // We don't want any other register but scratch clobbered. |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ beq(rs, scratch, offset32); |
} |
break; |
case ne: |
- if (rt.imm64_ == 0) { |
- offset = shifted_branch_offset(L, false); |
- bne(rs, zero_reg, offset); |
+ if (IsZero(rt)) { |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bne(rs, zero_reg, offset32); |
} else { |
- DCHECK(!scratch.is(rs)); |
- r2 = scratch; |
- li(r2, rt); |
- offset = shifted_branch_offset(L, false); |
- bne(rs, r2, offset); |
+ // We don't want any other register but scratch clobbered. |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bne(rs, scratch, offset32); |
} |
break; |
+ |
// Signed comparison. |
case greater: |
- if (rt.imm64_ == 0) { |
- offset = shifted_branch_offset(L, false); |
- bgtz(rs, offset); |
+ if (IsZero(rt)) { |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bgtz(rs, offset32); |
} else { |
- DCHECK(!scratch.is(rs)); |
- r2 = scratch; |
- li(r2, rt); |
- slt(scratch, r2, rs); |
- offset = shifted_branch_offset(L, false); |
- bne(scratch, zero_reg, offset); |
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bne(scratch, zero_reg, offset32); |
} |
break; |
case greater_equal: |
- if (rt.imm64_ == 0) { |
- offset = shifted_branch_offset(L, false); |
- bgez(rs, offset); |
- } else if (is_int16(rt.imm64_)) { |
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
- offset = shifted_branch_offset(L, false); |
- beq(scratch, zero_reg, offset); |
+ if (IsZero(rt)) { |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bgez(rs, offset32); |
} else { |
- DCHECK(!scratch.is(rs)); |
- r2 = scratch; |
- li(r2, rt); |
- slt(scratch, rs, r2); |
- offset = shifted_branch_offset(L, false); |
- beq(scratch, zero_reg, offset); |
+ Slt(scratch, rs, rt); |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ beq(scratch, zero_reg, offset32); |
} |
break; |
case less: |
- if (rt.imm64_ == 0) { |
- offset = shifted_branch_offset(L, false); |
- bltz(rs, offset); |
- } else if (is_int16(rt.imm64_)) { |
- slti(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
- offset = shifted_branch_offset(L, false); |
- bne(scratch, zero_reg, offset); |
+ if (IsZero(rt)) { |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bltz(rs, offset32); |
} else { |
- DCHECK(!scratch.is(rs)); |
- r2 = scratch; |
- li(r2, rt); |
- slt(scratch, rs, r2); |
- offset = shifted_branch_offset(L, false); |
- bne(scratch, zero_reg, offset); |
+ Slt(scratch, rs, rt); |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bne(scratch, zero_reg, offset32); |
} |
break; |
case less_equal: |
- if (rt.imm64_ == 0) { |
- offset = shifted_branch_offset(L, false); |
- blez(rs, offset); |
+ if (IsZero(rt)) { |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ blez(rs, offset32); |
} else { |
- DCHECK(!scratch.is(rs)); |
- r2 = scratch; |
- li(r2, rt); |
- slt(scratch, r2, rs); |
- offset = shifted_branch_offset(L, false); |
- beq(scratch, zero_reg, offset); |
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ beq(scratch, zero_reg, offset32); |
} |
break; |
+ |
// Unsigned comparison. |
case Ugreater: |
- if (rt.imm64_ == 0) { |
- offset = shifted_branch_offset(L, false); |
- bne(rs, zero_reg, offset); |
+ if (IsZero(rt)) { |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bne(rs, zero_reg, offset32); |
} else { |
- DCHECK(!scratch.is(rs)); |
- r2 = scratch; |
- li(r2, rt); |
- sltu(scratch, r2, rs); |
- offset = shifted_branch_offset(L, false); |
- bne(scratch, zero_reg, offset); |
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bne(scratch, zero_reg, offset32); |
} |
break; |
case Ugreater_equal: |
- if (rt.imm64_ == 0) { |
- offset = shifted_branch_offset(L, false); |
- b(offset); |
- } else if (is_int16(rt.imm64_)) { |
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
- offset = shifted_branch_offset(L, false); |
- beq(scratch, zero_reg, offset); |
+ if (IsZero(rt)) { |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ b(offset32); |
} else { |
- DCHECK(!scratch.is(rs)); |
- r2 = scratch; |
- li(r2, rt); |
- sltu(scratch, rs, r2); |
- offset = shifted_branch_offset(L, false); |
- beq(scratch, zero_reg, offset); |
+ Sltu(scratch, rs, rt); |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ beq(scratch, zero_reg, offset32); |
} |
break; |
- case Uless: |
- if (rt.imm64_ == 0) { |
- // No code needs to be emitted. |
- return; |
- } else if (is_int16(rt.imm64_)) { |
- sltiu(scratch, rs, static_cast<int32_t>(rt.imm64_)); |
- offset = shifted_branch_offset(L, false); |
- bne(scratch, zero_reg, offset); |
+ case Uless: |
+ if (IsZero(rt)) { |
+ return true; // No code needs to be emitted. |
} else { |
- DCHECK(!scratch.is(rs)); |
- r2 = scratch; |
- li(r2, rt); |
- sltu(scratch, rs, r2); |
- offset = shifted_branch_offset(L, false); |
- bne(scratch, zero_reg, offset); |
+ Sltu(scratch, rs, rt); |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bne(scratch, zero_reg, offset32); |
} |
break; |
case Uless_equal: |
- if (rt.imm64_ == 0) { |
- offset = shifted_branch_offset(L, false); |
- beq(rs, zero_reg, offset); |
+ if (IsZero(rt)) { |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ beq(rs, zero_reg, offset32); |
} else { |
- DCHECK(!scratch.is(rs)); |
- r2 = scratch; |
- li(r2, rt); |
- sltu(scratch, r2, rs); |
- offset = shifted_branch_offset(L, false); |
- beq(scratch, zero_reg, offset); |
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); |
+ offset32 = GetOffset(offset, L, OffsetSize::kOffset16); |
+ beq(scratch, zero_reg, offset32); |
} |
break; |
default: |
UNREACHABLE(); |
} |
} |
- // Check that offset could actually hold on an int16_t. |
- DCHECK(is_int16(offset)); |
+ |
// Emit a nop in the branch delay slot if required. |
if (bdslot == PROTECT) |
nop(); |
+ |
+ return true; |
} |
-void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) { |
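+// Checks the branch arguments and dispatches to the r6 compact-branch helper |
+// (PROTECT only) or to the pre-r6 helper. Returns false when the target |
+// cannot be reached with a short branch. |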
+bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond, |
+ Register rs, const Operand& rt, |
+ BranchDelaySlot bdslot) { |
+ BRANCH_ARGS_CHECK(cond, rs, rt); |
+ |
+ if (!L) { |
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) { |
+ DCHECK(is_int26(offset)); |
+ return BranchShortHelperR6(offset, nullptr, cond, rs, rt); |
+ } else { |
+ DCHECK(is_int16(offset)); |
+ return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot); |
+ } |
+ } else { |
+ DCHECK(offset == 0); |
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) { |
+ return BranchShortHelperR6(0, L, cond, rs, rt); |
+ } else { |
+ return BranchShortHelper(0, L, cond, rs, rt, bdslot); |
+ } |
+ } |
+ return false; |
+} |
+ |
+ |
+void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs, |
+ const Operand& rt, BranchDelaySlot bdslot) { |
+ BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot); |
+} |
+ |
+ |
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs, |
+ const Operand& rt, BranchDelaySlot bdslot) { |
+ BranchShortCheck(0, L, cond, rs, rt, bdslot); |
+} |
+ |
+ |
+void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) { |
BranchAndLinkShort(offset, bdslot); |
} |
-void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs, |
- const Operand& rt, |
- BranchDelaySlot bdslot) { |
- BranchAndLinkShort(offset, cond, rs, rt, bdslot); |
+void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs, |
+ const Operand& rt, BranchDelaySlot bdslot) { |
+ bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot); |
+ DCHECK(is_near); |
+ USE(is_near); |
} |
void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) { |
if (L->is_bound()) { |
- if (is_near(L)) { |
+ if (is_near_branch(L)) { |
BranchAndLinkShort(L, bdslot); |
} else { |
- Jal(L, bdslot); |
+ BranchAndLinkLong(L, bdslot); |
} |
} else { |
if (is_trampoline_emitted()) { |
- Jal(L, bdslot); |
+ BranchAndLinkLong(L, bdslot); |
} else { |
BranchAndLinkShort(L, bdslot); |
} |
@@ -2949,13 +2947,11 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs, |
const Operand& rt, |
BranchDelaySlot bdslot) { |
if (L->is_bound()) { |
- if (is_near(L)) { |
- BranchAndLinkShort(L, cond, rs, rt, bdslot); |
- } else { |
+ if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) { |
Label skip; |
Condition neg_cond = NegateCondition(cond); |
BranchShort(&skip, neg_cond, rs, rt); |
- Jal(L, bdslot); |
+ BranchAndLinkLong(L, bdslot); |
bind(&skip); |
} |
} else { |
@@ -2963,20 +2959,19 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs, |
Label skip; |
Condition neg_cond = NegateCondition(cond); |
BranchShort(&skip, neg_cond, rs, rt); |
- Jal(L, bdslot); |
+ BranchAndLinkLong(L, bdslot); |
bind(&skip); |
} else { |
- BranchAndLinkShort(L, cond, rs, rt, bdslot); |
+ BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot); |
} |
} |
} |
-// We need to use a bgezal or bltzal, but they can't be used directly with the |
-// slt instructions. We could use sub or add instead but we would miss overflow |
-// cases, so we keep slt and add an intermediate third instruction. |
-void MacroAssembler::BranchAndLinkShort(int16_t offset, |
- BranchDelaySlot bdslot) { |
+void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, |
+ BranchDelaySlot bdslot) { |
+ DCHECK(L == nullptr || offset == 0); |
+ offset = GetOffset(offset, L, OffsetSize::kOffset16); |
bal(offset); |
// Emit a nop in the branch delay slot if required. |
@@ -2985,230 +2980,306 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset, |
} |
-void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond, |
- Register rs, const Operand& rt, |
- BranchDelaySlot bdslot) { |
- BRANCH_ARGS_CHECK(cond, rs, rt); |
- Register r2 = no_reg; |
- Register scratch = at; |
- |
- if (rt.is_reg()) { |
- r2 = rt.rm_; |
- } else if (cond != cc_always) { |
- r2 = scratch; |
- li(r2, rt); |
- } |
+void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) { |
+ DCHECK(L == nullptr || offset == 0); |
+ offset = GetOffset(offset, L, OffsetSize::kOffset26); |
+ balc(offset); |
+} |
- { |
- BlockTrampolinePoolScope block_trampoline_pool(this); |
- switch (cond) { |
- case cc_always: |
- bal(offset); |
- break; |
- case eq: |
- bne(rs, r2, 2); |
- nop(); |
- bal(offset); |
- break; |
- case ne: |
- beq(rs, r2, 2); |
- nop(); |
- bal(offset); |
- break; |
- // Signed comparison. |
- case greater: |
- // rs > rt |
- slt(scratch, r2, rs); |
- beq(scratch, zero_reg, 2); |
- nop(); |
- bal(offset); |
- break; |
- case greater_equal: |
- // rs >= rt |
- slt(scratch, rs, r2); |
- bne(scratch, zero_reg, 2); |
- nop(); |
- bal(offset); |
- break; |
- case less: |
- // rs < r2 |
- slt(scratch, rs, r2); |
- bne(scratch, zero_reg, 2); |
- nop(); |
- bal(offset); |
- break; |
- case less_equal: |
- // rs <= r2 |
- slt(scratch, r2, rs); |
- bne(scratch, zero_reg, 2); |
- nop(); |
- bal(offset); |
- break; |
+void MacroAssembler::BranchAndLinkShort(int32_t offset, |
+ BranchDelaySlot bdslot) { |
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) { |
+ DCHECK(is_int26(offset)); |
+ BranchAndLinkShortHelperR6(offset, nullptr); |
+ } else { |
+ DCHECK(is_int16(offset)); |
+ BranchAndLinkShortHelper(offset, nullptr, bdslot); |
+ } |
+} |
- // Unsigned comparison. |
- case Ugreater: |
- // rs > rt |
- sltu(scratch, r2, rs); |
- beq(scratch, zero_reg, 2); |
- nop(); |
- bal(offset); |
- break; |
- case Ugreater_equal: |
- // rs >= rt |
- sltu(scratch, rs, r2); |
- bne(scratch, zero_reg, 2); |
- nop(); |
- bal(offset); |
- break; |
- case Uless: |
- // rs < r2 |
- sltu(scratch, rs, r2); |
- bne(scratch, zero_reg, 2); |
- nop(); |
- bal(offset); |
- break; |
- case Uless_equal: |
- // rs <= r2 |
- sltu(scratch, r2, rs); |
- bne(scratch, zero_reg, 2); |
- nop(); |
- bal(offset); |
- break; |
- default: |
- UNREACHABLE(); |
- } |
+void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) { |
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) { |
+ BranchAndLinkShortHelperR6(0, L); |
+ } else { |
+ BranchAndLinkShortHelper(0, L, bdslot); |
} |
- // Emit a nop in the branch delay slot if required. |
- if (bdslot == PROTECT) |
- nop(); |
} |
-void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) { |
- bal(shifted_branch_offset(L, false)); |
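+// r6 branch-and-link using the compact *alc instructions; as with |
+// BranchShortHelperR6, returns false when the target is out of range. |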
+bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L, |
+ Condition cond, Register rs, |
+ const Operand& rt) { |
+ DCHECK(L == nullptr || offset == 0); |
+ Register scratch = rs.is(at) ? t8 : at; |
+ OffsetSize bits = OffsetSize::kOffset16; |
- // Emit a nop in the branch delay slot if required. |
- if (bdslot == PROTECT) |
- nop(); |
-} |
+ BlockTrampolinePoolScope block_trampoline_pool(this); |
+ DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset)); |
+ switch (cond) { |
+ case cc_always: |
+ bits = OffsetSize::kOffset26; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ balc(offset); |
+ break; |
+ case eq: |
+ if (!is_near(L, bits)) return false; |
+ Subu(scratch, rs, rt); |
+ offset = GetOffset(offset, L, bits); |
+ beqzalc(scratch, offset); |
+ break; |
+ case ne: |
+ if (!is_near(L, bits)) return false; |
+ Subu(scratch, rs, rt); |
+ offset = GetOffset(offset, L, bits); |
+ bnezalc(scratch, offset); |
+ break; |
+ // Signed comparison. |
+ case greater: |
+ // rs > rt |
+ if (rs.code() == rt.rm_.reg_code) { |
+ break; // No code needs to be emitted. |
+ } else if (rs.is(zero_reg)) { |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ bltzalc(scratch, offset); |
+ } else if (IsZero(rt)) { |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ bgtzalc(rs, offset); |
+ } else { |
+ if (!is_near(L, bits)) return false; |
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); |
+ offset = GetOffset(offset, L, bits); |
+ bnezalc(scratch, offset); |
+ } |
+ break; |
+ case greater_equal: |
+ // rs >= rt |
+ if (rs.code() == rt.rm_.reg_code) { |
+ bits = OffsetSize::kOffset26; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ balc(offset); |
+ } else if (rs.is(zero_reg)) { |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ blezalc(scratch, offset); |
+ } else if (IsZero(rt)) { |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ bgezalc(rs, offset); |
+ } else { |
+ if (!is_near(L, bits)) return false; |
+ Slt(scratch, rs, rt); |
+ offset = GetOffset(offset, L, bits); |
+ beqzalc(scratch, offset); |
+ } |
+ break; |
+ case less: |
+ // rs < rt |
+ if (rs.code() == rt.rm_.reg_code) { |
+ break; // No code needs to be emitted. |
+ } else if (rs.is(zero_reg)) { |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ bgtzalc(scratch, offset); |
+ } else if (IsZero(rt)) { |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ bltzalc(rs, offset); |
+ } else { |
+ if (!is_near(L, bits)) return false; |
+ Slt(scratch, rs, rt); |
+ offset = GetOffset(offset, L, bits); |
+ bnezalc(scratch, offset); |
+ } |
+ break; |
+ case less_equal: |
+      // rs <= rt |
+ if (rs.code() == rt.rm_.reg_code) { |
+ bits = OffsetSize::kOffset26; |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ balc(offset); |
+ } else if (rs.is(zero_reg)) { |
+ if (!is_near(L, bits)) return false; |
+ scratch = GetRtAsRegisterHelper(rt, scratch); |
+ offset = GetOffset(offset, L, bits); |
+ bgezalc(scratch, offset); |
+ } else if (IsZero(rt)) { |
+ if (!is_near(L, bits)) return false; |
+ offset = GetOffset(offset, L, bits); |
+ blezalc(rs, offset); |
+ } else { |
+ if (!is_near(L, bits)) return false; |
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); |
+ offset = GetOffset(offset, L, bits); |
+ beqzalc(scratch, offset); |
+ } |
+ break; |
-void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs, |
- const Operand& rt, |
- BranchDelaySlot bdslot) { |
- BRANCH_ARGS_CHECK(cond, rs, rt); |
- int32_t offset = 0; |
- Register r2 = no_reg; |
- Register scratch = at; |
- if (rt.is_reg()) { |
- r2 = rt.rm_; |
- } else if (cond != cc_always) { |
- r2 = scratch; |
- li(r2, rt); |
+ // Unsigned comparison. |
+ case Ugreater: |
+      // rs > rt |
+ if (!is_near(L, bits)) return false; |
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); |
+ offset = GetOffset(offset, L, bits); |
+ bnezalc(scratch, offset); |
+ break; |
+ case Ugreater_equal: |
+      // rs >= rt |
+ if (!is_near(L, bits)) return false; |
+ Sltu(scratch, rs, rt); |
+ offset = GetOffset(offset, L, bits); |
+ beqzalc(scratch, offset); |
+ break; |
+ case Uless: |
+      // rs < rt |
+ if (!is_near(L, bits)) return false; |
+ Sltu(scratch, rs, rt); |
+ offset = GetOffset(offset, L, bits); |
+ bnezalc(scratch, offset); |
+ break; |
+ case Uless_equal: |
+      // rs <= rt |
+ if (!is_near(L, bits)) return false; |
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); |
+ offset = GetOffset(offset, L, bits); |
+ beqzalc(scratch, offset); |
+ break; |
+ default: |
+ UNREACHABLE(); |
} |
+ return true; |
+} |
- { |
- BlockTrampolinePoolScope block_trampoline_pool(this); |
- switch (cond) { |
- case cc_always: |
- offset = shifted_branch_offset(L, false); |
- bal(offset); |
- break; |
- case eq: |
- bne(rs, r2, 2); |
- nop(); |
- offset = shifted_branch_offset(L, false); |
- bal(offset); |
- break; |
- case ne: |
- beq(rs, r2, 2); |
- nop(); |
- offset = shifted_branch_offset(L, false); |
- bal(offset); |
- break; |
- // Signed comparison. |
- case greater: |
- // rs > rt |
- slt(scratch, r2, rs); |
- beq(scratch, zero_reg, 2); |
- nop(); |
- offset = shifted_branch_offset(L, false); |
- bal(offset); |
- break; |
- case greater_equal: |
- // rs >= rt |
- slt(scratch, rs, r2); |
- bne(scratch, zero_reg, 2); |
- nop(); |
- offset = shifted_branch_offset(L, false); |
- bal(offset); |
- break; |
- case less: |
- // rs < r2 |
- slt(scratch, rs, r2); |
- bne(scratch, zero_reg, 2); |
- nop(); |
- offset = shifted_branch_offset(L, false); |
- bal(offset); |
- break; |
- case less_equal: |
- // rs <= r2 |
- slt(scratch, r2, rs); |
- bne(scratch, zero_reg, 2); |
- nop(); |
- offset = shifted_branch_offset(L, false); |
- bal(offset); |
- break; |
+// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly |
+// with the slt instructions. We could use sub or add instead but we would miss |
+// overflow cases, so we keep slt and add an intermediate third instruction. |
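+// Since slt leaves 0 or 1 in the scratch register, the addiu below maps this |
+// to -1 or 0, so bgezal links exactly when the slt condition held and bltzal |
+// exactly when it did not. |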
+bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L, |
+ Condition cond, Register rs, |
+ const Operand& rt, |
+ BranchDelaySlot bdslot) { |
+ DCHECK(L == nullptr || offset == 0); |
+ if (!is_near(L, OffsetSize::kOffset16)) return false; |
+ Register scratch = t8; |
+ BlockTrampolinePoolScope block_trampoline_pool(this); |
- // Unsigned comparison. |
- case Ugreater: |
- // rs > rt |
- sltu(scratch, r2, rs); |
- beq(scratch, zero_reg, 2); |
- nop(); |
- offset = shifted_branch_offset(L, false); |
- bal(offset); |
- break; |
- case Ugreater_equal: |
- // rs >= rt |
- sltu(scratch, rs, r2); |
- bne(scratch, zero_reg, 2); |
- nop(); |
- offset = shifted_branch_offset(L, false); |
- bal(offset); |
- break; |
- case Uless: |
- // rs < r2 |
- sltu(scratch, rs, r2); |
- bne(scratch, zero_reg, 2); |
- nop(); |
- offset = shifted_branch_offset(L, false); |
- bal(offset); |
- break; |
- case Uless_equal: |
- // rs <= r2 |
- sltu(scratch, r2, rs); |
- bne(scratch, zero_reg, 2); |
- nop(); |
- offset = shifted_branch_offset(L, false); |
- bal(offset); |
- break; |
+ switch (cond) { |
+ case cc_always: |
+ offset = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bal(offset); |
+ break; |
+ case eq: |
+ bne(rs, GetRtAsRegisterHelper(rt, scratch), 2); |
+ nop(); |
+ offset = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bal(offset); |
+ break; |
+ case ne: |
+ beq(rs, GetRtAsRegisterHelper(rt, scratch), 2); |
+ nop(); |
+ offset = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bal(offset); |
+ break; |
- default: |
- UNREACHABLE(); |
- } |
+ // Signed comparison. |
+ case greater: |
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); |
+ addiu(scratch, scratch, -1); |
+ offset = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bgezal(scratch, offset); |
+ break; |
+ case greater_equal: |
+ Slt(scratch, rs, rt); |
+ addiu(scratch, scratch, -1); |
+ offset = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bltzal(scratch, offset); |
+ break; |
+ case less: |
+ Slt(scratch, rs, rt); |
+ addiu(scratch, scratch, -1); |
+ offset = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bgezal(scratch, offset); |
+ break; |
+ case less_equal: |
+ Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs); |
+ addiu(scratch, scratch, -1); |
+ offset = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bltzal(scratch, offset); |
+ break; |
+ |
+ // Unsigned comparison. |
+ case Ugreater: |
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); |
+ addiu(scratch, scratch, -1); |
+ offset = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bgezal(scratch, offset); |
+ break; |
+ case Ugreater_equal: |
+ Sltu(scratch, rs, rt); |
+ addiu(scratch, scratch, -1); |
+ offset = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bltzal(scratch, offset); |
+ break; |
+ case Uless: |
+ Sltu(scratch, rs, rt); |
+ addiu(scratch, scratch, -1); |
+ offset = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bgezal(scratch, offset); |
+ break; |
+ case Uless_equal: |
+ Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs); |
+ addiu(scratch, scratch, -1); |
+ offset = GetOffset(offset, L, OffsetSize::kOffset16); |
+ bltzal(scratch, offset); |
+ break; |
+ |
+ default: |
+ UNREACHABLE(); |
} |
- // Check that offset could actually hold on an int16_t. |
- DCHECK(is_int16(offset)); |
// Emit a nop in the branch delay slot if required. |
if (bdslot == PROTECT) |
nop(); |
+ |
+ return true; |
+} |
+ |
+ |
+bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L, |
+ Condition cond, Register rs, |
+ const Operand& rt, |
+ BranchDelaySlot bdslot) { |
+ BRANCH_ARGS_CHECK(cond, rs, rt); |
+ |
+ if (!L) { |
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) { |
+ DCHECK(is_int26(offset)); |
+ return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt); |
+ } else { |
+ DCHECK(is_int16(offset)); |
+ return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot); |
+ } |
+ } else { |
+ DCHECK(offset == 0); |
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT) { |
+ return BranchAndLinkShortHelperR6(0, L, cond, rs, rt); |
+ } else { |
+ return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot); |
+ } |
+ } |
+ return false; |
} |
@@ -3298,6 +3369,10 @@ void MacroAssembler::Call(Register target, |
Register rs, |
const Operand& rt, |
BranchDelaySlot bd) { |
+#ifdef DEBUG |
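+  // Account for the nop that may be emitted to fill the forbidden slot when |
+  // the previous instruction is a compact branch; CallSize() does not |
+  // include it. |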
+ int size = IsPrevInstrCompactBranch() ? kInstrSize : 0; |
+#endif |
+ |
BlockTrampolinePoolScope block_trampoline_pool(this); |
Label start; |
bind(&start); |
@@ -3312,8 +3387,10 @@ void MacroAssembler::Call(Register target, |
if (bd == PROTECT) |
nop(); |
- DCHECK_EQ(CallSize(target, cond, rs, rt, bd), |
- SizeOfCodeGeneratedSince(&start)); |
+#ifdef DEBUG |
+ CHECK_EQ(size + CallSize(target, cond, rs, rt, bd), |
+ SizeOfCodeGeneratedSince(&start)); |
+#endif |
} |
@@ -3391,31 +3468,43 @@ void MacroAssembler::Ret(Condition cond, |
} |
-void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) { |
- BlockTrampolinePoolScope block_trampoline_pool(this); |
- { |
- BlockGrowBufferScope block_buf_growth(this); |
- // Buffer growth (and relocation) must be blocked for internal references |
- // until associated instructions are emitted and available to be patched. |
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED); |
- j(L); |
+void MacroAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) { |
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT && |
+ (!L->is_bound() || is_near_r6(L))) { |
+ BranchShortHelperR6(0, L); |
+ } else { |
+ EmitForbiddenSlotInstruction(); |
+ BlockTrampolinePoolScope block_trampoline_pool(this); |
+ { |
+ BlockGrowBufferScope block_buf_growth(this); |
+ // Buffer growth (and relocation) must be blocked for internal references |
+ // until associated instructions are emitted and available to be patched. |
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED); |
+ j(L); |
+ } |
+ // Emit a nop in the branch delay slot if required. |
+ if (bdslot == PROTECT) nop(); |
} |
- // Emit a nop in the branch delay slot if required. |
- if (bdslot == PROTECT) nop(); |
} |
-void MacroAssembler::Jal(Label* L, BranchDelaySlot bdslot) { |
- BlockTrampolinePoolScope block_trampoline_pool(this); |
- { |
- BlockGrowBufferScope block_buf_growth(this); |
- // Buffer growth (and relocation) must be blocked for internal references |
- // until associated instructions are emitted and available to be patched. |
- RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED); |
- jal(L); |
+void MacroAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) { |
+ if (kArchVariant == kMips64r6 && bdslot == PROTECT && |
+ (!L->is_bound() || is_near_r6(L))) { |
+ BranchAndLinkShortHelperR6(0, L); |
+ } else { |
+ EmitForbiddenSlotInstruction(); |
+ BlockTrampolinePoolScope block_trampoline_pool(this); |
+ { |
+ BlockGrowBufferScope block_buf_growth(this); |
+ // Buffer growth (and relocation) must be blocked for internal references |
+ // until associated instructions are emitted and available to be patched. |
+ RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE_ENCODED); |
+ jal(L); |
+ } |
+ // Emit a nop in the branch delay slot if required. |
+ if (bdslot == PROTECT) nop(); |
} |
- // Emit a nop in the branch delay slot if required. |
- if (bdslot == PROTECT) nop(); |
} |
@@ -6351,25 +6440,10 @@ void CodePatcher::Emit(Address addr) { |
} |
-void CodePatcher::ChangeBranchCondition(Condition cond) { |
- Instr instr = Assembler::instr_at(masm_.pc_); |
- DCHECK(Assembler::IsBranch(instr)); |
- uint32_t opcode = Assembler::GetOpcodeField(instr); |
- // Currently only the 'eq' and 'ne' cond values are supported and the simple |
- // branch instructions (with opcode being the branch type). |
- // There are some special cases (see Assembler::IsBranch()) so extending this |
- // would be tricky. |
- DCHECK(opcode == BEQ || |
- opcode == BNE || |
- opcode == BLEZ || |
- opcode == BGTZ || |
- opcode == BEQL || |
- opcode == BNEL || |
- opcode == BLEZL || |
- opcode == BGTZL); |
- opcode = (cond == eq) ? BEQ : BNE; |
- instr = (instr & ~kOpcodeMask) | opcode; |
- masm_.emit(instr); |
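+// Rewrites only the opcode field of |current_instr| to |new_opcode|, leaving |
+// the register and offset fields intact; the caller chooses the opcode that |
+// is valid for the branch being patched. |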
+void CodePatcher::ChangeBranchCondition(Instr current_instr, |
+ uint32_t new_opcode) { |
+ current_instr = (current_instr & ~kOpcodeMask) | new_opcode; |
+ masm_.emit(current_instr); |
} |