| Index: src/mips/macro-assembler-mips.cc
|
| diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
|
| index 945dde164ed0e062c5e384581cb74bc6c2586a80..8317fc12e962a84fe68515b0926e7fc866c499d0 100644
|
| --- a/src/mips/macro-assembler-mips.cc
|
| +++ b/src/mips/macro-assembler-mips.cc
|
| @@ -1938,21 +1938,23 @@ void MacroAssembler::GetLeastBitsFromInt32(Register dst,
|
| (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
|
|
|
|
|
| -void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
|
| +void MacroAssembler::Branch(int32_t offset, BranchDelaySlot bdslot) {
|
| + DCHECK(IsMipsArchVariant(kMips32r6) ? is_int26(offset) : is_int16(offset));
|
| BranchShort(offset, bdslot);
|
| }
|
|
|
|
|
| -void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
|
| - const Operand& rt,
|
| - BranchDelaySlot bdslot) {
|
| - BranchShort(offset, cond, rs, rt, bdslot);
|
| +void MacroAssembler::Branch(int32_t offset, Condition cond, Register rs,
|
| + const Operand& rt, BranchDelaySlot bdslot) {
|
| + bool is_near = BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
|
| + DCHECK(is_near);
|
| + USE(is_near);
|
| }
|
|
|
|
|
| void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
|
| if (L->is_bound()) {
|
| - if (is_near(L)) {
|
| + if (is_near_branch(L)) {
|
| BranchShort(L, bdslot);
|
| } else {
|
| Jr(L, bdslot);
|
| @@ -1971,9 +1973,7 @@ void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
|
| const Operand& rt,
|
| BranchDelaySlot bdslot) {
|
| if (L->is_bound()) {
|
| - if (is_near(L)) {
|
| - BranchShort(L, cond, rs, rt, bdslot);
|
| - } else {
|
| + if (!BranchShortCheck(0, L, cond, rs, rt, bdslot)) {
|
| if (cond != cc_always) {
|
| Label skip;
|
| Condition neg_cond = NegateCondition(cond);
|
| @@ -2012,7 +2012,9 @@ void MacroAssembler::Branch(Label* L,
|
| }
|
|
|
|
|
| -void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
|
| +void MacroAssembler::BranchShortHelper(int16_t offset, Label* L,
|
| + BranchDelaySlot bdslot) {
|
| + offset = GetOffset(offset, L, OffsetSize::kOffset16);
|
| b(offset);
|
|
|
| // Emit a nop in the branch delay slot if required.
|
| @@ -2021,542 +2023,529 @@ void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
|
| }
|
|
|
|
|
| -void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
|
| - const Operand& rt,
|
| - BranchDelaySlot bdslot) {
|
| - BRANCH_ARGS_CHECK(cond, rs, rt);
|
| - DCHECK(!rs.is(zero_reg));
|
| - Register r2 = no_reg;
|
| - Register scratch = at;
|
| +void MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L) {
|
| + offset = GetOffset(offset, L, OffsetSize::kOffset26);
|
| + bc(offset);
|
| +}
|
| +
|
| +
|
| +void MacroAssembler::BranchShort(int32_t offset, BranchDelaySlot bdslot) {
|
| + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
|
| + DCHECK(is_int26(offset));
|
| + BranchShortHelperR6(offset, nullptr);
|
| + } else {
|
| + DCHECK(is_int16(offset));
|
| + BranchShortHelper(offset, nullptr, bdslot);
|
| + }
|
| +}
|
| +
|
|
|
| +void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
|
| + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
|
| + BranchShortHelperR6(0, L);
|
| + } else {
|
| + BranchShortHelper(0, L, bdslot);
|
| + }
|
| +}
|
| +
|
| +
|
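| +// Returns true if the operand is the zero register or the immediate value 0.
|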
| +static inline bool IsZero(const Operand& rt) {
|
| + if (rt.is_reg()) {
|
| + return rt.rm().is(zero_reg);
|
| + } else {
|
| + return rt.immediate() == 0;
|
| + }
|
| +}
|
| +
|
| +
|
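| +// If a label is provided, returns its branch offset in instructions; otherwise
|
| +// asserts that the explicit offset fits in the requested offset field width.
|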
| +int32_t MacroAssembler::GetOffset(int32_t offset, Label* L, OffsetSize bits) {
|
| + if (L) {
|
| + offset = branch_offset_helper(L, bits) >> 2;
|
| + } else {
|
| + DCHECK(is_intn(offset, bits));
|
| + }
|
| + return offset;
|
| +}
|
| +
|
| +
|
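| +// Returns the rt operand as a register: the operand's own register if it has
|
| +// one, otherwise the immediate is loaded into the scratch register.
|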
| +Register MacroAssembler::GetRtAsRegisterHelper(const Operand& rt,
|
| + Register scratch) {
|
| + Register r2 = no_reg;
|
| if (rt.is_reg()) {
|
| - // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
|
| - // rt.
|
| - BlockTrampolinePoolScope block_trampoline_pool(this);
|
| r2 = rt.rm_;
|
| - switch (cond) {
|
| - case cc_always:
|
| - b(offset);
|
| - break;
|
| - case eq:
|
| - beq(rs, r2, offset);
|
| - break;
|
| - case ne:
|
| - bne(rs, r2, offset);
|
| - break;
|
| - // Signed comparison.
|
| - case greater:
|
| - if (r2.is(zero_reg)) {
|
| - bgtz(rs, offset);
|
| - } else {
|
| - slt(scratch, r2, rs);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case greater_equal:
|
| - if (r2.is(zero_reg)) {
|
| - bgez(rs, offset);
|
| - } else {
|
| - slt(scratch, rs, r2);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case less:
|
| - if (r2.is(zero_reg)) {
|
| - bltz(rs, offset);
|
| - } else {
|
| - slt(scratch, rs, r2);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case less_equal:
|
| - if (r2.is(zero_reg)) {
|
| - blez(rs, offset);
|
| - } else {
|
| - slt(scratch, r2, rs);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - // Unsigned comparison.
|
| - case Ugreater:
|
| - if (r2.is(zero_reg)) {
|
| - bne(rs, zero_reg, offset);
|
| - } else {
|
| - sltu(scratch, r2, rs);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case Ugreater_equal:
|
| - if (r2.is(zero_reg)) {
|
| - b(offset);
|
| - } else {
|
| - sltu(scratch, rs, r2);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case Uless:
|
| - if (r2.is(zero_reg)) {
|
| - // No code needs to be emitted.
|
| - return;
|
| - } else {
|
| - sltu(scratch, rs, r2);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case Uless_equal:
|
| - if (r2.is(zero_reg)) {
|
| - beq(rs, zero_reg, offset);
|
| - } else {
|
| - sltu(scratch, r2, rs);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - default:
|
| - UNREACHABLE();
|
| - }
|
| } else {
|
| - // Be careful to always use shifted_branch_offset only just before the
|
| - // branch instruction, as the location will be remember for patching the
|
| - // target.
|
| - BlockTrampolinePoolScope block_trampoline_pool(this);
|
| - switch (cond) {
|
| - case cc_always:
|
| - b(offset);
|
| - break;
|
| - case eq:
|
| - if (rt.imm32_ == 0) {
|
| - beq(rs, zero_reg, offset);
|
| - } else {
|
| - // We don't want any other register but scratch clobbered.
|
| - DCHECK(!scratch.is(rs));
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - beq(rs, r2, offset);
|
| - }
|
| - break;
|
| - case ne:
|
| - if (rt.imm32_ == 0) {
|
| - bne(rs, zero_reg, offset);
|
| - } else {
|
| - // We don't want any other register but scratch clobbered.
|
| - DCHECK(!scratch.is(rs));
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - bne(rs, r2, offset);
|
| - }
|
| - break;
|
| - // Signed comparison.
|
| - case greater:
|
| - if (rt.imm32_ == 0) {
|
| - bgtz(rs, offset);
|
| - } else {
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - slt(scratch, r2, rs);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case greater_equal:
|
| - if (rt.imm32_ == 0) {
|
| - bgez(rs, offset);
|
| - } else if (is_int16(rt.imm32_)) {
|
| - slti(scratch, rs, rt.imm32_);
|
| - beq(scratch, zero_reg, offset);
|
| - } else {
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - slt(scratch, rs, r2);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case less:
|
| - if (rt.imm32_ == 0) {
|
| - bltz(rs, offset);
|
| - } else if (is_int16(rt.imm32_)) {
|
| - slti(scratch, rs, rt.imm32_);
|
| - bne(scratch, zero_reg, offset);
|
| - } else {
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - slt(scratch, rs, r2);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case less_equal:
|
| - if (rt.imm32_ == 0) {
|
| - blez(rs, offset);
|
| - } else {
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - slt(scratch, r2, rs);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - // Unsigned comparison.
|
| - case Ugreater:
|
| - if (rt.imm32_ == 0) {
|
| - bne(rs, zero_reg, offset);
|
| - } else {
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - sltu(scratch, r2, rs);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case Ugreater_equal:
|
| - if (rt.imm32_ == 0) {
|
| - b(offset);
|
| - } else if (is_int16(rt.imm32_)) {
|
| - sltiu(scratch, rs, rt.imm32_);
|
| - beq(scratch, zero_reg, offset);
|
| - } else {
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - sltu(scratch, rs, r2);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case Uless:
|
| - if (rt.imm32_ == 0) {
|
| - // No code needs to be emitted.
|
| - return;
|
| - } else if (is_int16(rt.imm32_)) {
|
| - sltiu(scratch, rs, rt.imm32_);
|
| - bne(scratch, zero_reg, offset);
|
| - } else {
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - sltu(scratch, rs, r2);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case Uless_equal:
|
| - if (rt.imm32_ == 0) {
|
| - beq(rs, zero_reg, offset);
|
| - } else {
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - sltu(scratch, r2, rs);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - default:
|
| - UNREACHABLE();
|
| - }
|
| + r2 = scratch;
|
| + li(r2, rt);
|
| }
|
| - // Emit a nop in the branch delay slot if required.
|
| - if (bdslot == PROTECT)
|
| - nop();
|
| +
|
| + return r2;
|
| }
|
|
|
|
|
| -void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
|
| - // We use branch_offset as an argument for the branch instructions to be sure
|
| - // it is called just before generating the branch instruction, as needed.
|
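| +// Emits R6 compact branches for the given condition where possible. Returns
|
| +// false if the target is out of range for the chosen offset width, so the
|
| +// caller can fall back to a long branch sequence.
|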
| +bool MacroAssembler::BranchShortHelperR6(int32_t offset, Label* L,
|
| + Condition cond, Register rs,
|
| + const Operand& rt) {
|
| + Register scratch = rs.is(at) ? t8 : at;
|
| + OffsetSize bits = OffsetSize::kOffset16;
|
| +
|
| + // Be careful to always use shifted_branch_offset only just before the
|
| + // branch instruction, as the location will be remembered for patching the
|
| + // target.
|
| + BlockTrampolinePoolScope block_trampoline_pool(this);
|
| + switch (cond) {
|
| + case cc_always:
|
| + bits = OffsetSize::kOffset26;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + bc(offset);
|
| + break;
|
| + case eq:
|
| + if (rs.code() == rt.rm_.reg_code) {
|
| + // Pre R6 beq is used here to make the code patchable. Otherwise bc
|
| + // should be used, which has no condition field and so is not patchable.
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + beq(rs, scratch, offset);
|
| + nop();
|
| + } else if (IsZero(rt)) {
|
| + bits = OffsetSize::kOffset21;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + beqzc(rs, offset);
|
| + } else {
|
| + // We don't want any other register but scratch clobbered.
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + beqc(rs, scratch, offset);
|
| + }
|
| + break;
|
| + case ne:
|
| + if (rs.code() == rt.rm_.reg_code) {
|
| + // Pre R6 bne is used here to make the code patchable. Otherwise we
|
| + // should not generate any instruction.
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + bne(rs, scratch, offset);
|
| + nop();
|
| + } else if (IsZero(rt)) {
|
| + bits = OffsetSize::kOffset21;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + bnezc(rs, offset);
|
| + } else {
|
| + // We don't want any other register but scratch clobbered.
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + bnec(rs, scratch, offset);
|
| + }
|
| + break;
|
|
|
| - b(shifted_branch_offset(L, false));
|
| + // Signed comparison.
|
| + case greater:
|
| + // rs > rt
|
| + if (rs.code() == rt.rm_.reg_code) {
|
| + break; // No code needs to be emitted.
|
| + } else if (rs.is(zero_reg)) {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + bltzc(scratch, offset);
|
| + } else if (IsZero(rt)) {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + bgtzc(rs, offset);
|
| + } else {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + DCHECK(!rs.is(scratch));
|
| + offset = GetOffset(offset, L, bits);
|
| + bltc(scratch, rs, offset);
|
| + }
|
| + break;
|
| + case greater_equal:
|
| + // rs >= rt
|
| + if (rs.code() == rt.rm_.reg_code) {
|
| + bits = OffsetSize::kOffset26;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + bc(offset);
|
| + } else if (rs.is(zero_reg)) {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + blezc(scratch, offset);
|
| + } else if (IsZero(rt)) {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + bgezc(rs, offset);
|
| + } else {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + DCHECK(!rs.is(scratch));
|
| + offset = GetOffset(offset, L, bits);
|
| + bgec(rs, scratch, offset);
|
| + }
|
| + break;
|
| + case less:
|
| + // rs < rt
|
| + if (rs.code() == rt.rm_.reg_code) {
|
| + break; // No code needs to be emitted.
|
| + } else if (rs.is(zero_reg)) {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + bgtzc(scratch, offset);
|
| + } else if (IsZero(rt)) {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + bltzc(rs, offset);
|
| + } else {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + DCHECK(!rs.is(scratch));
|
| + offset = GetOffset(offset, L, bits);
|
| + bltc(rs, scratch, offset);
|
| + }
|
| + break;
|
| + case less_equal:
|
| + // rs <= rt
|
| + if (rs.code() == rt.rm_.reg_code) {
|
| + bits = OffsetSize::kOffset26;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + bc(offset);
|
| + } else if (rs.is(zero_reg)) {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + bgezc(scratch, offset);
|
| + } else if (IsZero(rt)) {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + blezc(rs, offset);
|
| + } else {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + DCHECK(!rs.is(scratch));
|
| + offset = GetOffset(offset, L, bits);
|
| + bgec(scratch, rs, offset);
|
| + }
|
| + break;
|
| +
|
| + // Unsigned comparison.
|
| + case Ugreater:
|
| + // rs > rt
|
| + if (rs.code() == rt.rm_.reg_code) {
|
| + break; // No code needs to be emitted.
|
| + } else if (rs.is(zero_reg)) {
|
| + bits = OffsetSize::kOffset21;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + bnezc(scratch, offset);
|
| + } else if (IsZero(rt)) {
|
| + bits = OffsetSize::kOffset21;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + bnezc(rs, offset);
|
| + } else {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + DCHECK(!rs.is(scratch));
|
| + offset = GetOffset(offset, L, bits);
|
| + bltuc(scratch, rs, offset);
|
| + }
|
| + break;
|
| + case Ugreater_equal:
|
| + // rs >= rt
|
| + if (rs.code() == rt.rm_.reg_code) {
|
| + bits = OffsetSize::kOffset26;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + bc(offset);
|
| + } else if (rs.is(zero_reg)) {
|
| + bits = OffsetSize::kOffset21;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + beqzc(scratch, offset);
|
| + } else if (IsZero(rt)) {
|
| + bits = OffsetSize::kOffset26;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + bc(offset);
|
| + } else {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + DCHECK(!rs.is(scratch));
|
| + offset = GetOffset(offset, L, bits);
|
| + bgeuc(rs, scratch, offset);
|
| + }
|
| + break;
|
| + case Uless:
|
| + // rs < rt
|
| + if (rs.code() == rt.rm_.reg_code) {
|
| + break; // No code needs to be emitted.
|
| + } else if (rs.is(zero_reg)) {
|
| + bits = OffsetSize::kOffset21;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + bnezc(scratch, offset);
|
| + } else if (IsZero(rt)) {
|
| + break; // No code needs to be emitted.
|
| + } else {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + DCHECK(!rs.is(scratch));
|
| + offset = GetOffset(offset, L, bits);
|
| + bltuc(rs, scratch, offset);
|
| + }
|
| + break;
|
| + case Uless_equal:
|
| + // rs <= rt
|
| + if (rs.code() == rt.rm_.reg_code) {
|
| + bits = OffsetSize::kOffset26;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + bc(offset);
|
| + } else if (rs.is(zero_reg)) {
|
| + bits = OffsetSize::kOffset26;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + bc(offset);
|
| + } else if (IsZero(rt)) {
|
| + bits = OffsetSize::kOffset21;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + beqzc(rs, offset);
|
| + } else {
|
| + bits = OffsetSize::kOffset16;
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + DCHECK(!rs.is(scratch));
|
| + offset = GetOffset(offset, L, bits);
|
| + bgeuc(scratch, rs, offset);
|
| + }
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
| + return true;
|
| +}
|
| +
|
| +
|
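| +// Pre-R6 path: emits classic branches with 16-bit offsets, materializing the
|
| +// condition with Slt/Sltu where needed. Returns false if the target is out of
|
| +// range so the caller can emit a long branch instead.
|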
| +bool MacroAssembler::BranchShortHelper(int16_t offset, Label* L, Condition cond,
|
| + Register rs, const Operand& rt,
|
| + BranchDelaySlot bdslot) {
|
| + if (!is_near(L, OffsetSize::kOffset16)) return false;
|
| +
|
| + Register scratch = at;
|
| + int32_t offset32;
|
| +
|
| + // Be careful to always use shifted_branch_offset only just before the
|
| + // branch instruction, as the location will be remembered for patching the
|
| + // target.
|
| + BlockTrampolinePoolScope block_trampoline_pool(this);
|
| + switch (cond) {
|
| + case cc_always:
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + b(offset32);
|
| + break;
|
| + case eq:
|
| + if (IsZero(rt)) {
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + beq(rs, zero_reg, offset32);
|
| + } else {
|
| + // We don't want any other register but scratch clobbered.
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + beq(rs, scratch, offset32);
|
| + }
|
| + break;
|
| + case ne:
|
| + if (IsZero(rt)) {
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bne(rs, zero_reg, offset32);
|
| + } else {
|
| + // We don't want any other register but scratch clobbered.
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bne(rs, scratch, offset32);
|
| + }
|
| + break;
|
| +
|
| + // Signed comparison.
|
| + case greater:
|
| + if (IsZero(rt)) {
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bgtz(rs, offset32);
|
| + } else {
|
| + Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bne(scratch, zero_reg, offset32);
|
| + }
|
| + break;
|
| + case greater_equal:
|
| + if (IsZero(rt)) {
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bgez(rs, offset32);
|
| + } else {
|
| + Slt(scratch, rs, rt);
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + beq(scratch, zero_reg, offset32);
|
| + }
|
| + break;
|
| + case less:
|
| + if (IsZero(rt)) {
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bltz(rs, offset32);
|
| + } else {
|
| + Slt(scratch, rs, rt);
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bne(scratch, zero_reg, offset32);
|
| + }
|
| + break;
|
| + case less_equal:
|
| + if (IsZero(rt)) {
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + blez(rs, offset32);
|
| + } else {
|
| + Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + beq(scratch, zero_reg, offset32);
|
| + }
|
| + break;
|
| +
|
| + // Unsigned comparison.
|
| + case Ugreater:
|
| + if (IsZero(rt)) {
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bne(rs, zero_reg, offset32);
|
| + } else {
|
| + Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bne(scratch, zero_reg, offset32);
|
| + }
|
| + break;
|
| + case Ugreater_equal:
|
| + if (IsZero(rt)) {
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + b(offset32);
|
| + } else {
|
| + Sltu(scratch, rs, rt);
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + beq(scratch, zero_reg, offset32);
|
| + }
|
| + break;
|
| + case Uless:
|
| + if (IsZero(rt)) {
|
| + return true; // No code needs to be emitted.
|
| + } else {
|
| + Sltu(scratch, rs, rt);
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bne(scratch, zero_reg, offset32);
|
| + }
|
| + break;
|
| + case Uless_equal:
|
| + if (IsZero(rt)) {
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + beq(rs, zero_reg, offset32);
|
| + } else {
|
| + Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
|
| + offset32 = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + beq(scratch, zero_reg, offset32);
|
| + }
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
|
|
| // Emit a nop in the branch delay slot if required.
|
| if (bdslot == PROTECT)
|
| nop();
|
| +
|
| + return true;
|
| }
|
|
|
|
|
| -void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
|
| - const Operand& rt,
|
| - BranchDelaySlot bdslot) {
|
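| +// Validates the condition/operand combination and dispatches to the R6
|
| +// compact-branch helper (MIPS32r6 with a protected delay slot) or the legacy
|
| +// 16-bit helper. Returns true if a short branch was emitted.
|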
| +bool MacroAssembler::BranchShortCheck(int32_t offset, Label* L, Condition cond,
|
| + Register rs, const Operand& rt,
|
| + BranchDelaySlot bdslot) {
|
| BRANCH_ARGS_CHECK(cond, rs, rt);
|
|
|
| - int32_t offset = 0;
|
| - Register r2 = no_reg;
|
| - Register scratch = at;
|
| - if (rt.is_reg()) {
|
| - BlockTrampolinePoolScope block_trampoline_pool(this);
|
| - r2 = rt.rm_;
|
| - // Be careful to always use shifted_branch_offset only just before the
|
| - // branch instruction, as the location will be remember for patching the
|
| - // target.
|
| - switch (cond) {
|
| - case cc_always:
|
| - offset = shifted_branch_offset(L, false);
|
| - b(offset);
|
| - break;
|
| - case eq:
|
| - offset = shifted_branch_offset(L, false);
|
| - beq(rs, r2, offset);
|
| - break;
|
| - case ne:
|
| - offset = shifted_branch_offset(L, false);
|
| - bne(rs, r2, offset);
|
| - break;
|
| - // Signed comparison.
|
| - case greater:
|
| - if (r2.is(zero_reg)) {
|
| - offset = shifted_branch_offset(L, false);
|
| - bgtz(rs, offset);
|
| - } else {
|
| - slt(scratch, r2, rs);
|
| - offset = shifted_branch_offset(L, false);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case greater_equal:
|
| - if (r2.is(zero_reg)) {
|
| - offset = shifted_branch_offset(L, false);
|
| - bgez(rs, offset);
|
| - } else {
|
| - slt(scratch, rs, r2);
|
| - offset = shifted_branch_offset(L, false);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case less:
|
| - if (r2.is(zero_reg)) {
|
| - offset = shifted_branch_offset(L, false);
|
| - bltz(rs, offset);
|
| - } else {
|
| - slt(scratch, rs, r2);
|
| - offset = shifted_branch_offset(L, false);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case less_equal:
|
| - if (r2.is(zero_reg)) {
|
| - offset = shifted_branch_offset(L, false);
|
| - blez(rs, offset);
|
| - } else {
|
| - slt(scratch, r2, rs);
|
| - offset = shifted_branch_offset(L, false);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - // Unsigned comparison.
|
| - case Ugreater:
|
| - if (r2.is(zero_reg)) {
|
| - offset = shifted_branch_offset(L, false);
|
| - bne(rs, zero_reg, offset);
|
| - } else {
|
| - sltu(scratch, r2, rs);
|
| - offset = shifted_branch_offset(L, false);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case Ugreater_equal:
|
| - if (r2.is(zero_reg)) {
|
| - offset = shifted_branch_offset(L, false);
|
| - b(offset);
|
| - } else {
|
| - sltu(scratch, rs, r2);
|
| - offset = shifted_branch_offset(L, false);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case Uless:
|
| - if (r2.is(zero_reg)) {
|
| - // No code needs to be emitted.
|
| - return;
|
| - } else {
|
| - sltu(scratch, rs, r2);
|
| - offset = shifted_branch_offset(L, false);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case Uless_equal:
|
| - if (r2.is(zero_reg)) {
|
| - offset = shifted_branch_offset(L, false);
|
| - beq(rs, zero_reg, offset);
|
| - } else {
|
| - sltu(scratch, r2, rs);
|
| - offset = shifted_branch_offset(L, false);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - default:
|
| - UNREACHABLE();
|
| + if (!L) {
|
| + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
|
| + DCHECK(is_int26(offset));
|
| + return BranchShortHelperR6(offset, nullptr, cond, rs, rt);
|
| + } else {
|
| + DCHECK(is_int16(offset));
|
| + return BranchShortHelper(offset, nullptr, cond, rs, rt, bdslot);
|
| }
|
| } else {
|
| - // Be careful to always use shifted_branch_offset only just before the
|
| - // branch instruction, as the location will be remember for patching the
|
| - // target.
|
| - BlockTrampolinePoolScope block_trampoline_pool(this);
|
| - switch (cond) {
|
| - case cc_always:
|
| - offset = shifted_branch_offset(L, false);
|
| - b(offset);
|
| - break;
|
| - case eq:
|
| - if (rt.imm32_ == 0) {
|
| - offset = shifted_branch_offset(L, false);
|
| - beq(rs, zero_reg, offset);
|
| - } else {
|
| - DCHECK(!scratch.is(rs));
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - offset = shifted_branch_offset(L, false);
|
| - beq(rs, r2, offset);
|
| - }
|
| - break;
|
| - case ne:
|
| - if (rt.imm32_ == 0) {
|
| - offset = shifted_branch_offset(L, false);
|
| - bne(rs, zero_reg, offset);
|
| - } else {
|
| - DCHECK(!scratch.is(rs));
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - offset = shifted_branch_offset(L, false);
|
| - bne(rs, r2, offset);
|
| - }
|
| - break;
|
| - // Signed comparison.
|
| - case greater:
|
| - if (rt.imm32_ == 0) {
|
| - offset = shifted_branch_offset(L, false);
|
| - bgtz(rs, offset);
|
| - } else {
|
| - DCHECK(!scratch.is(rs));
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - slt(scratch, r2, rs);
|
| - offset = shifted_branch_offset(L, false);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case greater_equal:
|
| - if (rt.imm32_ == 0) {
|
| - offset = shifted_branch_offset(L, false);
|
| - bgez(rs, offset);
|
| - } else if (is_int16(rt.imm32_)) {
|
| - slti(scratch, rs, rt.imm32_);
|
| - offset = shifted_branch_offset(L, false);
|
| - beq(scratch, zero_reg, offset);
|
| - } else {
|
| - DCHECK(!scratch.is(rs));
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - slt(scratch, rs, r2);
|
| - offset = shifted_branch_offset(L, false);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case less:
|
| - if (rt.imm32_ == 0) {
|
| - offset = shifted_branch_offset(L, false);
|
| - bltz(rs, offset);
|
| - } else if (is_int16(rt.imm32_)) {
|
| - slti(scratch, rs, rt.imm32_);
|
| - offset = shifted_branch_offset(L, false);
|
| - bne(scratch, zero_reg, offset);
|
| - } else {
|
| - DCHECK(!scratch.is(rs));
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - slt(scratch, rs, r2);
|
| - offset = shifted_branch_offset(L, false);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case less_equal:
|
| - if (rt.imm32_ == 0) {
|
| - offset = shifted_branch_offset(L, false);
|
| - blez(rs, offset);
|
| - } else {
|
| - DCHECK(!scratch.is(rs));
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - slt(scratch, r2, rs);
|
| - offset = shifted_branch_offset(L, false);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - // Unsigned comparison.
|
| - case Ugreater:
|
| - if (rt.imm32_ == 0) {
|
| - offset = shifted_branch_offset(L, false);
|
| - bne(rs, zero_reg, offset);
|
| - } else {
|
| - DCHECK(!scratch.is(rs));
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - sltu(scratch, r2, rs);
|
| - offset = shifted_branch_offset(L, false);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case Ugreater_equal:
|
| - if (rt.imm32_ == 0) {
|
| - offset = shifted_branch_offset(L, false);
|
| - b(offset);
|
| - } else if (is_int16(rt.imm32_)) {
|
| - sltiu(scratch, rs, rt.imm32_);
|
| - offset = shifted_branch_offset(L, false);
|
| - beq(scratch, zero_reg, offset);
|
| - } else {
|
| - DCHECK(!scratch.is(rs));
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - sltu(scratch, rs, r2);
|
| - offset = shifted_branch_offset(L, false);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case Uless:
|
| - if (rt.imm32_ == 0) {
|
| - // No code needs to be emitted.
|
| - return;
|
| - } else if (is_int16(rt.imm32_)) {
|
| - sltiu(scratch, rs, rt.imm32_);
|
| - offset = shifted_branch_offset(L, false);
|
| - bne(scratch, zero_reg, offset);
|
| - } else {
|
| - DCHECK(!scratch.is(rs));
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - sltu(scratch, rs, r2);
|
| - offset = shifted_branch_offset(L, false);
|
| - bne(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - case Uless_equal:
|
| - if (rt.imm32_ == 0) {
|
| - offset = shifted_branch_offset(L, false);
|
| - beq(rs, zero_reg, offset);
|
| - } else {
|
| - DCHECK(!scratch.is(rs));
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - sltu(scratch, r2, rs);
|
| - offset = shifted_branch_offset(L, false);
|
| - beq(scratch, zero_reg, offset);
|
| - }
|
| - break;
|
| - default:
|
| - UNREACHABLE();
|
| + DCHECK(offset == 0);
|
| + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
|
| + return BranchShortHelperR6(0, L, cond, rs, rt);
|
| + } else {
|
| + return BranchShortHelper(0, L, cond, rs, rt, bdslot);
|
| }
|
| }
|
| - // Check that offset could actually hold on an int16_t.
|
| - DCHECK(is_int16(offset));
|
| - // Emit a nop in the branch delay slot if required.
|
| - if (bdslot == PROTECT)
|
| - nop();
|
| + return false;
|
| +}
|
| +
|
| +
|
| +void MacroAssembler::BranchShort(int32_t offset, Condition cond, Register rs,
|
| + const Operand& rt, BranchDelaySlot bdslot) {
|
| + BranchShortCheck(offset, nullptr, cond, rs, rt, bdslot);
|
| }
|
|
|
|
|
| -void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
|
| +void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
|
| + const Operand& rt, BranchDelaySlot bdslot) {
|
| + BranchShortCheck(0, L, cond, rs, rt, bdslot);
|
| +}
|
| +
|
| +
|
| +void MacroAssembler::BranchAndLink(int32_t offset, BranchDelaySlot bdslot) {
|
| BranchAndLinkShort(offset, bdslot);
|
| }
|
|
|
|
|
| -void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
|
| - const Operand& rt,
|
| - BranchDelaySlot bdslot) {
|
| - BranchAndLinkShort(offset, cond, rs, rt, bdslot);
|
| +void MacroAssembler::BranchAndLink(int32_t offset, Condition cond, Register rs,
|
| + const Operand& rt, BranchDelaySlot bdslot) {
|
| + bool is_near = BranchAndLinkShortCheck(offset, nullptr, cond, rs, rt, bdslot);
|
| + DCHECK(is_near);
|
| + USE(is_near);
|
| }
|
|
|
|
|
| void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
|
| if (L->is_bound()) {
|
| - if (is_near(L)) {
|
| + if (is_near_branch(L)) {
|
| BranchAndLinkShort(L, bdslot);
|
| } else {
|
| Jalr(L, bdslot);
|
| @@ -2575,9 +2564,7 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
|
| const Operand& rt,
|
| BranchDelaySlot bdslot) {
|
| if (L->is_bound()) {
|
| - if (is_near(L)) {
|
| - BranchAndLinkShort(L, cond, rs, rt, bdslot);
|
| - } else {
|
| + if (!BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot)) {
|
| Label skip;
|
| Condition neg_cond = NegateCondition(cond);
|
| BranchShort(&skip, neg_cond, rs, rt);
|
| @@ -2592,17 +2579,15 @@ void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
|
| Jalr(L, bdslot);
|
| bind(&skip);
|
| } else {
|
| - BranchAndLinkShort(L, cond, rs, rt, bdslot);
|
| + BranchAndLinkShortCheck(0, L, cond, rs, rt, bdslot);
|
| }
|
| }
|
| }
|
|
|
|
|
| -// We need to use a bgezal or bltzal, but they can't be used directly with the
|
| -// slt instructions. We could use sub or add instead but we would miss overflow
|
| -// cases, so we keep slt and add an intermediate third instruction.
|
| -void MacroAssembler::BranchAndLinkShort(int16_t offset,
|
| - BranchDelaySlot bdslot) {
|
| +void MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
|
| + BranchDelaySlot bdslot) {
|
| + offset = GetOffset(offset, L, OffsetSize::kOffset16);
|
| bal(offset);
|
|
|
| // Emit a nop in the branch delay slot if required.
|
| @@ -2611,371 +2596,303 @@ void MacroAssembler::BranchAndLinkShort(int16_t offset,
|
| }
|
|
|
|
|
| -void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
|
| - Register rs, const Operand& rt,
|
| - BranchDelaySlot bdslot) {
|
| - BRANCH_ARGS_CHECK(cond, rs, rt);
|
| - Register r2 = no_reg;
|
| - Register scratch = at;
|
| +void MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L) {
|
| + offset = GetOffset(offset, L, OffsetSize::kOffset26);
|
| + balc(offset);
|
| +}
|
|
|
| - if (rt.is_reg()) {
|
| - r2 = rt.rm_;
|
| - } else if (cond != cc_always) {
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| - }
|
|
|
| - if (!IsMipsArchVariant(kMips32r6)) {
|
| - BlockTrampolinePoolScope block_trampoline_pool(this);
|
| - switch (cond) {
|
| - case cc_always:
|
| - bal(offset);
|
| - break;
|
| - case eq:
|
| - bne(rs, r2, 2);
|
| - nop();
|
| - bal(offset);
|
| - break;
|
| - case ne:
|
| - beq(rs, r2, 2);
|
| - nop();
|
| - bal(offset);
|
| - break;
|
| -
|
| - // Signed comparison.
|
| - case greater:
|
| - slt(scratch, r2, rs);
|
| - addiu(scratch, scratch, -1);
|
| - bgezal(scratch, offset);
|
| - break;
|
| - case greater_equal:
|
| - slt(scratch, rs, r2);
|
| - addiu(scratch, scratch, -1);
|
| - bltzal(scratch, offset);
|
| - break;
|
| - case less:
|
| - slt(scratch, rs, r2);
|
| - addiu(scratch, scratch, -1);
|
| - bgezal(scratch, offset);
|
| - break;
|
| - case less_equal:
|
| - slt(scratch, r2, rs);
|
| - addiu(scratch, scratch, -1);
|
| - bltzal(scratch, offset);
|
| - break;
|
| -
|
| - // Unsigned comparison.
|
| - case Ugreater:
|
| - sltu(scratch, r2, rs);
|
| - addiu(scratch, scratch, -1);
|
| - bgezal(scratch, offset);
|
| - break;
|
| - case Ugreater_equal:
|
| - sltu(scratch, rs, r2);
|
| - addiu(scratch, scratch, -1);
|
| - bltzal(scratch, offset);
|
| - break;
|
| - case Uless:
|
| - sltu(scratch, rs, r2);
|
| - addiu(scratch, scratch, -1);
|
| - bgezal(scratch, offset);
|
| - break;
|
| - case Uless_equal:
|
| - sltu(scratch, r2, rs);
|
| - addiu(scratch, scratch, -1);
|
| - bltzal(scratch, offset);
|
| - break;
|
| -
|
| - default:
|
| - UNREACHABLE();
|
| - }
|
| +void MacroAssembler::BranchAndLinkShort(int32_t offset,
|
| + BranchDelaySlot bdslot) {
|
| + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
|
| + DCHECK(is_int26(offset));
|
| + BranchAndLinkShortHelperR6(offset, nullptr);
|
| } else {
|
| - BlockTrampolinePoolScope block_trampoline_pool(this);
|
| - switch (cond) {
|
| - case cc_always:
|
| - bal(offset);
|
| - break;
|
| - case eq:
|
| - bne(rs, r2, 2);
|
| - nop();
|
| - bal(offset);
|
| - break;
|
| - case ne:
|
| - beq(rs, r2, 2);
|
| - nop();
|
| - bal(offset);
|
| - break;
|
| -
|
| - // Signed comparison.
|
| - case greater:
|
| - // rs > rt
|
| - slt(scratch, r2, rs);
|
| - beq(scratch, zero_reg, 2);
|
| - nop();
|
| - bal(offset);
|
| - break;
|
| - case greater_equal:
|
| - // rs >= rt
|
| - slt(scratch, rs, r2);
|
| - bne(scratch, zero_reg, 2);
|
| - nop();
|
| - bal(offset);
|
| - break;
|
| - case less:
|
| - // rs < r2
|
| - slt(scratch, rs, r2);
|
| - bne(scratch, zero_reg, 2);
|
| - nop();
|
| - bal(offset);
|
| - break;
|
| - case less_equal:
|
| - // rs <= r2
|
| - slt(scratch, r2, rs);
|
| - bne(scratch, zero_reg, 2);
|
| - nop();
|
| - bal(offset);
|
| - break;
|
| + DCHECK(is_int16(offset));
|
| + BranchAndLinkShortHelper(offset, nullptr, bdslot);
|
| + }
|
| +}
|
|
|
|
|
| - // Unsigned comparison.
|
| - case Ugreater:
|
| - // rs > rt
|
| - sltu(scratch, r2, rs);
|
| - beq(scratch, zero_reg, 2);
|
| - nop();
|
| - bal(offset);
|
| - break;
|
| - case Ugreater_equal:
|
| - // rs >= rt
|
| - sltu(scratch, rs, r2);
|
| - bne(scratch, zero_reg, 2);
|
| - nop();
|
| - bal(offset);
|
| - break;
|
| - case Uless:
|
| - // rs < r2
|
| - sltu(scratch, rs, r2);
|
| - bne(scratch, zero_reg, 2);
|
| - nop();
|
| - bal(offset);
|
| - break;
|
| - case Uless_equal:
|
| - // rs <= r2
|
| - sltu(scratch, r2, rs);
|
| - bne(scratch, zero_reg, 2);
|
| - nop();
|
| - bal(offset);
|
| - break;
|
| - default:
|
| - UNREACHABLE();
|
| - }
|
| +void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
|
| + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
|
| + BranchAndLinkShortHelperR6(0, L);
|
| + } else {
|
| + BranchAndLinkShortHelper(0, L, bdslot);
|
| }
|
| -
|
| - // Emit a nop in the branch delay slot if required.
|
| - if (bdslot == PROTECT)
|
| - nop();
|
| }
|
|
|
|
|
| -void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
|
| - bal(shifted_branch_offset(L, false));
|
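| +// Emits an R6 compact branch-and-link (balc or one of the conditional *alc
|
| +// forms). Returns false if the target is out of range for the offset field.
|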
| +bool MacroAssembler::BranchAndLinkShortHelperR6(int32_t offset, Label* L,
|
| + Condition cond, Register rs,
|
| + const Operand& rt) {
|
| + Register scratch = rs.is(at) ? t8 : at;
|
| + OffsetSize bits = OffsetSize::kOffset16;
|
|
|
| - // Emit a nop in the branch delay slot if required.
|
| - if (bdslot == PROTECT)
|
| - nop();
|
| -}
|
| + BlockTrampolinePoolScope block_trampoline_pool(this);
|
| + DCHECK((cond == cc_always && is_int26(offset)) || is_int16(offset));
|
| + switch (cond) {
|
| + case cc_always:
|
| + bits = OffsetSize::kOffset26;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + balc(offset);
|
| + break;
|
| + case eq:
|
| + if (!is_near(L, bits)) return false;
|
| + Subu(scratch, rs, rt);
|
| + offset = GetOffset(offset, L, bits);
|
| + beqzalc(scratch, offset);
|
| + break;
|
| + case ne:
|
| + if (!is_near(L, bits)) return false;
|
| + Subu(scratch, rs, rt);
|
| + offset = GetOffset(offset, L, bits);
|
| + bnezalc(scratch, offset);
|
| + break;
|
|
|
| + // Signed comparison.
|
| + case greater:
|
| + // rs > rt
|
| + if (rs.code() == rt.rm_.reg_code) {
|
| + break; // No code needs to be emitted.
|
| + } else if (rs.is(zero_reg)) {
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + bltzalc(scratch, offset);
|
| + } else if (IsZero(rt)) {
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + bgtzalc(rs, offset);
|
| + } else {
|
| + if (!is_near(L, bits)) return false;
|
| + Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
|
| + offset = GetOffset(offset, L, bits);
|
| + bnezalc(scratch, offset);
|
| + }
|
| + break;
|
| + case greater_equal:
|
| + // rs >= rt
|
| + if (rs.code() == rt.rm_.reg_code) {
|
| + bits = OffsetSize::kOffset26;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + balc(offset);
|
| + } else if (rs.is(zero_reg)) {
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + blezalc(scratch, offset);
|
| + } else if (IsZero(rt)) {
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + bgezalc(rs, offset);
|
| + } else {
|
| + if (!is_near(L, bits)) return false;
|
| + Slt(scratch, rs, rt);
|
| + offset = GetOffset(offset, L, bits);
|
| + beqzalc(scratch, offset);
|
| + }
|
| + break;
|
| + case less:
|
| + // rs < rt
|
| + if (rs.code() == rt.rm_.reg_code) {
|
| + break; // No code needs to be emitted.
|
| + } else if (rs.is(zero_reg)) {
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + bgtzalc(scratch, offset);
|
| + } else if (IsZero(rt)) {
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + bltzalc(rs, offset);
|
| + } else {
|
| + if (!is_near(L, bits)) return false;
|
| + Slt(scratch, rs, rt);
|
| + offset = GetOffset(offset, L, bits);
|
| + bnezalc(scratch, offset);
|
| + }
|
| + break;
|
| + case less_equal:
|
| + // rs <= rt
|
| + if (rs.code() == rt.rm_.reg_code) {
|
| + bits = OffsetSize::kOffset26;
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + balc(offset);
|
| + } else if (rs.is(zero_reg)) {
|
| + if (!is_near(L, bits)) return false;
|
| + scratch = GetRtAsRegisterHelper(rt, scratch);
|
| + offset = GetOffset(offset, L, bits);
|
| + bgezalc(scratch, offset);
|
| + } else if (IsZero(rt)) {
|
| + if (!is_near(L, bits)) return false;
|
| + offset = GetOffset(offset, L, bits);
|
| + blezalc(rs, offset);
|
| + } else {
|
| + if (!is_near(L, bits)) return false;
|
| + Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
|
| + offset = GetOffset(offset, L, bits);
|
| + beqzalc(scratch, offset);
|
| + }
|
| + break;
|
|
|
| -void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
|
| - const Operand& rt,
|
| - BranchDelaySlot bdslot) {
|
| - BRANCH_ARGS_CHECK(cond, rs, rt);
|
|
|
| - int32_t offset = 0;
|
| - Register r2 = no_reg;
|
| - Register scratch = at;
|
| - if (rt.is_reg()) {
|
| - r2 = rt.rm_;
|
| - } else if (cond != cc_always) {
|
| - r2 = scratch;
|
| - li(r2, rt);
|
| + // Unsigned comparison.
|
| + case Ugreater:
|
| + // rs > rt
|
| + if (!is_near(L, bits)) return false;
|
| + Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
|
| + offset = GetOffset(offset, L, bits);
|
| + bnezalc(scratch, offset);
|
| + break;
|
| + case Ugreater_equal:
|
| + // rs >= rt
|
| + if (!is_near(L, bits)) return false;
|
| + Sltu(scratch, rs, rt);
|
| + offset = GetOffset(offset, L, bits);
|
| + beqzalc(scratch, offset);
|
| + break;
|
| + case Uless:
|
| + // rs < rt
|
| + if (!is_near(L, bits)) return false;
|
| + Sltu(scratch, rs, rt);
|
| + offset = GetOffset(offset, L, bits);
|
| + bnezalc(scratch, offset);
|
| + break;
|
| + case Uless_equal:
|
| + // rs <= rt
|
| + if (!is_near(L, bits)) return false;
|
| + Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
|
| + offset = GetOffset(offset, L, bits);
|
| + beqzalc(scratch, offset);
|
| + break;
|
| + default:
|
| + UNREACHABLE();
|
| }
|
| + return true;
|
| +}
|
|
|
| - if (!IsMipsArchVariant(kMips32r6)) {
|
| - BlockTrampolinePoolScope block_trampoline_pool(this);
|
| - switch (cond) {
|
| - case cc_always:
|
| - offset = shifted_branch_offset(L, false);
|
| - bal(offset);
|
| - break;
|
| - case eq:
|
| - bne(rs, r2, 2);
|
| - nop();
|
| - offset = shifted_branch_offset(L, false);
|
| - bal(offset);
|
| - break;
|
| - case ne:
|
| - beq(rs, r2, 2);
|
| - nop();
|
| - offset = shifted_branch_offset(L, false);
|
| - bal(offset);
|
| - break;
|
| -
|
| - // Signed comparison.
|
| - case greater:
|
| - slt(scratch, r2, rs);
|
| - addiu(scratch, scratch, -1);
|
| - offset = shifted_branch_offset(L, false);
|
| - bgezal(scratch, offset);
|
| - break;
|
| - case greater_equal:
|
| - slt(scratch, rs, r2);
|
| - addiu(scratch, scratch, -1);
|
| - offset = shifted_branch_offset(L, false);
|
| - bltzal(scratch, offset);
|
| - break;
|
| - case less:
|
| - slt(scratch, rs, r2);
|
| - addiu(scratch, scratch, -1);
|
| - offset = shifted_branch_offset(L, false);
|
| - bgezal(scratch, offset);
|
| - break;
|
| - case less_equal:
|
| - slt(scratch, r2, rs);
|
| - addiu(scratch, scratch, -1);
|
| - offset = shifted_branch_offset(L, false);
|
| - bltzal(scratch, offset);
|
| - break;
|
| -
|
| - // Unsigned comparison.
|
| - case Ugreater:
|
| - sltu(scratch, r2, rs);
|
| - addiu(scratch, scratch, -1);
|
| - offset = shifted_branch_offset(L, false);
|
| - bgezal(scratch, offset);
|
| - break;
|
| - case Ugreater_equal:
|
| - sltu(scratch, rs, r2);
|
| - addiu(scratch, scratch, -1);
|
| - offset = shifted_branch_offset(L, false);
|
| - bltzal(scratch, offset);
|
| - break;
|
| - case Uless:
|
| - sltu(scratch, rs, r2);
|
| - addiu(scratch, scratch, -1);
|
| - offset = shifted_branch_offset(L, false);
|
| - bgezal(scratch, offset);
|
| - break;
|
| - case Uless_equal:
|
| - sltu(scratch, r2, rs);
|
| - addiu(scratch, scratch, -1);
|
| - offset = shifted_branch_offset(L, false);
|
| - bltzal(scratch, offset);
|
| - break;
|
| -
|
| - default:
|
| - UNREACHABLE();
|
| - }
|
| - } else {
|
| - BlockTrampolinePoolScope block_trampoline_pool(this);
|
| - switch (cond) {
|
| - case cc_always:
|
| - offset = shifted_branch_offset(L, false);
|
| - bal(offset);
|
| - break;
|
| - case eq:
|
| - bne(rs, r2, 2);
|
| - nop();
|
| - offset = shifted_branch_offset(L, false);
|
| - bal(offset);
|
| - break;
|
| - case ne:
|
| - beq(rs, r2, 2);
|
| - nop();
|
| - offset = shifted_branch_offset(L, false);
|
| - bal(offset);
|
| - break;
|
| -
|
| - // Signed comparison.
|
| - case greater:
|
| - // rs > rt
|
| - slt(scratch, r2, rs);
|
| - beq(scratch, zero_reg, 2);
|
| - nop();
|
| - offset = shifted_branch_offset(L, false);
|
| - bal(offset);
|
| - break;
|
| - case greater_equal:
|
| - // rs >= rt
|
| - slt(scratch, rs, r2);
|
| - bne(scratch, zero_reg, 2);
|
| - nop();
|
| - offset = shifted_branch_offset(L, false);
|
| - bal(offset);
|
| - break;
|
| - case less:
|
| - // rs < r2
|
| - slt(scratch, rs, r2);
|
| - bne(scratch, zero_reg, 2);
|
| - nop();
|
| - offset = shifted_branch_offset(L, false);
|
| - bal(offset);
|
| - break;
|
| - case less_equal:
|
| - // rs <= r2
|
| - slt(scratch, r2, rs);
|
| - bne(scratch, zero_reg, 2);
|
| - nop();
|
| - offset = shifted_branch_offset(L, false);
|
| - bal(offset);
|
| - break;
|
|
|
| +// Pre r6 we need to use a bgezal or bltzal, but they can't be used directly
|
| +// with the slt instructions. We could use sub or add instead but we would miss
|
| +// overflow cases, so we keep slt and add an intermediate third instruction.
|
| +bool MacroAssembler::BranchAndLinkShortHelper(int16_t offset, Label* L,
|
| + Condition cond, Register rs,
|
| + const Operand& rt,
|
| + BranchDelaySlot bdslot) {
|
| + if (!is_near(L, OffsetSize::kOffset16)) return false;
|
|
|
| - // Unsigned comparison.
|
| - case Ugreater:
|
| - // rs > rt
|
| - sltu(scratch, r2, rs);
|
| - beq(scratch, zero_reg, 2);
|
| - nop();
|
| - offset = shifted_branch_offset(L, false);
|
| - bal(offset);
|
| - break;
|
| - case Ugreater_equal:
|
| - // rs >= rt
|
| - sltu(scratch, rs, r2);
|
| - bne(scratch, zero_reg, 2);
|
| - nop();
|
| - offset = shifted_branch_offset(L, false);
|
| - bal(offset);
|
| - break;
|
| - case Uless:
|
| - // rs < r2
|
| - sltu(scratch, rs, r2);
|
| - bne(scratch, zero_reg, 2);
|
| - nop();
|
| - offset = shifted_branch_offset(L, false);
|
| - bal(offset);
|
| - break;
|
| - case Uless_equal:
|
| - // rs <= r2
|
| - sltu(scratch, r2, rs);
|
| - bne(scratch, zero_reg, 2);
|
| - nop();
|
| - offset = shifted_branch_offset(L, false);
|
| - bal(offset);
|
| - break;
|
| + Register scratch = t8;
|
| + BlockTrampolinePoolScope block_trampoline_pool(this);
|
|
|
| - default:
|
| - UNREACHABLE();
|
| - }
|
| - }
|
| + switch (cond) {
|
| + case cc_always:
|
| + offset = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bal(offset);
|
| + break;
|
| + case eq:
|
| + bne(rs, GetRtAsRegisterHelper(rt, scratch), 2);
|
| + nop();
|
| + offset = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bal(offset);
|
| + break;
|
| + case ne:
|
| + beq(rs, GetRtAsRegisterHelper(rt, scratch), 2);
|
| + nop();
|
| + offset = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bal(offset);
|
| + break;
|
| +
|
| + // Signed comparison.
|
| + case greater:
|
| + Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
|
| + addiu(scratch, scratch, -1);
|
| + offset = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bgezal(scratch, offset);
|
| + break;
|
| + case greater_equal:
|
| + Slt(scratch, rs, rt);
|
| + addiu(scratch, scratch, -1);
|
| + offset = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bltzal(scratch, offset);
|
| + break;
|
| + case less:
|
| + Slt(scratch, rs, rt);
|
| + addiu(scratch, scratch, -1);
|
| + offset = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bgezal(scratch, offset);
|
| + break;
|
| + case less_equal:
|
| + Slt(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
|
| + addiu(scratch, scratch, -1);
|
| + offset = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bltzal(scratch, offset);
|
| + break;
|
| +
|
| + // Unsigned comparison.
|
| + case Ugreater:
|
| + Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
|
| + addiu(scratch, scratch, -1);
|
| + offset = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bgezal(scratch, offset);
|
| + break;
|
| + case Ugreater_equal:
|
| + Sltu(scratch, rs, rt);
|
| + addiu(scratch, scratch, -1);
|
| + offset = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bltzal(scratch, offset);
|
| + break;
|
| + case Uless:
|
| + Sltu(scratch, rs, rt);
|
| + addiu(scratch, scratch, -1);
|
| + offset = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bgezal(scratch, offset);
|
| + break;
|
| + case Uless_equal:
|
| + Sltu(scratch, GetRtAsRegisterHelper(rt, scratch), rs);
|
| + addiu(scratch, scratch, -1);
|
| + offset = GetOffset(offset, L, OffsetSize::kOffset16);
|
| + bltzal(scratch, offset);
|
| + break;
|
|
|
| - // Check that offset could actually hold on an int16_t.
|
| - DCHECK(is_int16(offset));
|
| + default:
|
| + UNREACHABLE();
|
| + }
|
|
|
| // Emit a nop in the branch delay slot if required.
|
| if (bdslot == PROTECT)
|
| nop();
|
| +
|
| + return true;
|
| +}
|
| +
|
| +
|
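| +// Branch-and-link counterpart of BranchShortCheck: validates the arguments,
|
| +// picks the R6 or legacy helper and reports whether a short branch-and-link
|
| +// sequence was emitted.
|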
| +bool MacroAssembler::BranchAndLinkShortCheck(int32_t offset, Label* L,
|
| + Condition cond, Register rs,
|
| + const Operand& rt,
|
| + BranchDelaySlot bdslot) {
|
| + BRANCH_ARGS_CHECK(cond, rs, rt);
|
| +
|
| + if (!L) {
|
| + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
|
| + DCHECK(is_int26(offset));
|
| + return BranchAndLinkShortHelperR6(offset, nullptr, cond, rs, rt);
|
| + } else {
|
| + DCHECK(is_int16(offset));
|
| + return BranchAndLinkShortHelper(offset, nullptr, cond, rs, rt, bdslot);
|
| + }
|
| + } else {
|
| + DCHECK(offset == 0);
|
| + if (IsMipsArchVariant(kMips32r6) && bdslot == PROTECT) {
|
| + return BranchAndLinkShortHelperR6(0, L, cond, rs, rt);
|
| + } else {
|
| + return BranchAndLinkShortHelper(0, L, cond, rs, rt, bdslot);
|
| + }
|
| + }
|
| + return false;
|
| }
|
|
|
|
|
| @@ -3065,6 +2982,10 @@ void MacroAssembler::Call(Register target,
|
| Register rs,
|
| const Operand& rt,
|
| BranchDelaySlot bd) {
|
| +#ifdef DEBUG
|
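| + // If the previous instruction is a compact branch, the assembler inserts an
|
| + // extra nop so that this jump does not land in its forbidden slot; allow for
|
| + // that instruction in the size check below.
|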
| + int size = IsPrevInstrCompactBranch() ? kInstrSize : 0;
|
| +#endif
|
| +
|
| BlockTrampolinePoolScope block_trampoline_pool(this);
|
| Label start;
|
| bind(&start);
|
| @@ -3079,8 +3000,10 @@ void MacroAssembler::Call(Register target,
|
| if (bd == PROTECT)
|
| nop();
|
|
|
| - DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
|
| - SizeOfCodeGeneratedSince(&start));
|
| +#ifdef DEBUG
|
| + CHECK_EQ(size + CallSize(target, cond, rs, rt, bd),
|
| + SizeOfCodeGeneratedSince(&start));
|
| +#endif
|
| }
|
|
|
|
|
| @@ -5888,25 +5811,10 @@ void CodePatcher::Emit(Address addr) {
|
| }
|
|
|
|
|
| -void CodePatcher::ChangeBranchCondition(Condition cond) {
|
| - Instr instr = Assembler::instr_at(masm_.pc_);
|
| - DCHECK(Assembler::IsBranch(instr));
|
| - uint32_t opcode = Assembler::GetOpcodeField(instr);
|
| - // Currently only the 'eq' and 'ne' cond values are supported and the simple
|
| - // branch instructions (with opcode being the branch type).
|
| - // There are some special cases (see Assembler::IsBranch()) so extending this
|
| - // would be tricky.
|
| - DCHECK(opcode == BEQ ||
|
| - opcode == BNE ||
|
| - opcode == BLEZ ||
|
| - opcode == BGTZ ||
|
| - opcode == BEQL ||
|
| - opcode == BNEL ||
|
| - opcode == BLEZL ||
|
| - opcode == BGTZL);
|
| - opcode = (cond == eq) ? BEQ : BNE;
|
| - instr = (instr & ~kOpcodeMask) | opcode;
|
| - masm_.emit(instr);
|
| +void CodePatcher::ChangeBranchCondition(Instr current_instr,
|
| + uint32_t new_opcode) {
|
| + current_instr = (current_instr & ~kOpcodeMask) | new_opcode;
|
| + masm_.emit(current_instr);
|
| }
|
|
|
|
|
|
|