Index: src/mips/assembler-mips.h
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 7f5b96714d864c62e33413247f6c1654b61934ff..04279f01d912c341c4351c45399bec84fad58e50 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -408,27 +408,36 @@ class Assembler : public AssemblerBase {
   // Note: The same Label can be used for forward and backward branches
   // but it may be bound only once.
   void bind(Label* L);  // Binds an unbound label L to current code position.
+
+  enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };
+
   // Determines if Label is bound and near enough so that branch instruction
   // can be used to reach it, instead of jump instruction.
   bool is_near(Label* L);
+  bool is_near(Label* L, OffsetSize bits);
+  bool is_near_branch(Label* L);
   // Returns the branch offset to the given label from the current code
   // position. Links the label to the current position if it is still unbound.
   // Manages the jump elimination optimization if the second parameter is true.
-  int32_t branch_offset(Label* L, bool jump_elimination_allowed);
-  int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed);
-  int32_t branch_offset21(Label* L, bool jump_elimination_allowed);
-  int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed);
-  int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
-    int32_t o = branch_offset(L, jump_elimination_allowed);
-    DCHECK((o & 3) == 0);  // Assert the offset is aligned.
-    return o >> 2;
-  }
-  int32_t shifted_branch_offset_compact(Label* L,
-                                        bool jump_elimination_allowed) {
-    int32_t o = branch_offset_compact(L, jump_elimination_allowed);
-    DCHECK((o & 3) == 0);  // Assert the offset is aligned.
-    return o >> 2;
+  int32_t branch_offset_helper(Label* L, OffsetSize bits);
+  inline int32_t branch_offset(Label* L) {
+    return branch_offset_helper(L, OffsetSize::kOffset16);
+  }
+  inline int32_t branch_offset21(Label* L) {
+    return branch_offset_helper(L, OffsetSize::kOffset21);
+  }
+  inline int32_t branch_offset26(Label* L) {
+    return branch_offset_helper(L, OffsetSize::kOffset26);
+  }
+  inline int32_t shifted_branch_offset(Label* L) {
+    return branch_offset(L) >> 2;
+  }
+  inline int32_t shifted_branch_offset21(Label* L) {
+    return branch_offset21(L) >> 2;
+  }
+  inline int32_t shifted_branch_offset26(Label* L) {
+    return branch_offset26(L) >> 2;
   }
   uint32_t jump_address(Label* L);
@@ -571,111 +580,111 @@ class Assembler : public AssemblerBase {
   // --------Branch-and-jump-instructions----------
   // We don't use likely variant of instructions.
   void b(int16_t offset);
-  void b(Label* L) { b(branch_offset(L, false)>>2); }
+  inline void b(Label* L) { b(shifted_branch_offset(L)); }
   void bal(int16_t offset);
-  void bal(Label* L) { bal(branch_offset(L, false)>>2); }
+  inline void bal(Label* L) { bal(shifted_branch_offset(L)); }
   void bc(int32_t offset);
-  void bc(Label* L) { bc(branch_offset(L, false) >> 2); }
+  inline void bc(Label* L) { bc(shifted_branch_offset26(L)); }
   void balc(int32_t offset);
-  void balc(Label* L) { balc(branch_offset(L, false) >> 2); }
+  inline void balc(Label* L) { balc(shifted_branch_offset26(L)); }
   void beq(Register rs, Register rt, int16_t offset);
-  void beq(Register rs, Register rt, Label* L) {
-    beq(rs, rt, branch_offset(L, false) >> 2);
+  inline void beq(Register rs, Register rt, Label* L) {
+    beq(rs, rt, shifted_branch_offset(L));
   }
   void bgez(Register rs, int16_t offset);
   void bgezc(Register rt, int16_t offset);
-  void bgezc(Register rt, Label* L) {
-    bgezc(rt, branch_offset_compact(L, false)>>2);
+  inline void bgezc(Register rt, Label* L) {
+    bgezc(rt, shifted_branch_offset(L));
   }
   void bgeuc(Register rs, Register rt, int16_t offset);
-  void bgeuc(Register rs, Register rt, Label* L) {
-    bgeuc(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void bgeuc(Register rs, Register rt, Label* L) {
+    bgeuc(rs, rt, shifted_branch_offset(L));
   }
   void bgec(Register rs, Register rt, int16_t offset);
-  void bgec(Register rs, Register rt, Label* L) {
-    bgec(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void bgec(Register rs, Register rt, Label* L) {
+    bgec(rs, rt, shifted_branch_offset(L));
  }
   void bgezal(Register rs, int16_t offset);
   void bgezalc(Register rt, int16_t offset);
-  void bgezalc(Register rt, Label* L) {
-    bgezalc(rt, branch_offset_compact(L, false)>>2);
+  inline void bgezalc(Register rt, Label* L) {
+    bgezalc(rt, shifted_branch_offset(L));
   }
   void bgezall(Register rs, int16_t offset);
-  void bgezall(Register rs, Label* L) {
-    bgezall(rs, branch_offset(L, false)>>2);
+  inline void bgezall(Register rs, Label* L) {
+    bgezall(rs, branch_offset(L) >> 2);
   }
   void bgtz(Register rs, int16_t offset);
   void bgtzc(Register rt, int16_t offset);
-  void bgtzc(Register rt, Label* L) {
-    bgtzc(rt, branch_offset_compact(L, false)>>2);
+  inline void bgtzc(Register rt, Label* L) {
+    bgtzc(rt, shifted_branch_offset(L));
   }
   void blez(Register rs, int16_t offset);
   void blezc(Register rt, int16_t offset);
-  void blezc(Register rt, Label* L) {
-    blezc(rt, branch_offset_compact(L, false)>>2);
+  inline void blezc(Register rt, Label* L) {
+    blezc(rt, shifted_branch_offset(L));
   }
   void bltz(Register rs, int16_t offset);
   void bltzc(Register rt, int16_t offset);
-  void bltzc(Register rt, Label* L) {
-    bltzc(rt, branch_offset_compact(L, false)>>2);
+  inline void bltzc(Register rt, Label* L) {
+    bltzc(rt, shifted_branch_offset(L));
   }
   void bltuc(Register rs, Register rt, int16_t offset);
-  void bltuc(Register rs, Register rt, Label* L) {
-    bltuc(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void bltuc(Register rs, Register rt, Label* L) {
+    bltuc(rs, rt, shifted_branch_offset(L));
   }
   void bltc(Register rs, Register rt, int16_t offset);
-  void bltc(Register rs, Register rt, Label* L) {
-    bltc(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void bltc(Register rs, Register rt, Label* L) {
+    bltc(rs, rt, shifted_branch_offset(L));
   }
   void bltzal(Register rs, int16_t offset);
   void blezalc(Register rt, int16_t offset);
-  void blezalc(Register rt, Label* L) {
-    blezalc(rt, branch_offset_compact(L, false)>>2);
+  inline void blezalc(Register rt, Label* L) {
+    blezalc(rt, shifted_branch_offset(L));
   }
   void bltzalc(Register rt, int16_t offset);
-  void bltzalc(Register rt, Label* L) {
-    bltzalc(rt, branch_offset_compact(L, false)>>2);
+  inline void bltzalc(Register rt, Label* L) {
+    bltzalc(rt, shifted_branch_offset(L));
   }
   void bgtzalc(Register rt, int16_t offset);
-  void bgtzalc(Register rt, Label* L) {
-    bgtzalc(rt, branch_offset_compact(L, false)>>2);
+  inline void bgtzalc(Register rt, Label* L) {
+    bgtzalc(rt, shifted_branch_offset(L));
   }
   void beqzalc(Register rt, int16_t offset);
-  void beqzalc(Register rt, Label* L) {
-    beqzalc(rt, branch_offset_compact(L, false)>>2);
+  inline void beqzalc(Register rt, Label* L) {
+    beqzalc(rt, shifted_branch_offset(L));
   }
   void beqc(Register rs, Register rt, int16_t offset);
-  void beqc(Register rs, Register rt, Label* L) {
-    beqc(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void beqc(Register rs, Register rt, Label* L) {
+    beqc(rs, rt, shifted_branch_offset(L));
   }
   void beqzc(Register rs, int32_t offset);
-  void beqzc(Register rs, Label* L) {
-    beqzc(rs, branch_offset21_compact(L, false)>>2);
+  inline void beqzc(Register rs, Label* L) {
+    beqzc(rs, shifted_branch_offset21(L));
   }
   void bnezalc(Register rt, int16_t offset);
-  void bnezalc(Register rt, Label* L) {
-    bnezalc(rt, branch_offset_compact(L, false)>>2);
+  inline void bnezalc(Register rt, Label* L) {
+    bnezalc(rt, shifted_branch_offset(L));
   }
   void bnec(Register rs, Register rt, int16_t offset);
-  void bnec(Register rs, Register rt, Label* L) {
-    bnec(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void bnec(Register rs, Register rt, Label* L) {
+    bnec(rs, rt, shifted_branch_offset(L));
   }
   void bnezc(Register rt, int32_t offset);
-  void bnezc(Register rt, Label* L) {
-    bnezc(rt, branch_offset21_compact(L, false)>>2);
+  inline void bnezc(Register rt, Label* L) {
+    bnezc(rt, shifted_branch_offset21(L));
   }
   void bne(Register rs, Register rt, int16_t offset);
-  void bne(Register rs, Register rt, Label* L) {
-    bne(rs, rt, branch_offset(L, false)>>2);
+  inline void bne(Register rs, Register rt, Label* L) {
+    bne(rs, rt, shifted_branch_offset(L));
   }
   void bovc(Register rs, Register rt, int16_t offset);
-  void bovc(Register rs, Register rt, Label* L) {
-    bovc(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void bovc(Register rs, Register rt, Label* L) {
+    bovc(rs, rt, shifted_branch_offset(L));
   }
   void bnvc(Register rs, Register rt, int16_t offset);
-  void bnvc(Register rs, Register rt, Label* L) {
-    bnvc(rs, rt, branch_offset_compact(L, false)>>2);
+  inline void bnvc(Register rs, Register rt, Label* L) {
+    bnvc(rs, rt, shifted_branch_offset(L));
   }
   // Never use the int16_t b(l)cond version with a branch offset
@@ -920,12 +929,12 @@ class Assembler : public AssemblerBase {
   void cmp_d(FPUCondition cond, FPURegister fd, FPURegister fs, FPURegister ft);
   void bc1eqz(int16_t offset, FPURegister ft);
-  void bc1eqz(Label* L, FPURegister ft) {
-    bc1eqz(branch_offset(L, false)>>2, ft);
+  inline void bc1eqz(Label* L, FPURegister ft) {
+    bc1eqz(shifted_branch_offset(L), ft);
   }
   void bc1nez(int16_t offset, FPURegister ft);
-  void bc1nez(Label* L, FPURegister ft) {
-    bc1nez(branch_offset(L, false)>>2, ft);
+  inline void bc1nez(Label* L, FPURegister ft) {
+    bc1nez(shifted_branch_offset(L), ft);
   }
   // Conditions and branches for non MIPSr6.
@@ -935,9 +944,13 @@ class Assembler : public AssemblerBase {
   void c_d(FPUCondition cond, FPURegister ft, FPURegister fs, uint16_t cc = 0);
   void bc1f(int16_t offset, uint16_t cc = 0);
-  void bc1f(Label* L, uint16_t cc = 0) { bc1f(branch_offset(L, false)>>2, cc); }
+  inline void bc1f(Label* L, uint16_t cc = 0) {
+    bc1f(shifted_branch_offset(L), cc);
+  }
   void bc1t(int16_t offset, uint16_t cc = 0);
-  void bc1t(Label* L, uint16_t cc = 0) { bc1t(branch_offset(L, false)>>2, cc); }
+  inline void bc1t(Label* L, uint16_t cc = 0) {
+    bc1t(shifted_branch_offset(L), cc);
+  }
   void fcmp(FPURegister src1, const double src2, FPUCondition cond);
   // Check the code size generated from label to here.
@@ -1056,8 +1069,14 @@ class Assembler : public AssemblerBase {
   // Check if an instruction is a branch of some kind.
   static bool IsBranch(Instr instr);
+  static bool IsBc(Instr instr);
+  static bool IsBzc(Instr instr);
   static bool IsBeq(Instr instr);
   static bool IsBne(Instr instr);
+  static bool IsBeqzc(Instr instr);
+  static bool IsBnezc(Instr instr);
+  static bool IsBeqc(Instr instr);
+  static bool IsBnec(Instr instr);
   static bool IsJump(Instr instr);
   static bool IsJ(Instr instr);
@@ -1179,6 +1198,8 @@ class Assembler : public AssemblerBase {
     return block_buffer_growth_;
   }
+  bool IsPrevInstrCompactBranch() { return prev_instr_compact_branch_; }
+
  private:
   inline static void set_target_internal_reference_encoded_at(Address pc,
                                                               Address target);
@@ -1221,10 +1242,14 @@ class Assembler : public AssemblerBase {
   // The bound position, before this we cannot do instruction elimination.
   int last_bound_pos_;
+  // Readable constants for compact branch handling in emit()
+  enum class CompactBranchType : bool { NO = false, COMPACT_BRANCH = true };
+
   // Code emission.
   inline void CheckBuffer();
   void GrowBuffer();
-  inline void emit(Instr x);
+  inline void emit(Instr x,
+                   CompactBranchType is_compact_branch = CompactBranchType::NO);
   inline void CheckTrampolinePoolQuick(int extra_instructions = 0);
   // Instruction generation.
@@ -1276,21 +1301,22 @@ class Assembler : public AssemblerBase {
                          FPUControlRegister fs,
                          SecondaryField func = NULLSF);
-
-  void GenInstrImmediate(Opcode opcode,
-                         Register rs,
-                         Register rt,
-                         int32_t j);
-  void GenInstrImmediate(Opcode opcode,
-                         Register rs,
-                         SecondaryField SF,
-                         int32_t j);
-  void GenInstrImmediate(Opcode opcode,
-                         Register r1,
-                         FPURegister r2,
-                         int32_t j);
-  void GenInstrImmediate(Opcode opcode, Register rs, int32_t j);
-  void GenInstrImmediate(Opcode opcode, int32_t offset26);
+  void GenInstrImmediate(
+      Opcode opcode, Register rs, Register rt, int32_t j,
+      CompactBranchType is_compact_branch = CompactBranchType::NO);
+  void GenInstrImmediate(
+      Opcode opcode, Register rs, SecondaryField SF, int32_t j,
+      CompactBranchType is_compact_branch = CompactBranchType::NO);
+  void GenInstrImmediate(
+      Opcode opcode, Register r1, FPURegister r2, int32_t j,
+      CompactBranchType is_compact_branch = CompactBranchType::NO);
+  void GenInstrImmediate(
+      Opcode opcode, Register rs, int32_t offset21,
+      CompactBranchType is_compact_branch = CompactBranchType::NO);
+  void GenInstrImmediate(Opcode opcode, Register rs, uint32_t offset21);
+  void GenInstrImmediate(
+      Opcode opcode, int32_t offset26,
+      CompactBranchType is_compact_branch = CompactBranchType::NO);
   void GenInstrJump(Opcode opcode,
@@ -1365,12 +1391,17 @@ class Assembler : public AssemblerBase {
   bool trampoline_emitted_;
   static const int kTrampolineSlotsSize = 4 * kInstrSize;
   static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+  static const int kMaxCompactBranchOffset = (1 << (28 - 1)) - 1;
   static const int kInvalidSlotPos = -1;
   // Internal reference positions, required for unbounded internal reference
   // labels.
   std::set<int> internal_reference_positions_;
+  void EmittedCompactBranchInstruction() { prev_instr_compact_branch_ = true; }
+  void ClearCompactBranchState() { prev_instr_compact_branch_ = false; }
+  bool prev_instr_compact_branch_ = false;
+
   Trampoline trampoline_;
   bool internal_trampoline_exception_;
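
The patch keys every branch-offset computation to an OffsetSize value and introduces kMaxCompactBranchOffset next to kMaxBranchOffset. The snippet below is a minimal, standalone sketch of that range arithmetic; FitsInBranchField is a hypothetical helper written for this note, not the implementation of branch_offset_helper or is_near from the header.

#include <cassert>
#include <cstdint>

enum OffsetSize : int { kOffset26 = 26, kOffset21 = 21, kOffset16 = 16 };

// A MIPS branch stores (byte_offset >> 2) in a signed immediate of `bits` bits,
// so the reachable byte range is roughly +/- 2^(bits + 1). The bound below uses
// the same convention as the header's constants:
//   kOffset16 -> (1 << 17) - 1 == kMaxBranchOffset
//   kOffset26 -> (1 << 27) - 1 == kMaxCompactBranchOffset
inline bool FitsInBranchField(int32_t byte_offset, OffsetSize bits) {
  assert((byte_offset & 3) == 0);  // Branch targets are instruction-aligned.
  int32_t max = (1 << (bits + 1)) - 1;
  int32_t min = -(1 << (bits + 1));
  return byte_offset >= min && byte_offset <= max;
}

int main() {
  assert(FitsInBranchField((1 << 17) - 4, kOffset16));  // near the 16-bit limit
  assert(!FitsInBranchField(1 << 18, kOffset16));       // too far for a 16-bit field
  assert(FitsInBranchField(1 << 18, kOffset26));        // reachable by a compact branch
  return 0;
}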