Unified Diff: src/mips/assembler-mips.cc

Issue 453043002: MIPS: Add support for arch. revision 6 to mips32 port. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Addressed comments. Created 6 years, 4 months ago
Index: src/mips/assembler-mips.cc
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index 936a73b5f9e2930682e590a0de934606b523497a..c78851eee84f9388765e5e05e27946ad11234b1d 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -98,10 +98,32 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#ifndef __mips__
// For the simulator build, use FPU.
supported_ |= 1u << FPU;
+#if defined(_MIPS_ARCH_MIPS32R6)
+ // FP64 mode is implied on r6.
+ supported_ |= 1u << FP64;
+#endif
+#if defined(FPU_MODE_FP64)
+ supported_ |= 1u << FP64;
+#endif
#else
// Probe for additional features at runtime.
base::CPU cpu;
if (cpu.has_fpu()) supported_ |= 1u << FPU;
+#if defined(FPU_MODE_FPXX)
+ if (cpu.is_fp64_mode()) supported_ |= 1u << FP64;
+#elif defined(FPU_MODE_FP64)
+ supported_ |= 1u << FP64;
+#endif
+#if defined(_MIPS_ARCH_MIPS32RX)
+ if (cpu.architecture() == 6) {
+ supported_ |= 1u << MIPSr6;
+ } else if (cpu.architecture() == 2) {
+ supported_ |= 1u << MIPSr1;
+ supported_ |= 1u << MIPSr2;
+ } else {
+ supported_ |= 1u << MIPSr1;
+ }
+#endif
#endif
}
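
The hunk above accumulates one bit per CPU feature into supported_, choosing between compile-time #if selection (simulator builds and fixed -march configurations) and a runtime base::CPU probe for native builds. A minimal standalone sketch of that bitmask pattern, with illustrative feature names and a hypothetical probe function standing in for base::CPU:

    #include <cstdint>

    enum Feature { FPU, FP64, MIPSr1, MIPSr2, MIPSr6 };
    static unsigned supported_ = 0;

    // Hypothetical stand-in for base::CPU::architecture().
    static int DetectArchRevision() { return 6; }

    static void Probe() {
    #if defined(_MIPS_ARCH_MIPS32R6)
      supported_ |= 1u << FP64;      // FP64 mode is implied on r6.
    #endif
      if (DetectArchRevision() == 6) {
        supported_ |= 1u << MIPSr6;
      } else {
        supported_ |= 1u << MIPSr1;  // Older revisions report at least r1.
      }
    }

    static bool IsSupported(Feature f) { return (supported_ & (1u << f)) != 0; }
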
@@ -484,7 +506,9 @@ bool Assembler::IsBranch(Instr instr) {
opcode == BGTZL ||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) ||
- (opcode == COP1 && rs_field == BC1); // Coprocessor branch.
+ (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
+ (opcode == COP1 && rs_field == BC1EQZ) ||
+ (opcode == COP1 && rs_field == BC1NEZ);
}
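
IsBranch decides from fixed bit fields only: the 6-bit major opcode in bits 31..26 and, for coprocessor branches, the 5-bit rs field in bits 25..21. A sketch of that extraction, assuming the standard MIPS32 field layout (the shift and mask values are spelled out here; the source compares against pre-shifted constants from constants-mips.h):

    #include <cstdint>
    typedef uint32_t Instr;

    inline uint32_t OpcodeOf(Instr i) { return (i >> 26) & 0x3F; }  // bits 31..26
    inline uint32_t RsOf(Instr i)     { return (i >> 21) & 0x1F; }  // bits 25..21

    // COP1 = 0x11; rs selects the branch flavor: BC1 = 0x08 (pre-r6),
    // BC1EQZ = 0x09 and BC1NEZ = 0x0D (r6), which is all the new clauses test.
    inline bool IsCop1Branch(Instr i) {
      if (OpcodeOf(i) != 0x11) return false;
      uint32_t rs = RsOf(i);
      return rs == 0x08 || rs == 0x09 || rs == 0x0D;
    }
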
@@ -529,12 +553,18 @@ bool Assembler::IsJal(Instr instr) {
bool Assembler::IsJr(Instr instr) {
- return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
+ if (!IsMipsArchVariant(kMips32r6)) {
+ return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
+ } else {
+ return GetOpcodeField(instr) == SPECIAL &&
+ GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
+ }
}
bool Assembler::IsJalr(Instr instr) {
- return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
+ return GetOpcodeField(instr) == SPECIAL &&
+ GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
}
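
r6 drops the dedicated JR encoding: an unlinked register jump is written as jalr with rd = zero, so the link address lands in $zero and is discarded, and the rd field alone separates the two predicates above. Continuing the sketch from the previous hunk (SPECIAL opcode 0x00, JALR function code 0x09, rd in bits 15..11):

    inline uint32_t FunctOf(Instr i) { return i & 0x3F; }          // bits 5..0
    inline uint32_t RdOf(Instr i)    { return (i >> 11) & 0x1F; }  // bits 15..11

    inline bool IsR6Jr(Instr i) {    // jalr $zero, rs: a plain jump
      return OpcodeOf(i) == 0x00 && FunctOf(i) == 0x09 && RdOf(i) == 0;
    }
    inline bool IsR6Jalr(Instr i) {  // jalr rd, rs with rd != $zero: a call
      return OpcodeOf(i) == 0x00 && FunctOf(i) == 0x09 && RdOf(i) != 0;
    }
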
@@ -1019,6 +1049,88 @@ int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
}
+int32_t Assembler::branch_offset_compact(Label* L,
+ bool jump_elimination_allowed) {
+ int32_t target_pos;
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - pc_offset();
+ DCHECK((offset & 3) == 0);
+ DCHECK(is_int16(offset >> 2));
+
+ return offset;
+}
+
+
+int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+ DCHECK((offset & 3) == 0);
+ DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21 bits wide.
+
+ return offset;
+}
+
+
+int32_t Assembler::branch_offset21_compact(Label* L,
+ bool jump_elimination_allowed) {
+ int32_t target_pos;
+
+ if (L->is_bound()) {
+ target_pos = L->pos();
+ } else {
+ if (L->is_linked()) {
+ target_pos = L->pos();
+ L->link_to(pc_offset());
+ } else {
+ L->link_to(pc_offset());
+ if (!trampoline_emitted_) {
+ unbound_labels_count_++;
+ next_buffer_check_ -= kTrampolineSlotsSize;
+ }
+ return kEndOfChain;
+ }
+ }
+
+ int32_t offset = target_pos - pc_offset();
+ DCHECK((offset & 3) == 0);
+ DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21 bits wide.
+
+ return offset;
+}
+
+
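
All three helpers share the label bookkeeping of branch_offset and differ only in the final range check. For the 21-bit forms the immediate holds offset >> 2, so a legal offset is word aligned and its scaled value must fit in 21 signed bits. One hedged observation on the DCHECK: masking with 0xFFE00000 after an arithmetic right shift passes only non-negative offsets, because a backward branch leaves the top eleven bits set; a two's-complement range check covers both directions. A sketch:

    #include <cassert>
    #include <cstdint>

    // Signed 21-bit range check, in the style of V8's is_intn helpers.
    inline bool is_int21(int32_t x) { return -(1 << 20) <= x && x < (1 << 20); }

    inline void CheckBranch21(int32_t offset) {
      assert((offset & 3) == 0);      // Word aligned.
      assert(is_int21(offset >> 2));  // Fits the immediate, either direction.
    }
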
void Assembler::label_at_put(Label* L, int at_offset) {
int target_pos;
if (L->is_bound()) {
@@ -1072,7 +1184,33 @@ void Assembler::bgez(Register rs, int16_t offset) {
}
+void Assembler::bgezc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZL, rt, rt, offset);
+}
+
+
+void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BLEZ, rs, rt, offset);
+}
+
+
+void Assembler::bgec(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BLEZL, rs, rt, offset);
+}
+
+
void Assembler::bgezal(Register rs, int16_t offset) {
+ DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
@@ -1087,6 +1225,13 @@ void Assembler::bgtz(Register rs, int16_t offset) {
}
+void Assembler::bgtzc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZL, zero_reg, rt, offset);
+}
+
+
void Assembler::blez(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BLEZ, rs, zero_reg, offset);
@@ -1094,6 +1239,38 @@ void Assembler::blez(Register rs, int16_t offset) {
}
+void Assembler::blezc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZL, zero_reg, rt, offset);
+}
+
+
+void Assembler::bltzc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZL, rt, rt, offset);
+}
+
+
+void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BGTZ, rs, rt, offset);
+}
+
+
+void Assembler::bltc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(!(rt.is(zero_reg)));
+ DCHECK(rs.code() != rt.code());
+ GenInstrImmediate(BGTZL, rs, rt, offset);
+}
+
+
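
None of the compact branches above gets a fresh opcode: r6 reuses the legacy BLEZ/BGTZ/BLEZL/BGTZL encodings and discriminates on the register fields, which is exactly what each emitter's DCHECKs enforce. A sketch of the discrimination for the BLEZL-opcode family, with the field patterns taken from the emitters above (the enum and function names are illustrative):

    #include <cstdint>

    // Same BLEZL opcode; the rs/rt pattern selects the semantics.
    enum class BlezlForm { kLegacyBlezl, kBlezc, kBgezc, kBgec };

    inline BlezlForm ClassifyBlezl(uint32_t rs, uint32_t rt) {
      if (rt == 0)  return BlezlForm::kLegacyBlezl;  // blezl rs, off (pre-r6)
      if (rs == 0)  return BlezlForm::kBlezc;        // branch if rt <= 0
      if (rs == rt) return BlezlForm::kBgezc;        // branch if rt >= 0
      return BlezlForm::kBgec;                       // branch if rs >= rt
    }
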
void Assembler::bltz(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZ, offset);
@@ -1102,6 +1279,7 @@ void Assembler::bltz(Register rs, int16_t offset) {
void Assembler::bltzal(Register rs, int16_t offset) {
+ DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
@@ -1116,6 +1294,101 @@ void Assembler::bne(Register rs, Register rt, int16_t offset) {
}
+void Assembler::bovc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(rs.code() >= rt.code());
+ GenInstrImmediate(ADDI, rs, rt, offset);
+}
+
+
+void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ DCHECK(rs.code() >= rt.code());
+ GenInstrImmediate(DADDI, rs, rt, offset);
+}
+
+
+void Assembler::blezalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZ, zero_reg, rt, offset);
+}
+
+
+void Assembler::bgezalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BLEZ, rt, rt, offset);
+}
+
+
+void Assembler::bgezall(Register rs, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
+}
+
+
+void Assembler::bltzalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZ, rt, rt, offset);
+}
+
+
+void Assembler::bgtzalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(BGTZ, zero_reg, rt, offset);
+}
+
+
+void Assembler::beqzalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(ADDI, zero_reg, rt, offset);
+}
+
+
+void Assembler::bnezalc(Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rt.is(zero_reg)));
+ GenInstrImmediate(DADDI, zero_reg, rt, offset);
+}
+
+
+void Assembler::beqc(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(rs.code() < rt.code());
+ GenInstrImmediate(ADDI, rs, rt, offset);
+}
+
+
+void Assembler::beqzc(Register rs, int32_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ Instr instr = BEQZC | (rs.code() << kRsShift) | offset;
+ emit(instr);
+}
+
+
+void Assembler::bnec(Register rs, Register rt, int16_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(rs.code() < rt.code());
+ GenInstrImmediate(DADDI, rs, rt, offset);
+}
+
+
+void Assembler::bnezc(Register rs, int32_t offset) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK(!(rs.is(zero_reg)));
+ Instr instr = BNEZC | (rs.code() << kRsShift) | offset;
+ emit(instr);
+}
+
+
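
bovc/bnvc and beqc/bnec reuse the ADDI/DADDI opcodes, which r6 freed up, with the register-order DCHECKs (rs.code() >= rt.code(), rs.code() < rt.code()) doing the discrimination. beqzc/bnezc carry 21-bit offsets and are emitted as raw words; a hedged observation there is that offset is or-ed in unmasked, so a negative (backward) offset would spill its sign bits over the rs and opcode fields, whereas the 16-bit emitters mask with kImm16Mask. A sketch of a masked encoding (constant names are illustrative; 0x36 is the r6 POP66 opcode that carries beqzc):

    #include <cstdint>

    constexpr uint32_t kRsShift   = 21;
    constexpr uint32_t kImm21Mask = (1u << 21) - 1;
    constexpr uint32_t kBeqzc     = 0x36u << 26;

    inline uint32_t EncodeBeqzc(uint32_t rs, int32_t offset21) {
      // Mask first so backward offsets cannot clobber the upper fields.
      return kBeqzc | (rs << kRsShift) |
             (static_cast<uint32_t>(offset21) & kImm21Mask);
    }
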
void Assembler::j(int32_t target) {
#if DEBUG
// Get pc of delay slot.
@@ -1129,12 +1402,16 @@ void Assembler::j(int32_t target) {
void Assembler::jr(Register rs) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (rs.is(ra)) {
- positions_recorder()->WriteRecordedPositions();
+ if (!IsMipsArchVariant(kMips32r6)) {
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (rs.is(ra)) {
+ positions_recorder()->WriteRecordedPositions();
+ }
+ GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
+ } else {
+ jalr(rs, zero_reg);
}
- GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
- BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1205,7 +1482,41 @@ void Assembler::subu(Register rd, Register rs, Register rt) {
void Assembler::mul(Register rd, Register rs, Register rt) {
- GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+ } else {
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
+ }
+}
+
+
+void Assembler::mulu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
+}
+
+
+void Assembler::muh(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
+}
+
+
+void Assembler::muhu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
+}
+
+
+void Assembler::mod(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
+}
+
+
+void Assembler::modu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
}
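
r6 retires the HI/LO pair for integer multiply and divide. Pre-r6, mul already wrote rd directly via the SPECIAL2 encoding, but the high half required mult followed by mfhi; on r6 both product halves (and quotient/remainder, in the div/mod overloads of the next hunk) come from plain three-operand, rd-writing instructions. A small C model pinning down what mul and muh return:

    #include <cstdint>

    inline int32_t MulLo(int32_t rs, int32_t rt) {  // r6 mul: low 32 bits
      return static_cast<int32_t>(static_cast<uint32_t>(
          static_cast<int64_t>(rs) * static_cast<int64_t>(rt)));
    }
    inline int32_t MulHi(int32_t rs, int32_t rt) {  // r6 muh: high 32 bits
      return static_cast<int32_t>(
          (static_cast<int64_t>(rs) * static_cast<int64_t>(rt)) >> 32);
    }
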
@@ -1224,11 +1535,23 @@ void Assembler::div(Register rs, Register rt) {
}
+void Assembler::div(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
+}
+
+
void Assembler::divu(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
+void Assembler::divu(Register rd, Register rs, Register rt) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
+}
+
+
// Logical.
void Assembler::and_(Register rd, Register rs, Register rt) {
@@ -1311,7 +1634,7 @@ void Assembler::srav(Register rd, Register rt, Register rs) {
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
emit(instr);
@@ -1321,7 +1644,7 @@ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
emit(instr);
@@ -1445,6 +1768,14 @@ void Assembler::lui(Register rd, int32_t j) {
}
+void Assembler::aui(Register rs, Register rt, int32_t j) {
+ // This instruction uses the same opcode as 'lui'. The difference in
+ // encoding is that 'lui' has the zero register in its rs field.
+ DCHECK(is_uint16(j));
+ GenInstrImmediate(LUI, rs, rt, j);
+}
+
+
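
aui generalizes lui: lui rt, imm materializes imm << 16, while aui rt, rs, imm computes rs + (imm << 16), and the two share an opcode with lui as the rs == zero special case, as the comment above notes. A one-line C model of the semantics (unsigned arithmetic suffices, since only the low 32 bits survive and sign- and zero-extension of the shifted immediate agree modulo 2^32):

    #include <cstdint>

    // rt = rs + (imm << 16), with 32-bit wraparound.
    inline uint32_t Aui(uint32_t rs, uint16_t imm) {
      return rs + (static_cast<uint32_t>(imm) << 16);
    }

    // lui(rt, imm) is then Aui(0, imm).
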
// -------------Misc-instructions--------------
// Break / Trap instructions.
@@ -1588,15 +1919,19 @@ void Assembler::movf(Register rd, Register rs, uint16_t cc) {
// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
- // Clz instr requires same GPR number in 'rd' and 'rt' fields.
- GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+ if (!IsMipsArchVariant(kMips32r6)) {
+ // Clz instr requires the same GPR number in 'rd' and 'rt' fields.
+ GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+ } else {
+ GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
+ }
}
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins.
// Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
@@ -1604,13 +1939,13 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ext.
// Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
void Assembler::pref(int32_t hint, const MemOperand& rs) {
- DCHECK(kArchVariant != kLoongson);
+ DCHECK(!IsMipsArchVariant(kLoongson));
DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
| (rs.offset_);
@@ -1629,12 +1964,20 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// load to two 32-bit loads.
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
- Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
- Register::kExponentOffset);
+ if (IsFp64Mode()) {
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ GenInstrImmediate(LW, src.rm(), at, src.offset_ +
+ Register::kExponentOffset);
+ mthc1(at, fd);
+ } else {
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
+ Register::kExponentOffset);
+ }
}
@@ -1646,12 +1989,20 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// store to two 32-bit stores.
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
- Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
- Register::kExponentOffset);
+ if (IsFp64Mode()) {
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ mfhc1(at, fd);
+ GenInstrImmediate(SW, src.rm(), at, src.offset_ +
+ Register::kExponentOffset);
+ } else {
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
+ Register::kExponentOffset);
+ }
}
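
Both ldc1 and sdc1 hinge on the FPU register model. In FP32 mode a double occupies an even/odd FPR pair, so the 64-bit access splits into two 32-bit lwc1/swc1 words; in FP64 mode every FPR is 64 bits wide and the odd neighbor is no longer the high half, hence the transfer of the upper word through at with mthc1/mfhc1. Register::kMantissaOffset and kExponentOffset hide the endianness of the two-word view, which this small C++ program makes concrete:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main() {
      double d = 1.5;  // bit pattern 0x3FF8000000000000
      uint32_t w[2];
      std::memcpy(w, &d, sizeof d);
      // Little-endian: w[0] is the mantissa word (kMantissaOffset == 0) and
      // w[1] carries sign/exponent (kExponentOffset == 4); big-endian swaps them.
      std::printf("%08x %08x\n", w[0], w[1]);  // prints 00000000 3ff80000 on LE
      return 0;
    }
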
@@ -1660,11 +2011,21 @@ void Assembler::mtc1(Register rt, FPURegister fs) {
}
+void Assembler::mthc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MTHC1, rt, fs, f0);
+}
+
+
void Assembler::mfc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MFC1, rt, fs, f0);
}
+void Assembler::mfhc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MFHC1, rt, fs, f0);
+}
+
+
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
GenInstrRegister(COP1, CTC1, rt, fs);
}
@@ -1785,25 +2146,25 @@ void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
@@ -1838,13 +2199,45 @@ void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
}
+void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
+}
+
+
+void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
+}
+
+
+void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
+}
+
+
+void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
+ FPURegister fs) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt == D) || (fmt == S));
+ GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
+}
+
+
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
@@ -1860,7 +2253,7 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
- DCHECK(kArchVariant == kMips32r2);
+ DCHECK(IsMipsArchVariant(kMips32r2));
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
@@ -1870,7 +2263,32 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
}
-// Conditions.
+// Conditions for >= MIPSr6.
+void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
+ FPURegister fd, FPURegister fs, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ DCHECK((fmt & ~(31 << kRsShift)) == 0);
+ Instr instr = COP1 | fmt | ft.code() << kFtShift |
+ fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
+ emit(instr);
+}
+
+
+void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
+ emit(instr);
+}
+
+
+void Assembler::bc1nez(int16_t offset, FPURegister ft) {
+ DCHECK(IsMipsArchVariant(kMips32r6));
+ Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
+ emit(instr);
+}
+
+
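
This hunk swaps the FP branch model. Pre-r6, c.cond.fmt sets one of eight condition-code bits in the FCSR and bc1t/bc1f test it; r6's cmp.cond.fmt instead writes an all-ones or all-zeros mask into an ordinary FPU destination register, and bc1eqz/bc1nez branch on bit 0 of that register, so no FCSR state is involved. A C model of the r6 semantics:

    #include <cstdint>

    // cmp.lt.d fd, fs, ft: fd receives all ones or all zeros.
    inline uint64_t CmpLtD(double fs, double ft) { return fs < ft ? ~0ull : 0ull; }

    // bc1eqz / bc1nez inspect only bit 0 of ft.
    inline bool Bc1eqzTaken(uint64_t ft) { return (ft & 1) == 0; }
    inline bool Bc1nezTaken(uint64_t ft) { return (ft & 1) != 0; }
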
+// Conditions for < MIPSr6.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
DCHECK(is_uint3(cc));
@@ -2184,7 +2602,7 @@ void Assembler::set_target_address_at(Address pc,
// lui rt, upper-16.
// ori rt rt, lower-16.
*p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
- *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
+ *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
// The following code is an optimization for the common case of Call()
// or Jump() which is load to register, and jump through register:
@@ -2227,20 +2645,20 @@ void Assembler::set_target_address_at(Address pc,
if (IsJalr(instr3)) {
// Try to convert JALR to JAL.
if (in_range && GetRt(instr2) == GetRs(instr3)) {
- *(p+2) = JAL | target_field;
+ *(p + 2) = JAL | target_field;
patched_jump = true;
}
} else if (IsJr(instr3)) {
// Try to convert JR to J, skip returns (jr ra).
bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
- *(p+2) = J | target_field;
+ *(p + 2) = J | target_field;
patched_jump = true;
}
} else if (IsJal(instr3)) {
if (in_range) {
// We are patching an already converted JAL.
- *(p+2) = JAL | target_field;
+ *(p + 2) = JAL | target_field;
} else {
// Patch JAL, but out of range, revert to JALR.
// JALR rs reg is the rt reg specified in the ORI instruction.
@@ -2252,12 +2670,16 @@ void Assembler::set_target_address_at(Address pc,
} else if (IsJ(instr3)) {
if (in_range) {
// We are patching an already converted J (jump).
- *(p+2) = J | target_field;
+ *(p + 2) = J | target_field;
} else {
// Trying patch J, but out of range, just go back to JR.
// JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
uint32_t rs_field = GetRt(instr2) << kRsShift;
- *(p+2) = SPECIAL | rs_field | JR;
+ if (IsMipsArchVariant(kMips32r6)) {
+ *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
+ } else {
+ *(p + 2) = SPECIAL | rs_field | JR;
+ }
}
patched_jump = true;
}
@@ -2285,19 +2707,23 @@ void Assembler::JumpLabelToJumpRegister(Address pc) {
uint32_t rs_field = GetRt(instr2) << kRsShift;
uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
- *(p+2) = SPECIAL | rs_field | rd_field | JALR;
+ *(p + 2) = SPECIAL | rs_field | rd_field | JALR;
patched = true;
} else if (IsJ(instr3)) {
DCHECK(GetOpcodeField(instr1) == LUI);
DCHECK(GetOpcodeField(instr2) == ORI);
uint32_t rs_field = GetRt(instr2) << kRsShift;
- *(p+2) = SPECIAL | rs_field | JR;
+ if (IsMipsArchVariant(kMips32r6)) {
+ *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
+ } else {
+ *(p + 2) = SPECIAL | rs_field | JR;
+ }
patched = true;
}
if (patched) {
- CpuFeatures::FlushICache(pc+2, sizeof(Address));
+ CpuFeatures::FlushICache(pc + 2, sizeof(Address));
}
}