Chromium Code Reviews

Unified Diff: src/mips/assembler-mips.cc

Issue 458983003: Revert "Reland "MIPS: Add support for arch. revision 6 to mips32 port."" (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 4 months ago
Index: src/mips/assembler-mips.cc
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index d15e4b1e7c92620e03dfef0b7dd1d54095c42702..936a73b5f9e2930682e590a0de934606b523497a 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -98,32 +98,10 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
#ifndef __mips__
// For the simulator build, use FPU.
supported_ |= 1u << FPU;
-#if defined(_MIPS_ARCH_MIPS32R6)
- // FP64 mode is implied on r6.
- supported_ |= 1u << FP64FPU;
-#endif
-#if defined(FPU_MODE_FP64)
- supported_ |= 1u << FP64FPU;
-#endif
#else
// Probe for additional features at runtime.
base::CPU cpu;
if (cpu.has_fpu()) supported_ |= 1u << FPU;
-#if defined(FPU_MODE_FPXX)
- if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
-#elif defined(FPU_MODE_FP64)
- supported_ |= 1u << FP64FPU;
-#endif
-#if defined(_MIPS_ARCH_MIPS32RX)
- if (cpu.architecture() == 6) {
- supported_ |= 1u << MIPSr6;
- } else if (cpu.architecture() == 2) {
- supported_ |= 1u << MIPSr1;
- supported_ |= 1u << MIPSr2;
- } else {
- supported_ |= 1u << MIPSr1;
- }
-#endif
#endif
}
@@ -506,9 +484,7 @@ bool Assembler::IsBranch(Instr instr) {
opcode == BGTZL ||
(opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
rt_field == BLTZAL || rt_field == BGEZAL)) ||
- (opcode == COP1 && rs_field == BC1) || // Coprocessor branch.
- (opcode == COP1 && rs_field == BC1EQZ) ||
- (opcode == COP1 && rs_field == BC1NEZ);
+ (opcode == COP1 && rs_field == BC1); // Coprocessor branch.
}
@@ -553,18 +529,12 @@ bool Assembler::IsJal(Instr instr) {
bool Assembler::IsJr(Instr instr) {
- if (!IsMipsArchVariant(kMips32r6)) {
- return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
- } else {
- return GetOpcodeField(instr) == SPECIAL &&
- GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
- }
+ return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
}
bool Assembler::IsJalr(Instr instr) {
- return GetOpcodeField(instr) == SPECIAL &&
- GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
+ return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
}
@@ -1049,88 +1019,6 @@ int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
}
-int32_t Assembler::branch_offset_compact(Label* L,
- bool jump_elimination_allowed) {
- int32_t target_pos;
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - pc_offset();
- DCHECK((offset & 3) == 0);
- DCHECK(is_int16(offset >> 2));
-
- return offset;
-}
-
-
-int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
- int32_t target_pos;
-
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
- DCHECK((offset & 3) == 0);
- DCHECK(((offset >> 2) & 0xFFE00000) == 0); // Offset is 21bit width.
-
- return offset;
-}
-
-
-int32_t Assembler::branch_offset21_compact(Label* L,
- bool jump_elimination_allowed) {
- int32_t target_pos;
-
- if (L->is_bound()) {
- target_pos = L->pos();
- } else {
- if (L->is_linked()) {
- target_pos = L->pos();
- L->link_to(pc_offset());
- } else {
- L->link_to(pc_offset());
- if (!trampoline_emitted_) {
- unbound_labels_count_++;
- next_buffer_check_ -= kTrampolineSlotsSize;
- }
- return kEndOfChain;
- }
- }
-
- int32_t offset = target_pos - pc_offset();
- DCHECK((offset & 3) == 0);
- DCHECK(((offset >> 2) & 0xFFe00000) == 0); // Offset is 21bit width.
-
- return offset;
-}
-
-
void Assembler::label_at_put(Label* L, int at_offset) {
int target_pos;
if (L->is_bound()) {
@@ -1184,33 +1072,7 @@ void Assembler::bgez(Register rs, int16_t offset) {
}
-void Assembler::bgezc(Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZL, rt, rt, offset);
-}
-
-
-void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BLEZ, rs, rt, offset);
-}
-
-
-void Assembler::bgec(Register rs, Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BLEZL, rs, rt, offset);
-}
-
-
void Assembler::bgezal(Register rs, int16_t offset) {
- DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
@@ -1225,13 +1087,6 @@ void Assembler::bgtz(Register rs, int16_t offset) {
}
-void Assembler::bgtzc(Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZL, zero_reg, rt, offset);
-}
-
-
void Assembler::blez(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(BLEZ, rs, zero_reg, offset);
@@ -1239,38 +1094,6 @@ void Assembler::blez(Register rs, int16_t offset) {
}
-void Assembler::blezc(Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZL, zero_reg, rt, offset);
-}
-
-
-void Assembler::bltzc(Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZL, rt, rt, offset);
-}
-
-
-void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BGTZ, rs, rt, offset);
-}
-
-
-void Assembler::bltc(Register rs, Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(!(rt.is(zero_reg)));
- DCHECK(rs.code() != rt.code());
- GenInstrImmediate(BGTZL, rs, rt, offset);
-}
-
-
void Assembler::bltz(Register rs, int16_t offset) {
BlockTrampolinePoolScope block_trampoline_pool(this);
GenInstrImmediate(REGIMM, rs, BLTZ, offset);
@@ -1279,7 +1102,6 @@ void Assembler::bltz(Register rs, int16_t offset) {
void Assembler::bltzal(Register rs, int16_t offset) {
- DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
BlockTrampolinePoolScope block_trampoline_pool(this);
positions_recorder()->WriteRecordedPositions();
GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
@@ -1294,101 +1116,6 @@ void Assembler::bne(Register rs, Register rt, int16_t offset) {
}
-void Assembler::bovc(Register rs, Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(rs.code() >= rt.code());
- GenInstrImmediate(ADDI, rs, rt, offset);
-}
-
-
-void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- DCHECK(rs.code() >= rt.code());
- GenInstrImmediate(DADDI, rs, rt, offset);
-}
-
-
-void Assembler::blezalc(Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZ, zero_reg, rt, offset);
-}
-
-
-void Assembler::bgezalc(Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BLEZ, rt, rt, offset);
-}
-
-
-void Assembler::bgezall(Register rs, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
-}
-
-
-void Assembler::bltzalc(Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZ, rt, rt, offset);
-}
-
-
-void Assembler::bgtzalc(Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(BGTZ, zero_reg, rt, offset);
-}
-
-
-void Assembler::beqzalc(Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(ADDI, zero_reg, rt, offset);
-}
-
-
-void Assembler::bnezalc(Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rt.is(zero_reg)));
- GenInstrImmediate(DADDI, zero_reg, rt, offset);
-}
-
-
-void Assembler::beqc(Register rs, Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rs.code() < rt.code());
- GenInstrImmediate(ADDI, rs, rt, offset);
-}
-
-
-void Assembler::beqzc(Register rs, int32_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- Instr instr = BEQZC | (rs.code() << kRsShift) | offset;
- emit(instr);
-}
-
-
-void Assembler::bnec(Register rs, Register rt, int16_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(rs.code() < rt.code());
- GenInstrImmediate(DADDI, rs, rt, offset);
-}
-
-
-void Assembler::bnezc(Register rs, int32_t offset) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK(!(rs.is(zero_reg)));
- Instr instr = BNEZC | (rs.code() << kRsShift) | offset;
- emit(instr);
-}
-
-
void Assembler::j(int32_t target) {
#if DEBUG
// Get pc of delay slot.
@@ -1402,16 +1129,12 @@ void Assembler::j(int32_t target) {
void Assembler::jr(Register rs) {
- if (!IsMipsArchVariant(kMips32r6)) {
- BlockTrampolinePoolScope block_trampoline_pool(this);
- if (rs.is(ra)) {
- positions_recorder()->WriteRecordedPositions();
- }
- GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
- BlockTrampolinePoolFor(1); // For associated delay slot.
- } else {
- jalr(rs, zero_reg);
+ BlockTrampolinePoolScope block_trampoline_pool(this);
+ if (rs.is(ra)) {
+ positions_recorder()->WriteRecordedPositions();
}
+ GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+ BlockTrampolinePoolFor(1); // For associated delay slot.
}
@@ -1482,41 +1205,7 @@ void Assembler::subu(Register rd, Register rs, Register rt) {
void Assembler::mul(Register rd, Register rs, Register rt) {
- if (!IsMipsArchVariant(kMips32r6)) {
- GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
- } else {
- GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
- }
-}
-
-
-void Assembler::mulu(Register rd, Register rs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
-}
-
-
-void Assembler::muh(Register rd, Register rs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
-}
-
-
-void Assembler::muhu(Register rd, Register rs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
-}
-
-
-void Assembler::mod(Register rd, Register rs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
-}
-
-
-void Assembler::modu(Register rd, Register rs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
+ GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
}
@@ -1535,23 +1224,11 @@ void Assembler::div(Register rs, Register rt) {
}
-void Assembler::div(Register rd, Register rs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
-}
-
-
void Assembler::divu(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
}
-void Assembler::divu(Register rd, Register rs, Register rt) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
-}
-
-
// Logical.
void Assembler::and_(Register rd, Register rs, Register rt) {
@@ -1634,7 +1311,7 @@ void Assembler::srav(Register rd, Register rt, Register rs) {
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(kArchVariant == kMips32r2);
Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
emit(instr);
@@ -1644,7 +1321,7 @@ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid() );
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(kArchVariant == kMips32r2);
Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
emit(instr);
@@ -1768,14 +1445,6 @@ void Assembler::lui(Register rd, int32_t j) {
}
-void Assembler::aui(Register rs, Register rt, int32_t j) {
- // This instruction uses same opcode as 'lui'. The difference in encoding is
- // 'lui' has zero reg. for rs field.
- DCHECK(is_uint16(j));
- GenInstrImmediate(LUI, rs, rt, j);
-}
-
-
// -------------Misc-instructions--------------
// Break / Trap instructions.
@@ -1919,19 +1588,15 @@ void Assembler::movf(Register rd, Register rs, uint16_t cc) {
// Bit twiddling.
void Assembler::clz(Register rd, Register rs) {
- if (!IsMipsArchVariant(kMips32r6)) {
- // Clz instr requires same GPR number in 'rd' and 'rt' fields.
- GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
- } else {
- GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
- }
+ // Clz instr requires same GPR number in 'rd' and 'rt' fields.
+ GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
}
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins.
// Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK(kArchVariant == kMips32r2);
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
@@ -1939,13 +1604,13 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ext.
// Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
- DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
+ DCHECK(kArchVariant == kMips32r2);
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
void Assembler::pref(int32_t hint, const MemOperand& rs) {
- DCHECK(!IsMipsArchVariant(kLoongson));
+ DCHECK(kArchVariant != kLoongson);
DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
| (rs.offset_);
@@ -1964,20 +1629,12 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// load to two 32-bit loads.
- if (IsFp64Mode()) {
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
- Register::kMantissaOffset);
- GenInstrImmediate(LW, src.rm(), at, src.offset_ +
- Register::kExponentOffset);
- mthc1(at, fd);
- } else {
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
- Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
- Register::kExponentOffset);
- }
+ GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
+ Register::kExponentOffset);
}
@@ -1989,20 +1646,12 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
// Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
// store to two 32-bit stores.
- if (IsFp64Mode()) {
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
- Register::kMantissaOffset);
- mfhc1(at, fd);
- GenInstrImmediate(SW, src.rm(), at, src.offset_ +
- Register::kExponentOffset);
- } else {
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
- Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
- Register::kExponentOffset);
- }
+ GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+ Register::kMantissaOffset);
+ FPURegister nextfpreg;
+ nextfpreg.setcode(fd.code() + 1);
+ GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
+ Register::kExponentOffset);
}
@@ -2011,21 +1660,11 @@ void Assembler::mtc1(Register rt, FPURegister fs) {
}
-void Assembler::mthc1(Register rt, FPURegister fs) {
- GenInstrRegister(COP1, MTHC1, rt, fs, f0);
-}
-
-
void Assembler::mfc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MFC1, rt, fs, f0);
}
-void Assembler::mfhc1(Register rt, FPURegister fs) {
- GenInstrRegister(COP1, MFHC1, rt, fs, f0);
-}
-
-
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
GenInstrRegister(COP1, CTC1, rt, fs);
}
@@ -2146,25 +1785,25 @@ void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(kArchVariant == kMips32r2);
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(kArchVariant == kMips32r2);
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(kArchVariant == kMips32r2);
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(kArchVariant == kMips32r2);
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
@@ -2199,45 +1838,13 @@ void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
}
-void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
- FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK((fmt == D) || (fmt == S));
- GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
-}
-
-
-void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
- FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK((fmt == D) || (fmt == S));
- GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
-}
-
-
-void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
- FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK((fmt == D) || (fmt == S));
- GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
-}
-
-
-void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
- FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK((fmt == D) || (fmt == S));
- GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
-}
-
-
void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
}
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(kArchVariant == kMips32r2);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
@@ -2253,7 +1860,7 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
- DCHECK(IsMipsArchVariant(kMips32r2));
+ DCHECK(kArchVariant == kMips32r2);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
@@ -2263,32 +1870,7 @@ void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
}
-// Conditions for >= MIPSr6.
-void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
- FPURegister fd, FPURegister fs, FPURegister ft) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- DCHECK((fmt & ~(31 << kRsShift)) == 0);
- Instr instr = COP1 | fmt | ft.code() << kFtShift |
- fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
- emit(instr);
-}
-
-
-void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
- emit(instr);
-}
-
-
-void Assembler::bc1nez(int16_t offset, FPURegister ft) {
- DCHECK(IsMipsArchVariant(kMips32r6));
- Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
- emit(instr);
-}
-
-
-// Conditions for < MIPSr6.
+// Conditions.
void Assembler::c(FPUCondition cond, SecondaryField fmt,
FPURegister fs, FPURegister ft, uint16_t cc) {
DCHECK(is_uint3(cc));
@@ -2602,7 +2184,7 @@ void Assembler::set_target_address_at(Address pc,
// lui rt, upper-16.
// ori rt rt, lower-16.
*p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
- *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
+ *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
// The following code is an optimization for the common case of Call()
// or Jump() which is load to register, and jump through register:
@@ -2645,20 +2227,20 @@ void Assembler::set_target_address_at(Address pc,
if (IsJalr(instr3)) {
// Try to convert JALR to JAL.
if (in_range && GetRt(instr2) == GetRs(instr3)) {
- *(p + 2) = JAL | target_field;
+ *(p+2) = JAL | target_field;
patched_jump = true;
}
} else if (IsJr(instr3)) {
// Try to convert JR to J, skip returns (jr ra).
bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
- *(p + 2) = J | target_field;
+ *(p+2) = J | target_field;
patched_jump = true;
}
} else if (IsJal(instr3)) {
if (in_range) {
// We are patching an already converted JAL.
- *(p + 2) = JAL | target_field;
+ *(p+2) = JAL | target_field;
} else {
// Patch JAL, but out of range, revert to JALR.
// JALR rs reg is the rt reg specified in the ORI instruction.
@@ -2670,16 +2252,12 @@ void Assembler::set_target_address_at(Address pc,
} else if (IsJ(instr3)) {
if (in_range) {
// We are patching an already converted J (jump).
- *(p + 2) = J | target_field;
+ *(p+2) = J | target_field;
} else {
// Trying patch J, but out of range, just go back to JR.
// JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
uint32_t rs_field = GetRt(instr2) << kRsShift;
- if (IsMipsArchVariant(kMips32r6)) {
- *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
- } else {
- *(p + 2) = SPECIAL | rs_field | JR;
- }
+ *(p+2) = SPECIAL | rs_field | JR;
}
patched_jump = true;
}
@@ -2707,23 +2285,19 @@ void Assembler::JumpLabelToJumpRegister(Address pc) {
uint32_t rs_field = GetRt(instr2) << kRsShift;
uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
- *(p + 2) = SPECIAL | rs_field | rd_field | JALR;
+ *(p+2) = SPECIAL | rs_field | rd_field | JALR;
patched = true;
} else if (IsJ(instr3)) {
DCHECK(GetOpcodeField(instr1) == LUI);
DCHECK(GetOpcodeField(instr2) == ORI);
uint32_t rs_field = GetRt(instr2) << kRsShift;
- if (IsMipsArchVariant(kMips32r6)) {
- *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
- } else {
- *(p + 2) = SPECIAL | rs_field | JR;
- }
+ *(p+2) = SPECIAL | rs_field | JR;
patched = true;
}
if (patched) {
- CpuFeatures::FlushICache(pc + 2, sizeof(Address));
+ CpuFeatures::FlushICache(pc+2, sizeof(Address));
}
}