Chromium Code Reviews

Unified Diff: src/mips64/assembler-mips64.cc

Issue 371923006: Add mips64 port. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Rebase Created 6 years, 5 months ago
Index: src/mips64/assembler-mips64.cc
diff --git a/src/mips/assembler-mips.cc b/src/mips64/assembler-mips64.cc
similarity index 82%
copy from src/mips/assembler-mips.cc
copy to src/mips64/assembler-mips64.cc
index c63714032c50904a5c1f2e6f746bb523b29e6ac9..2796c95b37dd51bb8458c93db81cb55205d964b7 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips64/assembler-mips64.cc
@@ -35,15 +35,16 @@
#include "src/v8.h"
-#if V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_MIPS64
#include "src/base/cpu.h"
-#include "src/mips/assembler-mips-inl.h"
+#include "src/mips64/assembler-mips64-inl.h"
#include "src/serialize.h"
namespace v8 {
namespace internal {
+
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_FPU_INSTRUCTIONS
// can be defined to enable FPU instructions when building the
@@ -121,14 +122,14 @@ int ToNumber(Register reg) {
5, // a1
6, // a2
7, // a3
- 8, // t0
- 9, // t1
- 10, // t2
- 11, // t3
- 12, // t4
- 13, // t5
- 14, // t6
- 15, // t7
+ 8, // a4
+ 9, // a5
+ 10, // a6
+ 11, // a7
+ 12, // t0
+ 13, // t1
+ 14, // t2
+ 15, // t3
16, // s0
17, // s1
18, // s2
@@ -156,8 +157,8 @@ Register ToRegister(int num) {
zero_reg,
at,
v0, v1,
- a0, a1, a2, a3,
- t0, t1, t2, t3, t4, t5, t6, t7,
+ a0, a1, a2, a3, a4, a5, a6, a7,
+ t0, t1, t2, t3,
s0, s1, s2, s3, s4, s5, s6, s7,
t8, t9,
k0, k1,
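
The renumbering above reflects the MIPS n64 ABI: GPR encodings 8-11 become the extra argument registers a4-a7, t0-t3 move up to 12-15, and everything else keeps its o32 number. A minimal standalone C++ sketch of that mapping (hypothetical names kN64Name/ToNumberSketch, not part of the patch):

#include <cassert>
#include <cstring>

// Name table mirroring the n64 numbering shown in the hunk above.
static const char* kN64Name[32] = {
  "zero_reg", "at", "v0", "v1", "a0", "a1", "a2", "a3",
  "a4", "a5", "a6", "a7", "t0", "t1", "t2", "t3",
  "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
  "t8", "t9", "k0", "k1", "gp", "sp", "fp", "ra"
};

static int ToNumberSketch(const char* name) {
  for (int i = 0; i < 32; i++) {
    if (std::strcmp(kN64Name[i], name) == 0) return i;
  }
  return -1;
}

int main() {
  assert(ToNumberSketch("a4") == 8);   // Encoding 8 was t0 on mips32.
  assert(ToNumberSketch("t0") == 12);  // t0 moves up to 12 (t4's old slot).
  assert(ToNumberSketch("s0") == 16);  // Saved registers are unchanged.
  return 0;
}
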
@@ -222,22 +223,22 @@ Operand::Operand(Handle<Object> handle) {
Object* obj = *handle;
if (obj->IsHeapObject()) {
ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
- imm32_ = reinterpret_cast<intptr_t>(handle.location());
+ imm64_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
} else {
// No relocation needed.
- imm32_ = reinterpret_cast<intptr_t>(obj);
- rmode_ = RelocInfo::NONE32;
+ imm64_ = reinterpret_cast<intptr_t>(obj);
+ rmode_ = RelocInfo::NONE64;
}
}
-MemOperand::MemOperand(Register rm, int32_t offset) : Operand(rm) {
+MemOperand::MemOperand(Register rm, int64_t offset) : Operand(rm) {
offset_ = offset;
}
-MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
+MemOperand::MemOperand(Register rm, int64_t unit, int64_t multiplier,
OffsetAddend offset_addend) : Operand(rm) {
offset_ = unit * multiplier + offset_addend;
}
@@ -247,33 +248,33 @@ MemOperand::MemOperand(Register rm, int32_t unit, int32_t multiplier,
// Specific instructions, constants, and masks.
static const int kNegOffset = 0x00008000;
-// addiu(sp, sp, 4) aka Pop() operation or part of Pop(r)
+// daddiu(sp, sp, 8) aka Pop() operation or part of Pop(r)
// operations as post-increment of sp.
-const Instr kPopInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
+const Instr kPopInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
| (kRegister_sp_Code << kRtShift)
| (kPointerSize & kImm16Mask); // NOLINT
-// addiu(sp, sp, -4) part of Push(r) operation as pre-decrement of sp.
-const Instr kPushInstruction = ADDIU | (kRegister_sp_Code << kRsShift)
+// daddiu(sp, sp, -8) part of Push(r) operation as pre-decrement of sp.
+const Instr kPushInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
| (kRegister_sp_Code << kRtShift)
| (-kPointerSize & kImm16Mask); // NOLINT
-// sw(r, MemOperand(sp, 0))
-const Instr kPushRegPattern = SW | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
-// lw(r, MemOperand(sp, 0))
-const Instr kPopRegPattern = LW | (kRegister_sp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+// sd(r, MemOperand(sp, 0))
+const Instr kPushRegPattern = SD | (kRegister_sp_Code << kRsShift)
+ | (0 & kImm16Mask); // NOLINT
+// ld(r, MemOperand(sp, 0))
+const Instr kPopRegPattern = LD | (kRegister_sp_Code << kRsShift)
+ | (0 & kImm16Mask); // NOLINT
const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+ | (0 & kImm16Mask); // NOLINT
const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (0 & kImm16Mask); // NOLINT
+ | (0 & kImm16Mask); // NOLINT
const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask); // NOLINT
+ | (kNegOffset & kImm16Mask); // NOLINT
const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
- | (kNegOffset & kImm16Mask); // NOLINT
+ | (kNegOffset & kImm16Mask); // NOLINT
// A mask for the Rt register for push, pop, lw, sw instructions.
const Instr kRtMask = kRtFieldMask;
const Instr kLwSwInstrTypeMask = 0xffe00000;
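
With 8-byte pointers, the canonical push/pop patterns above switch from addiu/sw/lw to daddiu/sd/ld and the stack moves in kPointerSize == 8 steps. A small C++ sketch of the 16-bit immediate-field arithmetic involved (constants redefined locally for illustration, not taken from V8):

#include <cassert>
#include <cstdint>

int main() {
  const int kPointerSize = 8;          // 8 bytes per stack slot on mips64.
  const uint32_t kImm16Mask = 0xffff;

  // Push() pre-decrements: daddiu sp, sp, -8. The two's-complement encoding
  // of -8 in the 16-bit immediate field is 0xfff8.
  uint32_t push_imm = static_cast<uint32_t>(-kPointerSize) & kImm16Mask;
  assert(push_imm == 0xfff8);

  // The CPU sign-extends the field, so decoding it gives back -8.
  int32_t decoded = (push_imm & 0x8000)
      ? static_cast<int32_t>(push_imm) - 0x10000
      : static_cast<int32_t>(push_imm);
  assert(decoded == -kPointerSize);

  // Pop() post-increments: daddiu sp, sp, 8.
  assert((kPointerSize & kImm16Mask) == 8u);
  return 0;
}
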
@@ -616,7 +617,7 @@ Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
bool Assembler::IsAddImmediate(Instr instr) {
- return ((instr & kOpcodeMask) == ADDIU);
+ return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
}
@@ -631,7 +632,7 @@ bool Assembler::IsAndImmediate(Instr instr) {
}
-int Assembler::target_at(int32_t pos) {
+int64_t Assembler::target_at(int64_t pos) {
Instr instr = instr_at(pos);
if ((instr & ~kImm16Mask) == 0) {
// Emitted label constant, not part of a branch.
@@ -645,10 +646,9 @@ int Assembler::target_at(int32_t pos) {
// Check we have a branch or jump instruction.
ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
// Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
- // the compiler uses arithmectic shifts for signed integers.
+ // the compiler uses arithmetic shifts for signed integers.
if (IsBranch(instr)) {
int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
-
if (imm18 == kEndOfChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
@@ -658,16 +658,23 @@ int Assembler::target_at(int32_t pos) {
} else if (IsLui(instr)) {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+ Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
ASSERT(IsOri(instr_ori));
- int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
- imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ ASSERT(IsOri(instr_ori2));
+
+ // TODO(plind) create named constants for shift values.
+ int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
+ imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
+ imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
+ // Sign extend address.
+ imm >>= 16;
if (imm == kEndOfJumpChain) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
} else {
- uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
- int32_t delta = instr_address - imm;
+ uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
+ int64_t delta = instr_address - imm;
ASSERT(pos > delta);
return pos - delta;
}
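
A worked example of the decode above, as a standalone C++ sketch (not the V8 code): the 48-bit target is spread over the lui and two ori immediates, and the final arithmetic shift right by 16 restores a sign-extended, canonical 64-bit value.

#include <cassert>
#include <cstdint>

int main() {
  // A canonical address with bit 47 set (purely illustrative value).
  const uint64_t kAddr = UINT64_C(0xffff800012345678);

  // The fields as they would sit in lui, ori and the second ori.
  uint64_t lui_imm  = (kAddr >> 32) & 0xffff;  // bits [47:32]
  uint64_t ori_imm  = (kAddr >> 16) & 0xffff;  // bits [31:16]
  uint64_t ori2_imm =  kAddr        & 0xffff;  // bits [15:0]

  // Reassemble the same way target_at() does.
  uint64_t packed = (lui_imm << 48) | (ori_imm << 32) | (ori2_imm << 16);
  // The signed shift copies bit 47 into bits [63:48]; like the code above,
  // this relies on the compiler using arithmetic shifts for signed integers.
  int64_t imm = static_cast<int64_t>(packed) >> 16;

  assert(static_cast<uint64_t>(imm) == kAddr);
  return 0;
}
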
@@ -677,9 +684,9 @@ int Assembler::target_at(int32_t pos) {
// EndOfChain sentinel is returned directly, not relative to pc or pos.
return kEndOfChain;
} else {
- uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
+ uint64_t instr_address = reinterpret_cast<int64_t>(buffer_ + pos);
instr_address &= kImm28Mask;
- int32_t delta = instr_address - imm28;
+ int64_t delta = instr_address - imm28;
ASSERT(pos > delta);
return pos - delta;
}
@@ -687,7 +694,7 @@ int Assembler::target_at(int32_t pos) {
}
-void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
+void Assembler::target_at_put(int64_t pos, int64_t target_pos) {
Instr instr = instr_at(pos);
if ((instr & ~kImm16Mask) == 0) {
ASSERT(target_pos == kEndOfChain || target_pos >= 0);
@@ -710,19 +717,25 @@ void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
} else if (IsLui(instr)) {
Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+ Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
ASSERT(IsOri(instr_ori));
- uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
+ ASSERT(IsOri(instr_ori2));
+
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
ASSERT((imm & 3) == 0);
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
+ instr_ori2 &= ~kImm16Mask;
instr_at_put(pos + 0 * Assembler::kInstrSize,
- instr_lui | ((imm & kHiMask) >> kLuiShift));
+ instr_lui | ((imm >> 32) & kImm16Mask));
instr_at_put(pos + 1 * Assembler::kInstrSize,
- instr_ori | (imm & kImm16Mask));
+ instr_ori | ((imm >> 16) & kImm16Mask));
+ instr_at_put(pos + 3 * Assembler::kInstrSize,
+ instr_ori2 | (imm & kImm16Mask));
} else {
- uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
+ uint64_t imm28 = reinterpret_cast<uint64_t>(buffer_) + target_pos;
imm28 &= kImm28Mask;
ASSERT((imm28 & 3) == 0);
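
The patch side works in the opposite direction: each instruction word keeps its opcode and register fields, only the 16-bit immediate is cleared and refilled with the matching slice of the new absolute address (the dsll between the two ori instructions has no 16-bit field, which is why the second ori sits at pos + 3 * kInstrSize). A hedged C++ sketch with illustrative encodings (t9 as scratch register, stale immediates made up):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kImm16Mask = 0xffff;
  const uint64_t imm = UINT64_C(0x0000234500406010);  // new absolute target

  // Pretend these are the existing lui/ori/ori words with stale immediates.
  uint32_t instr_lui  = 0x3c190000 | 0x1111;  // lui  t9, 0x1111
  uint32_t instr_ori  = 0x37390000 | 0x2222;  // ori  t9, t9, 0x2222
  uint32_t instr_ori2 = 0x37390000 | 0x3333;  // ori  t9, t9, 0x3333

  uint32_t hi  = static_cast<uint32_t>((imm >> 32) & kImm16Mask);
  uint32_t mid = static_cast<uint32_t>((imm >> 16) & kImm16Mask);
  uint32_t lo  = static_cast<uint32_t>( imm        & kImm16Mask);

  instr_lui  = (instr_lui  & ~kImm16Mask) | hi;
  instr_ori  = (instr_ori  & ~kImm16Mask) | mid;
  instr_ori2 = (instr_ori2 & ~kImm16Mask) | lo;

  assert((instr_lui  & kImm16Mask) == 0x2345);
  assert((instr_ori  & kImm16Mask) == 0x0040);
  assert((instr_ori2 & kImm16Mask) == 0x6010);
  return 0;
}
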
@@ -970,8 +983,8 @@ int32_t Assembler::get_trampoline_entry(int32_t pos) {
}
-uint32_t Assembler::jump_address(Label* L) {
- int32_t target_pos;
+uint64_t Assembler::jump_address(Label* L) {
+ int64_t target_pos;
if (L->is_bound()) {
target_pos = L->pos();
@@ -985,7 +998,7 @@ uint32_t Assembler::jump_address(Label* L) {
}
}
- uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
+ uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
ASSERT((imm & 3) == 0);
return imm;
@@ -1116,11 +1129,11 @@ void Assembler::bne(Register rs, Register rt, int16_t offset) {
}
-void Assembler::j(int32_t target) {
+void Assembler::j(int64_t target) {
#if DEBUG
// Get pc of delay slot.
- uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
+ uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
+ bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
(kImm26Bits + kImmFieldShift)) == 0;
ASSERT(in_range && ((target & 3) == 0));
#endif
@@ -1138,11 +1151,11 @@ void Assembler::jr(Register rs) {
}
-void Assembler::jal(int32_t target) {
+void Assembler::jal(int64_t target) {
#ifdef DEBUG
// Get pc of delay slot.
- uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
+ uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
+ bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
(kImm26Bits + kImmFieldShift)) == 0;
ASSERT(in_range && ((target & 3) == 0));
#endif
@@ -1159,10 +1172,10 @@ void Assembler::jalr(Register rs, Register rd) {
}
-void Assembler::j_or_jr(int32_t target, Register rs) {
+void Assembler::j_or_jr(int64_t target, Register rs) {
// Get pc of delay slot.
- uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
+ uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
+ bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
(kImm26Bits + kImmFieldShift)) == 0;
if (in_range) {
j(target);
@@ -1172,10 +1185,10 @@ void Assembler::j_or_jr(int32_t target, Register rs) {
}
-void Assembler::jal_or_jalr(int32_t target, Register rs) {
+void Assembler::jal_or_jalr(int64_t target, Register rs) {
// Get pc of delay slot.
- uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
- bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
+ uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
+ bool in_range = (ipc ^ static_cast<uint64_t>(target) >>
(kImm26Bits+kImmFieldShift)) == 0;
if (in_range) {
jal(target);
@@ -1219,6 +1232,11 @@ void Assembler::multu(Register rs, Register rt) {
}
+void Assembler::daddiu(Register rd, Register rs, int32_t j) {
+ GenInstrImmediate(DADDIU, rs, rd, j);
+}
+
+
void Assembler::div(Register rs, Register rt) {
GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
}
@@ -1229,6 +1247,36 @@ void Assembler::divu(Register rs, Register rt) {
}
+void Assembler::daddu(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
+}
+
+
+void Assembler::dsubu(Register rd, Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
+}
+
+
+void Assembler::dmult(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
+}
+
+
+void Assembler::dmultu(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
+}
+
+
+void Assembler::ddiv(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
+}
+
+
+void Assembler::ddivu(Register rs, Register rt) {
+ GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
+}
+
+
// Logical.
void Assembler::and_(Register rd, Register rs, Register rt) {
@@ -1311,7 +1359,7 @@ void Assembler::srav(Register rd, Register rt, Register rs) {
void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
// Should be called via MacroAssembler::Ror.
ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
- ASSERT(kArchVariant == kMips32r2);
+ ASSERT(kArchVariant == kMips64r2);
Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
emit(instr);
@@ -1321,21 +1369,84 @@ void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
void Assembler::rotrv(Register rd, Register rt, Register rs) {
// Should be called via MacroAssembler::Ror.
ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
- ASSERT(kArchVariant == kMips32r2);
+ ASSERT(kArchVariant == kMips64r2);
Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
| (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
emit(instr);
}
+void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL);
+}
+
+
+void Assembler::dsllv(Register rd, Register rt, Register rs) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
+}
+
+
+void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL);
+}
+
+
+void Assembler::dsrlv(Register rd, Register rt, Register rs) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
+}
+
+
+void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
+ ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
+ Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
+ | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
+ emit(instr);
+}
+
+
+void Assembler::drotrv(Register rd, Register rt, Register rs) {
+ ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
+ Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+ | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
+ emit(instr);
+}
+
+
+void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA);
+}
+
+
+void Assembler::dsrav(Register rd, Register rt, Register rs) {
+ GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
+}
+
+
+void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL32);
+}
+
+
+void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL32);
+}
+
+
+void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
+ GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA32);
+}
+
+
// ------------Memory-instructions-------------
// Helper for base-reg + offset, when offset is larger than int16.
void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
ASSERT(!src.rm().is(at));
- lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
+ ASSERT(is_int32(src.offset_));
+ daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask);
+ dsll(at, at, kLuiShift);
ori(at, at, src.offset_ & kImm16Mask); // Load 32-bit offset.
- addu(at, at, src.rm()); // Add base register.
+ daddu(at, at, src.rm()); // Add base register.
}
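
The helper above now builds the (at most 32-bit) offset in 'at' with daddiu, a 16-bit left shift and an ori, then adds the base with daddu. The sketch below (hypothetical function MaterializeOffset, not part of the patch) emulates what those three instructions compute and checks that the original signed offset comes back, including negative offsets where daddiu's sign-extended immediate matters.

#include <cassert>
#include <cstdint>

// Emulates the register value produced by:
//   daddiu at, zero_reg, (offset >> 16) & 0xffff   (sign-extends the field)
//   dsll   at, at, 16
//   ori    at, at, offset & 0xffff
static int64_t MaterializeOffset(int32_t offset) {
  const int64_t kImm16Mask = 0xffff;
  // Like the assembler itself, this relies on arithmetic right shifts for
  // signed integers.
  int64_t field = (offset >> 16) & kImm16Mask;
  int64_t at_reg = (field & 0x8000) ? field - 0x10000 : field;  // daddiu
  at_reg = at_reg * 0x10000;                                    // dsll 16
  at_reg |= offset & kImm16Mask;                                // ori
  return at_reg;
}

int main() {
  assert(MaterializeOffset(0x12345678) == 0x12345678);
  assert(MaterializeOffset(-4) == -4);
  assert(MaterializeOffset(-0x70000) == -0x70000);
  return 0;
}
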
@@ -1389,6 +1500,16 @@ void Assembler::lw(Register rd, const MemOperand& rs) {
}
+void Assembler::lwu(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LWU, at, rd, 0); // Equiv to lwu(rd, MemOperand(at, 0));
+ }
+}
+
+
void Assembler::lwl(Register rd, const MemOperand& rs) {
GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
}
@@ -1445,6 +1566,46 @@ void Assembler::lui(Register rd, int32_t j) {
}
+void Assembler::ldl(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::ldr(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::sdl(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::sdr(Register rd, const MemOperand& rs) {
+ GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::ld(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to load.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(LD, at, rd, 0); // Equiv to ld(rd, MemOperand(at, 0));
+ }
+}
+
+
+void Assembler::sd(Register rd, const MemOperand& rs) {
+ if (is_int16(rs.offset_)) {
+ GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
+ } else { // Offset > 16 bits, use multiple instructions to store.
+ LoadRegPlusOffsetToAt(rs);
+ GenInstrImmediate(SD, at, rd, 0); // Equiv to sd(rd, MemOperand(at, 0));
+ }
+}
+
+
// -------------Misc-instructions--------------
// Break / Trap instructions.
@@ -1467,14 +1628,14 @@ void Assembler::break_(uint32_t code, bool break_as_stop) {
void Assembler::stop(const char* msg, uint32_t code) {
ASSERT(code > kMaxWatchpointCode);
ASSERT(code <= kMaxStopCode);
-#if V8_HOST_ARCH_MIPS
+#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
break_(0x54321);
#else // V8_HOST_ARCH_MIPS
- BlockTrampolinePoolFor(2);
+ BlockTrampolinePoolFor(3);
// The Simulator will handle the stop instruction and get the message address.
// On MIPS stop() is just a special kind of break_().
break_(code, true);
- emit(reinterpret_cast<Instr>(msg));
+ emit(reinterpret_cast<uint64_t>(msg));
#endif
}
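
Presumably the blocked trampoline window grows from 2 to 3 slots because the simulator path now emits a 64-bit message pointer, which occupies two 4-byte instruction slots after the break_. A trivial sanity check of that arithmetic (assuming kInstrSize == 4, as on MIPS):

#include <cassert>
#include <cstdint>

int main() {
  const int kInstrSize = 4;                                       // bytes per instruction
  int msg_ptr_slots = static_cast<int>(sizeof(uint64_t)) / kInstrSize;  // 64-bit pointer = 2 slots
  int blocked = 1 /* break_ */ + msg_ptr_slots;
  assert(blocked == 3);  // matches BlockTrampolinePoolFor(3) above
  return 0;
}
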
@@ -1596,7 +1757,7 @@ void Assembler::clz(Register rd, Register rs) {
void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ins.
// Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
- ASSERT(kArchVariant == kMips32r2);
+ ASSERT(kArchVariant == kMips64r2);
GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
}
@@ -1604,7 +1765,7 @@ void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
// Should be called via MacroAssembler::Ext.
// Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
- ASSERT(kArchVariant == kMips32r2);
+ ASSERT(kArchVariant == kMips64r2);
GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
}
@@ -1627,14 +1788,7 @@ void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
- // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
- // load to two 32-bit loads.
- GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
- Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
- Register::kExponentOffset);
+ GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
}
@@ -1644,14 +1798,7 @@ void Assembler::swc1(FPURegister fd, const MemOperand& src) {
void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
- // Workaround for non-8-byte alignment of HeapNumber, convert 64-bit
- // store to two 32-bit stores.
- GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
- Register::kMantissaOffset);
- FPURegister nextfpreg;
- nextfpreg.setcode(fd.code() + 1);
- GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
- Register::kExponentOffset);
+ GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
}
@@ -1660,11 +1807,31 @@ void Assembler::mtc1(Register rt, FPURegister fs) {
}
+void Assembler::mthc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MTHC1, rt, fs, f0);
+}
+
+
+void Assembler::dmtc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, DMTC1, rt, fs, f0);
+}
+
+
void Assembler::mfc1(Register rt, FPURegister fs) {
GenInstrRegister(COP1, MFC1, rt, fs, f0);
}
+void Assembler::mfhc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, MFHC1, rt, fs, f0);
+}
+
+
+void Assembler::dmfc1(Register rt, FPURegister fs) {
+ GenInstrRegister(COP1, DMFC1, rt, fs, f0);
+}
+
+
void Assembler::ctc1(Register rt, FPUControlRegister fs) {
GenInstrRegister(COP1, CTC1, rt, fs);
}
@@ -1703,6 +1870,7 @@ void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
FPURegister ft) {
+ ASSERT(kArchVariant != kLoongson);
GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
}
@@ -1785,25 +1953,25 @@ void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
- ASSERT(kArchVariant == kMips32r2);
+ ASSERT(kArchVariant == kMips64r2);
GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
}
void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
- ASSERT(kArchVariant == kMips32r2);
+ ASSERT(kArchVariant == kMips64r2);
GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
}
void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
- ASSERT(kArchVariant == kMips32r2);
+ ASSERT(kArchVariant == kMips64r2);
GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
}
void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
- ASSERT(kArchVariant == kMips32r2);
+ ASSERT(kArchVariant == kMips64r2);
GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
}
@@ -1844,7 +2012,7 @@ void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
- ASSERT(kArchVariant == kMips32r2);
+ ASSERT(kArchVariant == kMips64r2);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
}
@@ -1860,7 +2028,7 @@ void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
- ASSERT(kArchVariant == kMips32r2);
+ ASSERT(kArchVariant == kMips64r2);
GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
}
@@ -1933,9 +2101,16 @@ int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
if (IsLui(instr)) {
Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
+ Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize);
ASSERT(IsOri(instr_ori));
- int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
- imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
+ ASSERT(IsOri(instr_ori2));
+ // TODO(plind): symbolic names for the shifts.
+ int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
+ imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
+ imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
+ // Sign extend address.
+ imm >>= 16;
+
if (imm == kEndOfJumpChain) {
return 0; // Number of instructions patched.
}
@@ -1944,17 +2119,21 @@ int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
instr_lui &= ~kImm16Mask;
instr_ori &= ~kImm16Mask;
+ instr_ori2 &= ~kImm16Mask;
instr_at_put(pc + 0 * Assembler::kInstrSize,
- instr_lui | ((imm >> kLuiShift) & kImm16Mask));
+ instr_lui | ((imm >> 32) & kImm16Mask));
instr_at_put(pc + 1 * Assembler::kInstrSize,
- instr_ori | (imm & kImm16Mask));
- return 2; // Number of instructions patched.
+ instr_ori | (imm >> 16 & kImm16Mask));
+ instr_at_put(pc + 3 * Assembler::kInstrSize,
+ instr_ori2 | (imm & kImm16Mask));
+ return 4; // Number of instructions patched.
} else {
uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
return 0; // Number of instructions patched.
}
+
imm28 += pc_delta;
imm28 &= kImm28Mask;
ASSERT((imm28 & 3) == 0);
@@ -1990,11 +2169,12 @@ void Assembler::GrowBuffer() {
desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
// Copy the data.
- int pc_delta = desc.buffer - buffer_;
- int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+ intptr_t pc_delta = desc.buffer - buffer_;
+ intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
+ (buffer_ + buffer_size_);
MemMove(desc.buffer, buffer_, desc.instr_size);
- MemMove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
- desc.reloc_size);
+ MemMove(reloc_info_writer.pos() + rc_delta,
+ reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
DeleteArray(buffer_);
@@ -2033,9 +2213,9 @@ void Assembler::dd(uint32_t data) {
void Assembler::emit_code_stub_address(Code* stub) {
CheckBuffer();
- *reinterpret_cast<uint32_t*>(pc_) =
- reinterpret_cast<uint32_t>(stub->instruction_start());
- pc_ += sizeof(uint32_t);
+ *reinterpret_cast<uint64_t*>(pc_) =
+ reinterpret_cast<uint64_t>(stub->instruction_start());
+ pc_ += sizeof(uint64_t);
}
@@ -2105,15 +2285,19 @@ void Assembler::CheckTrampolinePool() {
int pool_start = pc_offset();
for (int i = 0; i < unbound_labels_count_; i++) {
- uint32_t imm32;
- imm32 = jump_address(&after_pool);
+ uint64_t imm64;
+ imm64 = jump_address(&after_pool);
{ BlockGrowBufferScope block_buf_growth(this);
// Buffer growth (and relocation) must be blocked for internal
// references until associated instructions are emitted and available
// to be patched.
RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
- lui(at, (imm32 & kHiMask) >> kLuiShift);
- ori(at, at, (imm32 & kImm16Mask));
+ // TODO(plind): Verify this, presume I cannot use macro-assembler
+ // here.
+ lui(at, (imm64 >> 32) & kImm16Mask);
+ ori(at, at, (imm64 >> 16) & kImm16Mask);
+ dsll(at, at, 16);
+ ori(at, at, imm64 & kImm16Mask);
}
jr(at);
nop();
@@ -2137,15 +2321,24 @@ void Assembler::CheckTrampolinePool() {
Address Assembler::target_address_at(Address pc) {
- Instr instr1 = instr_at(pc);
- Instr instr2 = instr_at(pc + kInstrSize);
- // Interpret 2 instructions generated by li: lui/ori
- if ((GetOpcodeField(instr1) == LUI) && (GetOpcodeField(instr2) == ORI)) {
- // Assemble the 32 bit value.
- return reinterpret_cast<Address>(
- (GetImmediate16(instr1) << 16) | GetImmediate16(instr2));
+ Instr instr0 = instr_at(pc);
+ Instr instr1 = instr_at(pc + 1 * kInstrSize);
+ Instr instr3 = instr_at(pc + 3 * kInstrSize);
+
+ // Interpret 4 instructions for address generated by li: See listing in
+ // Assembler::set_target_address_at() just below.
+ if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
+ (GetOpcodeField(instr3) == ORI)) {
+ // Assemble the 48 bit value.
+ int64_t addr = static_cast<int64_t>(
+ ((uint64_t)(GetImmediate16(instr0)) << 32) |
+ ((uint64_t)(GetImmediate16(instr1)) << 16) |
+ ((uint64_t)(GetImmediate16(instr3))));
+
+ // Sign extend to get canonical address.
+ addr = (addr << 16) >> 16;
+ return reinterpret_cast<Address>(addr);
}
-
// We should never get here, force a bad address if we do.
UNREACHABLE();
return (Address)0x0;
@@ -2161,9 +2354,14 @@ void Assembler::QuietNaN(HeapObject* object) {
}
-// On Mips, a target address is stored in a lui/ori instruction pair, each
-// of which load 16 bits of the 32-bit address to a register.
-// Patching the address must replace both instr, and flush the i-cache.
+// On Mips64, a target address is stored in a 4-instruction sequence:
+// 0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
+// 1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+// 2: dsll(rd, rd, 16);
+// 3: ori(rd, rd, j.imm64_ & kImm16Mask);
+//
+// Patching the address must replace all the lui & ori instructions,
+// and flush the i-cache.
//
// There is an optimization below, which emits a nop when the address
// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
@@ -2171,101 +2369,37 @@ void Assembler::QuietNaN(HeapObject* object) {
void Assembler::set_target_address_at(Address pc,
Address target,
ICacheFlushMode icache_flush_mode) {
- Instr instr2 = instr_at(pc + kInstrSize);
- uint32_t rt_code = GetRtField(instr2);
+// There is an optimization where only 4 instructions are used to load an
+// address in code on MIPS64, because only 48 bits of the address are
+// effectively used. It relies on the fact that the upper bits [63:48] are not
+// used for virtual address translation and have to be set according to the
+// value of bit 47 in order to get a canonical address.
+ Instr instr1 = instr_at(pc + kInstrSize);
+ uint32_t rt_code = GetRt(instr1);
uint32_t* p = reinterpret_cast<uint32_t*>(pc);
- uint32_t itarget = reinterpret_cast<uint32_t>(target);
+ uint64_t itarget = reinterpret_cast<uint64_t>(target);
#ifdef DEBUG
- // Check we have the result from a li macro-instruction, using instr pair.
- Instr instr1 = instr_at(pc);
- CHECK((GetOpcodeField(instr1) == LUI && GetOpcodeField(instr2) == ORI));
+ // Check we have the result from a li macro-instruction.
+ Instr instr0 = instr_at(pc);
+ Instr instr3 = instr_at(pc + kInstrSize * 3);
+ CHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
+ GetOpcodeField(instr3) == ORI));
#endif
- // Must use 2 instructions to insure patchable code => just use lui and ori.
+ // Must use 4 instructions to ensure patchable code.
// lui rt, upper-16.
+ // ori rt, rt, lower-16.
+ // dsll rt, rt, 16.
// ori rt rt, lower-16.
- *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
- *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
-
- // The following code is an optimization for the common case of Call()
- // or Jump() which is load to register, and jump through register:
- // li(t9, address); jalr(t9) (or jr(t9)).
- // If the destination address is in the same 256 MB page as the call, it
- // is faster to do a direct jal, or j, rather than jump thru register, since
- // that lets the cpu pipeline prefetch the target address. However each
- // time the address above is patched, we have to patch the direct jal/j
- // instruction, as well as possibly revert to jalr/jr if we now cross a
- // 256 MB page. Note that with the jal/j instructions, we do not need to
- // load the register, but that code is left, since it makes it easy to
- // revert this process. A further optimization could try replacing the
- // li sequence with nops.
- // This optimization can only be applied if the rt-code from instr2 is the
- // register used for the jalr/jr. Finally, we have to skip 'jr ra', which is
- // mips return. Occasionally this lands after an li().
-
- Instr instr3 = instr_at(pc + 2 * kInstrSize);
- uint32_t ipc = reinterpret_cast<uint32_t>(pc + 3 * kInstrSize);
- bool in_range = ((ipc ^ itarget) >> (kImm26Bits + kImmFieldShift)) == 0;
- uint32_t target_field =
- static_cast<uint32_t>(itarget & kJumpAddrMask) >> kImmFieldShift;
- bool patched_jump = false;
-
-#ifndef ALLOW_JAL_IN_BOUNDARY_REGION
- // This is a workaround to the 24k core E156 bug (affect some 34k cores also).
- // Since the excluded space is only 64KB out of 256MB (0.02 %), we will just
- // apply this workaround for all cores so we don't have to identify the core.
- if (in_range) {
- // The 24k core E156 bug has some very specific requirements, we only check
- // the most simple one: if the address of the delay slot instruction is in
- // the first or last 32 KB of the 256 MB segment.
- uint32_t segment_mask = ((256 * MB) - 1) ^ ((32 * KB) - 1);
- uint32_t ipc_segment_addr = ipc & segment_mask;
- if (ipc_segment_addr == 0 || ipc_segment_addr == segment_mask)
- in_range = false;
- }
-#endif
-
- if (IsJalr(instr3)) {
- // Try to convert JALR to JAL.
- if (in_range && GetRt(instr2) == GetRs(instr3)) {
- *(p+2) = JAL | target_field;
- patched_jump = true;
- }
- } else if (IsJr(instr3)) {
- // Try to convert JR to J, skip returns (jr ra).
- bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
- if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
- *(p+2) = J | target_field;
- patched_jump = true;
- }
- } else if (IsJal(instr3)) {
- if (in_range) {
- // We are patching an already converted JAL.
- *(p+2) = JAL | target_field;
- } else {
- // Patch JAL, but out of range, revert to JALR.
- // JALR rs reg is the rt reg specified in the ORI instruction.
- uint32_t rs_field = GetRt(instr2) << kRsShift;
- uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
- *(p+2) = SPECIAL | rs_field | rd_field | JALR;
- }
- patched_jump = true;
- } else if (IsJ(instr3)) {
- if (in_range) {
- // We are patching an already converted J (jump).
- *(p+2) = J | target_field;
- } else {
- // Trying patch J, but out of range, just go back to JR.
- // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
- uint32_t rs_field = GetRt(instr2) << kRsShift;
- *(p+2) = SPECIAL | rs_field | JR;
- }
- patched_jump = true;
- }
+ *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
+ *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift)
+ | ((itarget >> 16) & kImm16Mask);
+ *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift)
+ | (itarget & kImm16Mask);
if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
- CpuFeatures::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
+ CpuFeatures::FlushICache(pc, 4 * Assembler::kInstrSize);
}
}
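
To see why this 4-instruction form reaches any canonical 48-bit address, the sketch below (hypothetical helper EmulateLiSequence, not V8 code) emulates the lui/ori/dsll/ori sequence described above, assuming the MIPS64 behaviour that lui produces a sign-extended 32-bit result: after the dsll, bits [63:48] hold the sign extension of bit 47, which is exactly what a canonical address requires.

#include <cassert>
#include <cstdint>

static uint64_t EmulateLiSequence(uint64_t target) {
  // lui rt, (target >> 32) & 0xffff  -- 32-bit result, sign-extended to 64.
  int64_t rt = static_cast<int32_t>(((target >> 32) & 0xffff) << 16);
  // ori rt, rt, (target >> 16) & 0xffff
  rt |= (target >> 16) & 0xffff;
  // dsll rt, rt, 16 (shift done on the unsigned pattern to stay portable C++).
  rt = static_cast<int64_t>(static_cast<uint64_t>(rt) << 16);
  // ori rt, rt, target & 0xffff
  rt |= target & 0xffff;
  return static_cast<uint64_t>(rt);
}

int main() {
  // One user-half address (bit 47 clear) and one with bit 47 set.
  assert(EmulateLiSequence(UINT64_C(0x0000234500406010)) ==
         UINT64_C(0x0000234500406010));
  assert(EmulateLiSequence(UINT64_C(0xffff800012345678)) ==
         UINT64_C(0xffff800012345678));
  return 0;
}
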
@@ -2278,7 +2412,7 @@ void Assembler::JumpLabelToJumpRegister(Address pc) {
Instr instr1 = instr_at(pc);
#endif
Instr instr2 = instr_at(pc + 1 * kInstrSize);
- Instr instr3 = instr_at(pc + 2 * kInstrSize);
+ Instr instr3 = instr_at(pc + 6 * kInstrSize);
bool patched = false;
if (IsJal(instr3)) {
@@ -2287,19 +2421,19 @@ void Assembler::JumpLabelToJumpRegister(Address pc) {
uint32_t rs_field = GetRt(instr2) << kRsShift;
uint32_t rd_field = ra.code() << kRdShift; // Return-address (ra) reg.
- *(p+2) = SPECIAL | rs_field | rd_field | JALR;
+ *(p+6) = SPECIAL | rs_field | rd_field | JALR;
patched = true;
} else if (IsJ(instr3)) {
ASSERT(GetOpcodeField(instr1) == LUI);
ASSERT(GetOpcodeField(instr2) == ORI);
uint32_t rs_field = GetRt(instr2) << kRsShift;
- *(p+2) = SPECIAL | rs_field | JR;
+ *(p+6) = SPECIAL | rs_field | JR;
patched = true;
}
if (patched) {
- CpuFeatures::FlushICache(pc+2, sizeof(Address));
+ CpuFeatures::FlushICache(pc+6, sizeof(int32_t));
}
}
@@ -2320,4 +2454,4 @@ void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
} } // namespace v8::internal
-#endif // V8_TARGET_ARCH_MIPS
+#endif // V8_TARGET_ARCH_MIPS64