Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(749)

Unified Diff: src/mips64/macro-assembler-mips64.h

Issue 371923006: Add mips64 port. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Rebase Created 6 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « src/mips64/lithium-mips64.cc ('k') | src/mips64/macro-assembler-mips64.cc » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: src/mips64/macro-assembler-mips64.h
diff --git a/src/mips/macro-assembler-mips.h b/src/mips64/macro-assembler-mips64.h
similarity index 92%
copy from src/mips/macro-assembler-mips.h
copy to src/mips64/macro-assembler-mips64.h
index 8644827f8e9b6bf4cc8f309f52add3e3d805ad8d..4c262f68e514588046caffd5055e35a8998bc278 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips64/macro-assembler-mips64.h
@@ -7,7 +7,7 @@
#include "src/assembler.h"
#include "src/globals.h"
-#include "src/mips/assembler-mips.h"
+#include "src/mips64/assembler-mips64.h"
namespace v8 {
namespace internal {
@@ -61,11 +61,17 @@ enum BranchDelaySlot {
// Flags used for the li macro-assembler function.
enum LiFlags {
// If the constant value can be represented in just 16 bits, then
- // optimize the li to use a single instruction, rather than lui/ori pair.
+ // optimize the li to use a single instruction, rather than lui/ori/dsll
+ // sequence.
OPTIMIZE_SIZE = 0,
- // Always use 2 instructions (lui/ori pair), even if the constant could
- // be loaded with just one, so that this value is patchable later.
- CONSTANT_SIZE = 1
+ // Always use 6 instructions (lui/ori/dsll sequence), even if the constant
+ // could be loaded with just one, so that this value is patchable later.
+ CONSTANT_SIZE = 1,
+ // For address loads only 4 instructions are required. Used to mark
+ // a constant load that will be used as an address without relocation
+ // information. It ensures predictable code size, so specific sites
+ // in code are patchable.
+ ADDRESS_LOAD = 2
};
@@ -106,8 +112,23 @@ inline MemOperand FieldMemOperand(Register object, int offset) {
}
+inline MemOperand UntagSmiMemOperand(Register rm, int offset) {
+ // Assumes that Smis are shifted by 32 bits and little endianness.
+ STATIC_ASSERT(kSmiShift == 32);
+ return MemOperand(rm, offset + (kSmiShift / kBitsPerByte));
+}
+
+
+inline MemOperand UntagSmiFieldMemOperand(Register rm, int offset) {
+ return UntagSmiMemOperand(rm, offset - kHeapObjectTag);
+}
+
+
// Generate a MemOperand for storing arguments 5..N on the stack
// when calling CallCFunction().
+// TODO(plind): Currently ONLY used for O32. Should be fixed for
+// n64 and used in RegExp code, as well as in other places
+// with more than 8 arguments.
inline MemOperand CFunctionArgumentOperand(int index) {
ASSERT(index > kCArgSlotCount);
// Argument 5 takes the slot just past the four Arg-slots.
@@ -227,11 +248,11 @@ class MacroAssembler: public Assembler {
inline void Move(Register dst_low, Register dst_high, FPURegister src) {
mfc1(dst_low, src);
- mfc1(dst_high, FPURegister::from_code(src.code() + 1));
+ mfhc1(dst_high, src);
}
inline void FmoveHigh(Register dst_high, FPURegister src) {
- mfc1(dst_high, FPURegister::from_code(src.code() + 1));
+ mfhc1(dst_high, src);
}
inline void FmoveLow(Register dst_low, FPURegister src) {
@@ -240,7 +261,7 @@ class MacroAssembler: public Assembler {
inline void Move(FPURegister dst, Register src_low, Register src_high) {
mtc1(src_low, dst);
- mtc1(src_high, FPURegister::from_code(dst.code() + 1));
+ mthc1(src_high, dst);
}
// Conditional move.
@@ -546,8 +567,7 @@ class MacroAssembler: public Assembler {
Register scratch2,
Register heap_number_map,
Label* gc_required,
- TaggingMode tagging_mode = TAG_RESULT,
- MutableMode mode = IMMUTABLE);
+ TaggingMode tagging_mode = TAG_RESULT);
void AllocateHeapNumberWithValue(Register result,
FPURegister value,
Register scratch1,
@@ -576,12 +596,19 @@ class MacroAssembler: public Assembler {
}
DEFINE_INSTRUCTION(Addu);
+ DEFINE_INSTRUCTION(Daddu);
DEFINE_INSTRUCTION(Subu);
+ DEFINE_INSTRUCTION(Dsubu);
DEFINE_INSTRUCTION(Mul);
+ DEFINE_INSTRUCTION(Dmul);
DEFINE_INSTRUCTION2(Mult);
+ DEFINE_INSTRUCTION2(Dmult);
DEFINE_INSTRUCTION2(Multu);
+ DEFINE_INSTRUCTION2(Dmultu);
DEFINE_INSTRUCTION2(Div);
+ DEFINE_INSTRUCTION2(Ddiv);
DEFINE_INSTRUCTION2(Divu);
+ DEFINE_INSTRUCTION2(Ddivu);
DEFINE_INSTRUCTION(And);
DEFINE_INSTRUCTION(Or);
@@ -594,6 +621,7 @@ class MacroAssembler: public Assembler {
// MIPS32 R2 instruction macro.
DEFINE_INSTRUCTION(Ror);
+ DEFINE_INSTRUCTION(Dror);
#undef DEFINE_INSTRUCTION
#undef DEFINE_INSTRUCTION2
@@ -608,10 +636,12 @@ class MacroAssembler: public Assembler {
void Ulw(Register rd, const MemOperand& rs);
void Usw(Register rd, const MemOperand& rs);
+ void Uld(Register rd, const MemOperand& rs, Register scratch = at);
+ void Usd(Register rd, const MemOperand& rs, Register scratch = at);
// Load int32 in the rd register.
void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
- inline void li(Register rd, int32_t j, LiFlags mode = OPTIMIZE_SIZE) {
+ inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
li(rd, Operand(j), mode);
}
void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
@@ -626,8 +656,8 @@ class MacroAssembler: public Assembler {
void MultiPushReversedFPU(RegList regs);
void push(Register src) {
- Addu(sp, sp, Operand(-kPointerSize));
- sw(src, MemOperand(sp, 0));
+ Daddu(sp, sp, Operand(-kPointerSize));
+ sd(src, MemOperand(sp, 0));
}
void Push(Register src) { push(src); }
@@ -637,35 +667,38 @@ class MacroAssembler: public Assembler {
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2) {
- Subu(sp, sp, Operand(2 * kPointerSize));
- sw(src1, MemOperand(sp, 1 * kPointerSize));
- sw(src2, MemOperand(sp, 0 * kPointerSize));
+ Dsubu(sp, sp, Operand(2 * kPointerSize));
+ sd(src1, MemOperand(sp, 1 * kPointerSize));
+ sd(src2, MemOperand(sp, 0 * kPointerSize));
}
// Push three registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3) {
- Subu(sp, sp, Operand(3 * kPointerSize));
- sw(src1, MemOperand(sp, 2 * kPointerSize));
- sw(src2, MemOperand(sp, 1 * kPointerSize));
- sw(src3, MemOperand(sp, 0 * kPointerSize));
+ Dsubu(sp, sp, Operand(3 * kPointerSize));
+ sd(src1, MemOperand(sp, 2 * kPointerSize));
+ sd(src2, MemOperand(sp, 1 * kPointerSize));
+ sd(src3, MemOperand(sp, 0 * kPointerSize));
}
// Push four registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Register src3, Register src4) {
- Subu(sp, sp, Operand(4 * kPointerSize));
- sw(src1, MemOperand(sp, 3 * kPointerSize));
- sw(src2, MemOperand(sp, 2 * kPointerSize));
- sw(src3, MemOperand(sp, 1 * kPointerSize));
- sw(src4, MemOperand(sp, 0 * kPointerSize));
+ Dsubu(sp, sp, Operand(4 * kPointerSize));
+ sd(src1, MemOperand(sp, 3 * kPointerSize));
+ sd(src2, MemOperand(sp, 2 * kPointerSize));
+ sd(src3, MemOperand(sp, 1 * kPointerSize));
+ sd(src4, MemOperand(sp, 0 * kPointerSize));
}
void Push(Register src, Condition cond, Register tst1, Register tst2) {
// Since we don't have conditional execution we use a Branch.
Branch(3, cond, tst1, Operand(tst2));
- Subu(sp, sp, Operand(kPointerSize));
- sw(src, MemOperand(sp, 0));
+ Dsubu(sp, sp, Operand(kPointerSize));
+ sd(src, MemOperand(sp, 0));
}
+ void PushRegisterAsTwoSmis(Register src, Register scratch = at);
+ void PopRegisterAsTwoSmis(Register dst, Register scratch = at);
+
// Pops multiple values from the stack and load them in the
// registers specified in regs. Pop order is the opposite as in MultiPush.
void MultiPop(RegList regs);
@@ -675,29 +708,29 @@ class MacroAssembler: public Assembler {
void MultiPopReversedFPU(RegList regs);
void pop(Register dst) {
- lw(dst, MemOperand(sp, 0));
- Addu(sp, sp, Operand(kPointerSize));
+ ld(dst, MemOperand(sp, 0));
+ Daddu(sp, sp, Operand(kPointerSize));
}
void Pop(Register dst) { pop(dst); }
// Pop two registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2) {
ASSERT(!src1.is(src2));
- lw(src2, MemOperand(sp, 0 * kPointerSize));
- lw(src1, MemOperand(sp, 1 * kPointerSize));
- Addu(sp, sp, 2 * kPointerSize);
+ ld(src2, MemOperand(sp, 0 * kPointerSize));
+ ld(src1, MemOperand(sp, 1 * kPointerSize));
+ Daddu(sp, sp, 2 * kPointerSize);
}
// Pop three registers. Pops rightmost register first (from lower address).
void Pop(Register src1, Register src2, Register src3) {
- lw(src3, MemOperand(sp, 0 * kPointerSize));
- lw(src2, MemOperand(sp, 1 * kPointerSize));
- lw(src1, MemOperand(sp, 2 * kPointerSize));
- Addu(sp, sp, 3 * kPointerSize);
+ ld(src3, MemOperand(sp, 0 * kPointerSize));
+ ld(src2, MemOperand(sp, 1 * kPointerSize));
+ ld(src1, MemOperand(sp, 2 * kPointerSize));
+ Daddu(sp, sp, 3 * kPointerSize);
}
void Pop(uint32_t count = 1) {
- Addu(sp, sp, Operand(count * kPointerSize));
+ Daddu(sp, sp, Operand(count * kPointerSize));
}
// Push and pop the registers that can hold pointers, as defined by the
@@ -719,7 +752,7 @@ class MacroAssembler: public Assembler {
// Does not handle errors.
void FlushICache(Register address, unsigned instructions);
- // MIPS32 R2 instruction macro.
+ // MIPS64 R2 instruction macro.
void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
@@ -730,6 +763,14 @@ class MacroAssembler: public Assembler {
void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
+ // Convert double to unsigned long.
+ void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch);
+
+ void Trunc_l_d(FPURegister fd, FPURegister fs);
+ void Round_l_d(FPURegister fd, FPURegister fs);
+ void Floor_l_d(FPURegister fd, FPURegister fs);
+ void Ceil_l_d(FPURegister fd, FPURegister fs);
+
// Convert double to unsigned word.
void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
@@ -738,6 +779,13 @@ class MacroAssembler: public Assembler {
void Round_w_d(FPURegister fd, FPURegister fs);
void Floor_w_d(FPURegister fd, FPURegister fs);
void Ceil_w_d(FPURegister fd, FPURegister fs);
+
+ void Madd_d(FPURegister fd,
+ FPURegister fr,
+ FPURegister fs,
+ FPURegister ft,
+ FPURegister scratch);
+
// Wrapper function for the different cmp/branch types.
void BranchF(Label* target,
Label* nan,
@@ -1069,7 +1117,7 @@ class MacroAssembler: public Assembler {
Condition IsObjectStringType(Register obj,
Register type,
Register result) {
- lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+ ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
And(type, type, Operand(kIsNotStringMask));
ASSERT_EQ(0, kStringTag);
@@ -1224,7 +1272,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// Arguments 1-4 are placed in registers a0 thru a3 respectively.
// Arguments 5..n are stored to stack using following:
- // sw(t0, CFunctionArgumentOperand(5));
+ // sw(a4, CFunctionArgumentOperand(5));
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
@@ -1338,16 +1386,22 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
// -------------------------------------------------------------------------
// Smi utilities.
- void SmiTag(Register reg) {
- Addu(reg, reg, reg);
- }
-
// Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
void SmiTagCheckOverflow(Register reg, Register overflow);
void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
void SmiTag(Register dst, Register src) {
- Addu(dst, src, src);
+ STATIC_ASSERT(kSmiTag == 0);
+ if (SmiValuesAre32Bits()) {
+ STATIC_ASSERT(kSmiShift == 32);
+ dsll32(dst, src, 0);
+ } else {
+ Addu(dst, src, src);
+ }
+ }
+
+ void SmiTag(Register reg) {
+ SmiTag(reg, reg);
}
// Try to convert int32 to smi. If the value is to large, preserve
@@ -1356,23 +1410,62 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
TrySmiTag(reg, reg, scratch, not_a_smi);
}
+
void TrySmiTag(Register dst,
Register src,
Register scratch,
Label* not_a_smi) {
- SmiTagCheckOverflow(at, src, scratch);
- BranchOnOverflow(not_a_smi, scratch);
- mov(dst, at);
+ if (SmiValuesAre32Bits()) {
+ SmiTag(dst, src);
+ } else {
+ SmiTagCheckOverflow(at, src, scratch);
+ BranchOnOverflow(not_a_smi, scratch);
+ mov(dst, at);
+ }
+ }
+
+ void SmiUntag(Register dst, Register src) {
+ if (SmiValuesAre32Bits()) {
+ STATIC_ASSERT(kSmiShift == 32);
+ dsra32(dst, src, 0);
+ } else {
+ sra(dst, src, kSmiTagSize);
+ }
}
void SmiUntag(Register reg) {
- sra(reg, reg, kSmiTagSize);
+ SmiUntag(reg, reg);
}
- void SmiUntag(Register dst, Register src) {
- sra(dst, src, kSmiTagSize);
+ // Left-shifted from int32 equivalent of Smi.
+ void SmiScale(Register dst, Register src, int scale) {
+ if (SmiValuesAre32Bits()) {
+ // The int portion is upper 32-bits of 64-bit word.
+ dsra(dst, src, kSmiShift - scale);
+ } else {
+ ASSERT(scale >= kSmiTagSize);
+ sll(dst, src, scale - kSmiTagSize);
+ }
}
+ // Combine load with untagging or scaling.
+ void SmiLoadUntag(Register dst, MemOperand src);
+
+ void SmiLoadScale(Register dst, MemOperand src, int scale);
+
+ // Returns 2 values: the Smi and a scaled version of the int within the Smi.
+ void SmiLoadWithScale(Register d_smi,
+ Register d_scaled,
+ MemOperand src,
+ int scale);
+
+ // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
+ void SmiLoadUntagWithScale(Register d_int,
+ Register d_scaled,
+ MemOperand src,
+ int scale);
+
+
// Test if the register contains a smi.
inline void SmiTst(Register value, Register scratch) {
And(scratch, value, Operand(kSmiTagMask));
@@ -1382,11 +1475,11 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
}
// Untag the source value into destination and jump if source is a smi.
- // Souce and destination can be the same register.
+ // Source and destination can be the same register.
void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
// Untag the source value into destination and jump if source is not a smi.
- // Souce and destination can be the same register.
+ // Source and destination can be the same register.
void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
// Jump the register contains a smi.
@@ -1510,25 +1603,16 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
template<typename Field>
void DecodeFieldToSmi(Register dst, Register src) {
static const int shift = Field::kShift;
- static const int mask = Field::kMask >> shift << kSmiTagSize;
- STATIC_ASSERT((mask & (0x80000000u >> (kSmiTagSize - 1))) == 0);
- STATIC_ASSERT(kSmiTag == 0);
- if (shift < kSmiTagSize) {
- sll(dst, src, kSmiTagSize - shift);
- And(dst, dst, Operand(mask));
- } else if (shift > kSmiTagSize) {
- srl(dst, src, shift - kSmiTagSize);
- And(dst, dst, Operand(mask));
- } else {
- And(dst, src, Operand(mask));
- }
+ static const int mask = Field::kMask >> shift;
+ dsrl(dst, src, shift);
+ And(dst, dst, Operand(mask));
+ dsll32(dst, dst, 0);
}
template<typename Field>
void DecodeFieldToSmi(Register reg) {
DecodeField<Field>(reg, reg);
}
-
// Generates function and stub prologue code.
void StubPrologue();
void Prologue(bool code_pre_aging);
« no previous file with comments | « src/mips64/lithium-mips64.cc ('k') | src/mips64/macro-assembler-mips64.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698