Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(781)

Unified Diff: src/mips64/macro-assembler-mips64.cc

Issue 1334793004: MIPS64: Add big-endian support for mips64. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Rebase ToT. Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « src/mips64/macro-assembler-mips64.h ('k') | src/mips64/simulator-mips64.h » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
Index: src/mips64/macro-assembler-mips64.cc
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
index 1a2829db8ef42580a5fedd2cf4d731b386dd3483..26229c9d8725c55cdf750f3cd3dbda3c827038db 100644
--- a/src/mips64/macro-assembler-mips64.cc
+++ b/src/mips64/macro-assembler-mips64.cc
@@ -1209,7 +1209,24 @@ void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
// Assert fail if the offset from start of object IS actually aligned.
// ONLY use with known misalignment, since there is performance cost.
DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
- // TODO(plind): endian dependency.
+ if (kArchEndian == kLittle) {
+ lwu(rd, rs);
+ lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ dsll32(scratch, scratch, 0);
+ } else {
+ lw(rd, rs);
+ lwu(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ dsll32(rd, rd, 0);
+ }
+ Daddu(rd, rd, scratch);
+}
+
+
+// Load a consecutive 32-bit word pair into a 64-bit register, putting the
+// first word in the low bits and the second word in the high bits.
+void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs,
+ Register scratch) {
lwu(rd, rs);
lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
dsll32(scratch, scratch, 0);
@@ -1223,7 +1240,21 @@ void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
// Assert fail if the offset from start of object IS actually aligned.
// ONLY use with known misalignment, since there is performance cost.
DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
- // TODO(plind): endian dependency.
+ if (kArchEndian == kLittle) {
+ sw(rd, rs);
+ dsrl32(scratch, rd, 0);
+ sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ } else {
+ sw(rd, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+ dsrl32(scratch, rd, 0);
+ sw(scratch, rs);
+ }
+}
+
+
+// Do a 64-bit store as two consecutive 32-bit stores to an unaligned address.
+void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs,
+ Register scratch) {
sw(rd, rs);
dsrl32(scratch, rd, 0);
sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
@@ -3774,21 +3805,39 @@ void MacroAssembler::CopyBytes(Register src,
// TODO(kalmard) check if this can be optimized to use sw in most cases.
// Can't use unaligned access - copy byte by byte.
- sb(scratch, MemOperand(dst, 0));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 1));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 2));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 3));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 4));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 5));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 6));
- dsrl(scratch, scratch, 8);
- sb(scratch, MemOperand(dst, 7));
+ if (kArchEndian == kLittle) {
+ sb(scratch, MemOperand(dst, 0));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 1));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 2));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 3));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 4));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 5));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 6));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 7));
+ } else {
+ sb(scratch, MemOperand(dst, 7));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 6));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 5));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 4));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 3));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 2));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 1));
+ dsrl(scratch, scratch, 8);
+ sb(scratch, MemOperand(dst, 0));
+ }
Daddu(dst, dst, 8);
Dsubu(length, length, Operand(kPointerSize));
@@ -3986,7 +4035,11 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
- Move(dst, v0, v1);
+ if (kArchEndian == kLittle) {
+ Move(dst, v0, v1);
+ } else {
+ Move(dst, v1, v0);
+ }
} else {
Move(dst, f0); // Reg f0 is o32 ABI FP return value.
}
@@ -3995,9 +4048,13 @@ void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
if (IsMipsSoftFloatABI) {
- Move(dst, a0, a1);
+ if (kArchEndian == kLittle) {
+ Move(dst, a0, a1);
+ } else {
+ Move(dst, a1, a0);
+ }
} else {
- Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
+ Move(dst, f12); // Reg f12 is n64 ABI FP first argument value.
}
}
@@ -4006,7 +4063,11 @@ void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
Move(f12, src);
} else {
- Move(a0, a1, src);
+ if (kArchEndian == kLittle) {
+ Move(a0, a1, src);
+ } else {
+ Move(a1, a0, src);
+ }
}
}
@@ -4015,7 +4076,11 @@ void MacroAssembler::MovToFloatResult(DoubleRegister src) {
if (!IsMipsSoftFloatABI) {
Move(f0, src);
} else {
- Move(v0, v1, src);
+ if (kArchEndian == kLittle) {
+ Move(v0, v1, src);
+ } else {
+ Move(v1, v0, src);
+ }
}
}
@@ -4033,8 +4098,13 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
Move(fparg2, src2);
}
} else {
- Move(a0, a1, src1);
- Move(a2, a3, src2);
+ if (kArchEndian == kLittle) {
+ Move(a0, a1, src1);
+ Move(a2, a3, src2);
+ } else {
+ Move(a1, a0, src1);
+ Move(a3, a2, src2);
+ }
}
}
@@ -5121,7 +5191,7 @@ void MacroAssembler::InitializeNewString(Register string,
sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
li(scratch1, Operand(String::kEmptyHashField));
sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
- sd(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
+ sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
}
@@ -5650,8 +5720,8 @@ void MacroAssembler::HasColor(Register object,
GetMarkBits(object, bitmap_scratch, mask_scratch);
Label other_color;
- // Note that we are using a 4-byte aligned 8-byte load.
- Uld(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ // Note that we are using two 4-byte aligned loads.
+ LoadWordPair(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
And(t8, t9, Operand(mask_scratch));
Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
// Shift left 1 by adding.
@@ -5724,7 +5794,8 @@ void MacroAssembler::EnsureNotWhite(
// Since both black and grey have a 1 in the first position and white does
// not have a 1 there we only need to check one bit.
// Note that we are using a 4-byte aligned 8-byte load.
- Uld(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ LoadWordPair(load_scratch,
+ MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
And(t8, mask_scratch, load_scratch);
Branch(&done, ne, t8, Operand(zero_reg));
@@ -5803,14 +5874,14 @@ void MacroAssembler::EnsureNotWhite(
bind(&is_data_object);
// Value is a data object, and it is white. Mark it black. Since we know
// that the object is white we can make it black by flipping one bit.
- Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ LoadWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
Or(t8, t8, Operand(mask_scratch));
- Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+ StoreWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
- Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ LoadWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
Daddu(t8, t8, Operand(length));
- Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+ StoreWordPair(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
bind(&done);
}
@@ -5823,14 +5894,14 @@ void MacroAssembler::LoadInstanceDescriptors(Register map,
void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
- ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
}
void MacroAssembler::EnumLength(Register dst, Register map) {
STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
- ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
+ lwu(dst, FieldMemOperand(map, Map::kBitField3Offset));
And(dst, dst, Operand(Map::EnumLengthBits::kMask));
SmiTag(dst);
}
« no previous file with comments | « src/mips64/macro-assembler-mips64.h ('k') | src/mips64/simulator-mips64.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698