Index: src/mips64/macro-assembler-mips64.cc
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
index 0aa67852787c212df27800cd2b580984eb33db76..7406d83370bfee5b2c59f1f3e1640054fc72d365 100644
--- a/src/mips64/macro-assembler-mips64.cc
+++ b/src/mips64/macro-assembler-mips64.cc
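This change makes the mips64 macro-assembler endian-aware. The unaligned
64-bit load/store helpers Uld and Usd, the byte-by-byte path in CopyBytes,
and the soft-float ABI register moves all assumed little-endian layout;
each now branches on kArchEndian. A stale o32 ABI comment is also corrected
to n64 along the way.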
@@ -1211,9 +1211,15 @@ void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
   // ONLY use with known misalignment, since there is performance cost.
   DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
   // TODO(plind): endian dependency.
-  lwu(rd, rs);
-  lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
-  dsll32(scratch, scratch, 0);
+  if (kArchEndian == kLittle) {
+    lwu(rd, rs);
+    lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+    dsll32(scratch, scratch, 0);
+  } else {
+    lw(rd, rs);
+    lwu(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+    dsll32(rd, rd, 0);
+  }
   Daddu(rd, rd, scratch);
 }

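Why the branch in Uld: a misaligned 64-bit load is synthesized from two
32-bit word loads, and endianness decides which half of the value sits at
the lower address. On little-endian targets the low word comes first, so it
is loaded zero-extended (lwu) while the high word is shifted up by 32; on
big-endian targets the two addresses trade roles. A minimal host-side C++
sketch of the same recombination (illustrative only, not V8 code;
LoadU64Misaligned is an invented name, and the sketch assumes the host's
byte order matches the modeled target's):

    #include <cstdint>
    #include <cstring>

    // Recombine a possibly misaligned 64-bit value from two 32-bit words,
    // mirroring the lwu/lw/dsll32/Daddu sequence; memcpy stands in for the
    // word loads and little_endian for kArchEndian == kLittle.
    uint64_t LoadU64Misaligned(const unsigned char* p, bool little_endian) {
      uint32_t w0, w1;
      std::memcpy(&w0, p, 4);      // word at the lower address (rs)
      std::memcpy(&w1, p + 4, 4);  // word at the higher address (rs + 4)
      return little_endian ? (uint64_t{w1} << 32) | w0   // high half second
                           : (uint64_t{w0} << 32) | w1;  // high half first
    }
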
@@ -1225,9 +1231,15 @@ void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
   // ONLY use with known misalignment, since there is performance cost.
   DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
   // TODO(plind): endian dependency.
-  sw(rd, rs);
-  dsrl32(scratch, rd, 0);
-  sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+  if (kArchEndian == kLittle) {
+    sw(rd, rs);
+    dsrl32(scratch, rd, 0);
+    sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+  } else {
+    sw(rd, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+    dsrl32(scratch, rd, 0);
+    sw(scratch, rs);
+  }
 }

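Usd is the mirror image: the value is split into its two 32-bit halves
(dsrl32 extracts the high half), and only the destination offsets swap with
endianness. A matching sketch (again hypothetical host code, not V8's API):

    #include <cstdint>
    #include <cstring>

    // Split a 64-bit value into two word stores, mirroring Usd's
    // sw/dsrl32/sw sequence.
    void StoreU64Misaligned(unsigned char* p, uint64_t v, bool little_endian) {
      uint32_t lo = static_cast<uint32_t>(v);        // low 32 bits
      uint32_t hi = static_cast<uint32_t>(v >> 32);  // high 32 bits (dsrl32)
      std::memcpy(p, little_endian ? &lo : &hi, 4);      // lower address
      std::memcpy(p + 4, little_endian ? &hi : &lo, 4);  // higher address
    }
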
@@ -3789,21 +3801,39 @@ void MacroAssembler::CopyBytes(Register src,

   // TODO(kalmard) check if this can be optimized to use sw in most cases.
   // Can't use unaligned access - copy byte by byte.
-  sb(scratch, MemOperand(dst, 0));
-  dsrl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 1));
-  dsrl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 2));
-  dsrl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 3));
-  dsrl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 4));
-  dsrl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 5));
-  dsrl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 6));
-  dsrl(scratch, scratch, 8);
-  sb(scratch, MemOperand(dst, 7));
+  if (kArchEndian == kLittle) {
+    sb(scratch, MemOperand(dst, 0));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 1));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 2));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 3));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 4));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 5));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 6));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 7));
+  } else {
+    sb(scratch, MemOperand(dst, 7));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 6));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 5));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 4));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 3));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 2));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 1));
+    dsrl(scratch, scratch, 8);
+    sb(scratch, MemOperand(dst, 0));
+  }
   Daddu(dst, dst, 8);

   Dsubu(length, length, Operand(kPointerSize));
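In this CopyBytes loop, scratch holds eight source bytes as a single 64-bit
value, and the sb/dsrl sequence always peels off the least-significant byte
first. Endianness therefore only changes where each peeled byte lands:
offsets 0 through 7 on little-endian, 7 down to 0 on big-endian. A small
sketch of that mapping (ByteAtOffset is an invented helper name):

    #include <cstdint>

    // Which byte of the 64-bit value belongs at byte `offset` of the copy:
    // the LSB goes to offset 0 on little-endian and offset 7 on big-endian,
    // matching the two sb/dsrl orderings above.
    uint8_t ByteAtOffset(uint64_t v, int offset, bool little_endian) {
      int lsb_first_index = little_endian ? offset : 7 - offset;
      return static_cast<uint8_t>(v >> (8 * lsb_first_index));
    }
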
@@ -4001,7 +4031,11 @@ void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,

 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
   if (IsMipsSoftFloatABI) {
-    Move(dst, v0, v1);
+    if (kArchEndian == kLittle) {
+      Move(dst, v0, v1);
+    } else {
+      Move(dst, v1, v0);
+    }
   } else {
     Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
   }
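Under the soft-float ABI a double travels in a pair of general-purpose
registers (v0/v1 for return values here), and the register that carries the
low word of the double's bit pattern follows the target's byte order; the
same swap recurs in the parameter and result movers in the hunks below. A
host-side sketch of that mapping (WordPair and SplitDouble are invented
names, not V8 API):

    #include <cstdint>
    #include <cstring>

    struct WordPair { uint32_t first, second; };  // models e.g. v0 and v1

    // Split a double's bit pattern across a register pair the way a
    // soft-float ABI expects: low word in the first register on
    // little-endian targets, high word first on big-endian ones.
    WordPair SplitDouble(double d, bool little_endian) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);
      uint32_t lo = static_cast<uint32_t>(bits);
      uint32_t hi = static_cast<uint32_t>(bits >> 32);
      return little_endian ? WordPair{lo, hi} : WordPair{hi, lo};
    }
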
@@ -4010,9 +4044,13 @@ void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {

 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
   if (IsMipsSoftFloatABI) {
-    Move(dst, a0, a1);
+    if (kArchEndian == kLittle) {
+      Move(dst, a0, a1);
+    } else {
+      Move(dst, a1, a0);
+    }
   } else {
-    Move(dst, f12);  // Reg f12 is o32 ABI FP first argument value.
+    Move(dst, f12);  // Reg f12 is n64 ABI FP first argument value.
   }
 }

@@ -4021,7 +4059,11 @@ void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
   if (!IsMipsSoftFloatABI) {
     Move(f12, src);
   } else {
-    Move(a0, a1, src);
+    if (kArchEndian == kLittle) {
+      Move(a0, a1, src);
+    } else {
+      Move(a1, a0, src);
+    }
   }
 }

@@ -4030,7 +4072,11 @@ void MacroAssembler::MovToFloatResult(DoubleRegister src) {
   if (!IsMipsSoftFloatABI) {
     Move(f0, src);
   } else {
-    Move(v0, v1, src);
+    if (kArchEndian == kLittle) {
+      Move(v0, v1, src);
+    } else {
+      Move(v1, v0, src);
+    }
   }
 }

@@ -4048,8 +4094,13 @@ void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
       Move(fparg2, src2);
     }
   } else {
-    Move(a0, a1, src1);
-    Move(a2, a3, src2);
+    if (kArchEndian == kLittle) {
+      Move(a0, a1, src1);
+      Move(a2, a3, src2);
+    } else {
+      Move(a1, a0, src1);
+      Move(a3, a2, src2);
+    }
   }
 }

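Taken together, the soft-float register pairs touched by this patch are
(v0, v1) for results and (a0, a1) / (a2, a3) for the first and second double
parameters; in every case little-endian behavior is unchanged and only the
big-endian path swaps the pair members.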