| Index: src/mips/macro-assembler-mips.cc
| diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
| index 9312eed02f8ae702853d80ef5ff91b4d5fa37043..7af9f91823e1cf0a7a0e5ef271907be45106888f 100644
| --- a/src/mips/macro-assembler-mips.cc
| +++ b/src/mips/macro-assembler-mips.cc
| @@ -1192,14 +1192,199 @@ void MacroAssembler::Lsa(Register rd, Register rt, Register rs, uint8_t sa,
| // ------------Pseudo-instructions-------------
|
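| +// Unaligned word load: on kMips32r6 a plain lw handles the misaligned access;
| +// on r1/r2/Loongson the word is assembled from an lwr/lwl pair, staged through
| +// 'at' when rd aliases the base register so the address is not clobbered early.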
| void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
| -  lwr(rd, rs);
| -  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
| +  DCHECK(!rd.is(at));
| +  DCHECK(!rs.rm().is(at));
| +  if (IsMipsArchVariant(kMips32r6)) {
| +    lw(rd, rs);
| +  } else {
| +    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
| +           IsMipsArchVariant(kLoongson));
| +    if (is_int16(rs.offset() + kMipsLwrOffset) &&
| +        is_int16(rs.offset() + kMipsLwlOffset)) {
| +      if (!rd.is(rs.rm())) {
| +        lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
| +        lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
| +      } else {
| +        lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset));
| +        lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset));
| +        mov(rd, at);
| +      }
| +    } else {  // Offset > 16 bits, use multiple instructions to load.
| +      LoadRegPlusOffsetToAt(rs);
| +      lwr(rd, MemOperand(at, kMipsLwrOffset));
| +      lwl(rd, MemOperand(at, kMipsLwlOffset));
| +    }
| +  }
| }
|
|
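| +// Unaligned word store, mirroring Ulw: sw on r6, otherwise an swr/swl pair.
| +// No scratch staging is needed since a store leaves rd untouched.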
| void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
| -  swr(rd, rs);
| -  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
| +  DCHECK(!rd.is(at));
| +  DCHECK(!rs.rm().is(at));
| +  if (IsMipsArchVariant(kMips32r6)) {
| +    sw(rd, rs);
| +  } else {
| +    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
| +           IsMipsArchVariant(kLoongson));
| +    if (is_int16(rs.offset() + kMipsSwrOffset) &&
| +        is_int16(rs.offset() + kMipsSwlOffset)) {
| +      swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset));
| +      swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset));
| +    } else {
| +      LoadRegPlusOffsetToAt(rs);
| +      swr(rd, MemOperand(at, kMipsSwrOffset));
| +      swl(rd, MemOperand(at, kMipsSwlOffset));
| +    }
| +  }
| +}
| +
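| +// Unaligned signed halfword load: two byte loads in target byte order, then
| +// the sign-extended byte in rd is shifted up and merged with the
| +// zero-extended byte held in 'at'.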
| +void MacroAssembler::Ulh(Register rd, const MemOperand& rs) {
| +  DCHECK(!rd.is(at));
| +  DCHECK(!rs.rm().is(at));
| +  if (IsMipsArchVariant(kMips32r6)) {
| +    lh(rd, rs);
| +  } else {
| +    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
| +           IsMipsArchVariant(kLoongson));
| +    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
| +#if defined(V8_TARGET_LITTLE_ENDIAN)
| +      lbu(at, rs);
| +      lb(rd, MemOperand(rs.rm(), rs.offset() + 1));
| +#elif defined(V8_TARGET_BIG_ENDIAN)
| +      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
| +      lb(rd, rs);
| +#endif
| +    } else {  // Offset > 16 bits, use multiple instructions to load.
| +      LoadRegPlusOffsetToAt(rs);
| +#if defined(V8_TARGET_LITTLE_ENDIAN)
| +      lb(rd, MemOperand(at, 1));
| +      lbu(at, MemOperand(at, 0));
| +#elif defined(V8_TARGET_BIG_ENDIAN)
| +      lb(rd, MemOperand(at, 0));
| +      lbu(at, MemOperand(at, 1));
| +#endif
| +    }
| +    sll(rd, rd, 8);
| +    or_(rd, rd, at);
| +  }
| +}
| +
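| +// Unaligned unsigned halfword load: same sequence as Ulh, but both bytes are
| +// loaded with lbu so the result is zero-extended.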
| +void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) {
| +  DCHECK(!rd.is(at));
| +  DCHECK(!rs.rm().is(at));
| +  if (IsMipsArchVariant(kMips32r6)) {
| +    lhu(rd, rs);
| +  } else {
| +    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
| +           IsMipsArchVariant(kLoongson));
| +    if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) {
| +#if defined(V8_TARGET_LITTLE_ENDIAN)
| +      lbu(at, rs);
| +      lbu(rd, MemOperand(rs.rm(), rs.offset() + 1));
| +#elif defined(V8_TARGET_BIG_ENDIAN)
| +      lbu(at, MemOperand(rs.rm(), rs.offset() + 1));
| +      lbu(rd, rs);
| +#endif
| +    } else {  // Offset > 16 bits, use multiple instructions to load.
| +      LoadRegPlusOffsetToAt(rs);
| +#if defined(V8_TARGET_LITTLE_ENDIAN)
| +      lbu(rd, MemOperand(at, 1));
| +      lbu(at, MemOperand(at, 0));
| +#elif defined(V8_TARGET_BIG_ENDIAN)
| +      lbu(rd, MemOperand(at, 0));
| +      lbu(at, MemOperand(at, 1));
| +#endif
| +    }
| +    sll(rd, rd, 8);
| +    or_(rd, rd, at);
| +  }
| +}
| +
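| +// Unaligned halfword store. The value is copied into 'scratch' (when the two
| +// differ) and written one byte at a time in target byte order; for offsets
| +// outside the int16 range the address is first materialized in 'at'.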
| +void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) {
| +  DCHECK(!rd.is(at));
| +  DCHECK(!rs.rm().is(at));
| +  DCHECK(!rs.rm().is(scratch));
| +  DCHECK(!scratch.is(at));
| +  if (IsMipsArchVariant(kMips32r6)) {
| +    sh(rd, rs);
| +  } else {
| +    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
| +           IsMipsArchVariant(kLoongson));
| +    MemOperand source = rs;
| +    // If offset > 16 bits, load address to at with offset 0.
| +    if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) {
| +      LoadRegPlusOffsetToAt(rs);
| +      source = MemOperand(at, 0);
| +    }
| +
| +    if (!scratch.is(rd)) {
| +      mov(scratch, rd);
| +    }
| +
| +#if defined(V8_TARGET_LITTLE_ENDIAN)
| +    sb(scratch, source);
| +    srl(scratch, scratch, 8);
| +    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
| +#elif defined(V8_TARGET_BIG_ENDIAN)
| +    sb(scratch, MemOperand(source.rm(), source.offset() + 1));
| +    srl(scratch, scratch, 8);
| +    sb(scratch, source);
| +#endif
| +  }
| +}
| +
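| +// Unaligned FP word load: lwc1 directly on r6; otherwise the word is read
| +// with Ulw into 'scratch' and moved to the FPU register with mtc1.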
| +void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs,
| +                           Register scratch) {
| +  if (IsMipsArchVariant(kMips32r6)) {
| +    lwc1(fd, rs);
| +  } else {
| +    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
| +           IsMipsArchVariant(kLoongson));
| +    Ulw(scratch, rs);
| +    mtc1(scratch, fd);
| +  }
| +}
| +
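| +// Unaligned FP word store: the FPU register is moved to 'scratch' with mfc1
| +// and written with Usw (swc1 directly on r6).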
| +void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs,
| +                           Register scratch) {
| +  if (IsMipsArchVariant(kMips32r6)) {
| +    swc1(fd, rs);
| +  } else {
| +    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
| +           IsMipsArchVariant(kLoongson));
| +    mfc1(scratch, fd);
| +    Usw(scratch, rs);
| +  }
| +}
| +
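| +// Unaligned FP double load: the mantissa and exponent words are loaded with
| +// two Ulw calls and placed in the low/high halves of fd via mtc1/Mthc1
| +// (ldc1 directly on r6).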
| +void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs,
| +                           Register scratch) {
| +  DCHECK(!scratch.is(at));
| +  if (IsMipsArchVariant(kMips32r6)) {
| +    ldc1(fd, rs);
| +  } else {
| +    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
| +           IsMipsArchVariant(kLoongson));
| +    Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
| +    mtc1(scratch, fd);
| +    Ulw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
| +    Mthc1(scratch, fd);
| +  }
| +}
| +
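| +// Unaligned FP double store: the low and high halves of fd are extracted
| +// with mfc1/Mfhc1 and written with two Usw stores (sdc1 directly on r6).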
| +void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs,
| +                           Register scratch) {
| +  DCHECK(!scratch.is(at));
| +  if (IsMipsArchVariant(kMips32r6)) {
| +    sdc1(fd, rs);
| +  } else {
| +    DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r1) ||
| +           IsMipsArchVariant(kLoongson));
| +    mfc1(scratch, fd);
| +    Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kMantissaOffset));
| +    Mfhc1(scratch, fd);
| +    Usw(scratch, MemOperand(rs.rm(), rs.offset() + Register::kExponentOffset));
| +  }
| }
|
|