| Index: src/mips64/macro-assembler-mips64.cc
|
| diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
|
| index 53b4ef2beae2f7bdbe002910ff951bbb8a48b0b8..f4f478095abe5bf3c3c5c656e8829bb6b3ade77d 100644
|
| --- a/src/mips64/macro-assembler-mips64.cc
|
| +++ b/src/mips64/macro-assembler-mips64.cc
|
| @@ -1957,6 +1957,61 @@ void MacroAssembler::Ins(Register rt,
|
| ins_(rt, rs, pos, size);
|
| }
|
|
|
| +void MacroAssembler::Neg_s(FPURegister fd, FPURegister fs) {
|
| + Register scratch1 = t8;
|
| + Register scratch2 = t9;
|
| + if (kArchVariant == kMips64r2) {
|
| + Label is_nan, done;
|
| + BranchF32(nullptr, &is_nan, eq, fs, fs);
|
| + Branch(USE_DELAY_SLOT, &done);
|
| + // For a NaN input, neg_s returns the NaN with its sign bit unchanged,
|
| + // so the sign bit is flipped manually on the is_nan path below.
|
| + neg_s(fd, fs); // In delay slot.
|
| + bind(&is_nan);
|
| + mfc1(scratch1, fs);
|
| + And(scratch2, scratch1, Operand(~kBinary32SignMask));
|
| + And(scratch1, scratch1, Operand(kBinary32SignMask));
|
| + Xor(scratch1, scratch1, Operand(kBinary32SignMask));
|
| + Or(scratch2, scratch2, scratch1);
|
| + mtc1(scratch2, fd);
|
| + bind(&done);
|
| + } else {
|
| + mfc1(scratch1, fs);
|
| + And(scratch2, scratch1, Operand(~kBinary32SignMask));
|
| + And(scratch1, scratch1, Operand(kBinary32SignMask));
|
| + Xor(scratch1, scratch1, Operand(kBinary32SignMask));
|
| + Or(scratch2, scratch2, scratch1);
|
| + mtc1(scratch2, fd);
|
| + }
|
| +}
|
| +
|
| +void MacroAssembler::Neg_d(FPURegister fd, FPURegister fs) {
|
| + Register scratch1 = t8;
|
| + Register scratch2 = t9;
|
| + if (kArchVariant == kMips64r2) {
|
| + Label is_nan, done;
|
| + BranchF64(nullptr, &is_nan, eq, fs, fs);
|
| + Branch(USE_DELAY_SLOT, &done);
|
| + // For a NaN input, neg_d returns the NaN with its sign bit unchanged,
|
| + // so the sign bit is flipped manually on the is_nan path below.
|
| + neg_d(fd, fs); // In delay slot.
|
| + bind(&is_nan);
|
| + dmfc1(scratch1, fs);
|
| + And(scratch2, scratch1, Operand(~Double::kSignMask));
|
| + And(scratch1, scratch1, Operand(Double::kSignMask));
|
| + Xor(scratch1, scratch1, Operand(Double::kSignMask));
|
| + Or(scratch2, scratch2, scratch1);
|
| + dmtc1(scratch2, fd);
|
| + bind(&done);
|
| + } else {
|
| + dmfc1(scratch1, fs);
|
| + And(scratch2, scratch1, Operand(~Double::kSignMask));
|
| + And(scratch1, scratch1, Operand(Double::kSignMask));
|
| + Xor(scratch1, scratch1, Operand(Double::kSignMask));
|
| + Or(scratch2, scratch2, scratch1);
|
| + dmtc1(scratch2, fd);
|
| + }
|
| +}
|
|
|
| void MacroAssembler::Cvt_d_uw(FPURegister fd, FPURegister fs) {
|
| // Move the data from fs to t8.
|
| @@ -2535,7 +2590,7 @@ void MacroAssembler::Move(FPURegister dst, double imm) {
|
| if (imm_bits == bit_cast<int64_t>(0.0) && has_double_zero_reg_set_) {
|
| mov_d(dst, kDoubleRegZero);
|
| } else if (imm_bits == bit_cast<int64_t>(-0.0) && has_double_zero_reg_set_) {
|
| - neg_d(dst, kDoubleRegZero);
|
| + Neg_d(dst, kDoubleRegZero);
|
| } else {
|
| uint32_t lo, hi;
|
| DoubleAsTwoUInt32(imm, &lo, &hi);
|
|
|