Index: src/mips64/macro-assembler-mips64.cc
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
index 0e7955de00d4be4d9b81802bc2ba5e8f6ab80f8b..154276672216da103507f4a92f2ad689bde09ac8 100644
--- a/src/mips64/macro-assembler-mips64.cc
+++ b/src/mips64/macro-assembler-mips64.cc
@@ -1629,10 +1629,10 @@ void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,


 void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
-                                   Label* nan, Condition cc, FPURegister cmp1,
+                                   Label* nan, Condition cond, FPURegister cmp1,
                                    FPURegister cmp2, BranchDelaySlot bd) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  if (cc == al) {
+  if (cond == al) {
     Branch(bd, target);
     return;
   }
@@ -1640,22 +1640,69 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
   if (kArchVariant == kMips64r6) {
     sizeField = sizeField == D ? L : W;
   }
+
   DCHECK(nan || target);
   // Check for unordered (NaN) cases.
   if (nan) {
+    bool long_branch = nan->is_bound() ? !is_near(nan) : is_trampoline_emitted();
     if (kArchVariant != kMips64r6) {
-      c(UN, D, cmp1, cmp2);
-      bc1t(nan);
+      if (long_branch) {
+        Label skip;
+        c(UN, D, cmp1, cmp2);
+        bc1f(&skip);
+        nop();
+        Jr(nan, bd);
+        bind(&skip);
+      } else {
+        c(UN, D, cmp1, cmp2);
+        bc1t(nan);
+        if (bd == PROTECT) {
+          nop();
+        }
+      }
     } else {
-      // Use f31 for comparison result. It has to be unavailable to lithium
+      // Use kDoubleCompareReg for comparison result. It has to be unavailable
+      // to lithium
       // register allocator.
-      DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
-      cmp(UN, L, f31, cmp1, cmp2);
-      bc1nez(nan, f31);
+      DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+      if (long_branch) {
+        Label skip;
+        cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+        bc1eqz(&skip, kDoubleCompareReg);
+        nop();
+        Jr(nan, bd);
+        bind(&skip);
+      } else {
+        cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+        bc1nez(nan, kDoubleCompareReg);
+        if (bd == PROTECT) {
+          nop();
+        }
+      }
+    }
+  }
+
+  if (target) {
+    bool long_branch =
+        target->is_bound() ? !is_near(target) : is_trampoline_emitted();
+    if (long_branch) {
+      Label skip;
+      Condition neg_cond = NegateFpuCondition(cond);
+      BranchShortF(sizeField, &skip, neg_cond, cmp1, cmp2, bd);
+      Jr(target, bd);
+      bind(&skip);
+    } else {
+      BranchShortF(sizeField, target, cond, cmp1, cmp2, bd);
     }
   }
+}

+
+void MacroAssembler::BranchShortF(SecondaryField sizeField, Label* target,
+                                  Condition cc, FPURegister cmp1,
+                                  FPURegister cmp2, BranchDelaySlot bd) {
   if (kArchVariant != kMips64r6) {
+    BlockTrampolinePoolScope block_trampoline_pool(this);
     if (target) {
       // Here NaN cases were either handled by this function or are assumed to
       // have been handled by the caller.
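The hunk above is the core of the change: for both the NaN edge and the target edge, a long branch is needed when the label is bound but out of reach of a short branch, or unbound once a trampoline pool has already been emitted. In that case the condition is negated via NegateFpuCondition and a short branch over an unconditional jump is emitted instead. As an illustration only, for a caller requesting lt, and assuming NegateFpuCondition(lt) yields uge (the unordered complement), the pre-r6 target path expands to roughly this shape (delay-slot scheduling simplified):

    // Sketch only: long FP branch on 'lt' to an out-of-range 'target'.
    Label skip;
    c(OLT, D, cmp1, cmp2);   // FCSR condition bit := (cmp1 < cmp2), ordered.
    bc1f(&skip);             // Bit clear means uge holds: skip the jump.
    nop();                   // Branch delay slot.
    Jr(target, bd);          // Unconditional jump, not limited to short-branch range.
    bind(&skip);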
@@ -1664,18 +1711,34 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
           c(OLT, sizeField, cmp1, cmp2);
           bc1t(target);
           break;
+        case ult:
+          c(ULT, sizeField, cmp1, cmp2);
+          bc1t(target);
+          break;
         case gt:
           c(ULE, sizeField, cmp1, cmp2);
           bc1f(target);
           break;
+        case ugt:
+          c(OLE, sizeField, cmp1, cmp2);
+          bc1f(target);
+          break;
         case ge:
           c(ULT, sizeField, cmp1, cmp2);
           bc1f(target);
           break;
+        case uge:
+          c(OLT, sizeField, cmp1, cmp2);
+          bc1f(target);
+          break;
         case le:
           c(OLE, sizeField, cmp1, cmp2);
           bc1t(target);
           break;
+        case ule:
+          c(ULE, sizeField, cmp1, cmp2);
+          bc1t(target);
+          break;
         case eq:
           c(EQ, sizeField, cmp1, cmp2);
           bc1t(target);
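A note on the new unordered cases in this hunk: the pre-r6 c.cond.fmt instruction provides the quiet predicates UN, EQ, UEQ, OLT, ULT, OLE and ULE, so ult and ule map directly onto their predicate with bc1t, while ugt and uge have no quiet predicate of their own and are emitted as the negation of OLE and OLT with bc1f. Taking ugt as a worked example (illustrative restatement of the case added above):

    // ugt: branch when cmp1 > cmp2 or either operand is NaN.
    // !(cmp1 OLE cmp2) covers exactly that set of outcomes, so compare
    // with OLE and branch when the condition bit is clear.
    c(OLE, sizeField, cmp1, cmp2);
    bc1f(target);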
@@ -1684,11 +1747,11 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
           c(UEQ, sizeField, cmp1, cmp2);
           bc1t(target);
           break;
-        case ne:
+        case ne:  // Unordered or not equal.
           c(EQ, sizeField, cmp1, cmp2);
           bc1f(target);
           break;
-        case nue:
+        case ogl:
           c(UEQ, sizeField, cmp1, cmp2);
           bc1f(target);
           break;
@@ -1697,44 +1760,62 @@ void MacroAssembler::BranchFCommon(SecondaryField sizeField, Label* target,
       }
     }
   } else {
+    BlockTrampolinePoolScope block_trampoline_pool(this);
     if (target) {
       // Here NaN cases were either handled by this function or are assumed to
       // have been handled by the caller.
       // Unsigned conditions are treated as their signed counterpart.
-      // Use f31 for comparison result, it is valid in fp64 (FR = 1) mode.
-      DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
+      // Use kDoubleCompareReg for comparison result, it is valid in fp64 (FR =
+      // 1) mode.
+      DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
       switch (cc) {
         case lt:
-          cmp(OLT, sizeField, f31, cmp1, cmp2);
-          bc1nez(target, f31);
+          cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
+          bc1nez(target, kDoubleCompareReg);
+          break;
+        case ult:
+          cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
+          bc1nez(target, kDoubleCompareReg);
           break;
         case gt:
-          cmp(ULE, sizeField, f31, cmp1, cmp2);
-          bc1eqz(target, f31);
+          cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
+          bc1eqz(target, kDoubleCompareReg);
+          break;
+        case ugt:
+          cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
+          bc1eqz(target, kDoubleCompareReg);
           break;
         case ge:
-          cmp(ULT, sizeField, f31, cmp1, cmp2);
-          bc1eqz(target, f31);
+          cmp(ULT, sizeField, kDoubleCompareReg, cmp1, cmp2);
+          bc1eqz(target, kDoubleCompareReg);
+          break;
+        case uge:
+          cmp(OLT, sizeField, kDoubleCompareReg, cmp1, cmp2);
+          bc1eqz(target, kDoubleCompareReg);
           break;
         case le:
-          cmp(OLE, sizeField, f31, cmp1, cmp2);
-          bc1nez(target, f31);
+          cmp(OLE, sizeField, kDoubleCompareReg, cmp1, cmp2);
+          bc1nez(target, kDoubleCompareReg);
+          break;
+        case ule:
+          cmp(ULE, sizeField, kDoubleCompareReg, cmp1, cmp2);
+          bc1nez(target, kDoubleCompareReg);
           break;
         case eq:
-          cmp(EQ, sizeField, f31, cmp1, cmp2);
-          bc1nez(target, f31);
+          cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
+          bc1nez(target, kDoubleCompareReg);
           break;
         case ueq:
-          cmp(UEQ, sizeField, f31, cmp1, cmp2);
-          bc1nez(target, f31);
+          cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
+          bc1nez(target, kDoubleCompareReg);
           break;
         case ne:
-          cmp(EQ, sizeField, f31, cmp1, cmp2);
-          bc1eqz(target, f31);
+          cmp(EQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
+          bc1eqz(target, kDoubleCompareReg);
           break;
-        case nue:
-          cmp(UEQ, sizeField, f31, cmp1, cmp2);
-          bc1eqz(target, f31);
+        case ogl:
+          cmp(UEQ, sizeField, kDoubleCompareReg, cmp1, cmp2);
+          bc1eqz(target, kDoubleCompareReg);
           break;
         default:
           CHECK(0);
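For wider context (an illustrative note, not part of the patch; the wrapper name, condition and registers below are assumptions chosen for the example): call sites reach BranchFCommon through the BranchF wrappers in macro-assembler-mips64.h, so after this change both out-edges of a floating-point branch may be arbitrarily far away without any change at the call site:

    // Hypothetical call site, ACCESS_MASM ("__") style.
    Label on_less, on_nan;
    __ BranchF(&on_less, &on_nan, lt, f12, f14);  // Double compare f12 < f14.
    // Fall-through: the compare was ordered and f12 >= f14.
    __ bind(&on_nan);   // At least one operand was NaN.
    __ bind(&on_less);  // f12 < f14; either label may now be a long branch away.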