| Index: src/ppc/lithium-codegen-ppc.cc
|
| diff --git a/src/ppc/lithium-codegen-ppc.cc b/src/ppc/lithium-codegen-ppc.cc
|
| index 7b6052c3e624ef89eb678241ef74dbffcfadf5dc..c0cafeebd3cb32e90d87645746072d6f5acc0826 100644
|
| --- a/src/ppc/lithium-codegen-ppc.cc
|
| +++ b/src/ppc/lithium-codegen-ppc.cc
|
| @@ -1065,12 +1065,12 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
|
| __ ExtractBitRange(dividend, dividend, shift - 1, 0);
|
| __ neg(dividend, dividend, LeaveOE, SetRC);
|
| if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| - DeoptimizeIf(eq, instr, "minus zero", cr0);
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
|
| }
|
| } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| __ li(dividend, Operand::Zero());
|
| } else {
|
| - DeoptimizeIf(al, instr, "minus zero");
|
| + DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
|
| }
|
| __ b(&done);
|
| }
|
| @@ -1092,7 +1092,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
|
| DCHECK(!dividend.is(result));
|
|
|
| if (divisor == 0) {
|
| - DeoptimizeIf(al, instr, "division by zero");
|
| + DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
|
| return;
|
| }
|
|
|
| @@ -1107,7 +1107,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
|
| Label remainder_not_zero;
|
| __ bne(&remainder_not_zero, cr0);
|
| __ cmpwi(dividend, Operand::Zero());
|
| - DeoptimizeIf(lt, instr, "minus zero");
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| __ bind(&remainder_not_zero);
|
| }
|
| }
|
| @@ -1131,7 +1131,7 @@ void LCodeGen::DoModI(LModI* instr) {
|
| // Check for x % 0.
|
| if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
|
| __ cmpwi(right_reg, Operand::Zero());
|
| - DeoptimizeIf(eq, instr, "division by zero");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
|
| }
|
|
|
| // Check for kMinInt % -1, divw will return undefined, which is not what we
|
| @@ -1139,7 +1139,7 @@ void LCodeGen::DoModI(LModI* instr) {
|
| if (hmod->CheckFlag(HValue::kCanOverflow)) {
|
| Label no_overflow_possible;
|
| if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| - DeoptimizeIf(overflow, instr, "minus zero", cr0);
|
| + DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
|
| } else {
|
| __ bnooverflow(&no_overflow_possible, cr0);
|
| __ li(result_reg, Operand::Zero());
|
| @@ -1155,7 +1155,7 @@ void LCodeGen::DoModI(LModI* instr) {
|
| if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| __ bne(&done, cr0);
|
| __ cmpwi(left_reg, Operand::Zero());
|
| - DeoptimizeIf(lt, instr, "minus zero");
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| }
|
|
|
| __ bind(&done);
|
| @@ -1173,13 +1173,13 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
|
| HDiv* hdiv = instr->hydrogen();
|
| if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
|
| __ cmpwi(dividend, Operand::Zero());
|
| - DeoptimizeIf(eq, instr, "minus zero");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
|
| }
|
| // Check for (kMinInt / -1).
|
| if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
|
| __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
|
| __ cmpw(dividend, r0);
|
| - DeoptimizeIf(eq, instr, "overflow");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
|
| }
|
|
|
| int32_t shift = WhichPowerOf2Abs(divisor);
|
| @@ -1187,7 +1187,7 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
|
| // Deoptimize if remainder will not be 0.
|
| if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
|
| __ TestBitRange(dividend, shift - 1, 0, r0);
|
| - DeoptimizeIf(ne, instr, "lost precision", cr0);
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
|
| }
|
|
|
| if (divisor == -1) { // Nice shortcut, not needed for correctness.
|
| @@ -1217,7 +1217,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
|
| DCHECK(!dividend.is(result));
|
|
|
| if (divisor == 0) {
|
| - DeoptimizeIf(al, instr, "division by zero");
|
| + DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
|
| return;
|
| }
|
|
|
| @@ -1225,7 +1225,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
|
| HDiv* hdiv = instr->hydrogen();
|
| if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
|
| __ cmpwi(dividend, Operand::Zero());
|
| - DeoptimizeIf(eq, instr, "minus zero");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
|
| }
|
|
|
| __ TruncatingDiv(result, dividend, Abs(divisor));
|
| @@ -1236,7 +1236,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
|
| __ mov(ip, Operand(divisor));
|
| __ mullw(scratch, result, ip);
|
| __ cmpw(scratch, dividend);
|
| - DeoptimizeIf(ne, instr, "lost precision");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
|
| }
|
| }
|
|
|
| @@ -1261,7 +1261,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
|
| // Check for x / 0.
|
| if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
|
| __ cmpwi(divisor, Operand::Zero());
|
| - DeoptimizeIf(eq, instr, "division by zero");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
|
| }
|
|
|
| // Check for (0 / -x) that will produce negative zero.
|
| @@ -1270,7 +1270,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
|
| __ cmpwi(dividend, Operand::Zero());
|
| __ bne(&dividend_not_zero);
|
| __ cmpwi(divisor, Operand::Zero());
|
| - DeoptimizeIf(lt, instr, "minus zero");
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| __ bind(&dividend_not_zero);
|
| }
|
|
|
| @@ -1278,7 +1278,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
|
| if (hdiv->CheckFlag(HValue::kCanOverflow)) {
|
| Label no_overflow_possible;
|
| if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
|
| - DeoptimizeIf(overflow, instr, "overflow", cr0);
|
| + DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
|
| } else {
|
| // When truncating, we want kMinInt / -1 = kMinInt.
|
| __ bnooverflow(&no_overflow_possible, cr0);
|
| @@ -1292,7 +1292,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
|
| Register scratch = scratch0();
|
| __ mullw(scratch, divisor, result);
|
| __ cmpw(dividend, scratch);
|
| - DeoptimizeIf(ne, instr, "lost precision");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
|
| }
|
| }
|
|
|
| @@ -1319,7 +1319,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
|
| if (divisor == -1 && hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
|
| __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
|
| __ cmpw(dividend, r0);
|
| - DeoptimizeIf(eq, instr, "overflow");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
|
| }
|
| #else
|
| if (hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
|
| @@ -1331,7 +1331,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
|
|
|
| __ neg(result, dividend, oe, SetRC);
|
| if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| - DeoptimizeIf(eq, instr, "minus zero", cr0);
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
|
| }
|
|
|
| // If the negation could not overflow, simply shifting is OK.
|
| @@ -1347,7 +1347,7 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
|
|
|
| // Dividing by -1 is basically negation, unless we overflow.
|
| if (divisor == -1) {
|
| - DeoptimizeIf(overflow, instr, "overflow", cr0);
|
| + DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
|
| return;
|
| }
|
|
|
| @@ -1369,7 +1369,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
|
| DCHECK(!dividend.is(result));
|
|
|
| if (divisor == 0) {
|
| - DeoptimizeIf(al, instr, "division by zero");
|
| + DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
|
| return;
|
| }
|
|
|
| @@ -1377,7 +1377,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
|
| HMathFloorOfDiv* hdiv = instr->hydrogen();
|
| if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
|
| __ cmpwi(dividend, Operand::Zero());
|
| - DeoptimizeIf(eq, instr, "minus zero");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
|
| }
|
|
|
| // Easy case: We need no dynamic check for the dividend and the flooring
|
| @@ -1428,7 +1428,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
|
| // Check for x / 0.
|
| if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
|
| __ cmpwi(divisor, Operand::Zero());
|
| - DeoptimizeIf(eq, instr, "division by zero");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
|
| }
|
|
|
| // Check for (0 / -x) that will produce negative zero.
|
| @@ -1437,7 +1437,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
|
| __ cmpwi(dividend, Operand::Zero());
|
| __ bne(&dividend_not_zero);
|
| __ cmpwi(divisor, Operand::Zero());
|
| - DeoptimizeIf(lt, instr, "minus zero");
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| __ bind(&dividend_not_zero);
|
| }
|
|
|
| @@ -1445,7 +1445,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
|
| if (hdiv->CheckFlag(HValue::kCanOverflow)) {
|
| Label no_overflow_possible;
|
| if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
|
| - DeoptimizeIf(overflow, instr, "overflow", cr0);
|
| + DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
|
| } else {
|
| // When truncating, we want kMinInt / -1 = kMinInt.
|
| __ bnooverflow(&no_overflow_possible, cr0);
|
| @@ -1515,7 +1515,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
|
| // The case of a null constant will be handled separately.
|
| // If constant is negative and left is null, the result should be -0.
|
| __ cmpi(left, Operand::Zero());
|
| - DeoptimizeIf(eq, instr, "minus zero");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
|
| }
|
|
|
| switch (constant) {
|
| @@ -1527,12 +1527,12 @@ void LCodeGen::DoMulI(LMulI* instr) {
|
| __ li(r0, Operand::Zero()); // clear xer
|
| __ mtxer(r0);
|
| __ neg(result, left, SetOE, SetRC);
|
| - DeoptimizeIf(overflow, instr, "overflow", cr0);
|
| + DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
|
| #if V8_TARGET_ARCH_PPC64
|
| } else {
|
| __ neg(result, left);
|
| __ TestIfInt32(result, scratch, r0);
|
| - DeoptimizeIf(ne, instr, "overflow");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
|
| }
|
| #endif
|
| } else {
|
| @@ -1552,7 +1552,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
|
| __ cmpwi(left, Operand::Zero());
|
| }
|
| #endif
|
| - DeoptimizeIf(lt, instr, "minus zero");
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| }
|
| __ li(result, Operand::Zero());
|
| break;
|
| @@ -1605,7 +1605,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
|
| __ Mul(result, left, right);
|
| }
|
| __ TestIfInt32(result, scratch, r0);
|
| - DeoptimizeIf(ne, instr, "overflow");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
|
| if (instr->hydrogen()->representation().IsSmi()) {
|
| __ SmiTag(result);
|
| }
|
| @@ -1620,7 +1620,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
|
| __ mullw(result, left, right);
|
| }
|
| __ TestIfInt32(scratch, result, r0);
|
| - DeoptimizeIf(ne, instr, "overflow");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
|
| #endif
|
| } else {
|
| if (instr->hydrogen()->representation().IsSmi()) {
|
| @@ -1647,7 +1647,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
|
| #endif
|
| // Bail out if the result is minus zero.
|
| __ cmpi(result, Operand::Zero());
|
| - DeoptimizeIf(eq, instr, "minus zero");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
|
| __ bind(&done);
|
| }
|
| }
|
| @@ -1733,7 +1733,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
|
| #if V8_TARGET_ARCH_PPC64
|
| __ extsw(result, result, SetRC);
|
| #endif
|
| - DeoptimizeIf(lt, instr, "negative value", cr0);
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
|
| } else {
|
| __ srw(result, left, scratch);
|
| }
|
| @@ -1773,7 +1773,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
|
| } else {
|
| if (instr->can_deopt()) {
|
| __ cmpwi(left, Operand::Zero());
|
| - DeoptimizeIf(lt, instr, "negative value");
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
|
| }
|
| __ Move(result, left);
|
| }
|
| @@ -1792,7 +1792,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
|
| } else {
|
| __ SmiTagCheckOverflow(result, left, scratch);
|
| }
|
| - DeoptimizeIf(lt, instr, "overflow", cr0);
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
|
| #endif
|
| } else {
|
| __ slwi(result, left, Operand(shift_count));
|
| @@ -1837,7 +1837,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
|
| __ extsw(scratch0(), scratch0(), SetRC);
|
| }
|
| #endif
|
| - DeoptimizeIf(lt, instr, "overflow", cr0);
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
|
| }
|
|
|
| #if V8_TARGET_ARCH_PPC64
|
| @@ -1917,9 +1917,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
|
| DCHECK(!scratch.is(object));
|
|
|
| __ TestIfSmi(object, r0);
|
| - DeoptimizeIf(eq, instr, "Smi", cr0);
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
|
| __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
|
| - DeoptimizeIf(ne, instr, "not a date object");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
|
|
|
| if (index->value() == 0) {
|
| __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
|
| @@ -2053,7 +2053,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
|
| __ extsw(scratch0(), scratch0(), SetRC);
|
| }
|
| #endif
|
| - DeoptimizeIf(lt, instr, "overflow", cr0);
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
|
| }
|
|
|
| #if V8_TARGET_ARCH_PPC64
|
| @@ -2295,7 +2295,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
|
| } else if (expected.NeedsMap()) {
|
| // If we need a map later and have a Smi -> deopt.
|
| __ TestIfSmi(reg, r0);
|
| - DeoptimizeIf(eq, instr, "Smi", cr0);
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
|
| }
|
|
|
| const Register map = scratch0();
|
| @@ -2352,7 +2352,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
|
| if (!expected.IsGeneric()) {
|
| // We've seen something for the first time -> deopt.
|
| // This can only happen if we are not generic already.
|
| - DeoptimizeIf(al, instr, "unexpected object");
|
| + DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
|
| }
|
| }
|
| }
|
| @@ -3011,7 +3011,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
|
| if (instr->hydrogen()->RequiresHoleCheck()) {
|
| __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
|
| __ cmp(result, ip);
|
| - DeoptimizeIf(eq, instr, "hole");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kHole);
|
| }
|
| }
|
|
|
| @@ -3061,7 +3061,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
|
| Register payload = ToRegister(instr->temp());
|
| __ LoadP(payload, FieldMemOperand(cell, Cell::kValueOffset));
|
| __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
|
| - DeoptimizeIf(eq, instr, "hole");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kHole);
|
| }
|
|
|
| // Store the value.
|
| @@ -3078,7 +3078,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
|
| __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
|
| __ cmp(result, ip);
|
| if (instr->hydrogen()->DeoptimizesOnHole()) {
|
| - DeoptimizeIf(eq, instr, "hole");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kHole);
|
| } else {
|
| Label skip;
|
| __ bne(&skip);
|
| @@ -3102,7 +3102,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
|
| __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
|
| __ cmp(scratch, ip);
|
| if (instr->hydrogen()->DeoptimizesOnHole()) {
|
| - DeoptimizeIf(eq, instr, "hole");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kHole);
|
| } else {
|
| __ bne(&skip_assignment);
|
| }
|
| @@ -3194,7 +3194,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
|
| // Check that the function has a prototype or an initial map.
|
| __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
|
| __ cmp(result, ip);
|
| - DeoptimizeIf(eq, instr, "hole");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kHole);
|
|
|
| // If the function does not have an initial map, we're done.
|
| Label done;
|
| @@ -3354,7 +3354,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
|
| if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
|
| __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
|
| __ cmplw(result, r0);
|
| - DeoptimizeIf(ge, instr, "negative value");
|
| + DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
|
| }
|
| break;
|
| case FLOAT32_ELEMENTS:
|
| @@ -3417,7 +3417,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
|
| __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
|
| }
|
| __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
|
| - DeoptimizeIf(eq, instr, "hole");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kHole);
|
| }
|
| }
|
|
|
| @@ -3472,11 +3472,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
|
| if (requires_hole_check) {
|
| if (IsFastSmiElementsKind(hinstr->elements_kind())) {
|
| __ TestIfSmi(result, r0);
|
| - DeoptimizeIf(ne, instr, "not a Smi", cr0);
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
|
| } else {
|
| __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
|
| __ cmp(result, scratch);
|
| - DeoptimizeIf(eq, instr, "hole");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kHole);
|
| }
|
| }
|
| }
|
| @@ -3635,9 +3635,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
|
|
|
| // Deoptimize if the receiver is not a JS object.
|
| __ TestIfSmi(receiver, r0);
|
| - DeoptimizeIf(eq, instr, "Smi");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
|
| __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
|
| - DeoptimizeIf(lt, instr, "not a JavaScript object");
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
|
|
|
| __ b(&result_in_receiver);
|
| __ bind(&global_object);
|
| @@ -3670,7 +3670,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
|
| // adaptor frame below it.
|
| const uint32_t kArgumentsLimit = 1 * KB;
|
| __ cmpli(length, Operand(kArgumentsLimit));
|
| - DeoptimizeIf(gt, instr, "too many arguments");
|
| + DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
|
|
|
| // Push the receiver and use the register to keep the original
|
| // number of arguments.
|
| @@ -3803,7 +3803,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
|
| __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
|
| __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
|
| __ cmp(scratch, ip);
|
| - DeoptimizeIf(ne, instr, "not a heap number");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
|
|
|
| Label done;
|
| Register exponent = scratch0();
|
| @@ -3873,7 +3873,7 @@ void LCodeGen::EmitMathAbs(LMathAbs* instr) {
|
| __ mtxer(r0);
|
| __ neg(result, result, SetOE, SetRC);
|
| // Deoptimize on overflow.
|
| - DeoptimizeIf(overflow, instr, "overflow", cr0);
|
| + DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
|
| __ bind(&done);
|
| }
|
|
|
| @@ -3890,7 +3890,7 @@ void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
|
| // Deoptimize on overflow.
|
| __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
|
| __ cmpw(input, r0);
|
| - DeoptimizeIf(eq, instr, "overflow");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
|
|
|
| __ neg(result, result);
|
| __ bind(&done);
|
| @@ -3949,7 +3949,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
|
|
|
| __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
|
| &exact);
|
| - DeoptimizeIf(al, instr, "lost precision or NaN");
|
| + DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
|
|
|
| __ bind(&exact);
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| @@ -3957,7 +3957,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
|
| __ cmpi(result, Operand::Zero());
|
| __ bne(&done);
|
| __ cmpwi(input_high, Operand::Zero());
|
| - DeoptimizeIf(lt, instr, "minus zero");
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| }
|
| __ bind(&done);
|
| }
|
| @@ -3976,7 +3976,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
|
| __ LoadDoubleLiteral(dot_five, 0.5, r0);
|
| __ fabs(double_scratch1, input);
|
| __ fcmpu(double_scratch1, dot_five);
|
| - DeoptimizeIf(unordered, instr, "lost precision or NaN");
|
| + DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
|
| // If input is in [-0.5, -0], the result is -0.
|
| // If input is in [+0, +0.5[, the result is +0.
|
| // If the input is +0.5, the result is 1.
|
| @@ -3989,7 +3989,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
|
| #endif
|
| __ cmpi(scratch1, Operand::Zero());
|
| // [-0.5, -0].
|
| - DeoptimizeIf(lt, instr, "minus zero");
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| }
|
| Label return_zero;
|
| __ fcmpu(input, dot_five);
|
| @@ -4007,7 +4007,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
|
| // Reuse dot_five (double_scratch0) as we no longer need this value.
|
| __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
|
| double_scratch0(), &done, &done);
|
| - DeoptimizeIf(al, instr, "lost precision or NaN");
|
| + DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
|
| __ bind(&done);
|
| }
|
|
|
| @@ -4073,7 +4073,7 @@ void LCodeGen::DoPower(LPower* instr) {
|
| __ LoadP(r10, FieldMemOperand(r5, HeapObject::kMapOffset));
|
| __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
|
| __ cmp(r10, ip);
|
| - DeoptimizeIf(ne, instr, "not a heap number");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
|
| __ bind(&no_deopt);
|
| MathPowStub stub(isolate(), MathPowStub::TAGGED);
|
| __ CallStub(&stub);
|
| @@ -4452,7 +4452,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
|
| __ stop("eliminated bounds check failed");
|
| __ bind(&done);
|
| } else {
|
| - DeoptimizeIf(cc, instr, "out of bounds");
|
| + DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
|
| }
|
| }
|
|
|
| @@ -4725,7 +4725,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
|
| Register temp = ToRegister(instr->temp());
|
| Label no_memento_found;
|
| __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
|
| - DeoptimizeIf(eq, instr, "memento found");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
|
| __ bind(&no_memento_found);
|
| }
|
|
|
| @@ -5040,13 +5040,13 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
|
| if (hchange->CheckFlag(HValue::kCanOverflow) &&
|
| hchange->value()->CheckFlag(HValue::kUint32)) {
|
| __ TestUnsignedSmiCandidate(input, r0);
|
| - DeoptimizeIf(ne, instr, "overflow", cr0);
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
|
| }
|
| #if !V8_TARGET_ARCH_PPC64
|
| if (hchange->CheckFlag(HValue::kCanOverflow) &&
|
| !hchange->value()->CheckFlag(HValue::kUint32)) {
|
| __ SmiTagCheckOverflow(output, input, r0);
|
| - DeoptimizeIf(lt, instr, "overflow", cr0);
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
|
| } else {
|
| #endif
|
| __ SmiTag(output, input);
|
| @@ -5065,7 +5065,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
|
| // If the input is a HeapObject, value of scratch won't be zero.
|
| __ andi(scratch, input, Operand(kHeapObjectTag));
|
| __ SmiUntag(result, input);
|
| - DeoptimizeIf(ne, instr, "not a Smi", cr0);
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
|
| } else {
|
| __ SmiUntag(result, input);
|
| }
|
| @@ -5095,7 +5095,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
|
| if (can_convert_undefined_to_nan) {
|
| __ bne(&convert);
|
| } else {
|
| - DeoptimizeIf(ne, instr, "not a heap number");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
|
| }
|
| // load heap number
|
| __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
|
| @@ -5111,7 +5111,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
|
| __ bne(&done);
|
| __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
|
| #endif
|
| - DeoptimizeIf(eq, instr, "minus zero");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
|
| }
|
| __ b(&done);
|
| if (can_convert_undefined_to_nan) {
|
| @@ -5119,7 +5119,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
|
| // Convert undefined (and hole) to NaN.
|
| __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
|
| __ cmp(input_reg, ip);
|
| - DeoptimizeIf(ne, instr, "not a heap number/undefined");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
|
| __ LoadRoot(scratch, Heap::kNanValueRootIndex);
|
| __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
|
| __ b(&done);
|
| @@ -5181,10 +5181,11 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
|
| __ bind(&check_false);
|
| __ LoadRoot(ip, Heap::kFalseValueRootIndex);
|
| __ cmp(input_reg, ip);
|
| - DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", cr7);
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedTrueFalse,
|
| + cr7);
|
| __ li(input_reg, Operand::Zero());
|
| } else {
|
| - DeoptimizeIf(ne, instr, "not a heap number", cr7);
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, cr7);
|
|
|
| __ lfd(double_scratch2,
|
| FieldMemOperand(input_reg, HeapNumber::kValueOffset));
|
| @@ -5194,7 +5195,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
|
| }
|
| __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
|
| double_scratch);
|
| - DeoptimizeIf(ne, instr, "lost precision or NaN", cr7);
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, cr7);
|
|
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| __ cmpi(input_reg, Operand::Zero());
|
| @@ -5203,7 +5204,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
|
| FieldMemOperand(scratch2, HeapNumber::kValueOffset +
|
| Register::kExponentOffset));
|
| __ cmpwi(scratch1, Operand::Zero());
|
| - DeoptimizeIf(lt, instr, "minus zero", cr7);
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, cr7);
|
| }
|
| }
|
| __ bind(&done);
|
| @@ -5272,7 +5273,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
|
| __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
|
| double_scratch);
|
| // Deoptimize if the input wasn't a int32 (inside a double).
|
| - DeoptimizeIf(ne, instr, "lost precision or NaN");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| Label done;
|
| __ cmpi(result_reg, Operand::Zero());
|
| @@ -5283,7 +5284,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
|
| __ MovDoubleHighToInt(scratch1, double_input);
|
| #endif
|
| __ cmpi(scratch1, Operand::Zero());
|
| - DeoptimizeIf(lt, instr, "minus zero");
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| __ bind(&done);
|
| }
|
| }
|
| @@ -5302,7 +5303,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
|
| __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
|
| double_scratch);
|
| // Deoptimize if the input wasn't a int32 (inside a double).
|
| - DeoptimizeIf(ne, instr, "lost precision or NaN");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
|
| if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
|
| Label done;
|
| __ cmpi(result_reg, Operand::Zero());
|
| @@ -5313,7 +5314,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
|
| __ MovDoubleHighToInt(scratch1, double_input);
|
| #endif
|
| __ cmpi(scratch1, Operand::Zero());
|
| - DeoptimizeIf(lt, instr, "minus zero");
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
|
| __ bind(&done);
|
| }
|
| }
|
| @@ -5321,7 +5322,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
|
| __ SmiTag(result_reg);
|
| #else
|
| __ SmiTagCheckOverflow(result_reg, r0);
|
| - DeoptimizeIf(lt, instr, "overflow", cr0);
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
|
| #endif
|
| }
|
|
|
| @@ -5329,7 +5330,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
|
| void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
|
| LOperand* input = instr->value();
|
| __ TestIfSmi(ToRegister(input), r0);
|
| - DeoptimizeIf(ne, instr, "not a Smi", cr0);
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
|
| }
|
|
|
|
|
| @@ -5337,7 +5338,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
|
| if (!instr->hydrogen()->value()->type().IsHeapObject()) {
|
| LOperand* input = instr->value();
|
| __ TestIfSmi(ToRegister(input), r0);
|
| - DeoptimizeIf(eq, instr, "Smi", cr0);
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
|
| }
|
| }
|
|
|
| @@ -5358,13 +5359,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
|
|
|
| // If there is only one type in the interval check for equality.
|
| if (first == last) {
|
| - DeoptimizeIf(ne, instr, "wrong instance type");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
|
| } else {
|
| - DeoptimizeIf(lt, instr, "wrong instance type");
|
| + DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
|
| // Omit check for the last type.
|
| if (last != LAST_TYPE) {
|
| __ cmpli(scratch, Operand(last));
|
| - DeoptimizeIf(gt, instr, "wrong instance type");
|
| + DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
|
| }
|
| }
|
| } else {
|
| @@ -5375,11 +5376,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
|
| if (base::bits::IsPowerOfTwo32(mask)) {
|
| DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
|
| __ andi(r0, scratch, Operand(mask));
|
| - DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", cr0);
|
| + DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
|
| + cr0);
|
| } else {
|
| __ andi(scratch, scratch, Operand(mask));
|
| __ cmpi(scratch, Operand(tag));
|
| - DeoptimizeIf(ne, instr, "wrong instance type");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
|
| }
|
| }
|
| }
|
| @@ -5398,7 +5400,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
|
| } else {
|
| __ Cmpi(reg, Operand(object), r0);
|
| }
|
| - DeoptimizeIf(ne, instr, "value mismatch");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
|
| }
|
|
|
|
|
| @@ -5413,7 +5415,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
|
| __ StoreToSafepointRegisterSlot(r3, scratch0());
|
| }
|
| __ TestIfSmi(scratch0(), r0);
|
| - DeoptimizeIf(eq, instr, "instance migration failed", cr0);
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
|
| }
|
|
|
|
|
| @@ -5471,7 +5473,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
|
| if (instr->hydrogen()->HasMigrationTarget()) {
|
| __ bne(deferred->entry());
|
| } else {
|
| - DeoptimizeIf(ne, instr, "wrong map");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
|
| }
|
|
|
| __ bind(&success);
|
| @@ -5510,7 +5512,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
|
| // Check for undefined. Undefined is converted to zero for clamping
|
| // conversions.
|
| __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
|
| - DeoptimizeIf(ne, instr, "not a heap number/undefined");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
|
| __ li(result_reg, Operand::Zero());
|
| __ b(&done);
|
|
|
| @@ -5982,19 +5984,19 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
|
| void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
|
| __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
|
| __ cmp(r3, ip);
|
| - DeoptimizeIf(eq, instr, "undefined");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kUndefined);
|
|
|
| Register null_value = r8;
|
| __ LoadRoot(null_value, Heap::kNullValueRootIndex);
|
| __ cmp(r3, null_value);
|
| - DeoptimizeIf(eq, instr, "null");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kNull);
|
|
|
| __ TestIfSmi(r3, r0);
|
| - DeoptimizeIf(eq, instr, "Smi", cr0);
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
|
|
|
| STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
|
| __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
|
| - DeoptimizeIf(le, instr, "wrong instance type");
|
| + DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
|
|
|
| Label use_cache, call_runtime;
|
| __ CheckEnumCache(null_value, &call_runtime);
|
| @@ -6010,7 +6012,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
|
| __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
|
| __ LoadRoot(ip, Heap::kMetaMapRootIndex);
|
| __ cmp(r4, ip);
|
| - DeoptimizeIf(ne, instr, "wrong map");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
|
| __ bind(&use_cache);
|
| }
|
|
|
| @@ -6030,7 +6032,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
|
| __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
|
| __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
|
| __ cmpi(result, Operand::Zero());
|
| - DeoptimizeIf(eq, instr, "no cache");
|
| + DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
|
|
|
| __ bind(&done);
|
| }
|
| @@ -6041,7 +6043,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
|
| Register map = ToRegister(instr->map());
|
| __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
|
| __ cmp(map, scratch0());
|
| - DeoptimizeIf(ne, instr, "wrong map");
|
| + DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
|
| }
|
|
|
|
|
|
|