Index: src/ppc/lithium-codegen-ppc.cc
diff --git a/src/ppc/lithium-codegen-ppc.cc b/src/ppc/lithium-codegen-ppc.cc
index c0cafeebd3cb32e90d87645746072d6f5acc0826..7b6052c3e624ef89eb678241ef74dbffcfadf5dc 100644
--- a/src/ppc/lithium-codegen-ppc.cc
+++ b/src/ppc/lithium-codegen-ppc.cc
@@ -1065,12 +1065,12 @@
__ ExtractBitRange(dividend, dividend, shift - 1, 0);
__ neg(dividend, dividend, LeaveOE, SetRC);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
+ DeoptimizeIf(eq, instr, "minus zero", cr0);
}
} else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ li(dividend, Operand::Zero());
} else {
- DeoptimizeIf(al, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(al, instr, "minus zero");
}
__ b(&done);
}
@@ -1092,7 +1092,7 @@
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
@@ -1107,7 +1107,7 @@
Label remainder_not_zero;
__ bne(&remainder_not_zero, cr0);
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
@@ -1131,7 +1131,7 @@
// Check for x % 0.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmpwi(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, "division by zero");
}
// Check for kMinInt % -1, divw will return undefined, which is not what we
@@ -1139,7 +1139,7 @@
if (hmod->CheckFlag(HValue::kCanOverflow)) {
Label no_overflow_possible;
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kMinusZero, cr0);
+ DeoptimizeIf(overflow, instr, "minus zero", cr0);
} else {
__ bnooverflow(&no_overflow_possible, cr0);
__ li(result_reg, Operand::Zero());
@@ -1155,7 +1155,7 @@
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ bne(&done, cr0);
__ cmpwi(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
}
__ bind(&done);
@@ -1173,13 +1173,13 @@
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(dividend, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, "overflow");
}
int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1187,7 +1187,7 @@
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
__ TestBitRange(dividend, shift - 1, 0, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision, cr0);
+ DeoptimizeIf(ne, instr, "lost precision", cr0);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -1217,7 +1217,7 @@
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
@@ -1225,7 +1225,7 @@
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -1236,7 +1236,7 @@
__ mov(ip, Operand(divisor));
__ mullw(scratch, result, ip);
__ cmpw(scratch, dividend);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, "lost precision");
}
}
@@ -1261,7 +1261,7 @@
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
@@ -1270,7 +1270,7 @@
__ cmpwi(dividend, Operand::Zero());
__ bne(&dividend_not_zero);
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
__ bind(&dividend_not_zero);
}
@@ -1278,7 +1278,7 @@
if (hdiv->CheckFlag(HValue::kCanOverflow)) {
Label no_overflow_possible;
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, "overflow", cr0);
} else {
// When truncating, we want kMinInt / -1 = kMinInt.
__ bnooverflow(&no_overflow_possible, cr0);
@@ -1292,7 +1292,7 @@
Register scratch = scratch0();
__ mullw(scratch, divisor, result);
__ cmpw(dividend, scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, "lost precision");
}
}
@@ -1319,7 +1319,7 @@
if (divisor == -1 && hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(dividend, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, "overflow");
}
#else
if (hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
@@ -1331,7 +1331,7 @@
__ neg(result, dividend, oe, SetRC);
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero, cr0);
+ DeoptimizeIf(eq, instr, "minus zero", cr0);
}
// If the negation could not overflow, simply shifting is OK.
@@ -1347,7 +1347,7 @@
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, "overflow", cr0);
return;
}
@@ -1369,7 +1369,7 @@
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, "division by zero");
return;
}
@@ -1377,7 +1377,7 @@
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmpwi(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1428,7 +1428,7 @@
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, "division by zero");
}
// Check for (0 / -x) that will produce negative zero.
@@ -1437,7 +1437,7 @@
__ cmpwi(dividend, Operand::Zero());
__ bne(&dividend_not_zero);
__ cmpwi(divisor, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
__ bind(&dividend_not_zero);
}
@@ -1445,7 +1445,7 @@
if (hdiv->CheckFlag(HValue::kCanOverflow)) {
Label no_overflow_possible;
if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, "overflow", cr0);
} else {
// When truncating, we want kMinInt / -1 = kMinInt.
__ bnooverflow(&no_overflow_possible, cr0);
@@ -1515,7 +1515,7 @@
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ cmpi(left, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
switch (constant) {
@@ -1527,12 +1527,12 @@
__ li(r0, Operand::Zero()); // clear xer
__ mtxer(r0);
__ neg(result, left, SetOE, SetRC);
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, "overflow", cr0);
#if V8_TARGET_ARCH_PPC64
} else {
__ neg(result, left);
__ TestIfInt32(result, scratch, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, "overflow");
}
#endif
} else {
@@ -1552,7 +1552,7 @@
__ cmpwi(left, Operand::Zero());
}
#endif
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
}
__ li(result, Operand::Zero());
break;
@@ -1605,7 +1605,7 @@
__ Mul(result, left, right);
}
__ TestIfInt32(result, scratch, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, "overflow");
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiTag(result);
}
@@ -1620,7 +1620,7 @@
__ mullw(result, left, right);
}
__ TestIfInt32(scratch, result, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, "overflow");
#endif
} else {
if (instr->hydrogen()->representation().IsSmi()) {
@@ -1647,7 +1647,7 @@
#endif
// Bail out if the result is minus zero.
__ cmpi(result, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
__ bind(&done);
}
}
@@ -1733,7 +1733,7 @@
#if V8_TARGET_ARCH_PPC64
__ extsw(result, result, SetRC);
#endif
- DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue, cr0);
+ DeoptimizeIf(lt, instr, "negative value", cr0);
} else {
__ srw(result, left, scratch);
}
@@ -1773,7 +1773,7 @@
} else {
if (instr->can_deopt()) {
__ cmpwi(left, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(lt, instr, "negative value");
}
__ Move(result, left);
}
@@ -1792,7 +1792,7 @@
} else {
__ SmiTagCheckOverflow(result, left, scratch);
}
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, "overflow", cr0);
#endif
} else {
__ slwi(result, left, Operand(shift_count));
@@ -1837,7 +1837,7 @@
__ extsw(scratch0(), scratch0(), SetRC);
}
#endif
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, "overflow", cr0);
}
#if V8_TARGET_ARCH_PPC64
@@ -1917,9 +1917,9 @@
DCHECK(!scratch.is(object));
__ TestIfSmi(object, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, "Smi", cr0);
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
+ DeoptimizeIf(ne, instr, "not a date object");
if (index->value() == 0) {
__ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -2053,7 +2053,7 @@
__ extsw(scratch0(), scratch0(), SetRC);
}
#endif
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, "overflow", cr0);
}
#if V8_TARGET_ARCH_PPC64
@@ -2295,7 +2295,7 @@
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ TestIfSmi(reg, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, "Smi", cr0);
}
const Register map = scratch0();
@@ -2352,7 +2352,7 @@
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
+ DeoptimizeIf(al, instr, "unexpected object");
}
}
}
@@ -3011,7 +3011,7 @@
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
}
}
@@ -3061,7 +3061,7 @@
Register payload = ToRegister(instr->temp());
__ LoadP(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
}
// Store the value.
@@ -3078,7 +3078,7 @@
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
} else {
Label skip;
__ bne(&skip);
@@ -3102,7 +3102,7 @@
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
} else {
__ bne(&skip_assignment);
}
@@ -3194,7 +3194,7 @@
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
@@ -3354,7 +3354,7 @@
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmplw(result, r0);
- DeoptimizeIf(ge, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(ge, instr, "negative value");
}
break;
case FLOAT32_ELEMENTS:
@@ -3417,7 +3417,7 @@
__ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
}
__ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
}
}
@@ -3472,11 +3472,11 @@
if (requires_hole_check) {
if (IsFastSmiElementsKind(hinstr->elements_kind())) {
__ TestIfSmi(result, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ DeoptimizeIf(ne, instr, "not a Smi", cr0);
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, "hole");
}
}
}
@@ -3635,9 +3635,9 @@
// Deoptimize if the receiver is not a JS object.
__ TestIfSmi(receiver, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, "Smi");
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
- DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
+ DeoptimizeIf(lt, instr, "not a JavaScript object");
__ b(&result_in_receiver);
__ bind(&global_object);
@@ -3670,7 +3670,7 @@
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmpli(length, Operand(kArgumentsLimit));
- DeoptimizeIf(gt, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(gt, instr, "too many arguments");
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -3803,7 +3803,7 @@
__ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, "not a heap number");
Label done;
Register exponent = scratch0();
@@ -3873,7 +3873,7 @@
__ mtxer(r0);
__ neg(result, result, SetOE, SetRC);
// Deoptimize on overflow.
- DeoptimizeIf(overflow, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(overflow, instr, "overflow", cr0);
__ bind(&done);
}
@@ -3890,7 +3890,7 @@
// Deoptimize on overflow.
__ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
__ cmpw(input, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, "overflow");
__ neg(result, result);
__ bind(&done);
@@ -3949,7 +3949,7 @@
__ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
&exact);
- DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(al, instr, "lost precision or NaN");
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3957,7 +3957,7 @@
__ cmpi(result, Operand::Zero());
__ bne(&done);
__ cmpwi(input_high, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
}
__ bind(&done);
}
@@ -3976,7 +3976,7 @@
__ LoadDoubleLiteral(dot_five, 0.5, r0);
__ fabs(double_scratch1, input);
__ fcmpu(double_scratch1, dot_five);
- DeoptimizeIf(unordered, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(unordered, instr, "lost precision or NaN");
// If input is in [-0.5, -0], the result is -0.
// If input is in [+0, +0.5[, the result is +0.
// If the input is +0.5, the result is 1.
@@ -3989,7 +3989,7 @@
#endif
__ cmpi(scratch1, Operand::Zero());
// [-0.5, -0].
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
}
Label return_zero;
__ fcmpu(input, dot_five);
@@ -4007,7 +4007,7 @@
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
double_scratch0(), &done, &done);
- DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(al, instr, "lost precision or NaN");
__ bind(&done);
}
@@ -4073,7 +4073,7 @@
__ LoadP(r10, FieldMemOperand(r5, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r10, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, "not a heap number");
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -4452,7 +4452,7 @@
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cc, instr, "out of bounds");
}
}
@@ -4725,7 +4725,7 @@
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(eq, instr, "memento found");
__ bind(&no_memento_found);
}
@@ -5040,13 +5040,13 @@
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ TestUnsignedSmiCandidate(input, r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(ne, instr, "overflow", cr0);
}
#if !V8_TARGET_ARCH_PPC64
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTagCheckOverflow(output, input, r0);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, "overflow", cr0);
} else {
#endif
__ SmiTag(output, input);
@@ -5065,7 +5065,7 @@
// If the input is a HeapObject, value of scratch won't be zero.
__ andi(scratch, input, Operand(kHeapObjectTag));
__ SmiUntag(result, input);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ DeoptimizeIf(ne, instr, "not a Smi", cr0);
} else {
__ SmiUntag(result, input);
}
@@ -5095,7 +5095,7 @@
if (can_convert_undefined_to_nan) {
__ bne(&convert);
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, "not a heap number");
}
// load heap number
__ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
@@ -5111,7 +5111,7 @@
__ bne(&done);
__ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
#endif
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
__ b(&done);
if (can_convert_undefined_to_nan) {
@@ -5119,7 +5119,7 @@
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(ne, instr, "not a heap number/undefined");
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
__ b(&done);
@@ -5181,11 +5181,10 @@
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(input_reg, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedTrueFalse,
- cr7);
+ DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", cr7);
__ li(input_reg, Operand::Zero());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber, cr7);
+ DeoptimizeIf(ne, instr, "not a heap number", cr7);
__ lfd(double_scratch2,
FieldMemOperand(input_reg, HeapNumber::kValueOffset));
@@ -5195,7 +5194,7 @@
}
__ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
double_scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN, cr7);
+ DeoptimizeIf(ne, instr, "lost precision or NaN", cr7);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmpi(input_reg, Operand::Zero());
@@ -5204,7 +5203,7 @@
FieldMemOperand(scratch2, HeapNumber::kValueOffset +
Register::kExponentOffset));
__ cmpwi(scratch1, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero, cr7);
+ DeoptimizeIf(lt, instr, "minus zero", cr7);
}
}
__ bind(&done);
@@ -5273,7 +5272,7 @@
__ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmpi(result_reg, Operand::Zero());
@@ -5284,7 +5283,7 @@
__ MovDoubleHighToInt(scratch1, double_input);
#endif
__ cmpi(scratch1, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
__ bind(&done);
}
}
@@ -5303,7 +5302,7 @@
__ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmpi(result_reg, Operand::Zero());
@@ -5314,7 +5313,7 @@
__ MovDoubleHighToInt(scratch1, double_input);
#endif
__ cmpi(scratch1, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, "minus zero");
__ bind(&done);
}
}
@@ -5322,7 +5321,7 @@
__ SmiTag(result_reg);
#else
__ SmiTagCheckOverflow(result_reg, r0);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow, cr0);
+ DeoptimizeIf(lt, instr, "overflow", cr0);
#endif
}
@@ -5330,7 +5329,7 @@
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ TestIfSmi(ToRegister(input), r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi, cr0);
+ DeoptimizeIf(ne, instr, "not a Smi", cr0);
}
@@ -5338,7 +5337,7 @@
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ TestIfSmi(ToRegister(input), r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, "Smi", cr0);
}
}
@@ -5359,13 +5358,13 @@
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, "wrong instance type");
} else {
- DeoptimizeIf(lt, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(lt, instr, "wrong instance type");
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmpli(scratch, Operand(last));
- DeoptimizeIf(gt, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(gt, instr, "wrong instance type");
}
}
} else {
@@ -5376,12 +5375,11 @@
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ andi(r0, scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType,
- cr0);
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", cr0);
} else {
__ andi(scratch, scratch, Operand(mask));
__ cmpi(scratch, Operand(tag));
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, "wrong instance type");
}
}
}
@@ -5400,7 +5398,7 @@
} else {
__ Cmpi(reg, Operand(object), r0);
}
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(ne, instr, "value mismatch");
}
@@ -5415,7 +5413,7 @@
__ StoreToSafepointRegisterSlot(r3, scratch0());
}
__ TestIfSmi(scratch0(), r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed, cr0);
+ DeoptimizeIf(eq, instr, "instance migration failed", cr0);
}
@@ -5473,7 +5471,7 @@
if (instr->hydrogen()->HasMigrationTarget()) {
__ bne(deferred->entry());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, "wrong map");
}
__ bind(&success);
@@ -5512,7 +5510,7 @@
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(ne, instr, "not a heap number/undefined");
__ li(result_reg, Operand::Zero());
__ b(&done);
@@ -5984,19 +5982,19 @@
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r3, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kUndefined);
+ DeoptimizeIf(eq, instr, "undefined");
Register null_value = r8;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmp(r3, null_value);
- DeoptimizeIf(eq, instr, Deoptimizer::kNull);
+ DeoptimizeIf(eq, instr, "null");
__ TestIfSmi(r3, r0);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi, cr0);
+ DeoptimizeIf(eq, instr, "Smi", cr0);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(le, instr, "wrong instance type");
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
@@ -6012,7 +6010,7 @@
__ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r4, ip);
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, "wrong map");
__ bind(&use_cache);
}
@@ -6032,7 +6030,7 @@
__ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
__ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmpi(result, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
+ DeoptimizeIf(eq, instr, "no cache");
__ bind(&done);
}
@@ -6043,7 +6041,7 @@
Register map = ToRegister(instr->map());
__ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, "wrong map");
}