Index: src/arm/lithium-codegen-arm.cc
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index c7bff674610bc8cb21ca03b7093032cae00771e8..1b12643fbc421f06b123b14ce2ef971f5597bf17 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -840,7 +840,7 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail,
+ Deoptimizer::DeoptReason deopt_reason,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -894,7 +894,7 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), detail);
+ instr->Mnemonic(), deopt_reason);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to handle condition, build frame, or
// restore caller doubles.
@@ -917,11 +917,11 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- const char* detail) {
+ Deoptimizer::DeoptReason deopt_reason) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
- DeoptimizeIf(condition, instr, detail, bailout_type);
+ DeoptimizeIf(condition, instr, deopt_reason, bailout_type);
}
@@ -1157,7 +1157,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ and_(dividend, dividend, Operand(mask));
__ rsb(dividend, dividend, Operand::Zero(), SetCC);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
__ b(&done);
}
@@ -1175,7 +1175,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, "division by zero");
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1190,7 +1190,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ b(ne, &remainder_not_zero);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr, "minus zero");
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -1210,7 +1210,7 @@ void LCodeGen::DoModI(LModI* instr) {
// case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, "division by zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
}
// Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
@@ -1221,7 +1221,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ b(ne, &no_overflow_possible);
__ cmp(right_reg, Operand(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
} else {
__ b(ne, &no_overflow_possible);
__ mov(result_reg, Operand::Zero());
@@ -1242,7 +1242,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr, "minus zero");
+ DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
}
__ bind(&done);
@@ -1267,7 +1267,7 @@ void LCodeGen::DoModI(LModI* instr) {
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, "division by zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
}
__ Move(result_reg, left_reg);
@@ -1297,7 +1297,7 @@ void LCodeGen::DoModI(LModI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(mi, instr, "minus zero");
+ DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
}
__ bind(&done);
}
@@ -1315,19 +1315,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, "overflow");
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ tst(dividend, Operand(mask));
- DeoptimizeIf(ne, instr, "lost precision");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
}
if (divisor == -1) {  // Nice shortcut, not needed for correctness.
@@ -1355,7 +1355,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, "division by zero");
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1363,7 +1363,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -1373,7 +1373,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(ip, Operand(divisor));
__ smull(scratch0(), ip, result, ip);
__ sub(scratch0(), scratch0(), dividend, SetCC);
- DeoptimizeIf(ne, instr, "lost precision");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
}
}
@@ -1388,7 +1388,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, "division by zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1400,7 +1400,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
__ b(pl, &positive);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
__ bind(&positive);
}
@@ -1412,7 +1412,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(dividend, Operand(kMinInt));
__ cmp(divisor, Operand(-1), eq);
- DeoptimizeIf(eq, instr, "overflow");
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
}
if (CpuFeatures::IsSupported(SUDIV)) {
@@ -1435,7 +1435,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
Register remainder = scratch0();
__ Mls(remainder, result, divisor, dividend);
__ cmp(remainder, Operand::Zero());
- DeoptimizeIf(ne, instr, "lost precision");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
}
}
@@ -1486,13 +1486,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ rsb(result, dividend, Operand::Zero(), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
return;
}
@@ -1515,7 +1515,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, "division by zero");
+ DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
return;
}
@@ -1523,7 +1523,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1564,7 +1564,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr, "division by zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1576,7 +1576,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
}
__ b(pl, &positive);
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
__ bind(&positive);
}
@@ -1588,7 +1588,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(left, Operand(kMinInt));
__ cmp(right, Operand(-1), eq);
- DeoptimizeIf(eq, instr, "overflow");
+ DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
}
if (CpuFeatures::IsSupported(SUDIV)) {
@@ -1634,14 +1634,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
switch (constant) {
case -1:
if (overflow) {
__ rsb(result, left, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ rsb(result, left, Operand::Zero());
}
@@ -1651,7 +1651,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(mi, instr, "minus zero");
+ DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
}
__ mov(result, Operand::Zero());
break;
@@ -1701,7 +1701,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ smull(result, scratch, left, right);
}
__ cmp(scratch, Operand(result, ASR, 31));
- DeoptimizeIf(ne, instr, "overflow");
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@@ -1717,7 +1717,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ b(pl, &done);
// Bail out if the result is minus zero.
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
__ bind(&done);
}
}
@@ -1780,7 +1780,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::SHR:
if (instr->can_deopt()) {
__ mov(result, Operand(left, LSR, scratch), SetCC);
- DeoptimizeIf(mi, instr, "negative value");
+ DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue);
} else {
__ mov(result, Operand(left, LSR, scratch));
}
@@ -1817,7 +1817,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ tst(left, Operand(0x80000000));
- DeoptimizeIf(ne, instr, "negative value");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
}
__ Move(result, left);
}
@@ -1832,7 +1832,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
__ SmiTag(result, left, SetCC);
}
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ mov(result, Operand(left, LSL, shift_count));
}
@@ -1864,7 +1864,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
}
@@ -1885,7 +1885,7 @@ void LCodeGen::DoRSubI(LRSubI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
}
@@ -1953,9 +1953,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
DCHECK(!scratch.is(object));
__ SmiTst(object);
- DeoptimizeIf(eq, instr, "Smi");
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
__ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr, "not a date object");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
if (index->value() == 0) {
__ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -2072,7 +2072,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
}
@@ -2297,7 +2297,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg);
- DeoptimizeIf(eq, instr, "Smi");
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
}
const Register map = scratch0();
@@ -2353,7 +2353,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, "unexpected object");
+ DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
}
}
}
@@ -2999,7 +2999,7 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
}
}
@@ -3054,7 +3054,7 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
Register payload = ToRegister(instr->temp());
__ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
__ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
}
// Store the value.
@@ -3071,7 +3071,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
} else {
__ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
}
@@ -3092,7 +3092,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
} else {
__ b(ne, &skip_assignment);
}
@@ -3173,7 +3173,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
// If the function does not have an initial map, we're done.
Label done;
@@ -3299,7 +3299,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ cmp(result, Operand(0x80000000));
- DeoptimizeIf(cs, instr, "negative value");
+ DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@@ -3352,7 +3352,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
}
}
@@ -3386,11 +3386,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result);
- DeoptimizeIf(ne, instr, "not a Smi");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
- DeoptimizeIf(eq, instr, "hole");
+ DeoptimizeIf(eq, instr, Deoptimizer::kHole);
}
}
}
@@ -3532,9 +3532,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver);
- DeoptimizeIf(eq, instr, "Smi");
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
__ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
- DeoptimizeIf(lt, instr, "not a JavaScript object");
+ DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
__ b(&result_in_receiver);
__ bind(&global_object);
@@ -3569,7 +3569,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, Operand(kArgumentsLimit));
- DeoptimizeIf(hi, instr, "too many arguments");
+ DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -3695,7 +3695,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- DeoptimizeIf(ne, instr, "not a heap number");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
Label done;
Register exponent = scratch0();
@@ -3763,7 +3763,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
// if input is positive.
__ rsb(result, input, Operand::Zero(), SetCC, mi);
// Deoptimize on overflow.
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
@@ -3810,7 +3810,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
Label done, exact;
__ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
- DeoptimizeIf(al, instr, "lost precision or NaN");
+ DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3818,7 +3818,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ cmp(result, Operand::Zero());
__ b(ne, &done);
__ cmp(input_high, Operand::Zero());
- DeoptimizeIf(mi, instr, "minus zero");
+ DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
}
__ bind(&done);
}
@@ -3844,7 +3844,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ VmovHigh(input_high, input);
__ cmp(input_high, Operand::Zero());
// [-0.5, -0].
- DeoptimizeIf(mi, instr, "minus zero");
+ DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
}
__ VFPCompareAndSetFlags(input, dot_five);
__ mov(result, Operand(1), LeaveCC, eq);  // +0.5.
@@ -3858,7 +3858,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
&done, &done);
- DeoptimizeIf(al, instr, "lost precision or NaN");
+ DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
__ bind(&done);
}
@@ -3922,7 +3922,7 @@ void LCodeGen::DoPower(LPower* instr) {
__ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r6, Operand(ip));
- DeoptimizeIf(ne, instr, "not a heap number");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -4331,7 +4331,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, "out of bounds");
+ DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
}
}
@@ -4579,7 +4579,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(eq, instr, "memento found");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
__ bind(&no_memento_found);
}
@@ -4914,12 +4914,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ tst(input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr, "overflow");
+ DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTag(output, input, SetCC);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
} else {
__ SmiTag(output, input);
}
@@ -4933,7 +4933,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
STATIC_ASSERT(kHeapObjectTag == 1);
// If the input is a HeapObject, SmiUntag will set the carry flag.
__ SmiUntag(result, input, SetCC);
- DeoptimizeIf(cs, instr, "not a Smi");
+ DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi);
} else {
__ SmiUntag(result, input);
}
@@ -4961,7 +4961,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ b(ne, &convert);
} else {
- DeoptimizeIf(ne, instr, "not a heap number");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
}
// load heap number
__ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
@@ -4971,7 +4971,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ b(ne, &done);
__ VmovHigh(scratch, result_reg);
__ cmp(scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, instr, "minus zero");
+ DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
}
__ jmp(&done);
if (can_convert_undefined_to_nan) {
@@ -4979,7 +4979,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, instr, "not a heap number/undefined");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(&done);
@@ -5047,22 +5047,22 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(scratch2, Operand(ip));
- DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
__ mov(input_reg, Operand::Zero());
} else {
- DeoptimizeIf(ne, instr, "not a heap number");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
__ sub(ip, scratch2, Operand(kHeapObjectTag));
__ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
- DeoptimizeIf(ne, instr, "lost precision or NaN");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(input_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_scratch2);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero");
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
}
}
__ bind(&done);
@@ -5131,14 +5131,14 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr, "lost precision or NaN");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero");
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
__ bind(&done);
}
}
@@ -5156,26 +5156,26 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr, "lost precision or NaN");
+ DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, "minus zero");
+ DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
__ bind(&done);
}
}
__ SmiTag(result_reg, SetCC);
- DeoptimizeIf(vs, instr, "overflow");
+ DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(ne, instr, "not a Smi");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
}
@@ -5183,7 +5183,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(eq, instr, "Smi");
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
}
}
@@ -5204,13 +5204,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, "wrong instance type");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
} else {
- DeoptimizeIf(lo, instr, "wrong instance type");
+ DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmp(scratch, Operand(last));
- DeoptimizeIf(hi, instr, "wrong instance type");
+ DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
}
}
} else {
@@ -5221,11 +5221,11 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ tst(scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type");
+ DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
} else {
__ and_(scratch, scratch, Operand(mask));
__ cmp(scratch, Operand(tag));
- DeoptimizeIf(ne, instr, "wrong instance type");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
}
}
}
@@ -5244,7 +5244,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr, "value mismatch");
+ DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
}
@@ -5259,7 +5259,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(r0, scratch0());
}
__ tst(scratch0(), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr, "instance migration failed");
+ DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed);
}
@@ -5317,7 +5317,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ b(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr, "wrong map");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}
__ bind(&success);
@@ -5356,7 +5356,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, Operand(factory()->undefined_value()));
- DeoptimizeIf(ne, instr, "not a heap number/undefined");
+ DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
__ mov(result_reg, Operand::Zero());
__ jmp(&done);
@@ -5824,19 +5824,19 @@ void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(r0, ip);
- DeoptimizeIf(eq, instr, "undefined");
+ DeoptimizeIf(eq, instr, Deoptimizer::kUndefined);
Register null_value = r5;
__ LoadRoot(null_value, Heap::kNullValueRootIndex);
__ cmp(r0, null_value);
- DeoptimizeIf(eq, instr, "null");
+ DeoptimizeIf(eq, instr, Deoptimizer::kNull);
__ SmiTst(r0);
- DeoptimizeIf(eq, instr, "Smi");
+ DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
__ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
- DeoptimizeIf(le, instr, "wrong instance type");
+ DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType);
Label use_cache, call_runtime;
__ CheckEnumCache(null_value, &call_runtime);
@@ -5852,7 +5852,7 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kMetaMapRootIndex);
__ cmp(r1, ip);
- DeoptimizeIf(ne, instr, "wrong map");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
__ bind(&use_cache);
}
@@ -5874,7 +5874,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr, "no cache");
+ DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
__ bind(&done);
}
@@ -5885,7 +5885,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register map = ToRegister(instr->map());
__ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
- DeoptimizeIf(ne, instr, "wrong map");
+ DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
}
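
Note for reviewers: the Deoptimizer::k* constants used throughout this patch belong to the Deoptimizer::DeoptReason enum that replaces the old free-form detail strings. The snippet below is only a rough illustration of how such an enum can be kept in sync with its human-readable messages via a single X-macro list; the list entries and helper names here are hypothetical, not the actual V8 declarations.

// Hypothetical sketch only -- names and list entries are illustrative,
// not copied from the real V8 headers.
#include <cstdio>

#define DEOPT_REASON_LIST(V)            \
  V(MinusZero, "minus zero")            \
  V(DivisionByZero, "division by zero") \
  V(Overflow, "overflow")               \
  V(LostPrecision, "lost precision")    \
  V(Hole, "hole")                       \
  V(WrongMap, "wrong map")

enum DeoptReason {
#define DECLARE_REASON(Name, message) k##Name,
  DEOPT_REASON_LIST(DECLARE_REASON)
#undef DECLARE_REASON
  kLastDeoptReason
};

// Maps an enum value back to the message previously passed as a raw string.
inline const char* DeoptReasonToString(DeoptReason reason) {
  static const char* const kMessages[] = {
#define REASON_MESSAGE(Name, message) message,
      DEOPT_REASON_LIST(REASON_MESSAGE)
#undef REASON_MESSAGE
  };
  return kMessages[reason];
}

int main() {
  // A call site now names the reason symbolically (kMinusZero) instead of
  // spelling out "minus zero" at every DeoptimizeIf() call.
  std::printf("%s\n", DeoptReasonToString(kMinusZero));
  return 0;
}

With this shape, a typo in a reason can no longer silently create a new deopt message, and tools that aggregate deopt statistics can key on the enum value rather than comparing strings.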