Index: src/arm64/lithium-codegen-arm64.cc
diff --git a/src/arm64/lithium-codegen-arm64.cc b/src/arm64/lithium-codegen-arm64.cc
index 5ded2edd7666f80bb6551a743f1d475e81557db0..b0c9258dd06fd2608f5a191507888ad68524ccda 100644
--- a/src/arm64/lithium-codegen-arm64.cc
+++ b/src/arm64/lithium-codegen-arm64.cc
@@ -1005,8 +1005,9 @@ void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
 
 
 void LCodeGen::DeoptimizeBranch(
-    LInstruction* instr, const char* detail, BranchType branch_type,
-    Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
+    LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
+    BranchType branch_type, Register reg, int bit,
+    Deoptimizer::BailoutType* override_bailout_type) {
   LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   Deoptimizer::BailoutType bailout_type =
@@ -1057,7 +1058,7 @@ void LCodeGen::DeoptimizeBranch(
   }
 
   Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
-                             instr->Mnemonic(), detail);
+                             instr->Mnemonic(), deopt_reason);
   DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to build frame, or restore caller doubles.
   if (branch_type == always &&
@@ -1079,85 +1080,89 @@ void LCodeGen::DeoptimizeBranch(
 }
 
 
-void LCodeGen::Deoptimize(LInstruction* instr, const char* detail,
+void LCodeGen::Deoptimize(LInstruction* instr,
+                          Deoptimizer::DeoptReason deopt_reason,
                           Deoptimizer::BailoutType* override_bailout_type) {
-  DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
+  DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
+                   override_bailout_type);
 }
 
 
 void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
-                            const char* detail) {
-  DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond));
+                            Deoptimizer::DeoptReason deopt_reason) {
+  DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
 }
 
 
 void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
-                                const char* detail) {
-  DeoptimizeBranch(instr, detail, reg_zero, rt);
+                                Deoptimizer::DeoptReason deopt_reason) {
+  DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
 }
 
 
 void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
-                                   const char* detail) {
-  DeoptimizeBranch(instr, detail, reg_not_zero, rt);
+                                   Deoptimizer::DeoptReason deopt_reason) {
  DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
 }
 
 
 void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
-                                    const char* detail) {
+                                    Deoptimizer::DeoptReason deopt_reason) {
   int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
-  DeoptimizeIfBitSet(rt, sign_bit, instr, detail);
+  DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
 }
 
 
 void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
-                               const char* detail) {
-  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail);
+                               Deoptimizer::DeoptReason deopt_reason) {
+  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
 }
 
 
 void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
-                                  const char* detail) {
-  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail);
+                                  Deoptimizer::DeoptReason deopt_reason) {
+  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
 }
 
 
 void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
-                                LInstruction* instr, const char* detail) {
+                                LInstruction* instr,
+                                Deoptimizer::DeoptReason deopt_reason) {
   __ CompareRoot(rt, index);
-  DeoptimizeIf(eq, instr, detail);
+  DeoptimizeIf(eq, instr, deopt_reason);
 }
 
 
 void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
-                                   LInstruction* instr, const char* detail) {
+                                   LInstruction* instr,
+                                   Deoptimizer::DeoptReason deopt_reason) {
   __ CompareRoot(rt, index);
-  DeoptimizeIf(ne, instr, detail);
+  DeoptimizeIf(ne, instr, deopt_reason);
 }
 
 
 void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
-                                     const char* detail) {
+                                     Deoptimizer::DeoptReason deopt_reason) {
   __ TestForMinusZero(input);
-  DeoptimizeIf(vs, instr, detail);
+  DeoptimizeIf(vs, instr, deopt_reason);
 }
 
 
 void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
   __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
-  DeoptimizeIf(ne, instr, "not heap number");
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotHeapNumber);
 }
 
 
 void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
-                                  const char* detail) {
-  DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit);
+                                  Deoptimizer::DeoptReason deopt_reason) {
+  DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
 }
 
 
 void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
-                                    const char* detail) {
-  DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit);
+                                    Deoptimizer::DeoptReason deopt_reason) {
+  DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
 }
 
 
@@ -1531,7 +1536,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
 
   if (can_overflow) {
     __ Adds(result, left, right);
-    DeoptimizeIf(vs, instr, "overflow");
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   } else {
     __ Add(result, left, right);
   }
@@ -1545,7 +1550,7 @@ void LCodeGen::DoAddS(LAddS* instr) {
   Operand right = ToOperand(instr->right());
   if (can_overflow) {
     __ Adds(result, left, right);
-    DeoptimizeIf(vs, instr, "overflow");
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   } else {
     __ Add(result, left, right);
   }
@@ -1671,7 +1676,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
   // adaptor frame below it.
   const uint32_t kArgumentsLimit = 1 * KB;
   __ Cmp(length, kArgumentsLimit);
-  DeoptimizeIf(hi, instr, "too many arguments");
+  DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
 
   // Push the receiver and use the register to keep the original
   // number of arguments.
@@ -1852,7 +1857,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
     __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
   } else {
-    DeoptimizeIf(cond, instr, "out of bounds");
+    DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);
   }
 }
 
@@ -1931,7 +1936,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
         __ JumpIfSmi(value, true_label);
       } else if (expected.NeedsMap()) {
         // If we need a map later and have a smi, deopt.
-        DeoptimizeIfSmi(value, instr, "Smi");
+        DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi);
       }
 
       Register map = NoReg;
@@ -1992,7 +1997,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
       if (!expected.IsGeneric()) {
         // We've seen something for the first time -> deopt.
        // This can only happen if we are not generic already.
-        Deoptimize(instr, "unexpected object");
+        Deoptimize(instr, Deoptimizer::kUnexpectedObject);
       }
     }
   }
@@ -2198,7 +2203,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
     __ StoreToSafepointRegisterSlot(x0, temp);
   }
-  DeoptimizeIfSmi(temp, instr, "instance migration failed");
+  DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed);
 }
 
 
@@ -2253,7 +2258,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
   if (instr->hydrogen()->HasMigrationTarget()) {
     __ B(ne, deferred->entry());
   } else {
-    DeoptimizeIf(ne, instr, "wrong map");
+    DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
   }
 
   __ Bind(&success);
@@ -2262,7 +2267,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
 
 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
-    DeoptimizeIfSmi(ToRegister(instr->value()), instr, "Smi");
+    DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi);
   }
 }
 
@@ -2270,7 +2275,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   Register value = ToRegister(instr->value());
   DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
-  DeoptimizeIfNotSmi(value, instr, "not a Smi");
+  DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi);
 }
 
 
@@ -2288,15 +2293,15 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
     __ Cmp(scratch, first);
     if (first == last) {
       // If there is only one type in the interval check for equality.
-      DeoptimizeIf(ne, instr, "wrong instance type");
+      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
     } else if (last == LAST_TYPE) {
       // We don't need to compare with the higher bound of the interval.
-      DeoptimizeIf(lo, instr, "wrong instance type");
+      DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
     } else {
       // If we are below the lower bound, set the C flag and clear the Z flag
       // to force a deopt.
       __ Ccmp(scratch, last, CFlag, hs);
-      DeoptimizeIf(hi, instr, "wrong instance type");
+      DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
     }
   } else {
     uint8_t mask;
@@ -2307,10 +2312,10 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
       DCHECK((tag == 0) || (tag == mask));
       if (tag == 0) {
         DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
-                           "wrong instance type");
+                           Deoptimizer::kWrongInstanceType);
       } else {
         DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
-                             "wrong instance type");
+                             Deoptimizer::kWrongInstanceType);
       }
     } else {
       if (tag == 0) {
@@ -2319,7 +2324,7 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
         __ And(scratch, scratch, mask);
         __ Cmp(scratch, tag);
       }
-      DeoptimizeIf(ne, instr, "wrong instance type");
+      DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
     }
   }
 }
@@ -2359,7 +2364,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
 
   // Check for undefined. Undefined is coverted to zero for clamping conversion.
   DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
-                      "not a heap number/undefined");
+                      Deoptimizer::kNotAHeapNumberUndefined);
   __ Mov(result, 0);
   __ B(&done);
 
@@ -2664,7 +2669,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
   } else {
     __ Cmp(reg, Operand(object));
   }
-  DeoptimizeIf(ne, instr, "value mismatch");
+  DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
 }
 
 
@@ -2688,9 +2693,9 @@ void LCodeGen::DoDateField(LDateField* instr) {
   DCHECK(object.is(result) && object.Is(x0));
   DCHECK(instr->IsMarkedAsCall());
 
-  DeoptimizeIfSmi(object, instr, "Smi");
+  DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
   __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
-  DeoptimizeIf(ne, instr, "not a date object");
+  DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
 
   if (index->value() == 0) {
     __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -2740,21 +2745,21 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
   // Check for (0 / -x) that will produce negative zero.
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
-    DeoptimizeIfZero(dividend, instr, "division by zero");
+    DeoptimizeIfZero(dividend, instr, Deoptimizer::kDivisionByZero);
   }
   // Check for (kMinInt / -1).
   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
     // Test dividend for kMinInt by subtracting one (cmp) and checking for
     // overflow.
     __ Cmp(dividend, 1);
-    DeoptimizeIf(vs, instr, "overflow");
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   }
   // Deoptimize if remainder will not be 0.
   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
       divisor != 1 && divisor != -1) {
     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
     __ Tst(dividend, mask);
-    DeoptimizeIf(ne, instr, "lost precision");
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
   }
 
   if (divisor == -1) {  // Nice shortcut, not needed for correctness.
@@ -2782,14 +2787,14 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
   DCHECK(!AreAliased(dividend, result));
 
   if (divisor == 0) {
-    Deoptimize(instr, "division by zero");
+    Deoptimize(instr, Deoptimizer::kDivisionByZero);
     return;
   }
 
   // Check for (0 / -x) that will produce negative zero.
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
-    DeoptimizeIfZero(dividend, instr, "minus zero");
+    DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
   }
 
   __ TruncatingDiv(result, dividend, Abs(divisor));
@@ -2801,7 +2806,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
     __ Sxtw(dividend.X(), dividend);
     __ Mov(temp, divisor);
     __ Smsubl(temp.X(), result, temp, dividend.X());
-    DeoptimizeIfNotZero(temp, instr, "lost precision");
+    DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
   }
 }
 
@@ -2824,7 +2829,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
 
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
-    DeoptimizeIfZero(divisor, instr, "division by zero");
+    DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
   }
 
   // Check for (0 / -x) as that will produce negative zero.
@@ -2836,7 +2841,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
     // If the divisor >= 0 (pl, the opposite of mi) set the flags to
    // condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
     __ Ccmp(dividend, 0, NoFlag, mi);
-    DeoptimizeIf(eq, instr, "minus zero");
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
   }
 
   // Check for (kMinInt / -1).
@@ -2848,13 +2853,13 @@ void LCodeGen::DoDivI(LDivI* instr) {
     // -1. If overflow is clear, set the flags for condition ne, as the
     // dividend isn't -1, and thus we shouldn't deopt.
     __ Ccmp(divisor, -1, NoFlag, vs);
-    DeoptimizeIf(eq, instr, "overflow");
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
   }
 
   // Compute remainder and deopt if it's not zero.
   Register remainder = ToRegister32(instr->temp());
   __ Msub(remainder, result, divisor, dividend);
-  DeoptimizeIfNotZero(remainder, instr, "lost precision");
+  DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision);
 }
 
 
@@ -2863,11 +2868,11 @@ void LCodeGen::DoDoubleToIntOrSmi(LDoubleToIntOrSmi* instr) {
   Register result = ToRegister32(instr->result());
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIfMinusZero(input, instr, "minus zero");
+    DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
   }
 
   __ TryRepresentDoubleAsInt32(result, input, double_scratch());
-  DeoptimizeIf(ne, instr, "lost precision or NaN");
+  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
 
   if (instr->tag_result()) {
     __ SmiTag(result.X());
@@ -2928,7 +2933,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
   __ LoadInstanceDescriptors(map, result);
   __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
   __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
-  DeoptimizeIfZero(result, instr, "no cache");
+  DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache);
 
   __ Bind(&done);
 }
@@ -2941,17 +2946,18 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   DCHECK(instr->IsMarkedAsCall());
   DCHECK(object.Is(x0));
 
-  DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr, "undefined");
+  DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr,
+                   Deoptimizer::kUndefined);
 
   __ LoadRoot(null_value, Heap::kNullValueRootIndex);
   __ Cmp(object, null_value);
-  DeoptimizeIf(eq, instr, "null");
+  DeoptimizeIf(eq, instr, Deoptimizer::kNull);
 
-  DeoptimizeIfSmi(object, instr, "Smi");
+  DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
 
   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
   __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
-  DeoptimizeIf(le, instr, "not a JavaScript object");
+  DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject);
 
   Label use_cache, call_runtime;
   __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
@@ -2965,7 +2971,8 @@ void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
 
   __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
-  DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr, "wrong map");
+  DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr,
+                      Deoptimizer::kWrongMap);
 
   __ Bind(&use_cache);
 }
@@ -3356,7 +3363,8 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
   __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
   if (instr->hydrogen()->RequiresHoleCheck()) {
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
+      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
+                       Deoptimizer::kHole);
     } else {
       Label not_the_hole;
      __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
@@ -3377,7 +3385,8 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
                                  JSFunction::kPrototypeOrInitialMapOffset));
 
   // Check that the function has a prototype or an initial map.
-  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
+  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
+                   Deoptimizer::kHole);
 
   // If the function does not have an initial map, we're done.
   Label done;
@@ -3397,7 +3406,8 @@ void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
   __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
   __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
   if (instr->hydrogen()->RequiresHoleCheck()) {
-    DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
+    DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
+                     Deoptimizer::kHole);
   }
 }
 
@@ -3533,7 +3543,7 @@ void LCodeGen::DoLoadKeyedExternal(LLoadKeyedExternal* instr) {
         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
           // Deopt if value > 0x80000000.
           __ Tst(result, 0xFFFFFFFF80000000);
-          DeoptimizeIf(ne, instr, "negative value");
+          DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
         }
         break;
       case FLOAT32_ELEMENTS:
@@ -3627,7 +3637,7 @@ void LCodeGen::DoLoadKeyedFixedDouble(LLoadKeyedFixedDouble* instr) {
     Register scratch = ToRegister(instr->temp());
     __ Fmov(scratch, result);
     __ Eor(scratch, scratch, kHoleNanInt64);
-    DeoptimizeIfZero(scratch, instr, "hole");
+    DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole);
   }
 }
 
@@ -3665,9 +3675,10 @@ void LCodeGen::DoLoadKeyedFixed(LLoadKeyedFixed* instr) {
 
   if (instr->hydrogen()->RequiresHoleCheck()) {
     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
-      DeoptimizeIfNotSmi(result, instr, "not a Smi");
+      DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi);
     } else {
-      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
+      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
+                       Deoptimizer::kHole);
     }
   }
 }
@@ -3771,7 +3782,7 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
     Register result = r.IsSmi() ? ToRegister(instr->result())
                                 : ToRegister32(instr->result());
     __ Abs(result, input);
-    DeoptimizeIf(vs, instr, "overflow");
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   }
 }
 
@@ -3923,7 +3934,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
   Register result = ToRegister(instr->result());
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIfMinusZero(input, instr, "minus zero");
+    DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
   }
 
   __ Fcvtms(result, input);
@@ -3933,7 +3944,7 @@ void LCodeGen::DoMathFloorI(LMathFloorI* instr) {
   __ Cmp(result, Operand(result, SXTW));
   //  - The input was not NaN.
   __ Fccmp(input, input, NoFlag, eq);
-  DeoptimizeIf(ne, instr, "lost precision or NaN");
+  DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
 }
 
 
@@ -3959,13 +3970,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
   // If the divisor is negative, we have to negate and handle edge cases.
   __ Negs(result, dividend);
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(eq, instr, "minus zero");
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
   }
 
   // Dividing by -1 is basically negation, unless we overflow.
   if (divisor == -1) {
     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-      DeoptimizeIf(vs, instr, "overflow");
+      DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
     }
     return;
   }
@@ -3988,14 +3999,14 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
   DCHECK(!AreAliased(dividend, result));
 
   if (divisor == 0) {
-    Deoptimize(instr, "division by zero");
+    Deoptimize(instr, Deoptimizer::kDivisionByZero);
     return;
   }
 
   // Check for (0 / -x) that will produce negative zero.
   HMathFloorOfDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
-    DeoptimizeIfZero(dividend, instr, "minus zero");
+    DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
   }
 
   // Easy case: We need no dynamic check for the dividend and the flooring
@@ -4038,14 +4049,14 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
   __ Sdiv(result, dividend, divisor);
 
   // Check for x / 0.
-  DeoptimizeIfZero(divisor, instr, "division by zero");
+  DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
 
   // Check for (kMinInt / -1).
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
     // The V flag will be set iff dividend == kMinInt.
     __ Cmp(dividend, 1);
     __ Ccmp(divisor, -1, NoFlag, vs);
-    DeoptimizeIf(eq, instr, "overflow");
+    DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
   }
 
   // Check for (0 / -x) that will produce negative zero.
@@ -4055,7 +4066,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
     // "divisor" can't be null because the code would have already been
    // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
     // In this case we need to deoptimize to produce a -0.
-    DeoptimizeIf(eq, instr, "minus zero");
+    DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
   }
 
   Label done;
@@ -4214,18 +4225,18 @@ void LCodeGen::DoMathRoundI(LMathRoundI* instr) {
 
   // Deoptimize if the result > 1, as it must be larger than 32 bits.
   __ Cmp(result, 1);
-  DeoptimizeIf(hi, instr, "overflow");
+  DeoptimizeIf(hi, instr, Deoptimizer::kOverflow);
 
   // Deoptimize for negative inputs, which at this point are only numbers in
   // the range [-0.5, -0.0]
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     __ Fmov(result, input);
-    DeoptimizeIfNegative(result, instr, "minus zero");
+    DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero);
   }
 
   // Deoptimize if the input was NaN.
   __ Fcmp(input, dot_five);
-  DeoptimizeIf(vs, instr, "NaN");
+  DeoptimizeIf(vs, instr, Deoptimizer::kNaN);
 
   // Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
   // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
@@ -4303,7 +4314,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
     __ And(dividend, dividend, mask);
     __ Negs(dividend, dividend);
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(eq, instr, "minus zero");
+      DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
     }
     __ B(&done);
   }
@@ -4322,7 +4333,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
   DCHECK(!AreAliased(dividend, result, temp));
 
   if (divisor == 0) {
-    Deoptimize(instr, "division by zero");
+    Deoptimize(instr, Deoptimizer::kDivisionByZero);
     return;
  }
 
@@ -4336,7 +4347,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
     Label remainder_not_zero;
     __ Cbnz(result, &remainder_not_zero);
-    DeoptimizeIfNegative(dividend, instr, "minus zero");
+    DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
     __ bind(&remainder_not_zero);
   }
 }
@@ -4351,12 +4362,12 @@ void LCodeGen::DoModI(LModI* instr) {
   // modulo = dividend - quotient * divisor
   __ Sdiv(result, dividend, divisor);
   if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
-    DeoptimizeIfZero(divisor, instr, "division by zero");
+    DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
   }
   __ Msub(result, result, divisor, dividend);
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     __ Cbnz(result, &done);
-    DeoptimizeIfNegative(dividend, instr, "minus zero");
+    DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
   }
   __ Bind(&done);
 }
@@ -4379,10 +4390,10 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
   if (bailout_on_minus_zero) {
     if (right < 0) {
       // The result is -0 if right is negative and left is zero.
-      DeoptimizeIfZero(left, instr, "minus zero");
+      DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
     } else if (right == 0) {
      // The result is -0 if the right is zero and the left is negative.
-      DeoptimizeIfNegative(left, instr, "minus zero");
+      DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
     }
   }
 
@@ -4392,7 +4403,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
       if (can_overflow) {
        // Only 0x80000000 can overflow here.
        __ Negs(result, left);
-        DeoptimizeIf(vs, instr, "overflow");
+        DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
      } else {
        __ Neg(result, left);
      }
@@ -4408,7 +4419,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
    case 2:
      if (can_overflow) {
        __ Adds(result, left, left);
-        DeoptimizeIf(vs, instr, "overflow");
+        DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
      } else {
        __ Add(result, left, left);
      }
@@ -4427,7 +4438,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
          DCHECK(!AreAliased(scratch, left));
          __ Cls(scratch, left);
          __ Cmp(scratch, right_log2);
-          DeoptimizeIf(lt, instr, "overflow");
+          DeoptimizeIf(lt, instr, Deoptimizer::kOverflow);
        }
 
        if (right >= 0) {
@@ -4437,7 +4448,7 @@ void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
          // result = -left << log2(-right)
          if (can_overflow) {
            __ Negs(result, Operand(left, LSL, right_log2));
-            DeoptimizeIf(vs, instr, "overflow");
+            DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
          } else {
            __ Neg(result, Operand(left, LSL, right_log2));
          }
@@ -4495,13 +4506,13 @@ void LCodeGen::DoMulI(LMulI* instr) {
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
-    DeoptimizeIf(mi, instr, "minus zero");
+    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
   }
 
   if (can_overflow) {
    __ Smull(result.X(), left, right);
    __ Cmp(result.X(), Operand(result, SXTW));
-    DeoptimizeIf(ne, instr, "overflow");
+    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
   } else {
    __ Mul(result, left, right);
   }
@@ -4525,7 +4536,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
    //  - If so (eq), set N (mi) if left + right is negative.
    //  - Otherwise, clear N.
    __ Ccmn(left, right, NoFlag, eq);
-    DeoptimizeIf(mi, instr, "minus zero");
+    DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
   }
 
   STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
@@ -4533,7 +4544,7 @@ void LCodeGen::DoMulS(LMulS* instr) {
    __ Smulh(result, left, right);
    __ Cmp(result, Operand(result.W(), SXTW));
    __ SmiTag(result);
-    DeoptimizeIf(ne, instr, "overflow");
+    DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
   } else {
    if (AreAliased(result, left, right)) {
      // All three registers are the same: half untag the input and then
@@ -4709,14 +4720,14 @@ void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
    // Load heap number.
    __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
    if (instr->hydrogen()->deoptimize_on_minus_zero()) {
-      DeoptimizeIfMinusZero(result, instr, "minus zero");
+      DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero);
    }
    __ B(&done);
 
    if (can_convert_undefined_to_nan) {
      __ Bind(&convert_undefined);
      DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
-                          "not a heap number/undefined");
+                          Deoptimizer::kNotAHeapNumberUndefined);
 
      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
      __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
@@ -4910,7 +4921,7 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
   Register output = ToRegister(instr->result());
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       hchange->value()->CheckFlag(HValue::kUint32)) {
-    DeoptimizeIfNegative(input.W(), instr, "overflow");
+    DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow);
   }
   __ SmiTag(output, input);
 }
@@ -4922,7 +4933,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   Label done, untag;
 
   if (instr->needs_check()) {
-    DeoptimizeIfNotSmi(input, instr, "not a Smi");
+    DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi);
   }
 
   __ Bind(&untag);
@@ -4947,7 +4958,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
        if (instr->can_deopt()) {
          // If `left >>> right` >= 0x80000000, the result is not representable
          // in a signed 32-bit smi.
-          DeoptimizeIfNegative(result, instr, "negative value");
+          DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
        }
        break;
      default: UNREACHABLE();
@@ -4957,7 +4968,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
    int shift_count = JSShiftAmountFromLConstant(right_op);
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
-        DeoptimizeIfNegative(left, instr, "negative value");
+        DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
      }
      __ Mov(result, left, kDiscardForSameWReg);
    } else {
@@ -5010,7 +5021,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
        if (instr->can_deopt()) {
          // If `left >>> right` >= 0x80000000, the result is not representable
          // in a signed 32-bit smi.
-          DeoptimizeIfNegative(result, instr, "negative value");
+          DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
        }
        break;
      default: UNREACHABLE();
@@ -5020,7 +5031,7 @@ void LCodeGen::DoShiftS(LShiftS* instr) {
    int shift_count = JSShiftAmountFromLConstant(right_op);
    if (shift_count == 0) {
      if ((instr->op() == Token::SHR) && instr->can_deopt()) {
-        DeoptimizeIfNegative(left, instr, "negative value");
+        DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
      }
      __ Mov(result, left);
    } else {
@@ -5148,7 +5159,8 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ Ldr(scratch, target);
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, "hole");
+      DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
+                       Deoptimizer::kHole);
     } else {
       __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
     }
@@ -5186,7 +5198,8 @@ void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
   if (instr->hydrogen()->RequiresHoleCheck()) {
     Register payload = ToRegister(instr->temp2());
     __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
-    DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr, "hole");
+    DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr,
+                     Deoptimizer::kHole);
   }
 
   // Store the value.
@@ -5608,7 +5621,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
 
   if (can_overflow) {
     __ Subs(result, left, right);
-    DeoptimizeIf(vs, instr, "overflow");
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   } else {
     __ Sub(result, left, right);
   }
@@ -5622,7 +5635,7 @@ void LCodeGen::DoSubS(LSubS* instr) {
   Operand right = ToOperand(instr->right());
   if (can_overflow) {
     __ Subs(result, left, right);
-    DeoptimizeIf(vs, instr, "overflow");
+    DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
   } else {
     __ Sub(result, left, right);
   }
@@ -5663,7 +5676,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
    // Output contains zero, undefined is converted to zero for truncating
    // conversions.
    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
-                        "not a heap number/undefined/true/false");
+                        Deoptimizer::kNotAHeapNumberUndefinedBoolean);
   } else {
    Register output = ToRegister32(instr->result());
    DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
@@ -5674,13 +5687,13 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr,
    // function. If the result is out of range, branch to deoptimize.
    __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
    __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
-    DeoptimizeIf(ne, instr, "lost precision or NaN");
+    DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
 
    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
      __ Cmp(output, 0);
      __ B(ne, &done);
      __ Fmov(scratch1, dbl_scratch1);
-      DeoptimizeIfNegative(scratch1, instr, "minus zero");
+      DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
    }
   }
   __ Bind(&done);
@@ -5821,7 +5834,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
 
   Label no_memento_found;
   __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
-  DeoptimizeIf(eq, instr, "memento found");
+  DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
   __ Bind(&no_memento_found);
 }
 
@@ -5946,7 +5959,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
   Register temp = ToRegister(instr->temp());
   __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
   __ Cmp(map, temp);
-  DeoptimizeIf(ne, instr, "wrong map");
+  DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
 }
 
 
@@ -5980,10 +5993,10 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
   __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
 
   // Deoptimize if the receiver is not a JS object.
-  DeoptimizeIfSmi(receiver, instr, "Smi");
+  DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
   __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
   __ B(ge, &copy_receiver);
-  Deoptimize(instr, "not a JavaScript object");
+  Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);
 
   __ Bind(&global_object);
   __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
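
The patch above replaces free-form const char* deopt descriptions with a shared Deoptimizer::DeoptReason enum value at every call site (the enum itself is defined outside this file). Below is a minimal, standalone C++ sketch of that string-to-enum pattern, appended here purely for illustration: DeoptReason, DeoptReasonToString, and ReportDeopt are hypothetical stand-ins, not V8's actual types or APIs.

// Standalone sketch (not part of the patch, not V8 code): modeling the move
// from repeated string literals to a shared enum of deoptimization reasons.
#include <cstdio>

// Hypothetical stand-in for Deoptimizer::DeoptReason; the real enum and its
// entries (kOverflow, kMinusZero, ...) live in V8's deoptimizer, not here.
enum class DeoptReason { kOverflow, kMinusZero, kNotASmi };

// Hypothetical helper: recovers the human-readable text that call sites used
// to pass around as a const char* literal.
const char* DeoptReasonToString(DeoptReason reason) {
  switch (reason) {
    case DeoptReason::kOverflow:  return "overflow";
    case DeoptReason::kMinusZero: return "minus zero";
    case DeoptReason::kNotASmi:   return "not a Smi";
  }
  return "unknown";
}

// Before: DeoptimizeIf(vs, instr, "overflow");              // string repeated everywhere
// After:  DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);  // one shared definition
void ReportDeopt(DeoptReason reason) {
  std::printf("deopt: %s\n", DeoptReasonToString(reason));
}

int main() {
  ReportDeopt(DeoptReason::kOverflow);   // prints "deopt: overflow"
  ReportDeopt(DeoptReason::kMinusZero);  // prints "deopt: minus zero"
  return 0;
}

The design point the diff is making: an enum gives one canonical spelling per reason, lets the deopt reason be stored and compared cheaply, and keeps the printable text in a single place instead of scattered across every DeoptimizeIf* call site.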