Index: src/arm64/lithium-codegen-arm64.cc
diff --git a/src/arm64/lithium-codegen-arm64.cc b/src/arm64/lithium-codegen-arm64.cc
index a0287214b606e41a00d0aef795eedce506e78080..e8c12b42ccfa97fefc3d30609cf7ee5ba16d4f96 100644
--- a/src/arm64/lithium-codegen-arm64.cc
+++ b/src/arm64/lithium-codegen-arm64.cc
@@ -1005,9 +1005,8 @@
void LCodeGen::DeoptimizeBranch(
- LInstruction* instr, Deoptimizer::DeoptReason deopt_reason,
- BranchType branch_type, Register reg, int bit,
- Deoptimizer::BailoutType* override_bailout_type) {
+ LInstruction* instr, const char* detail, BranchType branch_type,
+ Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
Deoptimizer::BailoutType bailout_type =
@@ -1058,7 +1057,7 @@
}
Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
- instr->Mnemonic(), deopt_reason);
+ instr->Mnemonic(), detail);
DCHECK(info()->IsStub() || frame_is_built_);
// Go through jump table if we need to build frame, or restore caller doubles.
if (branch_type == always &&
@@ -1080,89 +1079,85 @@
}
-void LCodeGen::Deoptimize(LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+void LCodeGen::Deoptimize(LInstruction* instr, const char* detail,
Deoptimizer::BailoutType* override_bailout_type) {
- DeoptimizeBranch(instr, deopt_reason, always, NoReg, -1,
- override_bailout_type);
+ DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
}
void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
- DeoptimizeBranch(instr, deopt_reason, static_cast<BranchType>(cond));
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond));
}
void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
- DeoptimizeBranch(instr, deopt_reason, reg_zero, rt);
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_zero, rt);
}
void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
- DeoptimizeBranch(instr, deopt_reason, reg_not_zero, rt);
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_not_zero, rt);
}
void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ const char* detail) {
int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
- DeoptimizeIfBitSet(rt, sign_bit, instr, deopt_reason);
+ DeoptimizeIfBitSet(rt, sign_bit, instr, detail);
}
void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
- DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
+ const char* detail) {
+ DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail);
}
void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
- DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, deopt_reason);
+ const char* detail) {
+ DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail);
}
void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ LInstruction* instr, const char* detail) {
__ CompareRoot(rt, index);
- DeoptimizeIf(eq, instr, deopt_reason);
+ DeoptimizeIf(eq, instr, detail);
}
void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
- LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ LInstruction* instr, const char* detail) {
__ CompareRoot(rt, index);
- DeoptimizeIf(ne, instr, deopt_reason);
+ DeoptimizeIf(ne, instr, detail);
}
void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ const char* detail) {
__ TestForMinusZero(input);
- DeoptimizeIf(vs, instr, deopt_reason);
+ DeoptimizeIf(vs, instr, detail);
}
void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
__ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotHeapNumber);
+ DeoptimizeIf(ne, instr, "not heap number");
}
void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
- DeoptimizeBranch(instr, deopt_reason, reg_bit_set, rt, bit);
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit);
}
void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
- DeoptimizeBranch(instr, deopt_reason, reg_bit_clear, rt, bit);
+ const char* detail) {
+ DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit);
}
@@ -1536,7 +1531,7 @@
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Add(result, left, right);
}
@@ -1550,7 +1545,7 @@
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Adds(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Add(result, left, right);
}
@@ -1676,7 +1671,7 @@
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ Cmp(length, kArgumentsLimit);
- DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(hi, instr, "too many arguments");
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -1858,7 +1853,7 @@
if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
__ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
} else {
- DeoptimizeIf(cond, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cond, instr, "out of bounds");
}
}
@@ -1937,7 +1932,7 @@
__ JumpIfSmi(value, true_label);
} else if (expected.NeedsMap()) {
// If we need a map later and have a smi, deopt.
- DeoptimizeIfSmi(value, instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(value, instr, "Smi");
}
Register map = NoReg;
@@ -1998,7 +1993,7 @@
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- Deoptimize(instr, Deoptimizer::kUnexpectedObject);
+ Deoptimize(instr, "unexpected object");
}
}
}
@@ -2204,7 +2199,7 @@
instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
__ StoreToSafepointRegisterSlot(x0, temp);
}
- DeoptimizeIfSmi(temp, instr, Deoptimizer::kInstanceMigrationFailed);
+ DeoptimizeIfSmi(temp, instr, "instance migration failed");
}
@@ -2259,7 +2254,7 @@
if (instr->hydrogen()->HasMigrationTarget()) {
__ B(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, "wrong map");
}
__ Bind(&success);
@@ -2268,7 +2263,7 @@
void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
- DeoptimizeIfSmi(ToRegister(instr->value()), instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(ToRegister(instr->value()), instr, "Smi");
}
}
@@ -2276,7 +2271,7 @@
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
Register value = ToRegister(instr->value());
DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
- DeoptimizeIfNotSmi(value, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIfNotSmi(value, instr, "not a Smi");
}
@@ -2294,15 +2289,15 @@
__ Cmp(scratch, first);
if (first == last) {
// If there is only one type in the interval check for equality.
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, "wrong instance type");
} else if (last == LAST_TYPE) {
// We don't need to compare with the higher bound of the interval.
- DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(lo, instr, "wrong instance type");
} else {
// If we are below the lower bound, set the C flag and clear the Z flag
// to force a deopt.
__ Ccmp(scratch, last, CFlag, hs);
- DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(hi, instr, "wrong instance type");
}
} else {
uint8_t mask;
@@ -2313,10 +2308,10 @@
DCHECK((tag == 0) || (tag == mask));
if (tag == 0) {
DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr,
- Deoptimizer::kWrongInstanceType);
+ "wrong instance type");
} else {
DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr,
- Deoptimizer::kWrongInstanceType);
+ "wrong instance type");
}
} else {
if (tag == 0) {
@@ -2325,7 +2320,7 @@
__ And(scratch, scratch, mask);
__ Cmp(scratch, tag);
}
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, "wrong instance type");
}
}
}
@@ -2365,7 +2360,7 @@
// Check for undefined. Undefined is converted to zero for clamping conversions.
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- Deoptimizer::kNotAHeapNumberUndefined);
+ "not a heap number/undefined");
__ Mov(result, 0);
__ B(&done);
@@ -2670,7 +2665,7 @@
} else {
__ Cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(ne, instr, "value mismatch");
}
@@ -2694,9 +2689,9 @@
DCHECK(object.is(result) && object.Is(x0));
DCHECK(instr->IsMarkedAsCall());
- DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(object, instr, "Smi");
__ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject);
+ DeoptimizeIf(ne, instr, "not a date object");
if (index->value() == 0) {
__ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -2746,21 +2741,21 @@
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(dividend, instr, "division by zero");
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
// Test dividend for kMinInt by subtracting one (cmp) and checking for
// overflow.
__ Cmp(dividend, 1);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ Tst(dividend, mask);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, "lost precision");
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -2788,14 +2783,14 @@
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr, Deoptimizer::kDivisionByZero);
+ Deoptimize(instr, "division by zero");
return;
}
// Check for (0 / -x) that will produce negative zero.
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfZero(dividend, instr, "minus zero");
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -2807,7 +2802,7 @@
__ Sxtw(dividend.X(), dividend);
__ Mov(temp, divisor);
__ Smsubl(temp.X(), result, temp, dividend.X());
- DeoptimizeIfNotZero(temp, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIfNotZero(temp, instr, "lost precision");
}
}
@@ -2830,7 +2825,7 @@
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(divisor, instr, "division by zero");
}
// Check for (0 / -x) as that will produce negative zero.
@@ -2842,7 +2837,7 @@
// If the divisor >= 0 (pl, the opposite of mi) set the flags to
// condition ne, so we don't deopt, ie. positive divisor doesn't deopt.
__ Ccmp(dividend, 0, NoFlag, mi);
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Check for (kMinInt / -1). |
@@ -2854,13 +2849,13 @@ |
// -1. If overflow is clear, set the flags for condition ne, as the |
// dividend isn't -1, and thus we shouldn't deopt. |
__ Ccmp(divisor, -1, NoFlag, vs); |
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); |
+ DeoptimizeIf(eq, instr, "overflow"); |
} |
// Compute remainder and deopt if it's not zero. |
Register remainder = ToRegister32(instr->temp()); |
__ Msub(remainder, result, divisor, dividend); |
- DeoptimizeIfNotZero(remainder, instr, Deoptimizer::kLostPrecision); |
+ DeoptimizeIfNotZero(remainder, instr, "lost precision"); |
} |
@@ -2869,11 +2864,11 @@ |
Register result = ToRegister32(instr->result()); |
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
- DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero); |
+ DeoptimizeIfMinusZero(input, instr, "minus zero"); |
} |
__ TryRepresentDoubleAsInt32(result, input, double_scratch()); |
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); |
+ DeoptimizeIf(ne, instr, "lost precision or NaN"); |
if (instr->tag_result()) { |
__ SmiTag(result.X()); |
@@ -2934,7 +2929,7 @@ |
__ LoadInstanceDescriptors(map, result); |
__ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
__ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
- DeoptimizeIfZero(result, instr, Deoptimizer::kNoCache); |
+ DeoptimizeIfZero(result, instr, "no cache"); |
__ Bind(&done); |
} |
@@ -2947,18 +2942,17 @@ |
DCHECK(instr->IsMarkedAsCall()); |
DCHECK(object.Is(x0)); |
- DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr, |
- Deoptimizer::kUndefined); |
+ DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr, "undefined"); |
__ LoadRoot(null_value, Heap::kNullValueRootIndex); |
__ Cmp(object, null_value); |
- DeoptimizeIf(eq, instr, Deoptimizer::kNull); |
- |
- DeoptimizeIfSmi(object, instr, Deoptimizer::kSmi); |
+ DeoptimizeIf(eq, instr, "null"); |
+ |
+ DeoptimizeIfSmi(object, instr, "Smi"); |
STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
__ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE); |
- DeoptimizeIf(le, instr, Deoptimizer::kNotAJavaScriptObject); |
+ DeoptimizeIf(le, instr, "not a JavaScript object"); |
Label use_cache, call_runtime; |
__ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime); |
@@ -2972,8 +2966,7 @@ |
CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
__ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset)); |
- DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr, |
- Deoptimizer::kWrongMap); |
+ DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr, "wrong map"); |
__ Bind(&use_cache); |
} |
@@ -3364,8 +3357,7 @@
__ Ldr(result, ContextMemOperand(context, instr->slot_index()));
if (instr->hydrogen()->RequiresHoleCheck()) {
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
} else {
Label not_the_hole;
__ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
@@ -3386,8 +3378,7 @@
JSFunction::kPrototypeOrInitialMapOffset));
// Check that the function has a prototype or an initial map.
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
// If the function does not have an initial map, we're done.
Label done;
@@ -3407,8 +3398,7 @@
__ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
__ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
if (instr->hydrogen()->RequiresHoleCheck()) {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
}
}
@@ -3544,7 +3534,7 @@
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
// Deopt if value > 0x80000000.
__ Tst(result, 0xFFFFFFFF80000000);
- DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(ne, instr, "negative value");
}
break;
case FLOAT32_ELEMENTS:
@@ -3638,7 +3628,7 @@
Register scratch = ToRegister(instr->temp());
__ Fmov(scratch, result);
__ Eor(scratch, scratch, kHoleNanInt64);
- DeoptimizeIfZero(scratch, instr, Deoptimizer::kHole);
+ DeoptimizeIfZero(scratch, instr, "hole");
}
}
@@ -3676,10 +3666,9 @@
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
- DeoptimizeIfNotSmi(result, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIfNotSmi(result, instr, "not a Smi");
} else {
- DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr, "hole");
}
}
}
@@ -3783,7 +3772,7 @@
Register result = r.IsSmi() ? ToRegister(instr->result())
: ToRegister32(instr->result());
__ Abs(result, input);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
}
}
@@ -3935,7 +3924,7 @@
Register result = ToRegister(instr->result());
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIfMinusZero(input, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfMinusZero(input, instr, "minus zero");
}
__ Fcvtms(result, input);
@@ -3945,7 +3934,7 @@
__ Cmp(result, Operand(result, SXTW));
// - The input was not NaN.
__ Fccmp(input, input, NoFlag, eq);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
}
@@ -3971,13 +3960,13 @@
// If the divisor is negative, we have to negate and handle edge cases.
__ Negs(result, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
}
return;
}
@@ -4000,14 +3989,14 @@
DCHECK(!AreAliased(dividend, result));
if (divisor == 0) {
- Deoptimize(instr, Deoptimizer::kDivisionByZero);
+ Deoptimize(instr, "division by zero");
return;
}
// Check for (0 / -x) that will produce negative zero.
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
- DeoptimizeIfZero(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfZero(dividend, instr, "minus zero");
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -4050,14 +4039,14 @@
__ Sdiv(result, dividend, divisor);
// Check for x / 0.
- DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(divisor, instr, "division by zero");
// Check for (kMinInt / -1).
if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
// The V flag will be set iff dividend == kMinInt.
__ Cmp(dividend, 1);
__ Ccmp(divisor, -1, NoFlag, vs);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, "overflow");
}
// Check for (0 / -x) that will produce negative zero.
@@ -4067,7 +4056,7 @@
// "divisor" can't be null because the code would have already been
// deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
// In this case we need to deoptimize to produce a -0.
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
Label done;
@@ -4226,18 +4215,18 @@
// Deoptimize if the result > 1, as it must be larger than 32 bits.
__ Cmp(result, 1);
- DeoptimizeIf(hi, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(hi, instr, "overflow");
// Deoptimize for negative inputs, which at this point are only numbers in
// the range [-0.5, -0.0]
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Fmov(result, input);
- DeoptimizeIfNegative(result, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(result, instr, "minus zero");
}
// Deoptimize if the input was NaN.
__ Fcmp(input, dot_five);
- DeoptimizeIf(vs, instr, Deoptimizer::kNaN);
+ DeoptimizeIf(vs, instr, "NaN");
// Now, the only unhandled inputs are in the range [0.0, 1.5[ (or [-0.5, 1.5[
// if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
@@ -4315,7 +4304,7 @@
__ And(dividend, dividend, mask);
__ Negs(dividend, dividend);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, "minus zero");
}
__ B(&done);
}
@@ -4334,7 +4323,7 @@
DCHECK(!AreAliased(dividend, result, temp));
if (divisor == 0) {
- Deoptimize(instr, Deoptimizer::kDivisionByZero);
+ Deoptimize(instr, "division by zero");
return;
}
@@ -4348,7 +4337,7 @@
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label remainder_not_zero;
__ Cbnz(result, &remainder_not_zero);
- DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(dividend, instr, "minus zero");
__ bind(&remainder_not_zero);
}
}
@@ -4363,12 +4352,12 @@
// modulo = dividend - quotient * divisor
__ Sdiv(result, dividend, divisor);
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
- DeoptimizeIfZero(divisor, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIfZero(divisor, instr, "division by zero");
}
__ Msub(result, result, divisor, dividend);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cbnz(result, &done);
- DeoptimizeIfNegative(dividend, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(dividend, instr, "minus zero");
}
__ Bind(&done);
}
@@ -4391,10 +4380,10 @@
if (bailout_on_minus_zero) {
if (right < 0) {
// The result is -0 if right is negative and left is zero.
- DeoptimizeIfZero(left, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfZero(left, instr, "minus zero");
} else if (right == 0) {
// The result is -0 if the right is zero and the left is negative.
- DeoptimizeIfNegative(left, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(left, instr, "minus zero");
}
}
@@ -4404,7 +4393,7 @@
if (can_overflow) {
// Only 0x80000000 can overflow here.
__ Negs(result, left);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Neg(result, left);
}
@@ -4420,7 +4409,7 @@
case 2:
if (can_overflow) {
__ Adds(result, left, left);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Add(result, left, left);
}
@@ -4439,7 +4428,7 @@
DCHECK(!AreAliased(scratch, left));
__ Cls(scratch, left);
__ Cmp(scratch, right_log2);
- DeoptimizeIf(lt, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(lt, instr, "overflow");
}
if (right >= 0) {
@@ -4449,7 +4438,7 @@
// result = -left << log2(-right)
if (can_overflow) {
__ Negs(result, Operand(left, LSL, right_log2));
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Neg(result, Operand(left, LSL, right_log2));
}
@@ -4507,13 +4496,13 @@
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, "minus zero");
}
if (can_overflow) {
__ Smull(result.X(), left, right);
__ Cmp(result.X(), Operand(result, SXTW));
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, "overflow");
} else {
__ Mul(result, left, right);
}
@@ -4537,7 +4526,7 @@
// - If so (eq), set N (mi) if left + right is negative.
// - Otherwise, clear N.
__ Ccmn(left, right, NoFlag, eq);
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, "minus zero");
}
STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
@@ -4545,7 +4534,7 @@
__ Smulh(result, left, right);
__ Cmp(result, Operand(result.W(), SXTW));
__ SmiTag(result);
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, "overflow");
} else {
if (AreAliased(result, left, right)) {
// All three registers are the same: half untag the input and then
@@ -4721,14 +4710,14 @@
// Load heap number.
__ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
if (instr->hydrogen()->deoptimize_on_minus_zero()) {
- DeoptimizeIfMinusZero(result, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfMinusZero(result, instr, "minus zero");
}
__ B(&done);
if (can_convert_undefined_to_nan) {
__ Bind(&convert_undefined);
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- Deoptimizer::kNotAHeapNumberUndefined);
+ "not a heap number/undefined");
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
@@ -4922,7 +4911,7 @@
Register output = ToRegister(instr->result());
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
- DeoptimizeIfNegative(input.W(), instr, Deoptimizer::kOverflow);
+ DeoptimizeIfNegative(input.W(), instr, "overflow");
}
__ SmiTag(output, input);
}
@@ -4934,7 +4923,7 @@
Label done, untag;
if (instr->needs_check()) {
- DeoptimizeIfNotSmi(input, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIfNotSmi(input, instr, "not a Smi");
}
__ Bind(&untag);
@@ -4959,7 +4948,7 @@
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(result, instr, "negative value");
}
break;
default: UNREACHABLE();
@@ -4969,7 +4958,7 @@
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(left, instr, "negative value");
}
__ Mov(result, left, kDiscardForSameWReg);
} else {
@@ -5022,7 +5011,7 @@
if (instr->can_deopt()) {
// If `left >>> right` >= 0x80000000, the result is not representable
// in a signed 32-bit smi.
- DeoptimizeIfNegative(result, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(result, instr, "negative value");
}
break;
default: UNREACHABLE();
@@ -5032,7 +5021,7 @@
int shift_count = JSShiftAmountFromLConstant(right_op);
if (shift_count == 0) {
if ((instr->op() == Token::SHR) && instr->can_deopt()) {
- DeoptimizeIfNegative(left, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIfNegative(left, instr, "negative value");
}
__ Mov(result, left);
} else {
@@ -5160,8 +5149,7 @@
if (instr->hydrogen()->RequiresHoleCheck()) {
__ Ldr(scratch, target);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr, "hole");
} else {
__ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
}
@@ -5199,8 +5187,7 @@
if (instr->hydrogen()->RequiresHoleCheck()) {
Register payload = ToRegister(instr->temp2());
__ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
- DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr,
- Deoptimizer::kHole);
+ DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr, "hole");
}
// Store the value.
@@ -5622,7 +5609,7 @@
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Sub(result, left, right);
}
@@ -5636,7 +5623,7 @@
Operand right = ToOperand(instr->right());
if (can_overflow) {
__ Subs(result, left, right);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, "overflow");
} else {
__ Sub(result, left, right);
}
@@ -5677,7 +5664,7 @@
// Output contains zero, undefined is converted to zero for truncating
// conversions.
DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr,
- Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ "not a heap number/undefined/true/false");
} else {
Register output = ToRegister32(instr->result());
DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
@@ -5688,13 +5675,13 @@
// function. If the result is out of range, branch to deoptimize.
__ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
__ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, "lost precision or NaN");
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ Cmp(output, 0);
__ B(ne, &done);
__ Fmov(scratch1, dbl_scratch1);
- DeoptimizeIfNegative(scratch1, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIfNegative(scratch1, instr, "minus zero");
}
}
__ Bind(&done);
@@ -5835,7 +5822,7 @@
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
- DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(eq, instr, "memento found");
__ Bind(&no_memento_found);
}
@@ -5960,7 +5947,7 @@
Register temp = ToRegister(instr->temp());
__ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
__ Cmp(map, temp);
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, "wrong map");
}
@@ -5994,10 +5981,10 @@
__ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
// Deoptimize if the receiver is not a JS object.
- DeoptimizeIfSmi(receiver, instr, Deoptimizer::kSmi);
+ DeoptimizeIfSmi(receiver, instr, "Smi");
__ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
__ B(ge, &copy_receiver);
- Deoptimize(instr, Deoptimizer::kNotAJavaScriptObject);
+ Deoptimize(instr, "not a JavaScript object");
__ Bind(&global_object);
__ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));