Chromium Code Reviews

Unified Diff: src/crankshaft/arm/lithium-codegen-arm.cc

Issue 2161543002: [turbofan] Add support for eager/soft deoptimization reasons. (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Do the ports properly (created 4 years, 5 months ago)
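The patch mechanically swaps the nested Deoptimizer::DeoptReason enum for the standalone DeoptimizeReason enum at every DeoptimizeIf call site in the ARM Lithium code generator. A minimal sketch of the interface change, assuming the shared DeoptimizeReason enum this issue introduces (its header is not part of this file's diff):

// Before: the reason was a nested enum on the Deoptimizer class.
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            Deoptimizer::DeoptReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type);

// After: the reason is the standalone DeoptimizeReason enum, so call
// sites now spell reasons as DeoptimizeReason::kMinusZero, kOverflow, etc.
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                            DeoptimizeReason deopt_reason,
                            Deoptimizer::BailoutType bailout_type);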
Index: src/crankshaft/arm/lithium-codegen-arm.cc
diff --git a/src/crankshaft/arm/lithium-codegen-arm.cc b/src/crankshaft/arm/lithium-codegen-arm.cc
index 357402771621f0c3128de9d3de33500fbed8d6ff..11b2f6dc28201e7f6d04bbca2abbb561cfa8a333 100644
--- a/src/crankshaft/arm/lithium-codegen-arm.cc
+++ b/src/crankshaft/arm/lithium-codegen-arm.cc
@@ -769,9 +769,8 @@ void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
}
}
-
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason,
+ DeoptimizeReason deopt_reason,
Deoptimizer::BailoutType bailout_type) {
LEnvironment* environment = instr->environment();
RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
@@ -846,9 +845,8 @@ void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
}
}
-
void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
- Deoptimizer::DeoptReason deopt_reason) {
+ DeoptimizeReason deopt_reason) {
Deoptimizer::BailoutType bailout_type = info()->IsStub()
? Deoptimizer::LAZY
: Deoptimizer::EAGER;
@@ -981,7 +979,7 @@ void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
__ and_(dividend, dividend, Operand(mask));
__ rsb(dividend, dividend, Operand::Zero(), SetCC);
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ b(&done);
}
@@ -999,7 +997,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1014,7 +1012,7 @@ void LCodeGen::DoModByConstI(LModByConstI* instr) {
Label remainder_not_zero;
__ b(ne, &remainder_not_zero);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
__ bind(&remainder_not_zero);
}
}
@@ -1034,7 +1032,7 @@ void LCodeGen::DoModI(LModI* instr) {
// case because we can't return a NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for kMinInt % -1, sdiv will return kMinInt, which is not what we
@@ -1045,7 +1043,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ b(ne, &no_overflow_possible);
__ cmp(right_reg, Operand(-1));
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
} else {
__ b(ne, &no_overflow_possible);
__ mov(result_reg, Operand::Zero());
@@ -1066,7 +1064,7 @@ void LCodeGen::DoModI(LModI* instr) {
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
@@ -1091,7 +1089,7 @@ void LCodeGen::DoModI(LModI* instr) {
// NaN.
if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right_reg, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
__ Move(result_reg, left_reg);
@@ -1121,7 +1119,7 @@ void LCodeGen::DoModI(LModI* instr) {
if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ b(ne, &done);
__ cmp(left_reg, Operand::Zero());
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
@@ -1139,19 +1137,19 @@ void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Check for (kMinInt / -1).
if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
__ cmp(dividend, Operand(kMinInt));
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
// Deoptimize if remainder will not be 0.
if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
divisor != 1 && divisor != -1) {
int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
__ tst(dividend, Operand(mask));
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
if (divisor == -1) { // Nice shortcut, not needed for correctness.
@@ -1179,7 +1177,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1187,7 +1185,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
HDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ TruncatingDiv(result, dividend, Abs(divisor));
@@ -1197,7 +1195,7 @@ void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
__ mov(ip, Operand(divisor));
__ smull(scratch0(), ip, result, ip);
__ sub(scratch0(), scratch0(), dividend, SetCC);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
}
@@ -1212,7 +1210,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(divisor, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1224,7 +1222,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
}
__ b(pl, &positive);
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
__ bind(&positive);
}
@@ -1236,7 +1234,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(dividend, Operand(kMinInt));
__ cmp(divisor, Operand(-1), eq);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
if (CpuFeatures::IsSupported(SUDIV)) {
@@ -1259,7 +1257,7 @@ void LCodeGen::DoDivI(LDivI* instr) {
Register remainder = scratch0();
__ Mls(remainder, result, divisor, dividend);
__ cmp(remainder, Operand::Zero());
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecision);
}
}
@@ -1310,13 +1308,13 @@ void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
// If the divisor is negative, we have to negate and handle edge cases.
__ rsb(result, dividend, Operand::Zero(), SetCC);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Dividing by -1 is basically negation, unless we overflow.
if (divisor == -1) {
if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
return;
}
@@ -1339,7 +1337,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
DCHECK(!dividend.is(result));
if (divisor == 0) {
- DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kDivisionByZero);
return;
}
@@ -1347,7 +1345,7 @@ void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
HMathFloorOfDiv* hdiv = instr->hydrogen();
if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
__ cmp(dividend, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
// Easy case: We need no dynamic check for the dividend and the flooring
@@ -1388,7 +1386,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// Check for x / 0.
if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kDivisionByZero);
}
// Check for (0 / -x) that will produce negative zero.
@@ -1400,7 +1398,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
}
__ b(pl, &positive);
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
__ bind(&positive);
}
@@ -1412,7 +1410,7 @@ void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
// support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
__ cmp(left, Operand(kMinInt));
__ cmp(right, Operand(-1), eq);
- DeoptimizeIf(eq, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kOverflow);
}
if (CpuFeatures::IsSupported(SUDIV)) {
@@ -1458,14 +1456,14 @@ void LCodeGen::DoMulI(LMulI* instr) {
// The case of a null constant will be handled separately.
// If constant is negative and left is null, the result should be -0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
switch (constant) {
case -1:
if (overflow) {
__ rsb(result, left, Operand::Zero(), SetCC);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ rsb(result, left, Operand::Zero());
}
@@ -1475,7 +1473,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
// If left is strictly negative and the constant is null, the
// result is -0. Deoptimize if required, otherwise return 0.
__ cmp(left, Operand::Zero());
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
__ mov(result, Operand::Zero());
break;
@@ -1525,7 +1523,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ smull(result, scratch, left, right);
}
__ cmp(scratch, Operand(result, ASR, 31));
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
} else {
if (instr->hydrogen()->representation().IsSmi()) {
__ SmiUntag(result, left);
@@ -1541,7 +1539,7 @@ void LCodeGen::DoMulI(LMulI* instr) {
__ b(pl, &done);
// Bail out if the result is minus zero.
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@@ -1604,7 +1602,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
case Token::SHR:
if (instr->can_deopt()) {
__ mov(result, Operand(left, LSR, scratch), SetCC);
- DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(mi, instr, DeoptimizeReason::kNegativeValue);
} else {
__ mov(result, Operand(left, LSR, scratch));
}
@@ -1641,7 +1639,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
if (instr->can_deopt()) {
__ tst(left, Operand(0x80000000));
- DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNegativeValue);
}
__ Move(result, left);
}
@@ -1656,7 +1654,7 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
} else {
__ SmiTag(result, left, SetCC);
}
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ mov(result, Operand(left, LSL, shift_count));
}
@@ -1688,7 +1686,7 @@ void LCodeGen::DoSubI(LSubI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
}
@@ -1709,7 +1707,7 @@ void LCodeGen::DoRSubI(LRSubI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
}
@@ -1850,7 +1848,7 @@ void LCodeGen::DoAddI(LAddI* instr) {
}
if (can_overflow) {
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
}
@@ -2089,7 +2087,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
} else if (expected.NeedsMap()) {
// If we need a map later and have a Smi -> deopt.
__ SmiTst(reg);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
}
const Register map = scratch0();
@@ -2151,7 +2149,7 @@ void LCodeGen::DoBranch(LBranch* instr) {
if (!expected.IsGeneric()) {
// We've seen something for the first time -> deopt.
// This can only happen if we are not generic already.
- DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kUnexpectedObject);
}
}
}
@@ -2515,10 +2513,10 @@ void LCodeGen::DoHasInPrototypeChainAndBranch(
__ ldrb(object_instance_type,
FieldMemOperand(object_map, Map::kBitFieldOffset));
__ tst(object_instance_type, Operand(1 << Map::kIsAccessCheckNeeded));
- DeoptimizeIf(ne, instr, Deoptimizer::kAccessCheck);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kAccessCheck);
// Deoptimize for proxies.
__ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE);
- DeoptimizeIf(eq, instr, Deoptimizer::kProxy);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kProxy);
__ ldr(object_prototype, FieldMemOperand(object_map, Map::kPrototypeOffset));
__ CompareRoot(object_prototype, Heap::kNullValueRootIndex);
@@ -2636,7 +2634,7 @@ void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
} else {
__ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
}
@@ -2657,7 +2655,7 @@ void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
if (instr->hydrogen()->DeoptimizesOnHole()) {
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
} else {
__ b(ne, &skip_assignment);
}
@@ -2735,7 +2733,7 @@ void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
// Check that the function has a prototype or an initial map.
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(result, ip);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
// If the function does not have an initial map, we're done.
Label done;
@@ -2849,7 +2847,7 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
__ ldr(result, mem_operand);
if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
__ cmp(result, Operand(0x80000000));
- DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue);
+ DeoptimizeIf(cs, instr, DeoptimizeReason::kNegativeValue);
}
break;
case FLOAT32_ELEMENTS:
@@ -2904,7 +2902,7 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
}
}
@@ -2938,11 +2936,11 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
if (instr->hydrogen()->RequiresHoleCheck()) {
if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
__ SmiTst(result);
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi);
} else {
__ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
__ cmp(result, scratch);
- DeoptimizeIf(eq, instr, Deoptimizer::kHole);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kHole);
}
} else if (instr->hydrogen()->hole_mode() == CONVERT_HOLE_TO_UNDEFINED) {
DCHECK(instr->hydrogen()->elements_kind() == FAST_HOLEY_ELEMENTS);
@@ -2957,7 +2955,7 @@ void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
__ LoadRoot(result, Heap::kArrayProtectorRootIndex);
__ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
__ cmp(result, Operand(Smi::FromInt(Isolate::kArrayProtectorValid)));
- DeoptimizeIf(ne, instr, Deoptimizer::kHole);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kHole);
}
__ LoadRoot(result, Heap::kUndefinedValueRootIndex);
__ bind(&done);
@@ -3102,9 +3100,9 @@ void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
// Deoptimize if the receiver is not a JS object.
__ SmiTst(receiver);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
__ CompareObjectType(receiver, scratch, scratch, FIRST_JS_RECEIVER_TYPE);
- DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject);
+ DeoptimizeIf(lt, instr, DeoptimizeReason::kNotAJavaScriptObject);
__ b(&result_in_receiver);
__ bind(&global_object);
@@ -3138,7 +3136,7 @@ void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
// adaptor frame below it.
const uint32_t kArgumentsLimit = 1 * KB;
__ cmp(length, Operand(kArgumentsLimit));
- DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments);
+ DeoptimizeIf(hi, instr, DeoptimizeReason::kTooManyArguments);
// Push the receiver and use the register to keep the original
// number of arguments.
@@ -3292,7 +3290,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
__ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(scratch, Operand(ip));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
Label done;
Register exponent = scratch0();
@@ -3360,7 +3358,7 @@ void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
// if input is positive.
__ rsb(result, input, Operand::Zero(), SetCC, mi);
// Deoptimize on overflow.
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
@@ -3407,7 +3405,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
Label done, exact;
__ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
- DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
__ bind(&exact);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3415,7 +3413,7 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
__ cmp(result, Operand::Zero());
__ b(ne, &done);
__ cmp(input_high, Operand::Zero());
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
__ bind(&done);
}
@@ -3441,7 +3439,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
__ VmovHigh(input_high, input);
__ cmp(input_high, Operand::Zero());
// [-0.5, -0].
- DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(mi, instr, DeoptimizeReason::kMinusZero);
}
__ VFPCompareAndSetFlags(input, dot_five);
__ mov(result, Operand(1), LeaveCC, eq); // +0.5.
@@ -3455,7 +3453,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
// Reuse dot_five (double_scratch0) as we no longer need this value.
__ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
&done, &done);
- DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(al, instr, DeoptimizeReason::kLostPrecisionOrNaN);
__ bind(&done);
}
@@ -3519,7 +3517,7 @@ void LCodeGen::DoPower(LPower* instr) {
__ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
__ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
__ cmp(r6, Operand(ip));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
__ bind(&no_deopt);
MathPowStub stub(isolate(), MathPowStub::TAGGED);
__ CallStub(&stub);
@@ -3893,7 +3891,7 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
__ stop("eliminated bounds check failed");
__ bind(&done);
} else {
- DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(cc, instr, DeoptimizeReason::kOutOfBounds);
}
}
@@ -4184,7 +4182,7 @@ void LCodeGen::DoDeferredMaybeGrowElements(LMaybeGrowElements* instr) {
// Deopt on smi, which means the elements array changed to dictionary mode.
__ SmiTst(result);
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
}
@@ -4231,7 +4229,7 @@ void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
Register temp = ToRegister(instr->temp());
Label no_memento_found;
__ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
- DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMementoFound);
__ bind(&no_memento_found);
}
@@ -4555,12 +4553,12 @@ void LCodeGen::DoSmiTag(LSmiTag* instr) {
if (hchange->CheckFlag(HValue::kCanOverflow) &&
hchange->value()->CheckFlag(HValue::kUint32)) {
__ tst(input, Operand(0xc0000000));
- DeoptimizeIf(ne, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOverflow);
}
if (hchange->CheckFlag(HValue::kCanOverflow) &&
!hchange->value()->CheckFlag(HValue::kUint32)) {
__ SmiTag(output, input, SetCC);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
} else {
__ SmiTag(output, input);
}
@@ -4574,7 +4572,7 @@ void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
STATIC_ASSERT(kHeapObjectTag == 1);
// If the input is a HeapObject, SmiUntag will set the carry flag.
__ SmiUntag(result, input, SetCC);
- DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(cs, instr, DeoptimizeReason::kNotASmi);
} else {
__ SmiUntag(result, input);
}
@@ -4602,7 +4600,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
if (can_convert_undefined_to_nan) {
__ b(ne, &convert);
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
}
// load heap number
__ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
@@ -4612,7 +4610,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
__ b(ne, &done);
__ VmovHigh(scratch, result_reg);
__ cmp(scratch, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kMinusZero);
}
__ jmp(&done);
if (can_convert_undefined_to_nan) {
@@ -4620,7 +4618,7 @@ void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
// Convert undefined (and hole) to NaN.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ cmp(input_reg, Operand(ip));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ LoadRoot(scratch, Heap::kNanValueRootIndex);
__ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
__ jmp(&done);
@@ -4688,22 +4686,22 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ bind(&check_false);
__ LoadRoot(ip, Heap::kFalseValueRootIndex);
__ cmp(scratch2, Operand(ip));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefinedBoolean);
__ mov(input_reg, Operand::Zero());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumber);
__ sub(ip, scratch2, Operand(kHeapObjectTag));
__ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
__ cmp(input_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_scratch2);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero);
}
}
__ bind(&done);
@@ -4772,14 +4770,14 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
@@ -4797,26 +4795,26 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
- DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kLostPrecisionOrNaN);
if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
Label done;
__ cmp(result_reg, Operand::Zero());
__ b(ne, &done);
__ VmovHigh(scratch1, double_input);
__ tst(scratch1, Operand(HeapNumber::kSignMask));
- DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kMinusZero);
__ bind(&done);
}
}
__ SmiTag(result_reg, SetCC);
- DeoptimizeIf(vs, instr, Deoptimizer::kOverflow);
+ DeoptimizeIf(vs, instr, DeoptimizeReason::kOverflow);
}
void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotASmi);
}
@@ -4824,7 +4822,7 @@ void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
if (!instr->hydrogen()->value()->type().IsHeapObject()) {
LOperand* input = instr->value();
__ SmiTst(ToRegister(input));
- DeoptimizeIf(eq, instr, Deoptimizer::kSmi);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kSmi);
}
}
@@ -4837,7 +4835,7 @@ void LCodeGen::DoCheckArrayBufferNotNeutered(
__ ldr(scratch, FieldMemOperand(view, JSArrayBufferView::kBufferOffset));
__ ldr(scratch, FieldMemOperand(scratch, JSArrayBuffer::kBitFieldOffset));
__ tst(scratch, Operand(1 << JSArrayBuffer::WasNeutered::kShift));
- DeoptimizeIf(ne, instr, Deoptimizer::kOutOfBounds);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kOutOfBounds);
}
@@ -4857,13 +4855,13 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
// If there is only one type in the interval check for equality.
if (first == last) {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
} else {
- DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(lo, instr, DeoptimizeReason::kWrongInstanceType);
// Omit check for the last type.
if (last != LAST_TYPE) {
__ cmp(scratch, Operand(last));
- DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(hi, instr, DeoptimizeReason::kWrongInstanceType);
}
}
} else {
@@ -4874,11 +4872,12 @@ void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
if (base::bits::IsPowerOfTwo32(mask)) {
DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
__ tst(scratch, Operand(mask));
- DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(tag == 0 ? ne : eq, instr,
+ DeoptimizeReason::kWrongInstanceType);
} else {
__ and_(scratch, scratch, Operand(mask));
__ cmp(scratch, Operand(tag));
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongInstanceType);
}
}
}
@@ -4897,7 +4896,7 @@ void LCodeGen::DoCheckValue(LCheckValue* instr) {
} else {
__ cmp(reg, Operand(object));
}
- DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kValueMismatch);
}
@@ -4912,7 +4911,7 @@ void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
__ StoreToSafepointRegisterSlot(r0, scratch0());
}
__ tst(scratch0(), Operand(kSmiTagMask));
- DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kInstanceMigrationFailed);
}
@@ -4970,7 +4969,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
if (instr->hydrogen()->HasMigrationTarget()) {
__ b(ne, deferred->entry());
} else {
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
__ bind(&success);
@@ -5009,7 +5008,7 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
// Check for undefined. Undefined is converted to zero for clamping
// conversions.
__ cmp(input_reg, Operand(factory()->undefined_value()));
- DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kNotAHeapNumberUndefined);
__ mov(result_reg, Operand::Zero());
__ jmp(&done);
@@ -5442,7 +5441,7 @@ void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
__ ldr(result,
FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
__ cmp(result, Operand::Zero());
- DeoptimizeIf(eq, instr, Deoptimizer::kNoCache);
+ DeoptimizeIf(eq, instr, DeoptimizeReason::kNoCache);
__ bind(&done);
}
@@ -5453,7 +5452,7 @@ void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
Register map = ToRegister(instr->map());
__ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
__ cmp(map, scratch0());
- DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap);
+ DeoptimizeIf(ne, instr, DeoptimizeReason::kWrongMap);
}
