Index: src/arm/lithium-codegen-arm.cc
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index 1b5d90bd1f658b7ac00d5bbd2cb971cd84f5ae46..057ca1ef2bae528deba94161f7478d26203a4d8c 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -426,11 +426,7 @@ Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
       Abort("EmitLoadRegister: Unsupported double immediate.");
     } else {
       ASSERT(r.IsTagged());
-      if (literal->IsSmi()) {
-        __ mov(scratch, Operand(literal));
-      } else {
-        __ LoadHeapObject(scratch, Handle<HeapObject>::cast(literal));
-      }
+      __ LoadObject(scratch, literal);
     }
     return scratch;
   } else if (op->IsStackSlot() || op->IsArgument()) {
@@ -498,9 +494,18 @@ bool LCodeGen::IsSmi(LConstantOperand* op) const {
 }
-int LCodeGen::ToInteger32(LConstantOperand* op) const {
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+  return ToRepresentation(op, Representation::Integer32());
+}
+
+
+int32_t LCodeGen::ToRepresentation(LConstantOperand* op,
+                                   const Representation& r) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  return constant->Integer32Value();
+  int32_t value = constant->Integer32Value();
+  if (r.IsInteger32()) return value;
+  ASSERT(r.IsSmiOrTagged());
+  return reinterpret_cast<int32_t>(Smi::FromInt(value));
 }
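
Note: on 32-bit targets a smi is the integer value shifted left by one, with a zero tag bit, so the reinterpret_cast above yields the tagged bit pattern directly. A minimal standalone model of that encoding (plain C++ with illustrative names, not V8's API):

    #include <cassert>
    #include <cstdint>

    // 32-bit smi encoding assumed above: one tag bit (0), 31 payload bits.
    int32_t SmiEncode(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);  // tag
    }
    int32_t SmiDecode(int32_t smi) { return smi >> 1; }  // arithmetic untag

    int main() {
      assert(SmiEncode(21) == 42);              // tagged form is value * 2
      assert(SmiDecode(SmiEncode(-7)) == -7);   // sign survives the round trip
      return 0;
    }
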
@@ -522,7 +527,10 @@ Operand LCodeGen::ToOperand(LOperand* op) {
     LConstantOperand* const_op = LConstantOperand::cast(op);
     HConstant* constant = chunk()->LookupConstant(const_op);
     Representation r = chunk_->LookupLiteralRepresentation(const_op);
-    if (r.IsInteger32()) {
+    if (r.IsSmi()) {
+      ASSERT(constant->HasSmiValue());
+      return Operand(Smi::FromInt(constant->Integer32Value()));
+    } else if (r.IsInteger32()) {
       ASSERT(constant->HasInteger32Value());
       return Operand(constant->Integer32Value());
     } else if (r.IsDouble()) {
@@ -810,12 +818,6 @@ void LCodeGen::DeoptimizeIf(Condition cc,
 }
-void LCodeGen::SoftDeoptimize(LEnvironment* environment) {
-  ASSERT(!info()->IsStub());
-  DeoptimizeIf(al, environment, Deoptimizer::SOFT);
-}
-
-
 void LCodeGen::RegisterDependentCodeForEmbeddedMaps(Handle<Code> code) {
   ZoneList<Handle<Map> > maps(1, zone());
   int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
@@ -1188,7 +1190,7 @@ void LCodeGen::DoModI(LModI* instr) {
     DwVfpRegister dividend = ToDoubleRegister(instr->temp());
     DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
     ASSERT(!divisor.is(dividend));
-    DwVfpRegister quotient = double_scratch0();
+    LowDwVfpRegister quotient = double_scratch0();
     ASSERT(!quotient.is(dividend));
     ASSERT(!quotient.is(divisor));
@@ -1204,11 +1206,10 @@ void LCodeGen::DoModI(LModI* instr) {
     // Load the arguments in VFP registers. The divisor value is preloaded
     // before. Be careful that 'right_reg' is only live on entry.
    // TODO(svenpanne) The last comments seems to be wrong nowadays.
-    __ vmov(dividend.low(), left_reg);
-    __ vmov(divisor.low(), right_reg);
-
-    __ vcvt_f64_s32(dividend, dividend.low());
-    __ vcvt_f64_s32(divisor, divisor.low());
+    __ vmov(double_scratch0().low(), left_reg);
+    __ vcvt_f64_s32(dividend, double_scratch0().low());
+    __ vmov(double_scratch0().low(), right_reg);
+    __ vcvt_f64_s32(divisor, double_scratch0().low());
     // We do not care about the sign of the divisor. Note that we still handle
     // the kMinInt % -1 case correctly, though.
@@ -1219,10 +1220,9 @@ void LCodeGen::DoModI(LModI* instr) {
     __ vcvt_f64_s32(quotient, quotient.low());
     // Compute the remainder in result.
-    DwVfpRegister double_scratch = dividend;
-    __ vmul(double_scratch, divisor, quotient);
-    __ vcvt_s32_f64(double_scratch.low(), double_scratch);
-    __ vmov(scratch, double_scratch.low());
+    __ vmul(double_scratch0(), divisor, quotient);
+    __ vcvt_s32_f64(double_scratch0().low(), double_scratch0());
+    __ vmov(scratch, double_scratch0().low());
     __ sub(result_reg, left_reg, scratch, SetCC);
     // If we care about -0, test if the dividend is <0 and the result is 0.
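
Note: the sequence above computes left % right entirely in VFP registers: widen both operands to double, divide, truncate the quotient back to int32, multiply, subtract. A runnable C++ model of that arithmetic (a plain stand-in for the generated ARM code, ignoring the kMinInt edge cases the real code also guards):

    #include <cassert>
    #include <cstdint>

    // mod = left - trunc(left / right) * right, as in the VFP sequence above.
    int32_t FpuMod(int32_t left, int32_t right) {
      double dividend = static_cast<double>(left);
      double divisor = static_cast<double>(right);
      int32_t quotient = static_cast<int32_t>(dividend / divisor);  // vcvt_s32_f64 truncates
      return left - quotient * right;  // sub(result_reg, left_reg, scratch)
    }

    int main() {
      assert(FpuMod(7, 3) == 1);
      assert(FpuMod(-7, 3) == -1);  // sign follows the dividend
      assert(FpuMod(7, -3) == 1);   // divisor sign is irrelevant
      return 0;
    }
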
@@ -1422,20 +1422,20 @@ void LCodeGen::DoDivI(LDivI* instr) {
   } else {
     const DoubleRegister vleft = ToDoubleRegister(instr->temp());
     const DoubleRegister vright = double_scratch0();
-    __ vmov(vleft.low(), left);
-    __ vmov(vright.low(), right);
-    __ vcvt_f64_s32(vleft, vleft.low());
-    __ vcvt_f64_s32(vright, vright.low());
+    __ vmov(double_scratch0().low(), left);
+    __ vcvt_f64_s32(vleft, double_scratch0().low());
+    __ vmov(double_scratch0().low(), right);
+    __ vcvt_f64_s32(vright, double_scratch0().low());
     __ vdiv(vleft, vleft, vright);  // vleft now contains the result.
-    __ vcvt_s32_f64(vright.low(), vleft);
-    __ vmov(result, vright.low());
+    __ vcvt_s32_f64(double_scratch0().low(), vleft);
+    __ vmov(result, double_scratch0().low());
     if (!instr->hydrogen()->CheckFlag(
         HInstruction::kAllUsesTruncatingToInt32)) {
       // Deopt if exact conversion to integer was not possible.
       // Use vright as scratch register.
-      __ vcvt_f64_s32(vright, vright.low());
-      __ VFPCompareAndSetFlags(vleft, vright);
+      __ vcvt_f64_s32(double_scratch0(), double_scratch0().low());
+      __ VFPCompareAndSetFlags(vleft, double_scratch0());
       DeoptimizeIf(ne, instr->environment());
     }
   }
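
Note: the deopt test above relies on a round trip: convert the double quotient to int32 and back, then compare with the original. If the two differ, the division was inexact. Modeled in plain C++:

    #include <cassert>
    #include <cstdint>

    // Exactness check: double -> int32 -> double preserves the value only
    // when the quotient was already integral.
    bool DivisionWasExact(double quotient) {
      int32_t truncated = static_cast<int32_t>(quotient);  // vcvt_s32_f64
      return static_cast<double>(truncated) == quotient;   // vcvt_f64_s32 + compare
    }

    int main() {
      assert(DivisionWasExact(6.0 / 3.0));
      assert(!DivisionWasExact(7.0 / 3.0));  // remainder -> DeoptimizeIf(ne, ...)
      return 0;
    }
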
@@ -1550,7 +1550,9 @@ void LCodeGen::DoMulI(LMulI* instr) {
   if (right_op->IsConstantOperand() && !can_overflow) {
     // Use optimized code for specific constants.
-    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+    int32_t constant = ToRepresentation(
+        LConstantOperand::cast(right_op),
+        instr->hydrogen()->right()->representation());
     if (bailout_on_minus_zero && (constant < 0)) {
       // The case of a null constant will be handled separately.
@@ -1614,11 +1616,21 @@ void LCodeGen::DoMulI(LMulI* instr) {
     if (can_overflow) {
       // scratch:result = left * right.
-      __ smull(result, scratch, left, right);
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ smull(result, scratch, result, right);
+      } else {
+        __ smull(result, scratch, left, right);
+      }
       __ cmp(scratch, Operand(result, ASR, 31));
       DeoptimizeIf(ne, instr->environment());
     } else {
-      __ mul(result, left, right);
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ mul(result, result, right);
+      } else {
+        __ mul(result, left, right);
+      }
     }
     if (bailout_on_minus_zero) {
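
Note: multiplying two tagged smis directly would double-tag the product, since tagged(x) == 2x and (2a)(2b) == 4ab; untagging one operand first gives a * 2b == 2ab, which is exactly tagged(a*b). Hence the SmiUntag before smull/mul above. The same arithmetic in plain C++:

    #include <cassert>
    #include <cstdint>

    // One operand untagged before the multiply, as in DoMulI's smi path.
    int32_t SmiMul(int32_t left_tagged, int32_t right_tagged) {
      int32_t left_untagged = left_tagged >> 1;  // SmiUntag(result, left)
      return left_untagged * right_tagged;       // mul(result, result, right)
    }

    int main() {
      int32_t six = 6 << 1, seven = 7 << 1;      // tagged 6 and 7
      assert(SmiMul(six, seven) == (42 << 1));   // tagged 42
      return 0;
    }
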
@@ -1730,7 +1742,14 @@ void LCodeGen::DoShiftI(LShiftI* instr) {
         break;
       case Token::SHL:
         if (shift_count != 0) {
-          __ mov(result, Operand(left, LSL, shift_count));
+          if (instr->hydrogen_value()->representation().IsSmi() &&
+              instr->can_deopt()) {
+            __ mov(result, Operand(left, LSL, shift_count - 1));
+            __ SmiTag(result, result, SetCC);
+            DeoptimizeIf(vs, instr->environment());
+          } else {
+            __ mov(result, Operand(left, LSL, shift_count));
+          }
         } else {
           __ Move(result, left);
         }
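
Note: for a smi-represented shift that may deopt, the value is shifted by shift_count - 1 while still tagged, and the final SmiTag (one more left shift, with SetCC) sets the overflow flag exactly when the result no longer fits in 31 bits. A simplified 64-bit model of that 32-bit flag behavior:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Returns false ("deopt") when re-tagging overflows, as DeoptimizeIf(vs, ...) would.
    bool SmiShl(int32_t tagged_in, int shift_count, int32_t* tagged_out) {
      int64_t shifted = static_cast<int64_t>(tagged_in) << (shift_count - 1);
      int64_t tagged = shifted + shifted;  // SmiTag == flag-setting add
      if (tagged < std::numeric_limits<int32_t>::min() ||
          tagged > std::numeric_limits<int32_t>::max()) {
        return false;  // V flag set -> DeoptimizeIf(vs, instr->environment())
      }
      *tagged_out = static_cast<int32_t>(tagged);
      return true;
    }

    int main() {
      int32_t out;
      assert(SmiShl(3 << 1, 2, &out) && out == (12 << 1));  // 3 << 2 fits a smi
      assert(!SmiShl(1 << 1, 30, &out));                    // 1 << 30 does not
      return 0;
    }
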
@@ -1806,12 +1825,7 @@ void LCodeGen::DoConstantD(LConstantD* instr) {
 void LCodeGen::DoConstantT(LConstantT* instr) {
   Handle<Object> value = instr->value();
   AllowDeferredHandleDereference smi_check;
-  if (value->IsSmi()) {
-    __ mov(ToRegister(instr->result()), Operand(value));
-  } else {
-    __ LoadHeapObject(ToRegister(instr->result()),
-                      Handle<HeapObject>::cast(value));
-  }
+  __ LoadObject(ToRegister(instr->result()), value);
 }
@@ -1973,7 +1987,7 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
   HMathMinMax::Operation operation = instr->hydrogen()->operation();
-  if (instr->hydrogen()->representation().IsInteger32()) {
+  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
     Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
     Register left_reg = ToRegister(left);
     Operand right_op = (right->IsRegister() || right->IsConstantOperand())
@@ -2372,14 +2386,6 @@ void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
 }
-void LCodeGen::DoCmpConstantEqAndBranch(LCmpConstantEqAndBranch* instr) {
-  Register left = ToRegister(instr->left());
-
-  __ cmp(left, Operand(instr->hydrogen()->right()));
-  EmitBranch(instr, eq);
-}
-
-
 Condition LCodeGen::EmitIsObject(Register input,
                                  Register temp1,
                                  Label* is_not_object,
@@ -3032,9 +3038,9 @@ void LCodeGen::EmitLoadFieldOrConstantFunction(Register result,
       __ ldr(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
       __ ldr(result, FieldMemOperand(result, offset + FixedArray::kHeaderSize));
     }
-  } else if (lookup.IsConstantFunction()) {
-    Handle<JSFunction> function(lookup.GetConstantFunctionFromMap(*type));
-    __ LoadHeapObject(result, function);
+  } else if (lookup.IsConstant()) {
+    Handle<Object> constant(lookup.GetConstantFromMap(*type), isolate());
+    __ LoadObject(result, constant);
   } else {
     // Negative lookup.
     // Check prototypes.
@@ -3209,8 +3215,8 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
         : Operand(key, LSL, shift_size);
     __ add(scratch0(), external_pointer, operand);
     if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
-      __ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset);
-      __ vcvt_f64_f32(result, kScratchDoubleReg.low());
+      __ vldr(double_scratch0().low(), scratch0(), additional_offset);
+      __ vcvt_f64_f32(result, double_scratch0().low());
     } else {  // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
       __ vldr(result, scratch0(), additional_offset);
     }
@@ -3778,7 +3784,6 @@ void LCodeGen::DoMathFloor(LMathFloor* instr) {
   Register input_high = scratch0();
   Label done, exact;
-  __ vmov(input_high, input.high());
   __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
   DeoptimizeIf(al, instr->environment());
@@ -3811,7 +3816,7 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   // If the input is +0.5, the result is 1.
   __ b(hi, &convert);  // Out of [-0.5, +0.5].
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    __ vmov(input_high, input.high());
+    __ VmovHigh(input_high, input);
     __ cmp(input_high, Operand::Zero());
     DeoptimizeIf(mi, instr->environment());  // [-0.5, -0].
   }
@@ -3824,7 +3829,6 @@ void LCodeGen::DoMathRound(LMathRound* instr) {
   __ bind(&convert);
   __ vadd(input_plus_dot_five, input, dot_five);
-  __ vmov(input_high, input_plus_dot_five.high());
   // Reuse dot_five (double_scratch0) as we no longer need this value.
   __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
                    &done, &done);
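
Note: outside the special [-0.5, +0.5] band, DoMathRound reduces to floor(input + 0.5) via the vadd and TryInt32Floor above, so halfway cases round toward +infinity. The same arithmetic in plain C++:

    #include <cassert>
    #include <cmath>

    int RoundHalfUp(double input) {
      return static_cast<int>(std::floor(input + 0.5));  // vadd + TryInt32Floor
    }

    int main() {
      assert(RoundHalfUp(2.5) == 3);
      assert(RoundHalfUp(-2.5) == -2);  // toward +infinity, not away from zero
      assert(RoundHalfUp(0.49) == 0);
      return 0;
    }
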
@@ -4266,6 +4270,18 @@ void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
 }
+void LCodeGen::ApplyCheckIf(Condition cc, LBoundsCheck* check) {
+  if (FLAG_debug_code && check->hydrogen()->skip_check()) {
+    Label done;
+    __ b(NegateCondition(cc), &done);
+    __ stop("eliminated bounds check failed");
+    __ bind(&done);
+  } else {
+    DeoptimizeIf(cc, check->environment());
+  }
+}
+
+
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   if (instr->hydrogen()->skip_check()) return;
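
Note: ApplyCheckIf inverts the usual deopt pattern for checks the optimizer claims it eliminated: with --debug-code, the check is still emitted and a failure traps hard rather than deopting, so bogus eliminations surface immediately. A rough C++ analogue (illustrative names, not V8 API):

    #include <cstdio>
    #include <cstdlib>

    void ApplyCheck(bool check_failed, bool skip_check, bool debug_code) {
      if (debug_code && skip_check) {
        if (check_failed) {                    // __ b(NegateCondition(cc), &done)
          std::fprintf(stderr, "eliminated bounds check failed\n");
          std::abort();                        // __ stop(...)
        }
      } else if (check_failed) {
        std::puts("deopt");                    // DeoptimizeIf(cc, check->environment())
      }
    }

    int main() {
      ApplyCheck(false, true, true);  // eliminated check holds: no trap
      return 0;
    }
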
@@ -4281,7 +4297,8 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   } else {
     __ cmp(ToRegister(instr->index()), ToRegister(instr->length()));
   }
-  DeoptimizeIf(hs, instr->environment());
+  Condition condition = instr->hydrogen()->allow_equality() ? hi : hs;
+  ApplyCheckIf(condition, instr);
 }
@@ -4490,7 +4507,7 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
     // Write barrier.
     __ RecordWriteField(object_reg, HeapObject::kMapOffset, new_map_reg,
                         scratch, GetLinkRegisterState(), kDontSaveFPRegs);
-  } else if (FLAG_compiled_transitions) {
+  } else {
     PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
     __ Move(r0, object_reg);
     __ Move(r1, to_map);
@@ -4498,28 +4515,6 @@ void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
     __ CallStub(&stub);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
-  } else if (IsFastSmiElementsKind(from_kind) &&
-             IsFastDoubleElementsKind(to_kind)) {
-    Register fixed_object_reg = ToRegister(instr->temp());
-    ASSERT(fixed_object_reg.is(r2));
-    Register new_map_reg = ToRegister(instr->new_map_temp());
-    ASSERT(new_map_reg.is(r3));
-    __ mov(new_map_reg, Operand(to_map));
-    __ mov(fixed_object_reg, object_reg);
-    CallCode(isolate()->builtins()->TransitionElementsSmiToDouble(),
-             RelocInfo::CODE_TARGET, instr);
-  } else if (IsFastDoubleElementsKind(from_kind) &&
-             IsFastObjectElementsKind(to_kind)) {
-    Register fixed_object_reg = ToRegister(instr->temp());
-    ASSERT(fixed_object_reg.is(r2));
-    Register new_map_reg = ToRegister(instr->new_map_temp());
-    ASSERT(new_map_reg.is(r3));
-    __ mov(new_map_reg, Operand(to_map));
-    __ mov(fixed_object_reg, object_reg);
-    CallCode(isolate()->builtins()->TransitionElementsDoubleToObject(),
-             RelocInfo::CODE_TARGET, instr);
-  } else {
-    UNREACHABLE();
   }
   __ bind(&not_applicable);
 }
@@ -4747,8 +4742,7 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
   Label slow;
   Register src = ToRegister(value);
   Register dst = ToRegister(instr->result());
-  DwVfpRegister dbl_scratch = double_scratch0();
-  SwVfpRegister flt_scratch = dbl_scratch.low();
+  LowDwVfpRegister dbl_scratch = double_scratch0();
   // Preserve the value of all registers.
   PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
@@ -4762,11 +4756,11 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
       __ SmiUntag(src, dst);
       __ eor(src, src, Operand(0x80000000));
     }
-    __ vmov(flt_scratch, src);
-    __ vcvt_f64_s32(dbl_scratch, flt_scratch);
+    __ vmov(dbl_scratch.low(), src);
+    __ vcvt_f64_s32(dbl_scratch, dbl_scratch.low());
   } else {
-    __ vmov(flt_scratch, src);
-    __ vcvt_f64_u32(dbl_scratch, flt_scratch);
+    __ vmov(dbl_scratch.low(), src);
+    __ vcvt_f64_u32(dbl_scratch, dbl_scratch.low());
   }
   if (FLAG_inline_new) {
@@ -4827,7 +4821,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
     DwVfpRegister input_reg = ToDoubleRegister(instr->value());
     __ VFPCompareAndSetFlags(input_reg, input_reg);
     __ b(vc, &no_special_nan_handling);
-    __ vmov(scratch, input_reg.high());
+    __ VmovHigh(scratch, input_reg);
     __ cmp(scratch, Operand(kHoleNanUpper32));
     // If not the hole NaN, force the NaN to be canonical.
     __ VFPCanonicalizeNaN(input_reg, ne);
@@ -4927,22 +4921,20 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
     DeoptimizeIf(ne, env);
     __ bind(&convert);
-    __ LoadRoot(ip, Heap::kNanValueRootIndex);
-    __ sub(ip, ip, Operand(kHeapObjectTag));
-    __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+    __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+    __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
     __ jmp(&done);
     __ bind(&heap_number);
   }
   // Heap number to double register conversion.
-  __ sub(ip, input_reg, Operand(kHeapObjectTag));
-  __ vldr(result_reg, ip, HeapNumber::kValueOffset);
+  __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
   if (deoptimize_on_minus_zero) {
-    __ vmov(ip, result_reg.low());
-    __ cmp(ip, Operand::Zero());
+    __ VmovLow(scratch, result_reg);
+    __ cmp(scratch, Operand::Zero());
     __ b(ne, &done);
-    __ vmov(ip, result_reg.high());
-    __ cmp(ip, Operand(HeapNumber::kSignMask));
+    __ VmovHigh(scratch, result_reg);
+    __ cmp(scratch, Operand(HeapNumber::kSignMask));
     DeoptimizeIf(eq, env);
   }
   __ jmp(&done);
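
Note: a tagged heap pointer is the object address plus kHeapObjectTag (1), so the separate sub can be folded into the load's immediate: a vldr at offset kValueOffset - kHeapObjectTag reads the same address. A small pointer-arithmetic model (kValueOffset here is an illustrative field offset, not V8's exact layout):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      const intptr_t kHeapObjectTag = 1;
      const intptr_t kValueOffset = 8;  // illustrative, not V8's layout
      unsigned char object[16] = {0};
      double value = 3.5;
      std::memcpy(object + kValueOffset, &value, sizeof(value));

      // tagged + (kValueOffset - kHeapObjectTag) == untagged + kValueOffset
      intptr_t tagged = reinterpret_cast<intptr_t>(object) + kHeapObjectTag;
      double loaded;
      std::memcpy(&loaded,
                  reinterpret_cast<const void*>(tagged + kValueOffset - kHeapObjectTag),
                  sizeof(loaded));
      assert(loaded == 3.5);
      return 0;
    }
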
@@ -4964,7 +4956,7 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
   Register input_reg = ToRegister(instr->value());
   Register scratch1 = scratch0();
   Register scratch2 = ToRegister(instr->temp());
-  DwVfpRegister double_scratch = double_scratch0();
+  LowDwVfpRegister double_scratch = double_scratch0();
   DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp3());
   ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
@@ -5012,14 +5004,14 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
     DeoptimizeIf(ne, instr->environment());
     __ sub(ip, input_reg, Operand(kHeapObjectTag));
-    __ vldr(double_scratch, ip, HeapNumber::kValueOffset);
-    __ TryDoubleToInt32Exact(input_reg, double_scratch, double_scratch2);
+    __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
+    __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
     DeoptimizeIf(ne, instr->environment());
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       __ cmp(input_reg, Operand::Zero());
       __ b(ne, &done);
-      __ vmov(scratch1, double_scratch.high());
+      __ VmovHigh(scratch1, double_scratch2);
       __ tst(scratch1, Operand(HeapNumber::kSignMask));
       DeoptimizeIf(ne, instr->environment());
     }
@@ -5092,7 +5084,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   Register scratch1 = scratch0();
   Register scratch2 = ToRegister(instr->temp());
   DwVfpRegister double_input = ToDoubleRegister(instr->value());
-  DwVfpRegister double_scratch = double_scratch0();
+  LowDwVfpRegister double_scratch = double_scratch0();
   if (instr->truncating()) {
     Register scratch3 = ToRegister(instr->temp2());
@@ -5106,7 +5098,7 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
     Label done;
     __ cmp(result_reg, Operand::Zero());
     __ b(ne, &done);
-    __ vmov(scratch1, double_input.high());
+    __ VmovHigh(scratch1, double_input);
     __ tst(scratch1, Operand(HeapNumber::kSignMask));
     DeoptimizeIf(ne, instr->environment());
     __ bind(&done);
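
Note: a zero result may still be -0.0, which compares equal to +0.0 as a double; the VmovHigh + tst sequence distinguishes them by testing the sign bit in the upper word (HeapNumber::kSignMask, i.e. 0x80000000). Runnable model:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    bool IsMinusZero(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint32_t high = static_cast<uint32_t>(bits >> 32);  // VmovHigh(scratch1, ...)
      const uint32_t kSignMask = 0x80000000u;             // HeapNumber::kSignMask
      return value == 0.0 && (high & kSignMask) != 0;     // tst + DeoptimizeIf(ne)
    }

    int main() {
      assert(IsMinusZero(-0.0));
      assert(!IsMinusZero(0.0));
      return 0;
    }
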
@@ -5120,7 +5112,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   Register scratch1 = scratch0();
   Register scratch2 = ToRegister(instr->temp());
   DwVfpRegister double_input = ToDoubleRegister(instr->value());
-  DwVfpRegister double_scratch = double_scratch0();
+  LowDwVfpRegister double_scratch = double_scratch0();
   if (instr->truncating()) {
     Register scratch3 = ToRegister(instr->temp2());
@@ -5134,7 +5126,7 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
     Label done;
     __ cmp(result_reg, Operand::Zero());
     __ b(ne, &done);
-    __ vmov(scratch1, double_input.high());
+    __ VmovHigh(scratch1, double_input);
     __ tst(scratch1, Operand(HeapNumber::kSignMask));
     DeoptimizeIf(ne, instr->environment());
     __ bind(&done);
@@ -5232,6 +5224,7 @@ void LCodeGen::DoCheckMapCommon(Register map_reg,
 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  if (instr->hydrogen()->CanOmitMapChecks()) return;
   Register map_reg = scratch0();
   LOperand* input = instr->value();
   ASSERT(input->IsRegister());
@@ -5254,8 +5247,7 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
   DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
   Register result_reg = ToRegister(instr->result());
-  DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
-  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
 }
@@ -5290,9 +5282,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   // Heap number
   __ bind(&heap_number);
-  __ vldr(double_scratch0(), FieldMemOperand(input_reg,
-                                             HeapNumber::kValueOffset));
-  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+  __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
   __ jmp(&done);
   // smi
@@ -5304,6 +5295,8 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
 void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
+  if (instr->hydrogen()->CanOmitPrototypeChecks()) return;
+
   Register prototype_reg = ToRegister(instr->temp());
   Register map_reg = ToRegister(instr->temp2());
@@ -5312,12 +5305,10 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
   ASSERT(prototypes->length() == maps->length());
-  if (!instr->hydrogen()->CanOmitPrototypeChecks()) {
-    for (int i = 0; i < prototypes->length(); i++) {
-      __ LoadHeapObject(prototype_reg, prototypes->at(i));
-      __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
-      DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
-    }
+  for (int i = 0; i < prototypes->length(); i++) {
+    __ LoadHeapObject(prototype_reg, prototypes->at(i));
+    __ ldr(map_reg, FieldMemOperand(prototype_reg, HeapObject::kMapOffset));
+    DoCheckMapCommon(map_reg, maps->at(i), instr->environment());
   }
 }
@@ -5465,8 +5456,7 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
   __ bind(&allocated);
   // Copy the content into the newly allocated memory.
-  __ CopyFields(r0, r1, double_scratch0(), double_scratch0().low(),
-                size / kPointerSize);
+  __ CopyFields(r0, r1, double_scratch0(), size / kPointerSize);
 }
@@ -5645,11 +5635,15 @@ void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
 void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
-  if (instr->hydrogen_value()->IsSoftDeoptimize()) {
-    SoftDeoptimize(instr->environment());
-  } else {
-    DeoptimizeIf(al, instr->environment());
-  }
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
+  }
+  DeoptimizeIf(al, instr->environment(), type);
} |