| OLD | NEW | 
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. | 
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be | 
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. | 
| 4 | 4 | 
| 5 #include "src/v8.h" | 5 #include "src/v8.h" | 
| 6 | 6 | 
| 7 #include "src/arm/lithium-codegen-arm.h" | 7 #include "src/arm/lithium-codegen-arm.h" | 
| 8 #include "src/arm/lithium-gap-resolver-arm.h" | 8 #include "src/arm/lithium-gap-resolver-arm.h" | 
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" | 
| 10 #include "src/code-factory.h" | 10 #include "src/code-factory.h" | 
| (...skipping 822 matching lines...) | (...skipping 822 matching lines...) |
| 833     int pc_offset = masm()->pc_offset(); | 833     int pc_offset = masm()->pc_offset(); | 
| 834     environment->Register(deoptimization_index, | 834     environment->Register(deoptimization_index, | 
| 835                           translation.index(), | 835                           translation.index(), | 
| 836                           (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 836                           (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 
| 837     deoptimizations_.Add(environment, zone()); | 837     deoptimizations_.Add(environment, zone()); | 
| 838   } | 838   } | 
| 839 } | 839 } | 
| 840 | 840 | 
| 841 | 841 | 
| 842 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 842 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 
| 843                             Deoptimizer::DeoptReason deopt_reason, | 843                             const char* detail, | 
| 844                             Deoptimizer::BailoutType bailout_type) { | 844                             Deoptimizer::BailoutType bailout_type) { | 
| 845   LEnvironment* environment = instr->environment(); | 845   LEnvironment* environment = instr->environment(); | 
| 846   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 846   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 
| 847   DCHECK(environment->HasBeenRegistered()); | 847   DCHECK(environment->HasBeenRegistered()); | 
| 848   int id = environment->deoptimization_index(); | 848   int id = environment->deoptimization_index(); | 
| 849   DCHECK(info()->IsOptimizing() || info()->IsStub()); | 849   DCHECK(info()->IsOptimizing() || info()->IsStub()); | 
| 850   Address entry = | 850   Address entry = | 
| 851       Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 851       Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 
| 852   if (entry == NULL) { | 852   if (entry == NULL) { | 
| 853     Abort(kBailoutWasNotPrepared); | 853     Abort(kBailoutWasNotPrepared); | 
| (...skipping 33 matching lines...) | (...skipping 33 matching lines...) |
| 887       condition = ne; | 887       condition = ne; | 
| 888       __ cmp(scratch, Operand::Zero()); | 888       __ cmp(scratch, Operand::Zero()); | 
| 889     } | 889     } | 
| 890   } | 890   } | 
| 891 | 891 | 
| 892   if (info()->ShouldTrapOnDeopt()) { | 892   if (info()->ShouldTrapOnDeopt()) { | 
| 893     __ stop("trap_on_deopt", condition); | 893     __ stop("trap_on_deopt", condition); | 
| 894   } | 894   } | 
| 895 | 895 | 
| 896   Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), | 896   Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), | 
| 897                              instr->Mnemonic(), deopt_reason); | 897                              instr->Mnemonic(), detail); | 
| 898   DCHECK(info()->IsStub() || frame_is_built_); | 898   DCHECK(info()->IsStub() || frame_is_built_); | 
| 899   // Go through jump table if we need to handle condition, build frame, or | 899   // Go through jump table if we need to handle condition, build frame, or | 
| 900   // restore caller doubles. | 900   // restore caller doubles. | 
| 901   if (condition == al && frame_is_built_ && | 901   if (condition == al && frame_is_built_ && | 
| 902       !info()->saves_caller_doubles()) { | 902       !info()->saves_caller_doubles()) { | 
| 903     DeoptComment(reason); | 903     DeoptComment(reason); | 
| 904     __ Call(entry, RelocInfo::RUNTIME_ENTRY); | 904     __ Call(entry, RelocInfo::RUNTIME_ENTRY); | 
| 905   } else { | 905   } else { | 
| 906     Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, | 906     Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, | 
| 907                                             !frame_is_built_); | 907                                             !frame_is_built_); | 
| 908     // We often have several deopts to the same entry, reuse the last | 908     // We often have several deopts to the same entry, reuse the last | 
| 909     // jump entry if this is the case. | 909     // jump entry if this is the case. | 
| 910     if (jump_table_.is_empty() || | 910     if (jump_table_.is_empty() || | 
| 911         !table_entry.IsEquivalentTo(jump_table_.last())) { | 911         !table_entry.IsEquivalentTo(jump_table_.last())) { | 
| 912       jump_table_.Add(table_entry, zone()); | 912       jump_table_.Add(table_entry, zone()); | 
| 913     } | 913     } | 
| 914     __ b(condition, &jump_table_.last().label); | 914     __ b(condition, &jump_table_.last().label); | 
| 915   } | 915   } | 
| 916 } | 916 } | 
| 917 | 917 | 
| 918 | 918 | 
| 919 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 919 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 
| 920                             Deoptimizer::DeoptReason deopt_reason) { | 920                             const char* detail) { | 
| 921   Deoptimizer::BailoutType bailout_type = info()->IsStub() | 921   Deoptimizer::BailoutType bailout_type = info()->IsStub() | 
| 922       ? Deoptimizer::LAZY | 922       ? Deoptimizer::LAZY | 
| 923       : Deoptimizer::EAGER; | 923       : Deoptimizer::EAGER; | 
| 924   DeoptimizeIf(condition, instr, deopt_reason, bailout_type); | 924   DeoptimizeIf(condition, instr, detail, bailout_type); | 
| 925 } | 925 } | 
| 926 | 926 | 
| 927 | 927 | 
| 928 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 928 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 
| 929   int length = deoptimizations_.length(); | 929   int length = deoptimizations_.length(); | 
| 930   if (length == 0) return; | 930   if (length == 0) return; | 
| 931   Handle<DeoptimizationInputData> data = | 931   Handle<DeoptimizationInputData> data = | 
| 932       DeoptimizationInputData::New(isolate(), length, TENURED); | 932       DeoptimizationInputData::New(isolate(), length, TENURED); | 
| 933 | 933 | 
| 934   Handle<ByteArray> translations = | 934   Handle<ByteArray> translations = | 
| (...skipping 215 matching lines...) | (...skipping 215 matching lines...) |
| 1150   int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1150   int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 
| 1151   Label dividend_is_not_negative, done; | 1151   Label dividend_is_not_negative, done; | 
| 1152   if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 1152   if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 
| 1153     __ cmp(dividend, Operand::Zero()); | 1153     __ cmp(dividend, Operand::Zero()); | 
| 1154     __ b(pl, &dividend_is_not_negative); | 1154     __ b(pl, &dividend_is_not_negative); |
| 1155     // Note that this is correct even for kMinInt operands. | 1155     // Note that this is correct even for kMinInt operands. | 
| 1156     __ rsb(dividend, dividend, Operand::Zero()); | 1156     __ rsb(dividend, dividend, Operand::Zero()); | 
| 1157     __ and_(dividend, dividend, Operand(mask)); | 1157     __ and_(dividend, dividend, Operand(mask)); | 
| 1158     __ rsb(dividend, dividend, Operand::Zero(), SetCC); | 1158     __ rsb(dividend, dividend, Operand::Zero(), SetCC); | 
| 1159     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1159     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
| 1160       DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1160       DeoptimizeIf(eq, instr, "minus zero"); | 
| 1161     } | 1161     } | 
| 1162     __ b(&done); | 1162     __ b(&done); | 
| 1163   } | 1163   } | 
| 1164 | 1164 | 
| 1165   __ bind(&dividend_is_not_negative); | 1165   __ bind(&dividend_is_not_negative); |
| 1166   __ and_(dividend, dividend, Operand(mask)); | 1166   __ and_(dividend, dividend, Operand(mask)); | 
| 1167   __ bind(&done); | 1167   __ bind(&done); | 
| 1168 } | 1168 } | 
| 1169 | 1169 | 
| 1170 | 1170 | 
| 1171 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 1171 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 
| 1172   Register dividend = ToRegister(instr->dividend()); | 1172   Register dividend = ToRegister(instr->dividend()); | 
| 1173   int32_t divisor = instr->divisor(); | 1173   int32_t divisor = instr->divisor(); | 
| 1174   Register result = ToRegister(instr->result()); | 1174   Register result = ToRegister(instr->result()); | 
| 1175   DCHECK(!dividend.is(result)); | 1175   DCHECK(!dividend.is(result)); | 
| 1176 | 1176 | 
| 1177   if (divisor == 0) { | 1177   if (divisor == 0) { | 
| 1178     DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); | 1178     DeoptimizeIf(al, instr, "division by zero"); | 
| 1179     return; | 1179     return; | 
| 1180   } | 1180   } | 
| 1181 | 1181 | 
| 1182   __ TruncatingDiv(result, dividend, Abs(divisor)); | 1182   __ TruncatingDiv(result, dividend, Abs(divisor)); | 
| 1183   __ mov(ip, Operand(Abs(divisor))); | 1183   __ mov(ip, Operand(Abs(divisor))); | 
| 1184   __ smull(result, ip, result, ip); | 1184   __ smull(result, ip, result, ip); | 
| 1185   __ sub(result, dividend, result, SetCC); | 1185   __ sub(result, dividend, result, SetCC); | 
| 1186 | 1186 | 
| 1187   // Check for negative zero. | 1187   // Check for negative zero. | 
| 1188   HMod* hmod = instr->hydrogen(); | 1188   HMod* hmod = instr->hydrogen(); | 
| 1189   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1189   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
| 1190     Label remainder_not_zero; | 1190     Label remainder_not_zero; | 
| 1191     __ b(ne, &remainder_not_zero); | 1191     __ b(ne, &remainder_not_zero); | 
| 1192     __ cmp(dividend, Operand::Zero()); | 1192     __ cmp(dividend, Operand::Zero()); | 
| 1193     DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); | 1193     DeoptimizeIf(lt, instr, "minus zero"); | 
| 1194     __ bind(&remainder_not_zero); | 1194     __ bind(&remainder_not_zero); | 
| 1195   } | 1195   } | 
| 1196 } | 1196 } | 
| 1197 | 1197 | 
| 1198 | 1198 | 
| 1199 void LCodeGen::DoModI(LModI* instr) { | 1199 void LCodeGen::DoModI(LModI* instr) { | 
| 1200   HMod* hmod = instr->hydrogen(); | 1200   HMod* hmod = instr->hydrogen(); | 
| 1201   if (CpuFeatures::IsSupported(SUDIV)) { | 1201   if (CpuFeatures::IsSupported(SUDIV)) { | 
| 1202     CpuFeatureScope scope(masm(), SUDIV); | 1202     CpuFeatureScope scope(masm(), SUDIV); | 
| 1203 | 1203 | 
| 1204     Register left_reg = ToRegister(instr->left()); | 1204     Register left_reg = ToRegister(instr->left()); | 
| 1205     Register right_reg = ToRegister(instr->right()); | 1205     Register right_reg = ToRegister(instr->right()); | 
| 1206     Register result_reg = ToRegister(instr->result()); | 1206     Register result_reg = ToRegister(instr->result()); | 
| 1207 | 1207 | 
| 1208     Label done; | 1208     Label done; | 
| 1209     // Check for x % 0, sdiv might signal an exception. We have to deopt in this | 1209     // Check for x % 0, sdiv might signal an exception. We have to deopt in this | 
| 1210     // case because we can't return a NaN. | 1210     // case because we can't return a NaN. | 
| 1211     if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1211     if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 
| 1212       __ cmp(right_reg, Operand::Zero()); | 1212       __ cmp(right_reg, Operand::Zero()); | 
| 1213       DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); | 1213       DeoptimizeIf(eq, instr, "division by zero"); | 
| 1214     } | 1214     } | 
| 1215 | 1215 | 
| 1216     // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we | 1216     // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we | 
| 1217     // want. We have to deopt if we care about -0, because we can't return that. | 1217     // want. We have to deopt if we care about -0, because we can't return that. | 
| 1218     if (hmod->CheckFlag(HValue::kCanOverflow)) { | 1218     if (hmod->CheckFlag(HValue::kCanOverflow)) { | 
| 1219       Label no_overflow_possible; | 1219       Label no_overflow_possible; | 
| 1220       __ cmp(left_reg, Operand(kMinInt)); | 1220       __ cmp(left_reg, Operand(kMinInt)); | 
| 1221       __ b(ne, &no_overflow_possible); | 1221       __ b(ne, &no_overflow_possible); | 
| 1222       __ cmp(right_reg, Operand(-1)); | 1222       __ cmp(right_reg, Operand(-1)); | 
| 1223       if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1223       if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
| 1224         DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1224         DeoptimizeIf(eq, instr, "minus zero"); | 
| 1225       } else { | 1225       } else { | 
| 1226         __ b(ne, &no_overflow_possible); | 1226         __ b(ne, &no_overflow_possible); | 
| 1227         __ mov(result_reg, Operand::Zero()); | 1227         __ mov(result_reg, Operand::Zero()); | 
| 1228         __ jmp(&done); | 1228         __ jmp(&done); | 
| 1229       } | 1229       } | 
| 1230       __ bind(&no_overflow_possible); | 1230       __ bind(&no_overflow_possible); | 
| 1231     } | 1231     } | 
| 1232 | 1232 | 
| 1233     // For 'r3 = r1 % r2' we can have the following ARM code: | 1233     // For 'r3 = r1 % r2' we can have the following ARM code: | 
| 1234     //   sdiv r3, r1, r2 | 1234     //   sdiv r3, r1, r2 | 
| 1235     //   mls r3, r3, r2, r1 | 1235     //   mls r3, r3, r2, r1 | 
| 1236 | 1236 | 
| 1237     __ sdiv(result_reg, left_reg, right_reg); | 1237     __ sdiv(result_reg, left_reg, right_reg); | 
| 1238     __ Mls(result_reg, result_reg, right_reg, left_reg); | 1238     __ Mls(result_reg, result_reg, right_reg, left_reg); | 
| 1239 | 1239 | 
| 1240     // If we care about -0, test if the dividend is <0 and the result is 0. | 1240     // If we care about -0, test if the dividend is <0 and the result is 0. | 
| 1241     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1241     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
| 1242       __ cmp(result_reg, Operand::Zero()); | 1242       __ cmp(result_reg, Operand::Zero()); | 
| 1243       __ b(ne, &done); | 1243       __ b(ne, &done); | 
| 1244       __ cmp(left_reg, Operand::Zero()); | 1244       __ cmp(left_reg, Operand::Zero()); | 
| 1245       DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); | 1245       DeoptimizeIf(lt, instr, "minus zero"); | 
| 1246     } | 1246     } | 
| 1247     __ bind(&done); | 1247     __ bind(&done); | 
| 1248 | 1248 | 
| 1249   } else { | 1249   } else { | 
| 1250     // General case, without any SDIV support. | 1250     // General case, without any SDIV support. | 
| 1251     Register left_reg = ToRegister(instr->left()); | 1251     Register left_reg = ToRegister(instr->left()); | 
| 1252     Register right_reg = ToRegister(instr->right()); | 1252     Register right_reg = ToRegister(instr->right()); | 
| 1253     Register result_reg = ToRegister(instr->result()); | 1253     Register result_reg = ToRegister(instr->result()); | 
| 1254     Register scratch = scratch0(); | 1254     Register scratch = scratch0(); | 
| 1255     DCHECK(!scratch.is(left_reg)); | 1255     DCHECK(!scratch.is(left_reg)); | 
| 1256     DCHECK(!scratch.is(right_reg)); | 1256     DCHECK(!scratch.is(right_reg)); | 
| 1257     DCHECK(!scratch.is(result_reg)); | 1257     DCHECK(!scratch.is(result_reg)); | 
| 1258     DwVfpRegister dividend = ToDoubleRegister(instr->temp()); | 1258     DwVfpRegister dividend = ToDoubleRegister(instr->temp()); | 
| 1259     DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); | 1259     DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); | 
| 1260     DCHECK(!divisor.is(dividend)); | 1260     DCHECK(!divisor.is(dividend)); | 
| 1261     LowDwVfpRegister quotient = double_scratch0(); | 1261     LowDwVfpRegister quotient = double_scratch0(); | 
| 1262     DCHECK(!quotient.is(dividend)); | 1262     DCHECK(!quotient.is(dividend)); | 
| 1263     DCHECK(!quotient.is(divisor)); | 1263     DCHECK(!quotient.is(divisor)); | 
| 1264 | 1264 | 
| 1265     Label done; | 1265     Label done; | 
| 1266     // Check for x % 0, we have to deopt in this case because we can't return a | 1266     // Check for x % 0, we have to deopt in this case because we can't return a | 
| 1267     // NaN. | 1267     // NaN. | 
| 1268     if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1268     if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 
| 1269       __ cmp(right_reg, Operand::Zero()); | 1269       __ cmp(right_reg, Operand::Zero()); | 
| 1270       DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); | 1270       DeoptimizeIf(eq, instr, "division by zero"); | 
| 1271     } | 1271     } | 
| 1272 | 1272 | 
| 1273     __ Move(result_reg, left_reg); | 1273     __ Move(result_reg, left_reg); | 
| 1274     // Load the arguments in VFP registers. The divisor value is preloaded | 1274     // Load the arguments in VFP registers. The divisor value is preloaded | 
| 1275     // before. Be careful that 'right_reg' is only live on entry. | 1275     // before. Be careful that 'right_reg' is only live on entry. | 
| 1276     // TODO(svenpanne) The last comments seems to be wrong nowadays. | 1276     // TODO(svenpanne) The last comments seems to be wrong nowadays. | 
| 1277     __ vmov(double_scratch0().low(), left_reg); | 1277     __ vmov(double_scratch0().low(), left_reg); | 
| 1278     __ vcvt_f64_s32(dividend, double_scratch0().low()); | 1278     __ vcvt_f64_s32(dividend, double_scratch0().low()); | 
| 1279     __ vmov(double_scratch0().low(), right_reg); | 1279     __ vmov(double_scratch0().low(), right_reg); | 
| 1280     __ vcvt_f64_s32(divisor, double_scratch0().low()); | 1280     __ vcvt_f64_s32(divisor, double_scratch0().low()); | 
| 1281 | 1281 | 
| 1282     // We do not care about the sign of the divisor. Note that we still handle | 1282     // We do not care about the sign of the divisor. Note that we still handle | 
| 1283     // the kMinInt % -1 case correctly, though. | 1283     // the kMinInt % -1 case correctly, though. | 
| 1284     __ vabs(divisor, divisor); | 1284     __ vabs(divisor, divisor); | 
| 1285     // Compute the quotient and round it to a 32bit integer. | 1285     // Compute the quotient and round it to a 32bit integer. | 
| 1286     __ vdiv(quotient, dividend, divisor); | 1286     __ vdiv(quotient, dividend, divisor); | 
| 1287     __ vcvt_s32_f64(quotient.low(), quotient); | 1287     __ vcvt_s32_f64(quotient.low(), quotient); | 
| 1288     __ vcvt_f64_s32(quotient, quotient.low()); | 1288     __ vcvt_f64_s32(quotient, quotient.low()); | 
| 1289 | 1289 | 
| 1290     // Compute the remainder in result. | 1290     // Compute the remainder in result. | 
| 1291     __ vmul(double_scratch0(), divisor, quotient); | 1291     __ vmul(double_scratch0(), divisor, quotient); | 
| 1292     __ vcvt_s32_f64(double_scratch0().low(), double_scratch0()); | 1292     __ vcvt_s32_f64(double_scratch0().low(), double_scratch0()); | 
| 1293     __ vmov(scratch, double_scratch0().low()); | 1293     __ vmov(scratch, double_scratch0().low()); | 
| 1294     __ sub(result_reg, left_reg, scratch, SetCC); | 1294     __ sub(result_reg, left_reg, scratch, SetCC); | 
| 1295 | 1295 | 
| 1296     // If we care about -0, test if the dividend is <0 and the result is 0. | 1296     // If we care about -0, test if the dividend is <0 and the result is 0. | 
| 1297     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1297     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
| 1298       __ b(ne, &done); | 1298       __ b(ne, &done); | 
| 1299       __ cmp(left_reg, Operand::Zero()); | 1299       __ cmp(left_reg, Operand::Zero()); | 
| 1300       DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); | 1300       DeoptimizeIf(mi, instr, "minus zero"); | 
| 1301     } | 1301     } | 
| 1302     __ bind(&done); | 1302     __ bind(&done); | 
| 1303   } | 1303   } | 
| 1304 } | 1304 } | 
| 1305 | 1305 | 
| 1306 | 1306 | 
| 1307 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1307 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 
| 1308   Register dividend = ToRegister(instr->dividend()); | 1308   Register dividend = ToRegister(instr->dividend()); | 
| 1309   int32_t divisor = instr->divisor(); | 1309   int32_t divisor = instr->divisor(); | 
| 1310   Register result = ToRegister(instr->result()); | 1310   Register result = ToRegister(instr->result()); | 
| 1311   DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 1311   DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 
| 1312   DCHECK(!result.is(dividend)); | 1312   DCHECK(!result.is(dividend)); | 
| 1313 | 1313 | 
| 1314   // Check for (0 / -x) that will produce negative zero. | 1314   // Check for (0 / -x) that will produce negative zero. | 
| 1315   HDiv* hdiv = instr->hydrogen(); | 1315   HDiv* hdiv = instr->hydrogen(); | 
| 1316   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1316   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 
| 1317     __ cmp(dividend, Operand::Zero()); | 1317     __ cmp(dividend, Operand::Zero()); | 
| 1318     DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1318     DeoptimizeIf(eq, instr, "minus zero"); | 
| 1319   } | 1319   } | 
| 1320   // Check for (kMinInt / -1). | 1320   // Check for (kMinInt / -1). | 
| 1321   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1321   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 
| 1322     __ cmp(dividend, Operand(kMinInt)); | 1322     __ cmp(dividend, Operand(kMinInt)); | 
| 1323     DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); | 1323     DeoptimizeIf(eq, instr, "overflow"); | 
| 1324   } | 1324   } | 
| 1325   // Deoptimize if remainder will not be 0. | 1325   // Deoptimize if remainder will not be 0. | 
| 1326   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | 1326   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | 
| 1327       divisor != 1 && divisor != -1) { | 1327       divisor != 1 && divisor != -1) { | 
| 1328     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1328     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 
| 1329     __ tst(dividend, Operand(mask)); | 1329     __ tst(dividend, Operand(mask)); | 
| 1330     DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); | 1330     DeoptimizeIf(ne, instr, "lost precision"); | 
| 1331   } | 1331   } | 
| 1332 | 1332 | 
| 1333   if (divisor == -1) {  // Nice shortcut, not needed for correctness. | 1333   if (divisor == -1) {  // Nice shortcut, not needed for correctness. | 
| 1334     __ rsb(result, dividend, Operand(0)); | 1334     __ rsb(result, dividend, Operand(0)); | 
| 1335     return; | 1335     return; | 
| 1336   } | 1336   } | 
| 1337   int32_t shift = WhichPowerOf2Abs(divisor); | 1337   int32_t shift = WhichPowerOf2Abs(divisor); | 
| 1338   if (shift == 0) { | 1338   if (shift == 0) { | 
| 1339     __ mov(result, dividend); | 1339     __ mov(result, dividend); | 
| 1340   } else if (shift == 1) { | 1340   } else if (shift == 1) { | 
| 1341     __ add(result, dividend, Operand(dividend, LSR, 31)); | 1341     __ add(result, dividend, Operand(dividend, LSR, 31)); | 
| 1342   } else { | 1342   } else { | 
| 1343     __ mov(result, Operand(dividend, ASR, 31)); | 1343     __ mov(result, Operand(dividend, ASR, 31)); | 
| 1344     __ add(result, dividend, Operand(result, LSR, 32 - shift)); | 1344     __ add(result, dividend, Operand(result, LSR, 32 - shift)); | 
| 1345   } | 1345   } | 
| 1346   if (shift > 0) __ mov(result, Operand(result, ASR, shift)); | 1346   if (shift > 0) __ mov(result, Operand(result, ASR, shift)); | 
| 1347   if (divisor < 0) __ rsb(result, result, Operand(0)); | 1347   if (divisor < 0) __ rsb(result, result, Operand(0)); | 
| 1348 } | 1348 } | 
| 1349 | 1349 | 
| 1350 | 1350 | 
| 1351 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 1351 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 
| 1352   Register dividend = ToRegister(instr->dividend()); | 1352   Register dividend = ToRegister(instr->dividend()); | 
| 1353   int32_t divisor = instr->divisor(); | 1353   int32_t divisor = instr->divisor(); | 
| 1354   Register result = ToRegister(instr->result()); | 1354   Register result = ToRegister(instr->result()); | 
| 1355   DCHECK(!dividend.is(result)); | 1355   DCHECK(!dividend.is(result)); | 
| 1356 | 1356 | 
| 1357   if (divisor == 0) { | 1357   if (divisor == 0) { | 
| 1358     DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); | 1358     DeoptimizeIf(al, instr, "division by zero"); | 
| 1359     return; | 1359     return; | 
| 1360   } | 1360   } | 
| 1361 | 1361 | 
| 1362   // Check for (0 / -x) that will produce negative zero. | 1362   // Check for (0 / -x) that will produce negative zero. | 
| 1363   HDiv* hdiv = instr->hydrogen(); | 1363   HDiv* hdiv = instr->hydrogen(); | 
| 1364   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1364   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 
| 1365     __ cmp(dividend, Operand::Zero()); | 1365     __ cmp(dividend, Operand::Zero()); | 
| 1366     DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1366     DeoptimizeIf(eq, instr, "minus zero"); | 
| 1367   } | 1367   } | 
| 1368 | 1368 | 
| 1369   __ TruncatingDiv(result, dividend, Abs(divisor)); | 1369   __ TruncatingDiv(result, dividend, Abs(divisor)); | 
| 1370   if (divisor < 0) __ rsb(result, result, Operand::Zero()); | 1370   if (divisor < 0) __ rsb(result, result, Operand::Zero()); | 
| 1371 | 1371 | 
| 1372   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1372   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 
| 1373     __ mov(ip, Operand(divisor)); | 1373     __ mov(ip, Operand(divisor)); | 
| 1374     __ smull(scratch0(), ip, result, ip); | 1374     __ smull(scratch0(), ip, result, ip); | 
| 1375     __ sub(scratch0(), scratch0(), dividend, SetCC); | 1375     __ sub(scratch0(), scratch0(), dividend, SetCC); | 
| 1376     DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); | 1376     DeoptimizeIf(ne, instr, "lost precision"); | 
| 1377   } | 1377   } | 
| 1378 } | 1378 } | 
| 1379 | 1379 | 
| 1380 | 1380 | 
| 1381 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 1381 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 
| 1382 void LCodeGen::DoDivI(LDivI* instr) { | 1382 void LCodeGen::DoDivI(LDivI* instr) { | 
| 1383   HBinaryOperation* hdiv = instr->hydrogen(); | 1383   HBinaryOperation* hdiv = instr->hydrogen(); | 
| 1384   Register dividend = ToRegister(instr->dividend()); | 1384   Register dividend = ToRegister(instr->dividend()); | 
| 1385   Register divisor = ToRegister(instr->divisor()); | 1385   Register divisor = ToRegister(instr->divisor()); | 
| 1386   Register result = ToRegister(instr->result()); | 1386   Register result = ToRegister(instr->result()); | 
| 1387 | 1387 | 
| 1388   // Check for x / 0. | 1388   // Check for x / 0. | 
| 1389   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1389   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 
| 1390     __ cmp(divisor, Operand::Zero()); | 1390     __ cmp(divisor, Operand::Zero()); | 
| 1391     DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); | 1391     DeoptimizeIf(eq, instr, "division by zero"); | 
| 1392   } | 1392   } | 
| 1393 | 1393 | 
| 1394   // Check for (0 / -x) that will produce negative zero. | 1394   // Check for (0 / -x) that will produce negative zero. | 
| 1395   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1395   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
| 1396     Label positive; | 1396     Label positive; | 
| 1397     if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { | 1397     if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { | 
| 1398       // Do the test only if it hadn't be done above. | 1398       // Do the test only if it hadn't be done above. | 
| 1399       __ cmp(divisor, Operand::Zero()); | 1399       __ cmp(divisor, Operand::Zero()); | 
| 1400     } | 1400     } | 
| 1401     __ b(pl, &positive); | 1401     __ b(pl, &positive); | 
| 1402     __ cmp(dividend, Operand::Zero()); | 1402     __ cmp(dividend, Operand::Zero()); | 
| 1403     DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1403     DeoptimizeIf(eq, instr, "minus zero"); | 
| 1404     __ bind(&positive); | 1404     __ bind(&positive); | 
| 1405   } | 1405   } | 
| 1406 | 1406 | 
| 1407   // Check for (kMinInt / -1). | 1407   // Check for (kMinInt / -1). | 
| 1408   if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1408   if (hdiv->CheckFlag(HValue::kCanOverflow) && | 
| 1409       (!CpuFeatures::IsSupported(SUDIV) || | 1409       (!CpuFeatures::IsSupported(SUDIV) || | 
| 1410        !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { | 1410        !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { | 
| 1411     // We don't need to check for overflow when truncating with sdiv | 1411     // We don't need to check for overflow when truncating with sdiv | 
| 1412     // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. | 1412     // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. | 
| 1413     __ cmp(dividend, Operand(kMinInt)); | 1413     __ cmp(dividend, Operand(kMinInt)); | 
| 1414     __ cmp(divisor, Operand(-1), eq); | 1414     __ cmp(divisor, Operand(-1), eq); | 
| 1415     DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); | 1415     DeoptimizeIf(eq, instr, "overflow"); | 
| 1416   } | 1416   } | 
| 1417 | 1417 | 
| 1418   if (CpuFeatures::IsSupported(SUDIV)) { | 1418   if (CpuFeatures::IsSupported(SUDIV)) { | 
| 1419     CpuFeatureScope scope(masm(), SUDIV); | 1419     CpuFeatureScope scope(masm(), SUDIV); | 
| 1420     __ sdiv(result, dividend, divisor); | 1420     __ sdiv(result, dividend, divisor); | 
| 1421   } else { | 1421   } else { | 
| 1422     DoubleRegister vleft = ToDoubleRegister(instr->temp()); | 1422     DoubleRegister vleft = ToDoubleRegister(instr->temp()); | 
| 1423     DoubleRegister vright = double_scratch0(); | 1423     DoubleRegister vright = double_scratch0(); | 
| 1424     __ vmov(double_scratch0().low(), dividend); | 1424     __ vmov(double_scratch0().low(), dividend); | 
| 1425     __ vcvt_f64_s32(vleft, double_scratch0().low()); | 1425     __ vcvt_f64_s32(vleft, double_scratch0().low()); | 
| 1426     __ vmov(double_scratch0().low(), divisor); | 1426     __ vmov(double_scratch0().low(), divisor); | 
| 1427     __ vcvt_f64_s32(vright, double_scratch0().low()); | 1427     __ vcvt_f64_s32(vright, double_scratch0().low()); | 
| 1428     __ vdiv(vleft, vleft, vright);  // vleft now contains the result. | 1428     __ vdiv(vleft, vleft, vright);  // vleft now contains the result. | 
| 1429     __ vcvt_s32_f64(double_scratch0().low(), vleft); | 1429     __ vcvt_s32_f64(double_scratch0().low(), vleft); | 
| 1430     __ vmov(result, double_scratch0().low()); | 1430     __ vmov(result, double_scratch0().low()); | 
| 1431   } | 1431   } | 
| 1432 | 1432 | 
| 1433   if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1433   if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 
| 1434     // Compute remainder and deopt if it's not zero. | 1434     // Compute remainder and deopt if it's not zero. | 
| 1435     Register remainder = scratch0(); | 1435     Register remainder = scratch0(); | 
| 1436     __ Mls(remainder, result, divisor, dividend); | 1436     __ Mls(remainder, result, divisor, dividend); | 
| 1437     __ cmp(remainder, Operand::Zero()); | 1437     __ cmp(remainder, Operand::Zero()); | 
| 1438     DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); | 1438     DeoptimizeIf(ne, instr, "lost precision"); | 
| 1439   } | 1439   } | 
| 1440 } | 1440 } | 
| 1441 | 1441 | 
| 1442 | 1442 | 
| 1443 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { | 1443 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { | 
| 1444   DwVfpRegister addend = ToDoubleRegister(instr->addend()); | 1444   DwVfpRegister addend = ToDoubleRegister(instr->addend()); | 
| 1445   DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); | 1445   DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); | 
| 1446   DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); | 1446   DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); | 
| 1447 | 1447 | 
| 1448   // This is computed in-place. | 1448   // This is computed in-place. | 
| (...skipping 30 matching lines...) | (...skipping 30 matching lines...) |
| 1479   // can simply do an arithmetic right shift. | 1479   // can simply do an arithmetic right shift. | 
| 1480   int32_t shift = WhichPowerOf2Abs(divisor); | 1480   int32_t shift = WhichPowerOf2Abs(divisor); | 
| 1481   if (divisor > 1) { | 1481   if (divisor > 1) { | 
| 1482     __ mov(result, Operand(dividend, ASR, shift)); | 1482     __ mov(result, Operand(dividend, ASR, shift)); | 
| 1483     return; | 1483     return; | 
| 1484   } | 1484   } | 
| 1485 | 1485 | 
| 1486   // If the divisor is negative, we have to negate and handle edge cases. | 1486   // If the divisor is negative, we have to negate and handle edge cases. | 
| 1487   __ rsb(result, dividend, Operand::Zero(), SetCC); | 1487   __ rsb(result, dividend, Operand::Zero(), SetCC); | 
| 1488   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1488   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
| 1489     DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1489     DeoptimizeIf(eq, instr, "minus zero"); | 
| 1490   } | 1490   } | 
| 1491 | 1491 | 
| 1492   // Dividing by -1 is basically negation, unless we overflow. | 1492   // Dividing by -1 is basically negation, unless we overflow. | 
| 1493   if (divisor == -1) { | 1493   if (divisor == -1) { | 
| 1494     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1494     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 
| 1495       DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 1495       DeoptimizeIf(vs, instr, "overflow"); | 
| 1496     } | 1496     } | 
| 1497     return; | 1497     return; | 
| 1498   } | 1498   } | 
| 1499 | 1499 | 
| 1500   // If the negation could not overflow, simply shifting is OK. | 1500   // If the negation could not overflow, simply shifting is OK. | 
| 1501   if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1501   if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 
| 1502     __ mov(result, Operand(result, ASR, shift)); | 1502     __ mov(result, Operand(result, ASR, shift)); | 
| 1503     return; | 1503     return; | 
| 1504   } | 1504   } | 
| 1505 | 1505 | 
| 1506   __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); | 1506   __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); | 
| 1507   __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); | 1507   __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); | 
| 1508 } | 1508 } | 
| 1509 | 1509 | 
| 1510 | 1510 | 
| 1511 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 1511 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 
| 1512   Register dividend = ToRegister(instr->dividend()); | 1512   Register dividend = ToRegister(instr->dividend()); | 
| 1513   int32_t divisor = instr->divisor(); | 1513   int32_t divisor = instr->divisor(); | 
| 1514   Register result = ToRegister(instr->result()); | 1514   Register result = ToRegister(instr->result()); | 
| 1515   DCHECK(!dividend.is(result)); | 1515   DCHECK(!dividend.is(result)); | 
| 1516 | 1516 | 
| 1517   if (divisor == 0) { | 1517   if (divisor == 0) { | 
| 1518     DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); | 1518     DeoptimizeIf(al, instr, "division by zero"); | 
| 1519     return; | 1519     return; | 
| 1520   } | 1520   } | 
| 1521 | 1521 | 
| 1522   // Check for (0 / -x) that will produce negative zero. | 1522   // Check for (0 / -x) that will produce negative zero. | 
| 1523   HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1523   HMathFloorOfDiv* hdiv = instr->hydrogen(); | 
| 1524   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1524   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 
| 1525     __ cmp(dividend, Operand::Zero()); | 1525     __ cmp(dividend, Operand::Zero()); | 
| 1526     DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1526     DeoptimizeIf(eq, instr, "minus zero"); | 
| 1527   } | 1527   } | 
| 1528 | 1528 | 
| 1529   // Easy case: We need no dynamic check for the dividend and the flooring | 1529   // Easy case: We need no dynamic check for the dividend and the flooring | 
| 1530   // division is the same as the truncating division. | 1530   // division is the same as the truncating division. | 
| 1531   if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1531   if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 
| 1532       (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1532       (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 
| 1533     __ TruncatingDiv(result, dividend, Abs(divisor)); | 1533     __ TruncatingDiv(result, dividend, Abs(divisor)); | 
| 1534     if (divisor < 0) __ rsb(result, result, Operand::Zero()); | 1534     if (divisor < 0) __ rsb(result, result, Operand::Zero()); | 
| 1535     return; | 1535     return; | 
| 1536   } | 1536   } | 
| (...skipping 20 matching lines...) | (...skipping 20 matching lines...) |
| 1557 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. | 1557 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. | 
| 1558 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { | 1558 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { | 
| 1559   HBinaryOperation* hdiv = instr->hydrogen(); | 1559   HBinaryOperation* hdiv = instr->hydrogen(); | 
| 1560   Register left = ToRegister(instr->dividend()); | 1560   Register left = ToRegister(instr->dividend()); | 
| 1561   Register right = ToRegister(instr->divisor()); | 1561   Register right = ToRegister(instr->divisor()); | 
| 1562   Register result = ToRegister(instr->result()); | 1562   Register result = ToRegister(instr->result()); | 
| 1563 | 1563 | 
| 1564   // Check for x / 0. | 1564   // Check for x / 0. | 
| 1565   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1565   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 
| 1566     __ cmp(right, Operand::Zero()); | 1566     __ cmp(right, Operand::Zero()); | 
| 1567     DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); | 1567     DeoptimizeIf(eq, instr, "division by zero"); | 
| 1568   } | 1568   } | 
| 1569 | 1569 | 
| 1570   // Check for (0 / -x) that will produce negative zero. | 1570   // Check for (0 / -x) that will produce negative zero. | 
| 1571   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1571   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
| 1572     Label positive; | 1572     Label positive; | 
| 1573     if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { | 1573     if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { | 
| 1574       // Do the test only if it hadn't be done above. | 1574       // Do the test only if it hadn't be done above. | 
| 1575       __ cmp(right, Operand::Zero()); | 1575       __ cmp(right, Operand::Zero()); | 
| 1576     } | 1576     } | 
| 1577     __ b(pl, &positive); | 1577     __ b(pl, &positive); | 
| 1578     __ cmp(left, Operand::Zero()); | 1578     __ cmp(left, Operand::Zero()); | 
| 1579     DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1579     DeoptimizeIf(eq, instr, "minus zero"); | 
| 1580     __ bind(&positive); | 1580     __ bind(&positive); | 
| 1581   } | 1581   } | 
| 1582 | 1582 | 
| 1583   // Check for (kMinInt / -1). | 1583   // Check for (kMinInt / -1). | 
| 1584   if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1584   if (hdiv->CheckFlag(HValue::kCanOverflow) && | 
| 1585       (!CpuFeatures::IsSupported(SUDIV) || | 1585       (!CpuFeatures::IsSupported(SUDIV) || | 
| 1586        !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { | 1586        !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { | 
| 1587     // We don't need to check for overflow when truncating with sdiv | 1587     // We don't need to check for overflow when truncating with sdiv | 
| 1588     // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. | 1588     // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. | 
| 1589     __ cmp(left, Operand(kMinInt)); | 1589     __ cmp(left, Operand(kMinInt)); | 
| 1590     __ cmp(right, Operand(-1), eq); | 1590     __ cmp(right, Operand(-1), eq); | 
| 1591     DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); | 1591     DeoptimizeIf(eq, instr, "overflow"); | 
| 1592   } | 1592   } | 
| 1593 | 1593 | 
| 1594   if (CpuFeatures::IsSupported(SUDIV)) { | 1594   if (CpuFeatures::IsSupported(SUDIV)) { | 
| 1595     CpuFeatureScope scope(masm(), SUDIV); | 1595     CpuFeatureScope scope(masm(), SUDIV); | 
| 1596     __ sdiv(result, left, right); | 1596     __ sdiv(result, left, right); | 
| 1597   } else { | 1597   } else { | 
| 1598     DoubleRegister vleft = ToDoubleRegister(instr->temp()); | 1598     DoubleRegister vleft = ToDoubleRegister(instr->temp()); | 
| 1599     DoubleRegister vright = double_scratch0(); | 1599     DoubleRegister vright = double_scratch0(); | 
| 1600     __ vmov(double_scratch0().low(), left); | 1600     __ vmov(double_scratch0().low(), left); | 
| 1601     __ vcvt_f64_s32(vleft, double_scratch0().low()); | 1601     __ vcvt_f64_s32(vleft, double_scratch0().low()); | 
| (...skipping 25 matching lines...) | (...skipping 25 matching lines...) |
| 1627     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 1627     instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 
| 1628   bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1628   bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 
| 1629 | 1629 | 
| 1630   if (right_op->IsConstantOperand()) { | 1630   if (right_op->IsConstantOperand()) { | 
| 1631     int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | 1631     int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | 
| 1632 | 1632 | 
| 1633     if (bailout_on_minus_zero && (constant < 0)) { | 1633     if (bailout_on_minus_zero && (constant < 0)) { | 
| 1634       // The case of a null constant will be handled separately. | 1634       // The case of a null constant will be handled separately. | 
| 1635       // If constant is negative and left is null, the result should be -0. | 1635       // If constant is negative and left is null, the result should be -0. | 
| 1636       __ cmp(left, Operand::Zero()); | 1636       __ cmp(left, Operand::Zero()); | 
| 1637       DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1637       DeoptimizeIf(eq, instr, "minus zero"); | 
| 1638     } | 1638     } | 
| 1639 | 1639 | 
| 1640     switch (constant) { | 1640     switch (constant) { | 
| 1641       case -1: | 1641       case -1: | 
| 1642         if (overflow) { | 1642         if (overflow) { | 
| 1643           __ rsb(result, left, Operand::Zero(), SetCC); | 1643           __ rsb(result, left, Operand::Zero(), SetCC); | 
| 1644           DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 1644           DeoptimizeIf(vs, instr, "overflow"); | 
| 1645         } else { | 1645         } else { | 
| 1646           __ rsb(result, left, Operand::Zero()); | 1646           __ rsb(result, left, Operand::Zero()); | 
| 1647         } | 1647         } | 
| 1648         break; | 1648         break; | 
| 1649       case 0: | 1649       case 0: | 
| 1650         if (bailout_on_minus_zero) { | 1650         if (bailout_on_minus_zero) { | 
| 1651           // If left is strictly negative and the constant is null, the | 1651           // If left is strictly negative and the constant is null, the | 
| 1652           // result is -0. Deoptimize if required, otherwise return 0. | 1652           // result is -0. Deoptimize if required, otherwise return 0. | 
| 1653           __ cmp(left, Operand::Zero()); | 1653           __ cmp(left, Operand::Zero()); | 
| 1654           DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); | 1654           DeoptimizeIf(mi, instr, "minus zero"); | 
| 1655         } | 1655         } | 
| 1656         __ mov(result, Operand::Zero()); | 1656         __ mov(result, Operand::Zero()); | 
| 1657         break; | 1657         break; | 
| 1658       case 1: | 1658       case 1: | 
| 1659         __ Move(result, left); | 1659         __ Move(result, left); | 
| 1660         break; | 1660         break; | 
| 1661       default: | 1661       default: | 
| 1662         // Multiplying by powers of two and powers of two plus or minus | 1662         // Multiplying by powers of two and powers of two plus or minus | 
| 1663         // one can be done faster with shifted operands. | 1663         // one can be done faster with shifted operands. | 
| 1664         // For other constants we emit standard code. | 1664         // For other constants we emit standard code. | 
| (...skipping 29 matching lines...) | (...skipping 29 matching lines...) |
| 1694     if (overflow) { | 1694     if (overflow) { | 
| 1695       Register scratch = scratch0(); | 1695       Register scratch = scratch0(); | 
| 1696       // scratch:result = left * right. | 1696       // scratch:result = left * right. | 
| 1697       if (instr->hydrogen()->representation().IsSmi()) { | 1697       if (instr->hydrogen()->representation().IsSmi()) { | 
| 1698         __ SmiUntag(result, left); | 1698         __ SmiUntag(result, left); | 
| 1699         __ smull(result, scratch, result, right); | 1699         __ smull(result, scratch, result, right); | 
| 1700       } else { | 1700       } else { | 
| 1701         __ smull(result, scratch, left, right); | 1701         __ smull(result, scratch, left, right); | 
| 1702       } | 1702       } | 
| 1703       __ cmp(scratch, Operand(result, ASR, 31)); | 1703       __ cmp(scratch, Operand(result, ASR, 31)); | 
| 1704       DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); | 1704       DeoptimizeIf(ne, instr, "overflow"); | 
| 1705     } else { | 1705     } else { | 
| 1706       if (instr->hydrogen()->representation().IsSmi()) { | 1706       if (instr->hydrogen()->representation().IsSmi()) { | 
| 1707         __ SmiUntag(result, left); | 1707         __ SmiUntag(result, left); | 
| 1708         __ mul(result, result, right); | 1708         __ mul(result, result, right); | 
| 1709       } else { | 1709       } else { | 
| 1710         __ mul(result, left, right); | 1710         __ mul(result, left, right); | 
| 1711       } | 1711       } | 
| 1712     } | 1712     } | 
| 1713 | 1713 | 
| 1714     if (bailout_on_minus_zero) { | 1714     if (bailout_on_minus_zero) { | 
| 1715       Label done; | 1715       Label done; | 
| 1716       __ teq(left, Operand(right)); | 1716       __ teq(left, Operand(right)); | 
| 1717       __ b(pl, &done); | 1717       __ b(pl, &done); | 
| 1718       // Bail out if the result is minus zero. | 1718       // Bail out if the result is minus zero. | 
| 1719       __ cmp(result, Operand::Zero()); | 1719       __ cmp(result, Operand::Zero()); | 
| 1720       DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 1720       DeoptimizeIf(eq, instr, "minus zero"); | 
| 1721       __ bind(&done); | 1721       __ bind(&done); | 
| 1722     } | 1722     } | 
| 1723   } | 1723   } | 
| 1724 } | 1724 } | 
| 1725 | 1725 | 
| 1726 | 1726 | 
| 1727 void LCodeGen::DoBitI(LBitI* instr) { | 1727 void LCodeGen::DoBitI(LBitI* instr) { | 
| 1728   LOperand* left_op = instr->left(); | 1728   LOperand* left_op = instr->left(); | 
| 1729   LOperand* right_op = instr->right(); | 1729   LOperand* right_op = instr->right(); | 
| 1730   DCHECK(left_op->IsRegister()); | 1730   DCHECK(left_op->IsRegister()); | 
| (...skipping 42 matching lines...) | (...skipping 42 matching lines...) |
| 1773     switch (instr->op()) { | 1773     switch (instr->op()) { | 
| 1774       case Token::ROR: | 1774       case Token::ROR: | 
| 1775         __ mov(result, Operand(left, ROR, scratch)); | 1775         __ mov(result, Operand(left, ROR, scratch)); | 
| 1776         break; | 1776         break; | 
| 1777       case Token::SAR: | 1777       case Token::SAR: | 
| 1778         __ mov(result, Operand(left, ASR, scratch)); | 1778         __ mov(result, Operand(left, ASR, scratch)); | 
| 1779         break; | 1779         break; | 
| 1780       case Token::SHR: | 1780       case Token::SHR: | 
| 1781         if (instr->can_deopt()) { | 1781         if (instr->can_deopt()) { | 
| 1782           __ mov(result, Operand(left, LSR, scratch), SetCC); | 1782           __ mov(result, Operand(left, LSR, scratch), SetCC); | 
| 1783           DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue); | 1783           DeoptimizeIf(mi, instr, "negative value"); | 
| 1784         } else { | 1784         } else { | 
| 1785           __ mov(result, Operand(left, LSR, scratch)); | 1785           __ mov(result, Operand(left, LSR, scratch)); | 
| 1786         } | 1786         } | 
| 1787         break; | 1787         break; | 
| 1788       case Token::SHL: | 1788       case Token::SHL: | 
| 1789         __ mov(result, Operand(left, LSL, scratch)); | 1789         __ mov(result, Operand(left, LSL, scratch)); | 
| 1790         break; | 1790         break; | 
| 1791       default: | 1791       default: | 
| 1792         UNREACHABLE(); | 1792         UNREACHABLE(); | 
| 1793         break; | 1793         break; | 
| (...skipping 16 matching lines...) | (...skipping 16 matching lines...) |
| 1810         } else { | 1810         } else { | 
| 1811           __ Move(result, left); | 1811           __ Move(result, left); | 
| 1812         } | 1812         } | 
| 1813         break; | 1813         break; | 
| 1814       case Token::SHR: | 1814       case Token::SHR: | 
| 1815         if (shift_count != 0) { | 1815         if (shift_count != 0) { | 
| 1816           __ mov(result, Operand(left, LSR, shift_count)); | 1816           __ mov(result, Operand(left, LSR, shift_count)); | 
| 1817         } else { | 1817         } else { | 
| 1818           if (instr->can_deopt()) { | 1818           if (instr->can_deopt()) { | 
| 1819             __ tst(left, Operand(0x80000000)); | 1819             __ tst(left, Operand(0x80000000)); | 
| 1820             DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue); | 1820             DeoptimizeIf(ne, instr, "negative value"); | 
| 1821           } | 1821           } | 
| 1822           __ Move(result, left); | 1822           __ Move(result, left); | 
| 1823         } | 1823         } | 
| 1824         break; | 1824         break; | 
| 1825       case Token::SHL: | 1825       case Token::SHL: | 
| 1826         if (shift_count != 0) { | 1826         if (shift_count != 0) { | 
| 1827           if (instr->hydrogen_value()->representation().IsSmi() && | 1827           if (instr->hydrogen_value()->representation().IsSmi() && | 
| 1828               instr->can_deopt()) { | 1828               instr->can_deopt()) { | 
| 1829             if (shift_count != 1) { | 1829             if (shift_count != 1) { | 
| 1830               __ mov(result, Operand(left, LSL, shift_count - 1)); | 1830               __ mov(result, Operand(left, LSL, shift_count - 1)); | 
| 1831               __ SmiTag(result, result, SetCC); | 1831               __ SmiTag(result, result, SetCC); | 
| 1832             } else { | 1832             } else { | 
| 1833               __ SmiTag(result, left, SetCC); | 1833               __ SmiTag(result, left, SetCC); | 
| 1834             } | 1834             } | 
| 1835             DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 1835             DeoptimizeIf(vs, instr, "overflow"); | 
| 1836           } else { | 1836           } else { | 
| 1837             __ mov(result, Operand(left, LSL, shift_count)); | 1837             __ mov(result, Operand(left, LSL, shift_count)); | 
| 1838           } | 1838           } | 
| 1839         } else { | 1839         } else { | 
| 1840           __ Move(result, left); | 1840           __ Move(result, left); | 
| 1841         } | 1841         } | 
| 1842         break; | 1842         break; | 
| 1843       default: | 1843       default: | 
| 1844         UNREACHABLE(); | 1844         UNREACHABLE(); | 
| 1845         break; | 1845         break; | 
| (...skipping 11 matching lines...) | (...skipping 11 matching lines...) |
| 1857 | 1857 | 
| 1858   if (right->IsStackSlot()) { | 1858   if (right->IsStackSlot()) { | 
| 1859     Register right_reg = EmitLoadRegister(right, ip); | 1859     Register right_reg = EmitLoadRegister(right, ip); | 
| 1860     __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 1860     __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 
| 1861   } else { | 1861   } else { | 
| 1862     DCHECK(right->IsRegister() || right->IsConstantOperand()); | 1862     DCHECK(right->IsRegister() || right->IsConstantOperand()); | 
| 1863     __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 1863     __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 
| 1864   } | 1864   } | 
| 1865 | 1865 | 
| 1866   if (can_overflow) { | 1866   if (can_overflow) { | 
| 1867     DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 1867     DeoptimizeIf(vs, instr, "overflow"); | 
| 1868   } | 1868   } | 
| 1869 } | 1869 } | 
| 1870 | 1870 | 
| 1871 | 1871 | 
| 1872 void LCodeGen::DoRSubI(LRSubI* instr) { | 1872 void LCodeGen::DoRSubI(LRSubI* instr) { | 
| 1873   LOperand* left = instr->left(); | 1873   LOperand* left = instr->left(); | 
| 1874   LOperand* right = instr->right(); | 1874   LOperand* right = instr->right(); | 
| 1875   LOperand* result = instr->result(); | 1875   LOperand* result = instr->result(); | 
| 1876   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1876   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 
| 1877   SBit set_cond = can_overflow ? SetCC : LeaveCC; | 1877   SBit set_cond = can_overflow ? SetCC : LeaveCC; | 
| 1878 | 1878 | 
| 1879   if (right->IsStackSlot()) { | 1879   if (right->IsStackSlot()) { | 
| 1880     Register right_reg = EmitLoadRegister(right, ip); | 1880     Register right_reg = EmitLoadRegister(right, ip); | 
| 1881     __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 1881     __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 
| 1882   } else { | 1882   } else { | 
| 1883     DCHECK(right->IsRegister() || right->IsConstantOperand()); | 1883     DCHECK(right->IsRegister() || right->IsConstantOperand()); | 
| 1884     __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 1884     __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 
| 1885   } | 1885   } | 
| 1886 | 1886 | 
| 1887   if (can_overflow) { | 1887   if (can_overflow) { | 
| 1888     DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 1888     DeoptimizeIf(vs, instr, "overflow"); | 
| 1889   } | 1889   } | 
| 1890 } | 1890 } | 
| 1891 | 1891 | 
| 1892 | 1892 | 
| 1893 void LCodeGen::DoConstantI(LConstantI* instr) { | 1893 void LCodeGen::DoConstantI(LConstantI* instr) { | 
| 1894   __ mov(ToRegister(instr->result()), Operand(instr->value())); | 1894   __ mov(ToRegister(instr->result()), Operand(instr->value())); | 
| 1895 } | 1895 } | 
| 1896 | 1896 | 
| 1897 | 1897 | 
| 1898 void LCodeGen::DoConstantS(LConstantS* instr) { | 1898 void LCodeGen::DoConstantS(LConstantS* instr) { | 
| (...skipping 47 matching lines...) | (...skipping 47 matching lines...) |
| 1946   Register result = ToRegister(instr->result()); | 1946   Register result = ToRegister(instr->result()); | 
| 1947   Register scratch = ToRegister(instr->temp()); | 1947   Register scratch = ToRegister(instr->temp()); | 
| 1948   Smi* index = instr->index(); | 1948   Smi* index = instr->index(); | 
| 1949   Label runtime, done; | 1949   Label runtime, done; | 
| 1950   DCHECK(object.is(result)); | 1950   DCHECK(object.is(result)); | 
| 1951   DCHECK(object.is(r0)); | 1951   DCHECK(object.is(r0)); | 
| 1952   DCHECK(!scratch.is(scratch0())); | 1952   DCHECK(!scratch.is(scratch0())); | 
| 1953   DCHECK(!scratch.is(object)); | 1953   DCHECK(!scratch.is(object)); | 
| 1954 | 1954 | 
| 1955   __ SmiTst(object); | 1955   __ SmiTst(object); | 
| 1956   DeoptimizeIf(eq, instr, Deoptimizer::kSmi); | 1956   DeoptimizeIf(eq, instr, "Smi"); | 
| 1957   __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); | 1957   __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); | 
| 1958   DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject); | 1958   DeoptimizeIf(ne, instr, "not a date object"); | 
| 1959 | 1959 | 
| 1960   if (index->value() == 0) { | 1960   if (index->value() == 0) { | 
| 1961     __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); | 1961     __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); | 
| 1962   } else { | 1962   } else { | 
| 1963     if (index->value() < JSDate::kFirstUncachedField) { | 1963     if (index->value() < JSDate::kFirstUncachedField) { | 
| 1964       ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | 1964       ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | 
| 1965       __ mov(scratch, Operand(stamp)); | 1965       __ mov(scratch, Operand(stamp)); | 
| 1966       __ ldr(scratch, MemOperand(scratch)); | 1966       __ ldr(scratch, MemOperand(scratch)); | 
| 1967       __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); | 1967       __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); | 
| 1968       __ cmp(scratch, scratch0()); | 1968       __ cmp(scratch, scratch0()); | 
| (...skipping 96 matching lines...) |
| 2065 | 2065 | 
| 2066   if (right->IsStackSlot()) { | 2066   if (right->IsStackSlot()) { | 
| 2067     Register right_reg = EmitLoadRegister(right, ip); | 2067     Register right_reg = EmitLoadRegister(right, ip); | 
| 2068     __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 2068     __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 
| 2069   } else { | 2069   } else { | 
| 2070     DCHECK(right->IsRegister() || right->IsConstantOperand()); | 2070     DCHECK(right->IsRegister() || right->IsConstantOperand()); | 
| 2071     __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 2071     __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 
| 2072   } | 2072   } | 
| 2073 | 2073 | 
| 2074   if (can_overflow) { | 2074   if (can_overflow) { | 
| 2075     DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 2075     DeoptimizeIf(vs, instr, "overflow"); | 
| 2076   } | 2076   } | 
| 2077 } | 2077 } | 
| 2078 | 2078 | 
| 2079 | 2079 | 
| 2080 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 2080 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 
| 2081   LOperand* left = instr->left(); | 2081   LOperand* left = instr->left(); | 
| 2082   LOperand* right = instr->right(); | 2082   LOperand* right = instr->right(); | 
| 2083   HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 2083   HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 
| 2084   if (instr->hydrogen()->representation().IsSmiOrInteger32()) { | 2084   if (instr->hydrogen()->representation().IsSmiOrInteger32()) { | 
| 2085     Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; | 2085     Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; | 
| (...skipping 205 matching lines...) |
| 2291       } | 2291       } | 
| 2292 | 2292 | 
| 2293       if (expected.Contains(ToBooleanStub::SMI)) { | 2293       if (expected.Contains(ToBooleanStub::SMI)) { | 
| 2294         // Smis: 0 -> false, all other -> true. | 2294         // Smis: 0 -> false, all other -> true. | 
| 2295         __ cmp(reg, Operand::Zero()); | 2295         __ cmp(reg, Operand::Zero()); | 
| 2296         __ b(eq, instr->FalseLabel(chunk_)); | 2296         __ b(eq, instr->FalseLabel(chunk_)); | 
| 2297         __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | 2297         __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | 
| 2298       } else if (expected.NeedsMap()) { | 2298       } else if (expected.NeedsMap()) { | 
| 2299         // If we need a map later and have a Smi -> deopt. | 2299         // If we need a map later and have a Smi -> deopt. | 
| 2300         __ SmiTst(reg); | 2300         __ SmiTst(reg); | 
| 2301         DeoptimizeIf(eq, instr, Deoptimizer::kSmi); | 2301         DeoptimizeIf(eq, instr, "Smi"); | 
| 2302       } | 2302       } | 
| 2303 | 2303 | 
| 2304       const Register map = scratch0(); | 2304       const Register map = scratch0(); | 
| 2305       if (expected.NeedsMap()) { | 2305       if (expected.NeedsMap()) { | 
| 2306         __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset)); | 2306         __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset)); | 
| 2307 | 2307 | 
| 2308         if (expected.CanBeUndetectable()) { | 2308         if (expected.CanBeUndetectable()) { | 
| 2309           // Undetectable -> false. | 2309           // Undetectable -> false. | 
| 2310           __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); | 2310           __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); | 
| 2311           __ tst(ip, Operand(1 << Map::kIsUndetectable)); | 2311           __ tst(ip, Operand(1 << Map::kIsUndetectable)); | 
| (...skipping 35 matching lines...) |
| 2347         __ VFPCompareAndSetFlags(dbl_scratch, 0.0); | 2347         __ VFPCompareAndSetFlags(dbl_scratch, 0.0); | 
| 2348         __ cmp(r0, r0, vs);  // NaN -> false. | 2348         __ cmp(r0, r0, vs);  // NaN -> false. | 
| 2349         __ b(eq, instr->FalseLabel(chunk_));  // +0, -0 -> false. | 2349         __ b(eq, instr->FalseLabel(chunk_));  // +0, -0 -> false. | 
| 2350         __ b(instr->TrueLabel(chunk_)); | 2350         __ b(instr->TrueLabel(chunk_)); | 
| 2351         __ bind(&not_heap_number); | 2351         __ bind(&not_heap_number); |
| 2352       } | 2352       } | 
| 2353 | 2353 | 
| 2354       if (!expected.IsGeneric()) { | 2354       if (!expected.IsGeneric()) { | 
| 2355         // We've seen something for the first time -> deopt. | 2355         // We've seen something for the first time -> deopt. | 
| 2356         // This can only happen if we are not generic already. | 2356         // This can only happen if we are not generic already. | 
| 2357         DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject); | 2357         DeoptimizeIf(al, instr, "unexpected object"); | 
| 2358       } | 2358       } | 
| 2359     } | 2359     } | 
| 2360   } | 2360   } | 
| 2361 } | 2361 } | 
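The branch above encodes JavaScript's ToBoolean for the value kinds the type feedback has seen: smi 0 is false, undetectable objects are false, and a heap number is false exactly when it is NaN or one of the signed zeros. A small C++ sketch of just the heap-number arm (illustrative only; it does not model smis, strings, or undetectable maps):

#include <cmath>

// NaN and both signed zeros are falsy; every other double is truthy.
// Mirrors the "NaN -> false" and "+0, -0 -> false" paths above.
bool DoubleToBoolean(double value) {
  if (std::isnan(value)) return false;
  return value != 0.0;  // +0 and -0 both compare equal to 0.0
}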
| 2362 | 2362 | 
| 2363 | 2363 | 
| 2364 void LCodeGen::EmitGoto(int block) { | 2364 void LCodeGen::EmitGoto(int block) { | 
| 2365   if (!IsNextEmittedBlock(block)) { | 2365   if (!IsNextEmittedBlock(block)) { | 
| 2366     __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2366     __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); | 
| 2367   } | 2367   } | 
| (...skipping 625 matching lines...) |
| 2993 } | 2993 } | 
| 2994 | 2994 | 
| 2995 | 2995 | 
| 2996 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 2996 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 
| 2997   Register result = ToRegister(instr->result()); | 2997   Register result = ToRegister(instr->result()); | 
| 2998   __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); | 2998   __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); | 
| 2999   __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); | 2999   __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); | 
| 3000   if (instr->hydrogen()->RequiresHoleCheck()) { | 3000   if (instr->hydrogen()->RequiresHoleCheck()) { | 
| 3001     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3001     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 
| 3002     __ cmp(result, ip); | 3002     __ cmp(result, ip); | 
| 3003     DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 3003     DeoptimizeIf(eq, instr, "hole"); | 
| 3004   } | 3004   } | 
| 3005 } | 3005 } | 
| 3006 | 3006 | 
| 3007 | 3007 | 
| 3008 template <class T> | 3008 template <class T> | 
| 3009 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | 3009 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | 
| 3010   DCHECK(FLAG_vector_ics); | 3010   DCHECK(FLAG_vector_ics); | 
| 3011   Register vector_register = ToRegister(instr->temp_vector()); | 3011   Register vector_register = ToRegister(instr->temp_vector()); | 
| 3012   Register slot_register = VectorLoadICDescriptor::SlotRegister(); | 3012   Register slot_register = VectorLoadICDescriptor::SlotRegister(); | 
| 3013   DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); | 3013   DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); | 
| (...skipping 34 matching lines...) |
| 3048 | 3048 | 
| 3049   // If the cell we are storing to contains the hole it could have | 3049   // If the cell we are storing to contains the hole it could have | 
| 3050   // been deleted from the property dictionary. In that case, we need | 3050   // been deleted from the property dictionary. In that case, we need | 
| 3051   // to update the property details in the property dictionary to mark | 3051   // to update the property details in the property dictionary to mark | 
| 3052   // it as no longer deleted. | 3052   // it as no longer deleted. | 
| 3053   if (instr->hydrogen()->RequiresHoleCheck()) { | 3053   if (instr->hydrogen()->RequiresHoleCheck()) { | 
| 3054     // We use a temp to check the payload (CompareRoot might clobber ip). | 3054     // We use a temp to check the payload (CompareRoot might clobber ip). | 
| 3055     Register payload = ToRegister(instr->temp()); | 3055     Register payload = ToRegister(instr->temp()); | 
| 3056     __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); | 3056     __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); | 
| 3057     __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); | 3057     __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); | 
| 3058     DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 3058     DeoptimizeIf(eq, instr, "hole"); | 
| 3059   } | 3059   } | 
| 3060 | 3060 | 
| 3061   // Store the value. | 3061   // Store the value. | 
| 3062   __ str(value, FieldMemOperand(cell, Cell::kValueOffset)); | 3062   __ str(value, FieldMemOperand(cell, Cell::kValueOffset)); | 
| 3063   // Cells are always rescanned, so no write barrier here. | 3063   // Cells are always rescanned, so no write barrier here. | 
| 3064 } | 3064 } | 
| 3065 | 3065 | 
| 3066 | 3066 | 
| 3067 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 3067 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 
| 3068   Register context = ToRegister(instr->context()); | 3068   Register context = ToRegister(instr->context()); | 
| 3069   Register result = ToRegister(instr->result()); | 3069   Register result = ToRegister(instr->result()); | 
| 3070   __ ldr(result, ContextOperand(context, instr->slot_index())); | 3070   __ ldr(result, ContextOperand(context, instr->slot_index())); | 
| 3071   if (instr->hydrogen()->RequiresHoleCheck()) { | 3071   if (instr->hydrogen()->RequiresHoleCheck()) { | 
| 3072     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3072     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 
| 3073     __ cmp(result, ip); | 3073     __ cmp(result, ip); | 
| 3074     if (instr->hydrogen()->DeoptimizesOnHole()) { | 3074     if (instr->hydrogen()->DeoptimizesOnHole()) { | 
| 3075       DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 3075       DeoptimizeIf(eq, instr, "hole"); | 
| 3076     } else { | 3076     } else { | 
| 3077       __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); | 3077       __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); | 
| 3078     } | 3078     } | 
| 3079   } | 3079   } | 
| 3080 } | 3080 } | 
| 3081 | 3081 | 
| 3082 | 3082 | 
| 3083 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | 3083 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | 
| 3084   Register context = ToRegister(instr->context()); | 3084   Register context = ToRegister(instr->context()); | 
| 3085   Register value = ToRegister(instr->value()); | 3085   Register value = ToRegister(instr->value()); | 
| 3086   Register scratch = scratch0(); | 3086   Register scratch = scratch0(); | 
| 3087   MemOperand target = ContextOperand(context, instr->slot_index()); | 3087   MemOperand target = ContextOperand(context, instr->slot_index()); | 
| 3088 | 3088 | 
| 3089   Label skip_assignment; | 3089   Label skip_assignment; | 
| 3090 | 3090 | 
| 3091   if (instr->hydrogen()->RequiresHoleCheck()) { | 3091   if (instr->hydrogen()->RequiresHoleCheck()) { | 
| 3092     __ ldr(scratch, target); | 3092     __ ldr(scratch, target); | 
| 3093     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3093     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 
| 3094     __ cmp(scratch, ip); | 3094     __ cmp(scratch, ip); | 
| 3095     if (instr->hydrogen()->DeoptimizesOnHole()) { | 3095     if (instr->hydrogen()->DeoptimizesOnHole()) { | 
| 3096       DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 3096       DeoptimizeIf(eq, instr, "hole"); | 
| 3097     } else { | 3097     } else { | 
| 3098       __ b(ne, &skip_assignment); | 3098       __ b(ne, &skip_assignment); | 
| 3099     } | 3099     } | 
| 3100   } | 3100   } | 
| 3101 | 3101 | 
| 3102   __ str(value, target); | 3102   __ str(value, target); | 
| 3103   if (instr->hydrogen()->NeedsWriteBarrier()) { | 3103   if (instr->hydrogen()->NeedsWriteBarrier()) { | 
| 3104     SmiCheck check_needed = | 3104     SmiCheck check_needed = | 
| 3105         instr->hydrogen()->value()->type().IsHeapObject() | 3105         instr->hydrogen()->value()->type().IsHeapObject() | 
| 3106             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 3106             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 
| (...skipping 60 matching lines...) |
| 3167   Register function = ToRegister(instr->function()); | 3167   Register function = ToRegister(instr->function()); | 
| 3168   Register result = ToRegister(instr->result()); | 3168   Register result = ToRegister(instr->result()); | 
| 3169 | 3169 | 
| 3170   // Get the prototype or initial map from the function. | 3170   // Get the prototype or initial map from the function. | 
| 3171   __ ldr(result, | 3171   __ ldr(result, | 
| 3172          FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 3172          FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 
| 3173 | 3173 | 
| 3174   // Check that the function has a prototype or an initial map. | 3174   // Check that the function has a prototype or an initial map. | 
| 3175   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3175   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 
| 3176   __ cmp(result, ip); | 3176   __ cmp(result, ip); | 
| 3177   DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 3177   DeoptimizeIf(eq, instr, "hole"); | 
| 3178 | 3178 | 
| 3179   // If the function does not have an initial map, we're done. | 3179   // If the function does not have an initial map, we're done. | 
| 3180   Label done; | 3180   Label done; | 
| 3181   __ CompareObjectType(result, scratch, scratch, MAP_TYPE); | 3181   __ CompareObjectType(result, scratch, scratch, MAP_TYPE); | 
| 3182   __ b(ne, &done); | 3182   __ b(ne, &done); | 
| 3183 | 3183 | 
| 3184   // Get the prototype from the initial map. | 3184   // Get the prototype from the initial map. | 
| 3185   __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 3185   __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 
| 3186 | 3186 | 
| 3187   // All done. | 3187   // All done. | 
| (...skipping 105 matching lines...) |
| 3293         break; | 3293         break; | 
| 3294       case EXTERNAL_INT32_ELEMENTS: | 3294       case EXTERNAL_INT32_ELEMENTS: | 
| 3295       case INT32_ELEMENTS: | 3295       case INT32_ELEMENTS: | 
| 3296         __ ldr(result, mem_operand); | 3296         __ ldr(result, mem_operand); | 
| 3297         break; | 3297         break; | 
| 3298       case EXTERNAL_UINT32_ELEMENTS: | 3298       case EXTERNAL_UINT32_ELEMENTS: | 
| 3299       case UINT32_ELEMENTS: | 3299       case UINT32_ELEMENTS: | 
| 3300         __ ldr(result, mem_operand); | 3300         __ ldr(result, mem_operand); | 
| 3301         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 3301         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 
| 3302           __ cmp(result, Operand(0x80000000)); | 3302           __ cmp(result, Operand(0x80000000)); | 
| 3303           DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue); | 3303           DeoptimizeIf(cs, instr, "negative value"); | 
| 3304         } | 3304         } | 
| 3305         break; | 3305         break; | 
| 3306       case FLOAT32_ELEMENTS: | 3306       case FLOAT32_ELEMENTS: | 
| 3307       case FLOAT64_ELEMENTS: | 3307       case FLOAT64_ELEMENTS: | 
| 3308       case EXTERNAL_FLOAT32_ELEMENTS: | 3308       case EXTERNAL_FLOAT32_ELEMENTS: | 
| 3309       case EXTERNAL_FLOAT64_ELEMENTS: | 3309       case EXTERNAL_FLOAT64_ELEMENTS: | 
| 3310       case FAST_HOLEY_DOUBLE_ELEMENTS: | 3310       case FAST_HOLEY_DOUBLE_ELEMENTS: | 
| 3311       case FAST_HOLEY_ELEMENTS: | 3311       case FAST_HOLEY_ELEMENTS: | 
| 3312       case FAST_HOLEY_SMI_ELEMENTS: | 3312       case FAST_HOLEY_SMI_ELEMENTS: | 
| 3313       case FAST_DOUBLE_ELEMENTS: | 3313       case FAST_DOUBLE_ELEMENTS: | 
| (...skipping 32 matching lines...) |
| 3346     int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | 3346     int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | 
| 3347         ? (element_size_shift - kSmiTagSize) : element_size_shift; | 3347         ? (element_size_shift - kSmiTagSize) : element_size_shift; | 
| 3348     __ add(scratch, scratch, Operand(key, LSL, shift_size)); | 3348     __ add(scratch, scratch, Operand(key, LSL, shift_size)); | 
| 3349   } | 3349   } | 
| 3350 | 3350 | 
| 3351   __ vldr(result, scratch, 0); | 3351   __ vldr(result, scratch, 0); | 
| 3352 | 3352 | 
| 3353   if (instr->hydrogen()->RequiresHoleCheck()) { | 3353   if (instr->hydrogen()->RequiresHoleCheck()) { | 
| 3354     __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); | 3354     __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); | 
| 3355     __ cmp(scratch, Operand(kHoleNanUpper32)); | 3355     __ cmp(scratch, Operand(kHoleNanUpper32)); | 
| 3356     DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 3356     DeoptimizeIf(eq, instr, "hole"); | 
| 3357   } | 3357   } | 
| 3358 } | 3358 } | 
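The hole check above relies on deleted entries of a holey double backing store being written as one specific NaN bit pattern, so loading the upper 32 bits and comparing against kHoleNanUpper32 is enough to detect a hole. A standalone sketch of the idea; the bit pattern below is a stand-in chosen for illustration, not necessarily V8's actual kHoleNan constants:

#include <cstdint>
#include <cstdio>
#include <cstring>

constexpr uint64_t kSketchHoleBits = 0x7FFFFFFFFFFFFFFFull;  // one particular quiet NaN

bool IsHole(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  return (bits >> 32) == (kSketchHoleBits >> 32);  // compare the upper word only
}

int main() {
  double hole;
  std::memcpy(&hole, &kSketchHoleBits, sizeof hole);
  std::printf("%d %d\n", IsHole(hole), IsHole(1.5));  // 1 0
}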
| 3359 | 3359 | 
| 3360 | 3360 | 
| 3361 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3361 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 
| 3362   Register elements = ToRegister(instr->elements()); | 3362   Register elements = ToRegister(instr->elements()); | 
| 3363   Register result = ToRegister(instr->result()); | 3363   Register result = ToRegister(instr->result()); | 
| 3364   Register scratch = scratch0(); | 3364   Register scratch = scratch0(); | 
| 3365   Register store_base = scratch; | 3365   Register store_base = scratch; | 
| 3366   int offset = instr->base_offset(); | 3366   int offset = instr->base_offset(); | 
| (...skipping 13 matching lines...) |
| 3380     } else { | 3380     } else { | 
| 3381       __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); | 3381       __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); | 
| 3382     } | 3382     } | 
| 3383   } | 3383   } | 
| 3384   __ ldr(result, MemOperand(store_base, offset)); | 3384   __ ldr(result, MemOperand(store_base, offset)); | 
| 3385 | 3385 | 
| 3386   // Check for the hole value. | 3386   // Check for the hole value. | 
| 3387   if (instr->hydrogen()->RequiresHoleCheck()) { | 3387   if (instr->hydrogen()->RequiresHoleCheck()) { | 
| 3388     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | 3388     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | 
| 3389       __ SmiTst(result); | 3389       __ SmiTst(result); | 
| 3390       DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi); | 3390       DeoptimizeIf(ne, instr, "not a Smi"); | 
| 3391     } else { | 3391     } else { | 
| 3392       __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 3392       __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 
| 3393       __ cmp(result, scratch); | 3393       __ cmp(result, scratch); | 
| 3394       DeoptimizeIf(eq, instr, Deoptimizer::kHole); | 3394       DeoptimizeIf(eq, instr, "hole"); | 
| 3395     } | 3395     } | 
| 3396   } | 3396   } | 
| 3397 } | 3397 } | 
| 3398 | 3398 | 
| 3399 | 3399 | 
| 3400 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | 3400 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | 
| 3401   if (instr->is_typed_elements()) { | 3401   if (instr->is_typed_elements()) { | 
| 3402     DoLoadKeyedExternalArray(instr); | 3402     DoLoadKeyedExternalArray(instr); | 
| 3403   } else if (instr->hydrogen()->representation().IsDouble()) { | 3403   } else if (instr->hydrogen()->representation().IsDouble()) { | 
| 3404     DoLoadKeyedFixedDoubleArray(instr); | 3404     DoLoadKeyedFixedDoubleArray(instr); | 
| (...skipping 121 matching lines...) |
| 3526   // Normal function. Replace undefined or null with global receiver. | 3526   // Normal function. Replace undefined or null with global receiver. | 
| 3527   __ LoadRoot(scratch, Heap::kNullValueRootIndex); | 3527   __ LoadRoot(scratch, Heap::kNullValueRootIndex); | 
| 3528   __ cmp(receiver, scratch); | 3528   __ cmp(receiver, scratch); | 
| 3529   __ b(eq, &global_object); | 3529   __ b(eq, &global_object); | 
| 3530   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 3530   __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 
| 3531   __ cmp(receiver, scratch); | 3531   __ cmp(receiver, scratch); | 
| 3532   __ b(eq, &global_object); | 3532   __ b(eq, &global_object); | 
| 3533 | 3533 | 
| 3534   // Deoptimize if the receiver is not a JS object. | 3534   // Deoptimize if the receiver is not a JS object. | 
| 3535   __ SmiTst(receiver); | 3535   __ SmiTst(receiver); | 
| 3536   DeoptimizeIf(eq, instr, Deoptimizer::kSmi); | 3536   DeoptimizeIf(eq, instr, "Smi"); | 
| 3537   __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); | 3537   __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); | 
| 3538   DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject); | 3538   DeoptimizeIf(lt, instr, "not a JavaScript object"); | 
| 3539 | 3539 | 
| 3540   __ b(&result_in_receiver); | 3540   __ b(&result_in_receiver); | 
| 3541   __ bind(&global_object); | 3541   __ bind(&global_object); | 
| 3542   __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); | 3542   __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); | 
| 3543   __ ldr(result, | 3543   __ ldr(result, | 
| 3544          ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); | 3544          ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); | 
| 3545   __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); | 3545   __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); | 
| 3546 | 3546 | 
| 3547   if (result.is(receiver)) { | 3547   if (result.is(receiver)) { | 
| 3548     __ bind(&result_in_receiver); | 3548     __ bind(&result_in_receiver); | 
| (...skipping 14 matching lines...) |
| 3563   Register elements = ToRegister(instr->elements()); | 3563   Register elements = ToRegister(instr->elements()); | 
| 3564   Register scratch = scratch0(); | 3564   Register scratch = scratch0(); | 
| 3565   DCHECK(receiver.is(r0));  // Used for parameter count. | 3565   DCHECK(receiver.is(r0));  // Used for parameter count. | 
| 3566   DCHECK(function.is(r1));  // Required by InvokeFunction. | 3566   DCHECK(function.is(r1));  // Required by InvokeFunction. | 
| 3567   DCHECK(ToRegister(instr->result()).is(r0)); | 3567   DCHECK(ToRegister(instr->result()).is(r0)); | 
| 3568 | 3568 | 
| 3569   // Copy the arguments to this function possibly from the | 3569   // Copy the arguments to this function possibly from the | 
| 3570   // adaptor frame below it. | 3570   // adaptor frame below it. | 
| 3571   const uint32_t kArgumentsLimit = 1 * KB; | 3571   const uint32_t kArgumentsLimit = 1 * KB; | 
| 3572   __ cmp(length, Operand(kArgumentsLimit)); | 3572   __ cmp(length, Operand(kArgumentsLimit)); | 
| 3573   DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments); | 3573   DeoptimizeIf(hi, instr, "too many arguments"); | 
| 3574 | 3574 | 
| 3575   // Push the receiver and use the register to keep the original | 3575   // Push the receiver and use the register to keep the original | 
| 3576   // number of arguments. | 3576   // number of arguments. | 
| 3577   __ push(receiver); | 3577   __ push(receiver); | 
| 3578   __ mov(receiver, length); | 3578   __ mov(receiver, length); | 
| 3579   // The arguments are at a one pointer size offset from elements. | 3579   // The arguments are at a one pointer size offset from elements. | 
| 3580   __ add(elements, elements, Operand(1 * kPointerSize)); | 3580   __ add(elements, elements, Operand(1 * kPointerSize)); | 
| 3581 | 3581 | 
| 3582   // Loop through the arguments pushing them onto the execution | 3582   // Loop through the arguments pushing them onto the execution | 
| 3583   // stack. | 3583   // stack. | 
| (...skipping 105 matching lines...) |
| 3689   DCHECK(instr->context() != NULL); | 3689   DCHECK(instr->context() != NULL); | 
| 3690   DCHECK(ToRegister(instr->context()).is(cp)); | 3690   DCHECK(ToRegister(instr->context()).is(cp)); | 
| 3691   Register input = ToRegister(instr->value()); | 3691   Register input = ToRegister(instr->value()); | 
| 3692   Register result = ToRegister(instr->result()); | 3692   Register result = ToRegister(instr->result()); | 
| 3693   Register scratch = scratch0(); | 3693   Register scratch = scratch0(); | 
| 3694 | 3694 | 
| 3695   // Deoptimize if not a heap number. | 3695   // Deoptimize if not a heap number. | 
| 3696   __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 3696   __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 
| 3697   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 3697   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 
| 3698   __ cmp(scratch, Operand(ip)); | 3698   __ cmp(scratch, Operand(ip)); | 
| 3699   DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); | 3699   DeoptimizeIf(ne, instr, "not a heap number"); | 
| 3700 | 3700 | 
| 3701   Label done; | 3701   Label done; | 
| 3702   Register exponent = scratch0(); | 3702   Register exponent = scratch0(); | 
| 3703   scratch = no_reg; | 3703   scratch = no_reg; | 
| 3704   __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | 3704   __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | 
| 3705   // Check the sign of the argument. If the argument is positive, just | 3705   // Check the sign of the argument. If the argument is positive, just | 
| 3706   // return it. | 3706   // return it. | 
| 3707   __ tst(exponent, Operand(HeapNumber::kSignMask)); | 3707   __ tst(exponent, Operand(HeapNumber::kSignMask)); | 
| 3708   // Move the input to the result if necessary. | 3708   // Move the input to the result if necessary. | 
| 3709   __ Move(result, input); | 3709   __ Move(result, input); | 
| (...skipping 47 matching lines...) |
| 3757 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { | 3757 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { | 
| 3758   Register input = ToRegister(instr->value()); | 3758   Register input = ToRegister(instr->value()); | 
| 3759   Register result = ToRegister(instr->result()); | 3759   Register result = ToRegister(instr->result()); | 
| 3760   __ cmp(input, Operand::Zero()); | 3760   __ cmp(input, Operand::Zero()); | 
| 3761   __ Move(result, input, pl); | 3761   __ Move(result, input, pl); | 
| 3762   // We can make rsb conditional because the previous cmp instruction | 3762   // We can make rsb conditional because the previous cmp instruction | 
| 3763   // will clear the V (overflow) flag and rsb won't set this flag | 3763   // will clear the V (overflow) flag and rsb won't set this flag | 
| 3764   // if input is positive. | 3764   // if input is positive. | 
| 3765   __ rsb(result, input, Operand::Zero(), SetCC, mi); | 3765   __ rsb(result, input, Operand::Zero(), SetCC, mi); | 
| 3766   // Deoptimize on overflow. | 3766   // Deoptimize on overflow. | 
| 3767   DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 3767   DeoptimizeIf(vs, instr, "overflow"); | 
| 3768 } | 3768 } | 
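EmitIntegerMathAbs still needs the overflow deopt because one input, INT32_MIN, has no 32-bit absolute value: the conditional rsb sets the V flag for exactly that case. A portable sketch of the same rule (helper name illustrative):

#include <cstdint>

// Returns false where the rsb above would set V and trigger the deopt.
bool IntegerAbs(int32_t input, int32_t* result) {
  if (input == INT32_MIN) return false;   // -INT32_MIN does not fit in int32
  *result = input < 0 ? -input : input;
  return true;
}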
| 3769 | 3769 | 
| 3770 | 3770 | 
| 3771 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3771 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 
| 3772   // Class for deferred case. | 3772   // Class for deferred case. | 
| 3773   class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { | 3773   class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { | 
| 3774    public: | 3774    public: | 
| 3775     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) | 3775     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) | 
| 3776         : LDeferredCode(codegen), instr_(instr) { } | 3776         : LDeferredCode(codegen), instr_(instr) { } | 
| 3777     void Generate() OVERRIDE { | 3777     void Generate() OVERRIDE { | 
| (...skipping 26 matching lines...) |
| 3804 } | 3804 } | 
| 3805 | 3805 | 
| 3806 | 3806 | 
| 3807 void LCodeGen::DoMathFloor(LMathFloor* instr) { | 3807 void LCodeGen::DoMathFloor(LMathFloor* instr) { | 
| 3808   DwVfpRegister input = ToDoubleRegister(instr->value()); | 3808   DwVfpRegister input = ToDoubleRegister(instr->value()); | 
| 3809   Register result = ToRegister(instr->result()); | 3809   Register result = ToRegister(instr->result()); | 
| 3810   Register input_high = scratch0(); | 3810   Register input_high = scratch0(); | 
| 3811   Label done, exact; | 3811   Label done, exact; | 
| 3812 | 3812 | 
| 3813   __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); | 3813   __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); | 
| 3814   DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN); | 3814   DeoptimizeIf(al, instr, "lost precision or NaN"); | 
| 3815 | 3815 | 
| 3816   __ bind(&exact); | 3816   __ bind(&exact); | 
| 3817   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3817   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
| 3818     // Test for -0. | 3818     // Test for -0. | 
| 3819     __ cmp(result, Operand::Zero()); | 3819     __ cmp(result, Operand::Zero()); | 
| 3820     __ b(ne, &done); | 3820     __ b(ne, &done); | 
| 3821     __ cmp(input_high, Operand::Zero()); | 3821     __ cmp(input_high, Operand::Zero()); | 
| 3822     DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); | 3822     DeoptimizeIf(mi, instr, "minus zero"); | 
| 3823   } | 3823   } | 
| 3824   __ bind(&done); | 3824   __ bind(&done); | 
| 3825 } | 3825 } | 
| 3826 | 3826 | 
| 3827 | 3827 | 
| 3828 void LCodeGen::DoMathRound(LMathRound* instr) { | 3828 void LCodeGen::DoMathRound(LMathRound* instr) { | 
| 3829   DwVfpRegister input = ToDoubleRegister(instr->value()); | 3829   DwVfpRegister input = ToDoubleRegister(instr->value()); | 
| 3830   Register result = ToRegister(instr->result()); | 3830   Register result = ToRegister(instr->result()); | 
| 3831   DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 3831   DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 
| 3832   DwVfpRegister input_plus_dot_five = double_scratch1; | 3832   DwVfpRegister input_plus_dot_five = double_scratch1; | 
| 3833   Register input_high = scratch0(); | 3833   Register input_high = scratch0(); | 
| 3834   DwVfpRegister dot_five = double_scratch0(); | 3834   DwVfpRegister dot_five = double_scratch0(); | 
| 3835   Label convert, done; | 3835   Label convert, done; | 
| 3836 | 3836 | 
| 3837   __ Vmov(dot_five, 0.5, scratch0()); | 3837   __ Vmov(dot_five, 0.5, scratch0()); | 
| 3838   __ vabs(double_scratch1, input); | 3838   __ vabs(double_scratch1, input); | 
| 3839   __ VFPCompareAndSetFlags(double_scratch1, dot_five); | 3839   __ VFPCompareAndSetFlags(double_scratch1, dot_five); | 
| 3840   // If input is in [-0.5, -0], the result is -0. | 3840   // If input is in [-0.5, -0], the result is -0. | 
| 3841   // If input is in [+0, +0.5[, the result is +0. | 3841   // If input is in [+0, +0.5[, the result is +0. | 
| 3842   // If the input is +0.5, the result is 1. | 3842   // If the input is +0.5, the result is 1. | 
| 3843   __ b(hi, &convert);  // Out of [-0.5, +0.5]. | 3843   __ b(hi, &convert);  // Out of [-0.5, +0.5]. | 
| 3844   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3844   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
| 3845     __ VmovHigh(input_high, input); | 3845     __ VmovHigh(input_high, input); | 
| 3846     __ cmp(input_high, Operand::Zero()); | 3846     __ cmp(input_high, Operand::Zero()); | 
| 3847     // [-0.5, -0]. | 3847     // [-0.5, -0]. | 
| 3848     DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); | 3848     DeoptimizeIf(mi, instr, "minus zero"); | 
| 3849   } | 3849   } | 
| 3850   __ VFPCompareAndSetFlags(input, dot_five); | 3850   __ VFPCompareAndSetFlags(input, dot_five); | 
| 3851   __ mov(result, Operand(1), LeaveCC, eq);  // +0.5. | 3851   __ mov(result, Operand(1), LeaveCC, eq);  // +0.5. | 
| 3852   // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on | 3852   // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on | 
| 3853   // flag kBailoutOnMinusZero. | 3853   // flag kBailoutOnMinusZero. | 
| 3854   __ mov(result, Operand::Zero(), LeaveCC, ne); | 3854   __ mov(result, Operand::Zero(), LeaveCC, ne); | 
| 3855   __ b(&done); | 3855   __ b(&done); | 
| 3856 | 3856 | 
| 3857   __ bind(&convert); | 3857   __ bind(&convert); | 
| 3858   __ vadd(input_plus_dot_five, input, dot_five); | 3858   __ vadd(input_plus_dot_five, input, dot_five); | 
| 3859   // Reuse dot_five (double_scratch0) as we no longer need this value. | 3859   // Reuse dot_five (double_scratch0) as we no longer need this value. | 
| 3860   __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), | 3860   __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), | 
| 3861                    &done, &done); | 3861                    &done, &done); | 
| 3862   DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN); | 3862   DeoptimizeIf(al, instr, "lost precision or NaN"); | 
| 3863   __ bind(&done); | 3863   __ bind(&done); | 
| 3864 } | 3864 } | 
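DoMathRound implements JavaScript's Math.round by adding 0.5 and taking the floor, with two escape hatches: results in [-0.5, -0] must stay -0 when kBailoutOnMinusZero is set (hence the "minus zero" deopt), and anything that does not fit an int32 deopts with "lost precision or NaN". For inputs whose result does fit, the rounding rule itself is a one-liner; this is a sketch of that sequence, not V8's helper:

#include <cmath>
#include <cstdint>
#include <cstdio>

// Mirrors the add-0.5-then-floor sequence above for in-range inputs.
int32_t RoundToInt32(double x) {
  return static_cast<int32_t>(std::floor(x + 0.5));
}

int main() {
  std::printf("%d %d %d\n", RoundToInt32(0.5), RoundToInt32(-0.5),
              RoundToInt32(-2.5));  // 1 0 -2
}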
| 3865 | 3865 | 
| 3866 | 3866 | 
| 3867 void LCodeGen::DoMathFround(LMathFround* instr) { | 3867 void LCodeGen::DoMathFround(LMathFround* instr) { | 
| 3868   DwVfpRegister input_reg = ToDoubleRegister(instr->value()); | 3868   DwVfpRegister input_reg = ToDoubleRegister(instr->value()); | 
| 3869   DwVfpRegister output_reg = ToDoubleRegister(instr->result()); | 3869   DwVfpRegister output_reg = ToDoubleRegister(instr->result()); | 
| 3870   LowDwVfpRegister scratch = double_scratch0(); | 3870   LowDwVfpRegister scratch = double_scratch0(); | 
| 3871   __ vcvt_f32_f64(scratch.low(), input_reg); | 3871   __ vcvt_f32_f64(scratch.low(), input_reg); | 
| 3872   __ vcvt_f64_f32(output_reg, scratch.low()); | 3872   __ vcvt_f64_f32(output_reg, scratch.low()); | 
| (...skipping 43 matching lines...) |
| 3916   if (exponent_type.IsSmi()) { | 3916   if (exponent_type.IsSmi()) { | 
| 3917     MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3917     MathPowStub stub(isolate(), MathPowStub::TAGGED); | 
| 3918     __ CallStub(&stub); | 3918     __ CallStub(&stub); | 
| 3919   } else if (exponent_type.IsTagged()) { | 3919   } else if (exponent_type.IsTagged()) { | 
| 3920     Label no_deopt; | 3920     Label no_deopt; | 
| 3921     __ JumpIfSmi(tagged_exponent, &no_deopt); | 3921     __ JumpIfSmi(tagged_exponent, &no_deopt); | 
| 3922     DCHECK(!r6.is(tagged_exponent)); | 3922     DCHECK(!r6.is(tagged_exponent)); | 
| 3923     __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); | 3923     __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); | 
| 3924     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 3924     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 
| 3925     __ cmp(r6, Operand(ip)); | 3925     __ cmp(r6, Operand(ip)); | 
| 3926     DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); | 3926     DeoptimizeIf(ne, instr, "not a heap number"); | 
| 3927     __ bind(&no_deopt); | 3927     __ bind(&no_deopt); | 
| 3928     MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3928     MathPowStub stub(isolate(), MathPowStub::TAGGED); | 
| 3929     __ CallStub(&stub); | 3929     __ CallStub(&stub); | 
| 3930   } else if (exponent_type.IsInteger32()) { | 3930   } else if (exponent_type.IsInteger32()) { | 
| 3931     MathPowStub stub(isolate(), MathPowStub::INTEGER); | 3931     MathPowStub stub(isolate(), MathPowStub::INTEGER); | 
| 3932     __ CallStub(&stub); | 3932     __ CallStub(&stub); | 
| 3933   } else { | 3933   } else { | 
| 3934     DCHECK(exponent_type.IsDouble()); | 3934     DCHECK(exponent_type.IsDouble()); | 
| 3935     MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 3935     MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 
| 3936     __ CallStub(&stub); | 3936     __ CallStub(&stub); | 
| (...skipping 388 matching lines...) |
| 4325     Register index = ToRegister(instr->index()); | 4325     Register index = ToRegister(instr->index()); | 
| 4326     Operand length = ToOperand(instr->length()); | 4326     Operand length = ToOperand(instr->length()); | 
| 4327     __ cmp(index, length); | 4327     __ cmp(index, length); | 
| 4328   } | 4328   } | 
| 4329   if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 4329   if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 
| 4330     Label done; | 4330     Label done; | 
| 4331     __ b(NegateCondition(cc), &done); | 4331     __ b(NegateCondition(cc), &done); | 
| 4332     __ stop("eliminated bounds check failed"); | 4332     __ stop("eliminated bounds check failed"); | 
| 4333     __ bind(&done); | 4333     __ bind(&done); | 
| 4334   } else { | 4334   } else { | 
| 4335     DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds); | 4335     DeoptimizeIf(cc, instr, "out of bounds"); | 
| 4336   } | 4336   } | 
| 4337 } | 4337 } | 
| 4338 | 4338 | 
| 4339 | 4339 | 
| 4340 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4340 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 
| 4341   Register external_pointer = ToRegister(instr->elements()); | 4341   Register external_pointer = ToRegister(instr->elements()); | 
| 4342   Register key = no_reg; | 4342   Register key = no_reg; | 
| 4343   ElementsKind elements_kind = instr->elements_kind(); | 4343   ElementsKind elements_kind = instr->elements_kind(); | 
| 4344   bool key_is_constant = instr->key()->IsConstantOperand(); | 4344   bool key_is_constant = instr->key()->IsConstantOperand(); | 
| 4345   int constant_key = 0; | 4345   int constant_key = 0; | 
| (...skipping 227 matching lines...) |
| 4573   } | 4573   } | 
| 4574   __ bind(&not_applicable); | 4574   __ bind(&not_applicable); |
| 4575 } | 4575 } | 
| 4576 | 4576 | 
| 4577 | 4577 | 
| 4578 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4578 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 
| 4579   Register object = ToRegister(instr->object()); | 4579   Register object = ToRegister(instr->object()); | 
| 4580   Register temp = ToRegister(instr->temp()); | 4580   Register temp = ToRegister(instr->temp()); | 
| 4581   Label no_memento_found; | 4581   Label no_memento_found; | 
| 4582   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); | 4582   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); | 
| 4583   DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound); | 4583   DeoptimizeIf(eq, instr, "memento found"); | 
| 4584   __ bind(&no_memento_found); | 4584   __ bind(&no_memento_found); | 
| 4585 } | 4585 } | 
| 4586 | 4586 | 
| 4587 | 4587 | 
| 4588 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4588 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 
| 4589   DCHECK(ToRegister(instr->context()).is(cp)); | 4589   DCHECK(ToRegister(instr->context()).is(cp)); | 
| 4590   DCHECK(ToRegister(instr->left()).is(r1)); | 4590   DCHECK(ToRegister(instr->left()).is(r1)); | 
| 4591   DCHECK(ToRegister(instr->right()).is(r0)); | 4591   DCHECK(ToRegister(instr->right()).is(r0)); | 
| 4592   StringAddStub stub(isolate(), | 4592   StringAddStub stub(isolate(), | 
| 4593                      instr->hydrogen()->flags(), | 4593                      instr->hydrogen()->flags(), | 
| (...skipping 314 matching lines...) |
| 4908 } | 4908 } | 
| 4909 | 4909 | 
| 4910 | 4910 | 
| 4911 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4911 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 
| 4912   HChange* hchange = instr->hydrogen(); | 4912   HChange* hchange = instr->hydrogen(); | 
| 4913   Register input = ToRegister(instr->value()); | 4913   Register input = ToRegister(instr->value()); | 
| 4914   Register output = ToRegister(instr->result()); | 4914   Register output = ToRegister(instr->result()); | 
| 4915   if (hchange->CheckFlag(HValue::kCanOverflow) && | 4915   if (hchange->CheckFlag(HValue::kCanOverflow) && | 
| 4916       hchange->value()->CheckFlag(HValue::kUint32)) { | 4916       hchange->value()->CheckFlag(HValue::kUint32)) { | 
| 4917     __ tst(input, Operand(0xc0000000)); | 4917     __ tst(input, Operand(0xc0000000)); | 
| 4918     DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); | 4918     DeoptimizeIf(ne, instr, "overflow"); | 
| 4919   } | 4919   } | 
| 4920   if (hchange->CheckFlag(HValue::kCanOverflow) && | 4920   if (hchange->CheckFlag(HValue::kCanOverflow) && | 
| 4921       !hchange->value()->CheckFlag(HValue::kUint32)) { | 4921       !hchange->value()->CheckFlag(HValue::kUint32)) { | 
| 4922     __ SmiTag(output, input, SetCC); | 4922     __ SmiTag(output, input, SetCC); | 
| 4923     DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 4923     DeoptimizeIf(vs, instr, "overflow"); | 
| 4924   } else { | 4924   } else { | 
| 4925     __ SmiTag(output, input); | 4925     __ SmiTag(output, input); | 
| 4926   } | 4926   } | 
| 4927 } | 4927 } | 
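DoSmiTag shows both halves of 32-bit smi tagging: a smi is a 31-bit signed integer shifted left by one, so for uint32 inputs any value with either of the top two bits set (the tst with 0xc0000000) is at least 2^30 and cannot become a non-negative smi, while for signed inputs the flag-setting SmiTag (effectively value + value on ARM) reports the overflow through the V flag. A standalone sketch of the signed path, with the GCC/Clang __builtin_add_overflow builtin standing in for the ARM flags:

#include <cstdint>
#include <cstdio>

// Tagging doubles the value; signed overflow means it needs more than 31 bits.
bool SmiTag(int32_t value, int32_t* tagged) {
  return !__builtin_add_overflow(value, value, tagged);
}

int main() {
  int32_t t;
  std::printf("%d\n", SmiTag((1 << 30) - 1, &t));  // 1: largest positive smi
  std::printf("%d\n", SmiTag(1 << 30, &t));        // 0: would deoptimize
}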
| 4928 | 4928 | 
| 4929 | 4929 | 
| 4930 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4930 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 
| 4931   Register input = ToRegister(instr->value()); | 4931   Register input = ToRegister(instr->value()); | 
| 4932   Register result = ToRegister(instr->result()); | 4932   Register result = ToRegister(instr->result()); | 
| 4933   if (instr->needs_check()) { | 4933   if (instr->needs_check()) { | 
| 4934     STATIC_ASSERT(kHeapObjectTag == 1); | 4934     STATIC_ASSERT(kHeapObjectTag == 1); | 
| 4935     // If the input is a HeapObject, SmiUntag will set the carry flag. | 4935     // If the input is a HeapObject, SmiUntag will set the carry flag. | 
| 4936     __ SmiUntag(result, input, SetCC); | 4936     __ SmiUntag(result, input, SetCC); | 
| 4937     DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi); | 4937     DeoptimizeIf(cs, instr, "not a Smi"); | 
| 4938   } else { | 4938   } else { | 
| 4939     __ SmiUntag(result, input); | 4939     __ SmiUntag(result, input); | 
| 4940   } | 4940   } | 
| 4941 } | 4941 } | 
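The needs_check path of DoSmiUntag uses an ARM trick that the comment above spells out: untagging is an arithmetic shift right by one, and with SetCC the bit shifted out (the heap-object tag bit, which is 1 for non-smis) lands in the carry flag, so DeoptimizeIf(cs, ...) catches every non-smi with no extra compare. A portable sketch of the same check; the shift assumes the usual arithmetic-shift behavior for signed values:

#include <cstdint>
#include <cstdio>

// Bit 0 is the tag: 0 for smis, 1 for heap-object pointers.
bool SmiUntag(int32_t tagged, int32_t* value) {
  bool was_heap_object = (tagged & 1) != 0;  // what the carry flag captures
  *value = tagged >> 1;                      // arithmetic shift recovers the smi value
  return !was_heap_object;                   // false -> DeoptimizeIf(cs, ...)
}

int main() {
  int32_t v;
  std::printf("%d %d\n", SmiUntag(6, &v), v);  // 1 3
}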
| 4942 | 4942 | 
| 4943 | 4943 | 
| 4944 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, | 4944 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, | 
| 4945                                 DwVfpRegister result_reg, | 4945                                 DwVfpRegister result_reg, | 
| 4946                                 NumberUntagDMode mode) { | 4946                                 NumberUntagDMode mode) { | 
| 4947   bool can_convert_undefined_to_nan = | 4947   bool can_convert_undefined_to_nan = | 
| 4948       instr->hydrogen()->can_convert_undefined_to_nan(); | 4948       instr->hydrogen()->can_convert_undefined_to_nan(); | 
| 4949   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); | 4949   bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); | 
| 4950 | 4950 | 
| 4951   Register scratch = scratch0(); | 4951   Register scratch = scratch0(); | 
| 4952   SwVfpRegister flt_scratch = double_scratch0().low(); | 4952   SwVfpRegister flt_scratch = double_scratch0().low(); | 
| 4953   DCHECK(!result_reg.is(double_scratch0())); | 4953   DCHECK(!result_reg.is(double_scratch0())); | 
| 4954   Label convert, load_smi, done; | 4954   Label convert, load_smi, done; | 
| 4955   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4955   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 
| 4956     // Smi check. | 4956     // Smi check. | 
| 4957     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 4957     __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 
| 4958     // Heap number map check. | 4958     // Heap number map check. | 
| 4959     __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4959     __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 
| 4960     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 4960     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 
| 4961     __ cmp(scratch, Operand(ip)); | 4961     __ cmp(scratch, Operand(ip)); | 
| 4962     if (can_convert_undefined_to_nan) { | 4962     if (can_convert_undefined_to_nan) { | 
| 4963       __ b(ne, &convert); | 4963       __ b(ne, &convert); | 
| 4964     } else { | 4964     } else { | 
| 4965       DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); | 4965       DeoptimizeIf(ne, instr, "not a heap number"); | 
| 4966     } | 4966     } | 
| 4967     // load heap number | 4967     // load heap number | 
| 4968     __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); | 4968     __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); | 
| 4969     if (deoptimize_on_minus_zero) { | 4969     if (deoptimize_on_minus_zero) { | 
| 4970       __ VmovLow(scratch, result_reg); | 4970       __ VmovLow(scratch, result_reg); | 
| 4971       __ cmp(scratch, Operand::Zero()); | 4971       __ cmp(scratch, Operand::Zero()); | 
| 4972       __ b(ne, &done); | 4972       __ b(ne, &done); | 
| 4973       __ VmovHigh(scratch, result_reg); | 4973       __ VmovHigh(scratch, result_reg); | 
| 4974       __ cmp(scratch, Operand(HeapNumber::kSignMask)); | 4974       __ cmp(scratch, Operand(HeapNumber::kSignMask)); | 
| 4975       DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); | 4975       DeoptimizeIf(eq, instr, "minus zero"); | 
| 4976     } | 4976     } | 
| 4977     __ jmp(&done); | 4977     __ jmp(&done); | 
| 4978     if (can_convert_undefined_to_nan) { | 4978     if (can_convert_undefined_to_nan) { | 
| 4979       __ bind(&convert); | 4979       __ bind(&convert); | 
| 4980       // Convert undefined (and hole) to NaN. | 4980       // Convert undefined (and hole) to NaN. | 
| 4981       __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 4981       __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 
| 4982       __ cmp(input_reg, Operand(ip)); | 4982       __ cmp(input_reg, Operand(ip)); | 
| 4983       DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); | 4983       DeoptimizeIf(ne, instr, "not a heap number/undefined"); | 
| 4984       __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4984       __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 
| 4985       __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); | 4985       __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); | 
| 4986       __ jmp(&done); | 4986       __ jmp(&done); | 
| 4987     } | 4987     } | 
| 4988   } else { | 4988   } else { | 
| 4989     __ SmiUntag(scratch, input_reg); | 4989     __ SmiUntag(scratch, input_reg); | 
| 4990     DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4990     DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 
| 4991   } | 4991   } | 
| 4992   // Smi to double register conversion | 4992   // Smi to double register conversion | 
| 4993   __ bind(&load_smi); | 4993   __ bind(&load_smi); | 
| (...skipping 47 matching lines...) |
| 5041     __ bind(&check_bools); | 5041     __ bind(&check_bools); | 
| 5042     __ LoadRoot(ip, Heap::kTrueValueRootIndex); | 5042     __ LoadRoot(ip, Heap::kTrueValueRootIndex); | 
| 5043     __ cmp(scratch2, Operand(ip)); | 5043     __ cmp(scratch2, Operand(ip)); | 
| 5044     __ b(ne, &check_false); | 5044     __ b(ne, &check_false); | 
| 5045     __ mov(input_reg, Operand(1)); | 5045     __ mov(input_reg, Operand(1)); | 
| 5046     __ b(&done); | 5046     __ b(&done); | 
| 5047 | 5047 | 
| 5048     __ bind(&check_false); | 5048     __ bind(&check_false); | 
| 5049     __ LoadRoot(ip, Heap::kFalseValueRootIndex); | 5049     __ LoadRoot(ip, Heap::kFalseValueRootIndex); | 
| 5050     __ cmp(scratch2, Operand(ip)); | 5050     __ cmp(scratch2, Operand(ip)); | 
| 5051     DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean); | 5051     DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false"); | 
| 5052     __ mov(input_reg, Operand::Zero()); | 5052     __ mov(input_reg, Operand::Zero()); | 
| 5053   } else { | 5053   } else { | 
| 5054     DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); | 5054     DeoptimizeIf(ne, instr, "not a heap number"); | 
| 5055 | 5055 | 
| 5056     __ sub(ip, scratch2, Operand(kHeapObjectTag)); | 5056     __ sub(ip, scratch2, Operand(kHeapObjectTag)); | 
| 5057     __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); | 5057     __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); | 
| 5058     __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); | 5058     __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); | 
| 5059     DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); | 5059     DeoptimizeIf(ne, instr, "lost precision or NaN"); | 
| 5060 | 5060 | 
| 5061     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5061     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
| 5062       __ cmp(input_reg, Operand::Zero()); | 5062       __ cmp(input_reg, Operand::Zero()); | 
| 5063       __ b(ne, &done); | 5063       __ b(ne, &done); | 
| 5064       __ VmovHigh(scratch1, double_scratch2); | 5064       __ VmovHigh(scratch1, double_scratch2); | 
| 5065       __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 5065       __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 
| 5066       DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero); | 5066       DeoptimizeIf(ne, instr, "minus zero"); | 
| 5067     } | 5067     } | 
| 5068   } | 5068   } | 
| 5069   __ bind(&done); | 5069   __ bind(&done); | 
| 5070 } | 5070 } | 
| 5071 | 5071 | 
| 5072 | 5072 | 
| 5073 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 5073 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 
| 5074   class DeferredTaggedToI FINAL : public LDeferredCode { | 5074   class DeferredTaggedToI FINAL : public LDeferredCode { | 
| 5075    public: | 5075    public: | 
| 5076     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 5076     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 
| (...skipping 48 matching lines...) |
| 5125   Register result_reg = ToRegister(instr->result()); | 5125   Register result_reg = ToRegister(instr->result()); | 
| 5126   Register scratch1 = scratch0(); | 5126   Register scratch1 = scratch0(); | 
| 5127   DwVfpRegister double_input = ToDoubleRegister(instr->value()); | 5127   DwVfpRegister double_input = ToDoubleRegister(instr->value()); | 
| 5128   LowDwVfpRegister double_scratch = double_scratch0(); | 5128   LowDwVfpRegister double_scratch = double_scratch0(); | 
| 5129 | 5129 | 
| 5130   if (instr->truncating()) { | 5130   if (instr->truncating()) { | 
| 5131     __ TruncateDoubleToI(result_reg, double_input); | 5131     __ TruncateDoubleToI(result_reg, double_input); | 
| 5132   } else { | 5132   } else { | 
| 5133     __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); | 5133     __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); | 
| 5134     // Deoptimize if the input wasn't an int32 (inside a double). | 5134     // Deoptimize if the input wasn't an int32 (inside a double). |
| 5135     DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); | 5135     DeoptimizeIf(ne, instr, "lost precision or NaN"); | 
| 5136     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5136     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
| 5137       Label done; | 5137       Label done; | 
| 5138       __ cmp(result_reg, Operand::Zero()); | 5138       __ cmp(result_reg, Operand::Zero()); | 
| 5139       __ b(ne, &done); | 5139       __ b(ne, &done); | 
| 5140       __ VmovHigh(scratch1, double_input); | 5140       __ VmovHigh(scratch1, double_input); | 
| 5141       __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 5141       __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 
| 5142       DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero); | 5142       DeoptimizeIf(ne, instr, "minus zero"); | 
| 5143       __ bind(&done); | 5143       __ bind(&done); | 
| 5144     } | 5144     } | 
| 5145   } | 5145   } | 
| 5146 } | 5146 } | 
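DoDoubleToI (and DoDoubleToSmi below) only deoptimize in the non-truncating case: TryDoubleToInt32Exact must produce an int32 that round-trips back to the same double, NaN fails the check, and -0 is rejected separately when kBailoutOnMinusZero is set. A portable sketch of that test (helper name illustrative):

#include <cmath>
#include <cstdint>

// Returns false where the optimized code would deopt with
// "lost precision or NaN" or "minus zero".
bool DoubleToInt32Exact(double input, int32_t* result) {
  if (std::isnan(input)) return false;
  if (input < INT32_MIN || input > INT32_MAX) return false;
  int32_t truncated = static_cast<int32_t>(input);
  if (static_cast<double>(truncated) != input) return false;  // fractional part lost
  if (truncated == 0 && std::signbit(input)) return false;    // -0 with the bailout flag
  *result = truncated;
  return true;
}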
| 5147 | 5147 | 
| 5148 | 5148 | 
| 5149 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 5149 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 
| 5150   Register result_reg = ToRegister(instr->result()); | 5150   Register result_reg = ToRegister(instr->result()); | 
| 5151   Register scratch1 = scratch0(); | 5151   Register scratch1 = scratch0(); | 
| 5152   DwVfpRegister double_input = ToDoubleRegister(instr->value()); | 5152   DwVfpRegister double_input = ToDoubleRegister(instr->value()); | 
| 5153   LowDwVfpRegister double_scratch = double_scratch0(); | 5153   LowDwVfpRegister double_scratch = double_scratch0(); | 
| 5154 | 5154 | 
| 5155   if (instr->truncating()) { | 5155   if (instr->truncating()) { | 
| 5156     __ TruncateDoubleToI(result_reg, double_input); | 5156     __ TruncateDoubleToI(result_reg, double_input); | 
| 5157   } else { | 5157   } else { | 
| 5158     __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); | 5158     __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); | 
| 5159     // Deoptimize if the input wasn't an int32 (inside a double). | 5159     // Deoptimize if the input wasn't an int32 (inside a double). |
| 5160     DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); | 5160     DeoptimizeIf(ne, instr, "lost precision or NaN"); | 
| 5161     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5161     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 
| 5162       Label done; | 5162       Label done; | 
| 5163       __ cmp(result_reg, Operand::Zero()); | 5163       __ cmp(result_reg, Operand::Zero()); | 
| 5164       __ b(ne, &done); | 5164       __ b(ne, &done); | 
| 5165       __ VmovHigh(scratch1, double_input); | 5165       __ VmovHigh(scratch1, double_input); | 
| 5166       __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 5166       __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 
| 5167       DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero); | 5167       DeoptimizeIf(ne, instr, "minus zero"); | 
| 5168       __ bind(&done); | 5168       __ bind(&done); | 
| 5169     } | 5169     } | 
| 5170   } | 5170   } | 
| 5171   __ SmiTag(result_reg, SetCC); | 5171   __ SmiTag(result_reg, SetCC); | 
| 5172   DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); | 5172   DeoptimizeIf(vs, instr, "overflow"); | 
| 5173 } | 5173 } | 
| 5174 | 5174 | 
| 5175 | 5175 | 
| 5176 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 5176 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 
| 5177   LOperand* input = instr->value(); | 5177   LOperand* input = instr->value(); | 
| 5178   __ SmiTst(ToRegister(input)); | 5178   __ SmiTst(ToRegister(input)); | 
| 5179   DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi); | 5179   DeoptimizeIf(ne, instr, "not a Smi"); | 
| 5180 } | 5180 } | 
| 5181 | 5181 | 
| 5182 | 5182 | 
| 5183 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 5183 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 
| 5184   if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 5184   if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 
| 5185     LOperand* input = instr->value(); | 5185     LOperand* input = instr->value(); | 
| 5186     __ SmiTst(ToRegister(input)); | 5186     __ SmiTst(ToRegister(input)); | 
| 5187     DeoptimizeIf(eq, instr, Deoptimizer::kSmi); | 5187     DeoptimizeIf(eq, instr, "Smi"); | 
| 5188   } | 5188   } | 
| 5189 } | 5189 } | 
| 5190 | 5190 | 
| 5191 | 5191 | 
| 5192 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 5192 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 
| 5193   Register input = ToRegister(instr->value()); | 5193   Register input = ToRegister(instr->value()); | 
| 5194   Register scratch = scratch0(); | 5194   Register scratch = scratch0(); | 
| 5195 | 5195 | 
| 5196   __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 5196   __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 
| 5197   __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 5197   __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 
| 5198 | 5198 | 
| 5199   if (instr->hydrogen()->is_interval_check()) { | 5199   if (instr->hydrogen()->is_interval_check()) { | 
| 5200     InstanceType first; | 5200     InstanceType first; | 
| 5201     InstanceType last; | 5201     InstanceType last; | 
| 5202     instr->hydrogen()->GetCheckInterval(&first, &last); | 5202     instr->hydrogen()->GetCheckInterval(&first, &last); | 
| 5203 | 5203 | 
| 5204     __ cmp(scratch, Operand(first)); | 5204     __ cmp(scratch, Operand(first)); | 
| 5205 | 5205 | 
| 5206     // If there is only one type in the interval, check for equality. | 5206     // If there is only one type in the interval, check for equality. | 
| 5207     if (first == last) { | 5207     if (first == last) { | 
| 5208       DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); | 5208       DeoptimizeIf(ne, instr, "wrong instance type"); | 
| 5209     } else { | 5209     } else { | 
| 5210       DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType); | 5210       DeoptimizeIf(lo, instr, "wrong instance type"); | 
| 5211       // Omit check for the last type. | 5211       // Omit check for the last type. | 
| 5212       if (last != LAST_TYPE) { | 5212       if (last != LAST_TYPE) { | 
| 5213         __ cmp(scratch, Operand(last)); | 5213         __ cmp(scratch, Operand(last)); | 
| 5214         DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType); | 5214         DeoptimizeIf(hi, instr, "wrong instance type"); | 
| 5215       } | 5215       } | 
| 5216     } | 5216     } | 
| 5217   } else { | 5217   } else { | 
| 5218     uint8_t mask; | 5218     uint8_t mask; | 
| 5219     uint8_t tag; | 5219     uint8_t tag; | 
| 5220     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 5220     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 
| 5221 | 5221 | 
| 5222     if (base::bits::IsPowerOfTwo32(mask)) { | 5222     if (base::bits::IsPowerOfTwo32(mask)) { | 
| 5223       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); | 5223       DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); | 
| 5224       __ tst(scratch, Operand(mask)); | 5224       __ tst(scratch, Operand(mask)); | 
| 5225       DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType); | 5225       DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type"); | 
| 5226     } else { | 5226     } else { | 
| 5227       __ and_(scratch, scratch, Operand(mask)); | 5227       __ and_(scratch, scratch, Operand(mask)); | 
| 5228       __ cmp(scratch, Operand(tag)); | 5228       __ cmp(scratch, Operand(tag)); | 
| 5229       DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); | 5229       DeoptimizeIf(ne, instr, "wrong instance type"); | 
| 5230     } | 5230     } | 
| 5231   } | 5231   } | 
| 5232 } | 5232 } | 
| 5233 | 5233 | 
| 5234 | 5234 | 
| 5235 void LCodeGen::DoCheckValue(LCheckValue* instr) { | 5235 void LCodeGen::DoCheckValue(LCheckValue* instr) { | 
| 5236   Register reg = ToRegister(instr->value()); | 5236   Register reg = ToRegister(instr->value()); | 
| 5237   Handle<HeapObject> object = instr->hydrogen()->object().handle(); | 5237   Handle<HeapObject> object = instr->hydrogen()->object().handle(); | 
| 5238   AllowDeferredHandleDereference smi_check; | 5238   AllowDeferredHandleDereference smi_check; | 
| 5239   if (isolate()->heap()->InNewSpace(*object)) { | 5239   if (isolate()->heap()->InNewSpace(*object)) { | 
| 5240     Register reg = ToRegister(instr->value()); | 5240     Register reg = ToRegister(instr->value()); | 
| 5241     Handle<Cell> cell = isolate()->factory()->NewCell(object); | 5241     Handle<Cell> cell = isolate()->factory()->NewCell(object); | 
| 5242     __ mov(ip, Operand(Handle<Object>(cell))); | 5242     __ mov(ip, Operand(Handle<Object>(cell))); | 
| 5243     __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); | 5243     __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); | 
| 5244     __ cmp(reg, ip); | 5244     __ cmp(reg, ip); | 
| 5245   } else { | 5245   } else { | 
| 5246     __ cmp(reg, Operand(object)); | 5246     __ cmp(reg, Operand(object)); | 
| 5247   } | 5247   } | 
| 5248   DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch); | 5248   DeoptimizeIf(ne, instr, "value mismatch"); | 
| 5249 } | 5249 } | 
| 5250 | 5250 | 
| 5251 | 5251 | 
| 5252 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 5252 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 
| 5253   { | 5253   { | 
| 5254     PushSafepointRegistersScope scope(this); | 5254     PushSafepointRegistersScope scope(this); | 
| 5255     __ push(object); | 5255     __ push(object); | 
| 5256     __ mov(cp, Operand::Zero()); | 5256     __ mov(cp, Operand::Zero()); | 
| 5257     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 5257     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 
| 5258     RecordSafepointWithRegisters( | 5258     RecordSafepointWithRegisters( | 
| 5259         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 5259         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 
| 5260     __ StoreToSafepointRegisterSlot(r0, scratch0()); | 5260     __ StoreToSafepointRegisterSlot(r0, scratch0()); | 
| 5261   } | 5261   } | 
| 5262   __ tst(scratch0(), Operand(kSmiTagMask)); | 5262   __ tst(scratch0(), Operand(kSmiTagMask)); | 
| 5263   DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed); | 5263   DeoptimizeIf(eq, instr, "instance migration failed"); | 
| 5264 } | 5264 } | 
| 5265 | 5265 | 
| 5266 | 5266 | 
| 5267 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 5267 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 
| 5268   class DeferredCheckMaps FINAL : public LDeferredCode { | 5268   class DeferredCheckMaps FINAL : public LDeferredCode { | 
| 5269    public: | 5269    public: | 
| 5270     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 5270     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 
| 5271         : LDeferredCode(codegen), instr_(instr), object_(object) { | 5271         : LDeferredCode(codegen), instr_(instr), object_(object) { | 
| 5272       SetExit(check_maps()); | 5272       SetExit(check_maps()); | 
| 5273     } | 5273     } | 
| (...skipping 37 matching lines...) | 
| 5311     Handle<Map> map = maps->at(i).handle(); | 5311     Handle<Map> map = maps->at(i).handle(); | 
| 5312     __ CompareMap(map_reg, map, &success); | 5312     __ CompareMap(map_reg, map, &success); | 
| 5313     __ b(eq, &success); | 5313     __ b(eq, &success); | 
| 5314   } | 5314   } | 
| 5315 | 5315 | 
| 5316   Handle<Map> map = maps->at(maps->size() - 1).handle(); | 5316   Handle<Map> map = maps->at(maps->size() - 1).handle(); | 
| 5317   __ CompareMap(map_reg, map, &success); | 5317   __ CompareMap(map_reg, map, &success); | 
| 5318   if (instr->hydrogen()->HasMigrationTarget()) { | 5318   if (instr->hydrogen()->HasMigrationTarget()) { | 
| 5319     __ b(ne, deferred->entry()); | 5319     __ b(ne, deferred->entry()); | 
| 5320   } else { | 5320   } else { | 
| 5321     DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); | 5321     DeoptimizeIf(ne, instr, "wrong map"); | 
| 5322   } | 5322   } | 
| 5323 | 5323 | 
| 5324   __ bind(&success); | 5324   __ bind(&success); | 
| 5325 } | 5325 } | 
| 5326 | 5326 | 
| 5327 | 5327 | 
| 5328 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5328 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 
| 5329   DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5329   DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); | 
| 5330   Register result_reg = ToRegister(instr->result()); | 5330   Register result_reg = ToRegister(instr->result()); | 
| 5331   __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); | 5331   __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); | 
| (...skipping 18 matching lines...) | 
| 5350   __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); | 5350   __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); | 
| 5351 | 5351 | 
| 5352   // Check for heap number | 5352   // Check for heap number | 
| 5353   __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5353   __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 
| 5354   __ cmp(scratch, Operand(factory()->heap_number_map())); | 5354   __ cmp(scratch, Operand(factory()->heap_number_map())); | 
| 5355   __ b(eq, &heap_number); | 5355   __ b(eq, &heap_number); | 
| 5356 | 5356 | 
| 5357   // Check for undefined. Undefined is converted to zero for clamping | 5357   // Check for undefined. Undefined is converted to zero for clamping | 
| 5358   // conversions. | 5358   // conversions. | 
| 5359   __ cmp(input_reg, Operand(factory()->undefined_value())); | 5359   __ cmp(input_reg, Operand(factory()->undefined_value())); | 
| 5360   DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); | 5360   DeoptimizeIf(ne, instr, "not a heap number/undefined"); | 
| 5361   __ mov(result_reg, Operand::Zero()); | 5361   __ mov(result_reg, Operand::Zero()); | 
| 5362   __ jmp(&done); | 5362   __ jmp(&done); | 
| 5363 | 5363 | 
| 5364   // Heap number | 5364   // Heap number | 
| 5365   __ bind(&heap_number); | 5365   __ bind(&heap_number); | 
| 5366   __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 5366   __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 
| 5367   __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); | 5367   __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); | 
| 5368   __ jmp(&done); | 5368   __ jmp(&done); | 
| 5369 | 5369 | 
| 5370   // smi | 5370   // smi | 
| (...skipping 447 matching lines...) | 
| 5818   DCHECK(!environment->HasBeenRegistered()); | 5818   DCHECK(!environment->HasBeenRegistered()); | 
| 5819   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 5819   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 
| 5820 | 5820 | 
| 5821   GenerateOsrPrologue(); | 5821   GenerateOsrPrologue(); | 
| 5822 } | 5822 } | 
| 5823 | 5823 | 
| 5824 | 5824 | 
| 5825 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 5825 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 
| 5826   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 5826   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 
| 5827   __ cmp(r0, ip); | 5827   __ cmp(r0, ip); | 
| 5828   DeoptimizeIf(eq, instr, Deoptimizer::kUndefined); | 5828   DeoptimizeIf(eq, instr, "undefined"); | 
| 5829 | 5829 | 
| 5830   Register null_value = r5; | 5830   Register null_value = r5; | 
| 5831   __ LoadRoot(null_value, Heap::kNullValueRootIndex); | 5831   __ LoadRoot(null_value, Heap::kNullValueRootIndex); | 
| 5832   __ cmp(r0, null_value); | 5832   __ cmp(r0, null_value); | 
| 5833   DeoptimizeIf(eq, instr, Deoptimizer::kNull); | 5833   DeoptimizeIf(eq, instr, "null"); | 
| 5834 | 5834 | 
| 5835   __ SmiTst(r0); | 5835   __ SmiTst(r0); | 
| 5836   DeoptimizeIf(eq, instr, Deoptimizer::kSmi); | 5836   DeoptimizeIf(eq, instr, "Smi"); | 
| 5837 | 5837 | 
| 5838   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | 5838   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | 
| 5839   __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE); | 5839   __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE); | 
| 5840   DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType); | 5840   DeoptimizeIf(le, instr, "wrong instance type"); | 
| 5841 | 5841 | 
| 5842   Label use_cache, call_runtime; | 5842   Label use_cache, call_runtime; | 
| 5843   __ CheckEnumCache(null_value, &call_runtime); | 5843   __ CheckEnumCache(null_value, &call_runtime); | 
| 5844 | 5844 | 
| 5845   __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); | 5845   __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); | 
| 5846   __ b(&use_cache); | 5846   __ b(&use_cache); | 
| 5847 | 5847 | 
| 5848   // Get the set of properties to enumerate. | 5848   // Get the set of properties to enumerate. | 
| 5849   __ bind(&call_runtime); | 5849   __ bind(&call_runtime); | 
| 5850   __ push(r0); | 5850   __ push(r0); | 
| 5851   CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | 5851   CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | 
| 5852 | 5852 | 
| 5853   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); | 5853   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); | 
| 5854   __ LoadRoot(ip, Heap::kMetaMapRootIndex); | 5854   __ LoadRoot(ip, Heap::kMetaMapRootIndex); | 
| 5855   __ cmp(r1, ip); | 5855   __ cmp(r1, ip); | 
| 5856   DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); | 5856   DeoptimizeIf(ne, instr, "wrong map"); | 
| 5857   __ bind(&use_cache); | 5857   __ bind(&use_cache); | 
| 5858 } | 5858 } | 
| 5859 | 5859 | 
| 5860 | 5860 | 
| 5861 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { | 5861 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { | 
| 5862   Register map = ToRegister(instr->map()); | 5862   Register map = ToRegister(instr->map()); | 
| 5863   Register result = ToRegister(instr->result()); | 5863   Register result = ToRegister(instr->result()); | 
| 5864   Label load_cache, done; | 5864   Label load_cache, done; | 
| 5865   __ EnumLength(result, map); | 5865   __ EnumLength(result, map); | 
| 5866   __ cmp(result, Operand(Smi::FromInt(0))); | 5866   __ cmp(result, Operand(Smi::FromInt(0))); | 
| 5867   __ b(ne, &load_cache); | 5867   __ b(ne, &load_cache); | 
| 5868   __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); | 5868   __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); | 
| 5869   __ jmp(&done); | 5869   __ jmp(&done); | 
| 5870 | 5870 | 
| 5871   __ bind(&load_cache); | 5871   __ bind(&load_cache); | 
| 5872   __ LoadInstanceDescriptors(map, result); | 5872   __ LoadInstanceDescriptors(map, result); | 
| 5873   __ ldr(result, | 5873   __ ldr(result, | 
| 5874          FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 5874          FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 
| 5875   __ ldr(result, | 5875   __ ldr(result, | 
| 5876          FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 5876          FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 
| 5877   __ cmp(result, Operand::Zero()); | 5877   __ cmp(result, Operand::Zero()); | 
| 5878   DeoptimizeIf(eq, instr, Deoptimizer::kNoCache); | 5878   DeoptimizeIf(eq, instr, "no cache"); | 
| 5879 | 5879 | 
| 5880   __ bind(&done); | 5880   __ bind(&done); | 
| 5881 } | 5881 } | 
| 5882 | 5882 | 
| 5883 | 5883 | 
| 5884 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5884 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 
| 5885   Register object = ToRegister(instr->value()); | 5885   Register object = ToRegister(instr->value()); | 
| 5886   Register map = ToRegister(instr->map()); | 5886   Register map = ToRegister(instr->map()); | 
| 5887   __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 5887   __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 
| 5888   __ cmp(map, scratch0()); | 5888   __ cmp(map, scratch0()); | 
| 5889   DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); | 5889   DeoptimizeIf(ne, instr, "wrong map"); | 
| 5890 } | 5890 } | 
| 5891 | 5891 | 
| 5892 | 5892 | 
| 5893 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 5893 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 
| 5894                                            Register result, | 5894                                            Register result, | 
| 5895                                            Register object, | 5895                                            Register object, | 
| 5896                                            Register index) { | 5896                                            Register index) { | 
| 5897   PushSafepointRegistersScope scope(this); | 5897   PushSafepointRegistersScope scope(this); | 
| 5898   __ Push(object); | 5898   __ Push(object); | 
| 5899   __ Push(index); | 5899   __ Push(index); | 
| (...skipping 77 matching lines...) | 
| 5977   __ Push(scope_info); | 5977   __ Push(scope_info); | 
| 5978   __ push(ToRegister(instr->function())); | 5978   __ push(ToRegister(instr->function())); | 
| 5979   CallRuntime(Runtime::kPushBlockContext, 2, instr); | 5979   CallRuntime(Runtime::kPushBlockContext, 2, instr); | 
| 5980   RecordSafepoint(Safepoint::kNoLazyDeopt); | 5980   RecordSafepoint(Safepoint::kNoLazyDeopt); | 
| 5981 } | 5981 } | 
| 5982 | 5982 | 
| 5983 | 5983 | 
| 5984 #undef __ | 5984 #undef __ | 
| 5985 | 5985 | 
| 5986 } }  // namespace v8::internal | 5986 } }  // namespace v8::internal | 
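
The two columns above differ only in the third argument handed to DeoptimizeIf at each call site: a symbolic constant such as Deoptimizer::kWrongMap on one side versus a literal string such as "wrong map" on the other. The stand-alone sketch below illustrates that kind of enum-versus-string reason parameter in isolation; DeoptReason, DeoptReasonToString, and both DeoptimizeIf overloads here are hypothetical stand-ins chosen for illustration, not V8's actual declarations.

```cpp
// Minimal sketch (hypothetical types, not V8's): contrasts passing a deopt
// reason as an enum constant versus as a free-form detail string.
#include <cstdio>

namespace sketch {

// Hypothetical stand-in for an enumerated set of deopt reasons.
enum class DeoptReason { kWrongMap, kLostPrecisionOrNaN, kMinusZero };

// Maps each enum value to the text used by the string-based calling style.
const char* DeoptReasonToString(DeoptReason reason) {
  switch (reason) {
    case DeoptReason::kWrongMap:           return "wrong map";
    case DeoptReason::kLostPrecisionOrNaN: return "lost precision or NaN";
    case DeoptReason::kMinusZero:          return "minus zero";
  }
  return "unknown reason";
}

// Enum-based variant: the caller names a symbolic constant.
void DeoptimizeIf(bool condition, DeoptReason reason) {
  if (condition) std::printf("deopt: %s\n", DeoptReasonToString(reason));
}

// String-based variant: the caller passes the human-readable detail directly.
void DeoptimizeIf(bool condition, const char* detail) {
  if (condition) std::printf("deopt: %s\n", detail);
}

}  // namespace sketch

int main() {
  // The same logical check, expressed in the two styles seen in the diff.
  sketch::DeoptimizeIf(true, sketch::DeoptReason::kWrongMap);
  sketch::DeoptimizeIf(true, "wrong map");
  return 0;
}
```

The enum form confines reasons to a fixed set of named constants, while the string form lets each call site carry free-form text; that is the entire surface difference visible across the DeoptimizeIf rows above.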