| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/arm/lithium-codegen-arm.h" | 7 #include "src/arm/lithium-codegen-arm.h" |
| 8 #include "src/arm/lithium-gap-resolver-arm.h" | 8 #include "src/arm/lithium-gap-resolver-arm.h" |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/code-factory.h" | 10 #include "src/code-factory.h" |
| (...skipping 828 matching lines...) |
| 839 int deoptimization_index = deoptimizations_.length(); | 839 int deoptimization_index = deoptimizations_.length(); |
| 840 int pc_offset = masm()->pc_offset(); | 840 int pc_offset = masm()->pc_offset(); |
| 841 environment->Register(deoptimization_index, | 841 environment->Register(deoptimization_index, |
| 842 translation.index(), | 842 translation.index(), |
| 843 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 843 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
| 844 deoptimizations_.Add(environment, zone()); | 844 deoptimizations_.Add(environment, zone()); |
| 845 } | 845 } |
| 846 } | 846 } |
| 847 | 847 |
| 848 | 848 |
| 849 void LCodeGen::DeoptimizeIf(Condition condition, | 849 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, |
| 850 LEnvironment* environment, | |
| 851 Deoptimizer::BailoutType bailout_type) { | 850 Deoptimizer::BailoutType bailout_type) { |
| | 851 LEnvironment* environment = instr->environment(); |
| 852 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 852 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 853 DCHECK(environment->HasBeenRegistered()); | 853 DCHECK(environment->HasBeenRegistered()); |
| 854 int id = environment->deoptimization_index(); | 854 int id = environment->deoptimization_index(); |
| 855 DCHECK(info()->IsOptimizing() || info()->IsStub()); | 855 DCHECK(info()->IsOptimizing() || info()->IsStub()); |
| 856 Address entry = | 856 Address entry = |
| 857 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 857 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
| 858 if (entry == NULL) { | 858 if (entry == NULL) { |
| 859 Abort(kBailoutWasNotPrepared); | 859 Abort(kBailoutWasNotPrepared); |
| 860 return; | 860 return; |
| 861 } | 861 } |
| (...skipping 53 matching lines...) |
| 915 Deoptimizer::JumpTableEntry table_entry(entry, | 915 Deoptimizer::JumpTableEntry table_entry(entry, |
| 916 bailout_type, | 916 bailout_type, |
| 917 !frame_is_built_); | 917 !frame_is_built_); |
| 918 deopt_jump_table_.Add(table_entry, zone()); | 918 deopt_jump_table_.Add(table_entry, zone()); |
| 919 } | 919 } |
| 920 __ b(condition, &deopt_jump_table_.last().label); | 920 __ b(condition, &deopt_jump_table_.last().label); |
| 921 } | 921 } |
| 922 } | 922 } |
| 923 | 923 |
| 924 | 924 |
| 925 void LCodeGen::DeoptimizeIf(Condition condition, | 925 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr) { |
| 926 LEnvironment* environment) { | |
| 927 Deoptimizer::BailoutType bailout_type = info()->IsStub() | 926 Deoptimizer::BailoutType bailout_type = info()->IsStub() |
| 928 ? Deoptimizer::LAZY | 927 ? Deoptimizer::LAZY |
| 929 : Deoptimizer::EAGER; | 928 : Deoptimizer::EAGER; |
| 930 DeoptimizeIf(condition, environment, bailout_type); | 929 DeoptimizeIf(condition, instr, bailout_type); |
| 931 } | 930 } |
| 932 | 931 |
| 933 | 932 |
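Reviewer sketch of the shape of this refactor: DeoptimizeIf now takes the LInstruction* and derives the environment itself, so the call sites throughout the file shrink from `DeoptimizeIf(cond, instr->environment())` to `DeoptimizeIf(cond, instr)`. A minimal compilable sketch with stand-in types (not the real V8 declarations):

```cpp
// Stand-in types, only to illustrate the signature change reviewed above;
// they are not the real V8 classes.
struct LEnvironment {};
struct LInstruction {
  LEnvironment* environment() { return &env_; }
  LEnvironment env_;
};
enum Condition { eq, ne, mi, vs, al };

// Old shape: every caller spells out instr->environment().
void DeoptimizeIfOld(Condition cond, LEnvironment* environment) {
  (void)cond; (void)environment;  // register environment, emit branch, ...
}

// New shape: the instruction is passed and the environment is derived inside.
void DeoptimizeIfNew(Condition cond, LInstruction* instr) {
  LEnvironment* environment = instr->environment();
  (void)cond; (void)environment;  // body otherwise unchanged
}

int main() {
  LInstruction instr;
  DeoptimizeIfOld(eq, instr.environment());  // old call shape
  DeoptimizeIfNew(eq, &instr);               // new call shape
}
```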
| 934 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 933 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
| 935 int length = deoptimizations_.length(); | 934 int length = deoptimizations_.length(); |
| 936 if (length == 0) return; | 935 if (length == 0) return; |
| 937 Handle<DeoptimizationInputData> data = | 936 Handle<DeoptimizationInputData> data = |
| 938 DeoptimizationInputData::New(isolate(), length, TENURED); | 937 DeoptimizationInputData::New(isolate(), length, TENURED); |
| 939 | 938 |
| 940 Handle<ByteArray> translations = | 939 Handle<ByteArray> translations = |
| (...skipping 214 matching lines...) |
| 1155 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1154 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1156 Label dividend_is_not_negative, done; | 1155 Label dividend_is_not_negative, done; |
| 1157 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 1156 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
| 1158 __ cmp(dividend, Operand::Zero()); | 1157 __ cmp(dividend, Operand::Zero()); |
| 1159 __ b(pl, &dividend_is_not_negative); | 1158 __ b(pl, &dividend_is_not_negative); |
| 1160 // Note that this is correct even for kMinInt operands. | 1159 // Note that this is correct even for kMinInt operands. |
| 1161 __ rsb(dividend, dividend, Operand::Zero()); | 1160 __ rsb(dividend, dividend, Operand::Zero()); |
| 1162 __ and_(dividend, dividend, Operand(mask)); | 1161 __ and_(dividend, dividend, Operand(mask)); |
| 1163 __ rsb(dividend, dividend, Operand::Zero(), SetCC); | 1162 __ rsb(dividend, dividend, Operand::Zero(), SetCC); |
| 1164 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1163 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1165 DeoptimizeIf(eq, instr->environment()); | 1164 DeoptimizeIf(eq, instr); |
| 1166 } | 1165 } |
| 1167 __ b(&done); | 1166 __ b(&done); |
| 1168 } | 1167 } |
| 1169 | 1168 |
| 1170 __ bind(&dividend_is_not_negative); | 1169 __ bind(&dividend_is_not_negative); |
| 1171 __ and_(dividend, dividend, Operand(mask)); | 1170 __ and_(dividend, dividend, Operand(mask)); |
| 1172 __ bind(&done); | 1171 __ bind(&done); |
| 1173 } | 1172 } |
| 1174 | 1173 |
| 1175 | 1174 |
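The mask trick DoModByPowerOf2I emits above: for a power-of-two divisor the truncated remainder follows the sign of the dividend, so a negative dividend is negated, masked, and negated back (the rsb / and / rsb sequence). A standalone sketch of the same arithmetic, assuming 32-bit two's-complement ints; the function name and structure are illustrative only:

```cpp
#include <cassert>
#include <cstdint>

// Truncated remainder by a power-of-two divisor, mirroring the ARM sequence
// above. Unsigned arithmetic is used for the negation so that the
// dividend == INT32_MIN case stays well defined (its masked value is 0).
int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
  // mask = |divisor| - 1, written so it also works for divisor == kMinInt.
  uint32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
  if (dividend >= 0) return static_cast<int32_t>(dividend & mask);
  uint32_t magnitude = 0u - static_cast<uint32_t>(dividend);  // |dividend|
  return -static_cast<int32_t>(magnitude & mask);
}

int main() {
  assert(ModByPowerOf2(7, 4) == 7 % 4);    //  3
  assert(ModByPowerOf2(-7, 4) == -7 % 4);  // -3: sign follows the dividend
  assert(ModByPowerOf2(-8, 4) == 0);       // the "-0" case the deopt guards
}
```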
| 1176 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 1175 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
| 1177 Register dividend = ToRegister(instr->dividend()); | 1176 Register dividend = ToRegister(instr->dividend()); |
| 1178 int32_t divisor = instr->divisor(); | 1177 int32_t divisor = instr->divisor(); |
| 1179 Register result = ToRegister(instr->result()); | 1178 Register result = ToRegister(instr->result()); |
| 1180 DCHECK(!dividend.is(result)); | 1179 DCHECK(!dividend.is(result)); |
| 1181 | 1180 |
| 1182 if (divisor == 0) { | 1181 if (divisor == 0) { |
| 1183 DeoptimizeIf(al, instr->environment()); | 1182 DeoptimizeIf(al, instr); |
| 1184 return; | 1183 return; |
| 1185 } | 1184 } |
| 1186 | 1185 |
| 1187 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1186 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1188 __ mov(ip, Operand(Abs(divisor))); | 1187 __ mov(ip, Operand(Abs(divisor))); |
| 1189 __ smull(result, ip, result, ip); | 1188 __ smull(result, ip, result, ip); |
| 1190 __ sub(result, dividend, result, SetCC); | 1189 __ sub(result, dividend, result, SetCC); |
| 1191 | 1190 |
| 1192 // Check for negative zero. | 1191 // Check for negative zero. |
| 1193 HMod* hmod = instr->hydrogen(); | 1192 HMod* hmod = instr->hydrogen(); |
| 1194 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1193 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1195 Label remainder_not_zero; | 1194 Label remainder_not_zero; |
| 1196 __ b(ne, &remainder_not_zero); | 1195 __ b(ne, &remainder_not_zero); |
| 1197 __ cmp(dividend, Operand::Zero()); | 1196 __ cmp(dividend, Operand::Zero()); |
| 1198 DeoptimizeIf(lt, instr->environment()); | 1197 DeoptimizeIf(lt, instr); |
| 1199 __ bind(&remainder_not_zero); | 1198 __ bind(&remainder_not_zero); |
| 1200 } | 1199 } |
| 1201 } | 1200 } |
| 1202 | 1201 |
| 1203 | 1202 |
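For the constant-divisor path above, the remainder is recovered from a truncating quotient by a multiply and subtract (the smull + sub(SetCC) pair), with TruncatingDiv supplying the quotient via a magic-number multiply. A sketch of the identity being relied on, using plain `/` in place of TruncatingDiv; the helper name is illustrative:

```cpp
#include <cassert>
#include <cstdlib>

// Remainder from a truncating quotient: dividend - (dividend / |divisor|) *
// |divisor| equals dividend % divisor under C/JS truncated-division rules,
// which is why the generated code only ever divides by Abs(divisor).
int ModByConst(int dividend, int divisor) {
  int quotient = dividend / std::abs(divisor);  // stands in for TruncatingDiv
  return dividend - quotient * std::abs(divisor);
}

int main() {
  assert(ModByConst(7, 3) == 7 % 3);    //  1
  assert(ModByConst(-7, 3) == -7 % 3);  // -1: remainder keeps dividend sign
  assert(ModByConst(7, -3) == 7 % -3);  //  1: divisor sign is irrelevant
}
```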
| 1204 void LCodeGen::DoModI(LModI* instr) { | 1203 void LCodeGen::DoModI(LModI* instr) { |
| 1205 HMod* hmod = instr->hydrogen(); | 1204 HMod* hmod = instr->hydrogen(); |
| 1206 if (CpuFeatures::IsSupported(SUDIV)) { | 1205 if (CpuFeatures::IsSupported(SUDIV)) { |
| 1207 CpuFeatureScope scope(masm(), SUDIV); | 1206 CpuFeatureScope scope(masm(), SUDIV); |
| 1208 | 1207 |
| 1209 Register left_reg = ToRegister(instr->left()); | 1208 Register left_reg = ToRegister(instr->left()); |
| 1210 Register right_reg = ToRegister(instr->right()); | 1209 Register right_reg = ToRegister(instr->right()); |
| 1211 Register result_reg = ToRegister(instr->result()); | 1210 Register result_reg = ToRegister(instr->result()); |
| 1212 | 1211 |
| 1213 Label done; | 1212 Label done; |
| 1214 // Check for x % 0, sdiv might signal an exception. We have to deopt in this | 1213 // Check for x % 0, sdiv might signal an exception. We have to deopt in this |
| 1215 // case because we can't return a NaN. | 1214 // case because we can't return a NaN. |
| 1216 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1215 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1217 __ cmp(right_reg, Operand::Zero()); | 1216 __ cmp(right_reg, Operand::Zero()); |
| 1218 DeoptimizeIf(eq, instr->environment()); | 1217 DeoptimizeIf(eq, instr); |
| 1219 } | 1218 } |
| 1220 | 1219 |
| 1221 // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we | 1220 // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we |
| 1222 // want. We have to deopt if we care about -0, because we can't return that. | 1221 // want. We have to deopt if we care about -0, because we can't return that. |
| 1223 if (hmod->CheckFlag(HValue::kCanOverflow)) { | 1222 if (hmod->CheckFlag(HValue::kCanOverflow)) { |
| 1224 Label no_overflow_possible; | 1223 Label no_overflow_possible; |
| 1225 __ cmp(left_reg, Operand(kMinInt)); | 1224 __ cmp(left_reg, Operand(kMinInt)); |
| 1226 __ b(ne, &no_overflow_possible); | 1225 __ b(ne, &no_overflow_possible); |
| 1227 __ cmp(right_reg, Operand(-1)); | 1226 __ cmp(right_reg, Operand(-1)); |
| 1228 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1227 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1229 DeoptimizeIf(eq, instr->environment()); | 1228 DeoptimizeIf(eq, instr); |
| 1230 } else { | 1229 } else { |
| 1231 __ b(ne, &no_overflow_possible); | 1230 __ b(ne, &no_overflow_possible); |
| 1232 __ mov(result_reg, Operand::Zero()); | 1231 __ mov(result_reg, Operand::Zero()); |
| 1233 __ jmp(&done); | 1232 __ jmp(&done); |
| 1234 } | 1233 } |
| 1235 __ bind(&no_overflow_possible); | 1234 __ bind(&no_overflow_possible); |
| 1236 } | 1235 } |
| 1237 | 1236 |
| 1238 // For 'r3 = r1 % r2' we can have the following ARM code: | 1237 // For 'r3 = r1 % r2' we can have the following ARM code: |
| 1239 // sdiv r3, r1, r2 | 1238 // sdiv r3, r1, r2 |
| 1240 // mls r3, r3, r2, r1 | 1239 // mls r3, r3, r2, r1 |
| 1241 | 1240 |
| 1242 __ sdiv(result_reg, left_reg, right_reg); | 1241 __ sdiv(result_reg, left_reg, right_reg); |
| 1243 __ Mls(result_reg, result_reg, right_reg, left_reg); | 1242 __ Mls(result_reg, result_reg, right_reg, left_reg); |
| 1244 | 1243 |
| 1245 // If we care about -0, test if the dividend is <0 and the result is 0. | 1244 // If we care about -0, test if the dividend is <0 and the result is 0. |
| 1246 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1245 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1247 __ cmp(result_reg, Operand::Zero()); | 1246 __ cmp(result_reg, Operand::Zero()); |
| 1248 __ b(ne, &done); | 1247 __ b(ne, &done); |
| 1249 __ cmp(left_reg, Operand::Zero()); | 1248 __ cmp(left_reg, Operand::Zero()); |
| 1250 DeoptimizeIf(lt, instr->environment()); | 1249 DeoptimizeIf(lt, instr); |
| 1251 } | 1250 } |
| 1252 __ bind(&done); | 1251 __ bind(&done); |
| 1253 | 1252 |
| 1254 } else { | 1253 } else { |
| 1255 // General case, without any SDIV support. | 1254 // General case, without any SDIV support. |
| 1256 Register left_reg = ToRegister(instr->left()); | 1255 Register left_reg = ToRegister(instr->left()); |
| 1257 Register right_reg = ToRegister(instr->right()); | 1256 Register right_reg = ToRegister(instr->right()); |
| 1258 Register result_reg = ToRegister(instr->result()); | 1257 Register result_reg = ToRegister(instr->result()); |
| 1259 Register scratch = scratch0(); | 1258 Register scratch = scratch0(); |
| 1260 DCHECK(!scratch.is(left_reg)); | 1259 DCHECK(!scratch.is(left_reg)); |
| 1261 DCHECK(!scratch.is(right_reg)); | 1260 DCHECK(!scratch.is(right_reg)); |
| 1262 DCHECK(!scratch.is(result_reg)); | 1261 DCHECK(!scratch.is(result_reg)); |
| 1263 DwVfpRegister dividend = ToDoubleRegister(instr->temp()); | 1262 DwVfpRegister dividend = ToDoubleRegister(instr->temp()); |
| 1264 DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); | 1263 DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); |
| 1265 DCHECK(!divisor.is(dividend)); | 1264 DCHECK(!divisor.is(dividend)); |
| 1266 LowDwVfpRegister quotient = double_scratch0(); | 1265 LowDwVfpRegister quotient = double_scratch0(); |
| 1267 DCHECK(!quotient.is(dividend)); | 1266 DCHECK(!quotient.is(dividend)); |
| 1268 DCHECK(!quotient.is(divisor)); | 1267 DCHECK(!quotient.is(divisor)); |
| 1269 | 1268 |
| 1270 Label done; | 1269 Label done; |
| 1271 // Check for x % 0, we have to deopt in this case because we can't return a | 1270 // Check for x % 0, we have to deopt in this case because we can't return a |
| 1272 // NaN. | 1271 // NaN. |
| 1273 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1272 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1274 __ cmp(right_reg, Operand::Zero()); | 1273 __ cmp(right_reg, Operand::Zero()); |
| 1275 DeoptimizeIf(eq, instr->environment()); | 1274 DeoptimizeIf(eq, instr); |
| 1276 } | 1275 } |
| 1277 | 1276 |
| 1278 __ Move(result_reg, left_reg); | 1277 __ Move(result_reg, left_reg); |
| 1279 // Load the arguments in VFP registers. The divisor value is preloaded | 1278 // Load the arguments in VFP registers. The divisor value is preloaded |
| 1280 // before. Be careful that 'right_reg' is only live on entry. | 1279 // before. Be careful that 'right_reg' is only live on entry. |
| 1281 // TODO(svenpanne) The last comments seems to be wrong nowadays. | 1280 // TODO(svenpanne) The last comments seems to be wrong nowadays. |
| 1282 __ vmov(double_scratch0().low(), left_reg); | 1281 __ vmov(double_scratch0().low(), left_reg); |
| 1283 __ vcvt_f64_s32(dividend, double_scratch0().low()); | 1282 __ vcvt_f64_s32(dividend, double_scratch0().low()); |
| 1284 __ vmov(double_scratch0().low(), right_reg); | 1283 __ vmov(double_scratch0().low(), right_reg); |
| 1285 __ vcvt_f64_s32(divisor, double_scratch0().low()); | 1284 __ vcvt_f64_s32(divisor, double_scratch0().low()); |
| 1286 | 1285 |
| 1287 // We do not care about the sign of the divisor. Note that we still handle | 1286 // We do not care about the sign of the divisor. Note that we still handle |
| 1288 // the kMinInt % -1 case correctly, though. | 1287 // the kMinInt % -1 case correctly, though. |
| 1289 __ vabs(divisor, divisor); | 1288 __ vabs(divisor, divisor); |
| 1290 // Compute the quotient and round it to a 32bit integer. | 1289 // Compute the quotient and round it to a 32bit integer. |
| 1291 __ vdiv(quotient, dividend, divisor); | 1290 __ vdiv(quotient, dividend, divisor); |
| 1292 __ vcvt_s32_f64(quotient.low(), quotient); | 1291 __ vcvt_s32_f64(quotient.low(), quotient); |
| 1293 __ vcvt_f64_s32(quotient, quotient.low()); | 1292 __ vcvt_f64_s32(quotient, quotient.low()); |
| 1294 | 1293 |
| 1295 // Compute the remainder in result. | 1294 // Compute the remainder in result. |
| 1296 __ vmul(double_scratch0(), divisor, quotient); | 1295 __ vmul(double_scratch0(), divisor, quotient); |
| 1297 __ vcvt_s32_f64(double_scratch0().low(), double_scratch0()); | 1296 __ vcvt_s32_f64(double_scratch0().low(), double_scratch0()); |
| 1298 __ vmov(scratch, double_scratch0().low()); | 1297 __ vmov(scratch, double_scratch0().low()); |
| 1299 __ sub(result_reg, left_reg, scratch, SetCC); | 1298 __ sub(result_reg, left_reg, scratch, SetCC); |
| 1300 | 1299 |
| 1301 // If we care about -0, test if the dividend is <0 and the result is 0. | 1300 // If we care about -0, test if the dividend is <0 and the result is 0. |
| 1302 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1301 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1303 __ b(ne, &done); | 1302 __ b(ne, &done); |
| 1304 __ cmp(left_reg, Operand::Zero()); | 1303 __ cmp(left_reg, Operand::Zero()); |
| 1305 DeoptimizeIf(mi, instr->environment()); | 1304 DeoptimizeIf(mi, instr); |
| 1306 } | 1305 } |
| 1307 __ bind(&done); | 1306 __ bind(&done); |
| 1308 } | 1307 } |
| 1309 } | 1308 } |
| 1310 | 1309 |
| 1311 | 1310 |
| 1312 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1311 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
| 1313 Register dividend = ToRegister(instr->dividend()); | 1312 Register dividend = ToRegister(instr->dividend()); |
| 1314 int32_t divisor = instr->divisor(); | 1313 int32_t divisor = instr->divisor(); |
| 1315 Register result = ToRegister(instr->result()); | 1314 Register result = ToRegister(instr->result()); |
| 1316 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 1315 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
| 1317 DCHECK(!result.is(dividend)); | 1316 DCHECK(!result.is(dividend)); |
| 1318 | 1317 |
| 1319 // Check for (0 / -x) that will produce negative zero. | 1318 // Check for (0 / -x) that will produce negative zero. |
| 1320 HDiv* hdiv = instr->hydrogen(); | 1319 HDiv* hdiv = instr->hydrogen(); |
| 1321 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1320 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1322 __ cmp(dividend, Operand::Zero()); | 1321 __ cmp(dividend, Operand::Zero()); |
| 1323 DeoptimizeIf(eq, instr->environment()); | 1322 DeoptimizeIf(eq, instr); |
| 1324 } | 1323 } |
| 1325 // Check for (kMinInt / -1). | 1324 // Check for (kMinInt / -1). |
| 1326 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1325 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
| 1327 __ cmp(dividend, Operand(kMinInt)); | 1326 __ cmp(dividend, Operand(kMinInt)); |
| 1328 DeoptimizeIf(eq, instr->environment()); | 1327 DeoptimizeIf(eq, instr); |
| 1329 } | 1328 } |
| 1330 // Deoptimize if remainder will not be 0. | 1329 // Deoptimize if remainder will not be 0. |
| 1331 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | 1330 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
| 1332 divisor != 1 && divisor != -1) { | 1331 divisor != 1 && divisor != -1) { |
| 1333 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1332 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1334 __ tst(dividend, Operand(mask)); | 1333 __ tst(dividend, Operand(mask)); |
| 1335 DeoptimizeIf(ne, instr->environment()); | 1334 DeoptimizeIf(ne, instr); |
| 1336 } | 1335 } |
| 1337 | 1336 |
| 1338 if (divisor == -1) { // Nice shortcut, not needed for correctness. | 1337 if (divisor == -1) { // Nice shortcut, not needed for correctness. |
| 1339 __ rsb(result, dividend, Operand(0)); | 1338 __ rsb(result, dividend, Operand(0)); |
| 1340 return; | 1339 return; |
| 1341 } | 1340 } |
| 1342 int32_t shift = WhichPowerOf2Abs(divisor); | 1341 int32_t shift = WhichPowerOf2Abs(divisor); |
| 1343 if (shift == 0) { | 1342 if (shift == 0) { |
| 1344 __ mov(result, dividend); | 1343 __ mov(result, dividend); |
| 1345 } else if (shift == 1) { | 1344 } else if (shift == 1) { |
| 1346 __ add(result, dividend, Operand(dividend, LSR, 31)); | 1345 __ add(result, dividend, Operand(dividend, LSR, 31)); |
| 1347 } else { | 1346 } else { |
| 1348 __ mov(result, Operand(dividend, ASR, 31)); | 1347 __ mov(result, Operand(dividend, ASR, 31)); |
| 1349 __ add(result, dividend, Operand(result, LSR, 32 - shift)); | 1348 __ add(result, dividend, Operand(result, LSR, 32 - shift)); |
| 1350 } | 1349 } |
| 1351 if (shift > 0) __ mov(result, Operand(result, ASR, shift)); | 1350 if (shift > 0) __ mov(result, Operand(result, ASR, shift)); |
| 1352 if (divisor < 0) __ rsb(result, result, Operand(0)); | 1351 if (divisor < 0) __ rsb(result, result, Operand(0)); |
| 1353 } | 1352 } |
| 1354 | 1353 |
| 1355 | 1354 |
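The shift sequence in DoDivByPowerOf2I above rounds toward zero rather than toward minus infinity by first adding a bias of 2^shift - 1 when the dividend is negative; that bias is what the `LSR, 31` / `LSR, 32 - shift` operands build. A standalone sketch, assuming 1 <= shift <= 31 and the usual arithmetic right shift of negative values:

```cpp
#include <cassert>
#include <cstdint>

// Truncating (round-toward-zero) division by 2^shift via a biased arithmetic
// shift, mirroring the mov/add/mov sequence above. Illustrative only.
int32_t DivByPowerOf2(int32_t dividend, int shift) {
  assert(shift >= 1 && shift <= 31);
  // dividend >> 31 is 0 for non-negative values and -1 for negative ones;
  // shifting that logically by (32 - shift) leaves 0 or (2^shift - 1).
  uint32_t sign_mask = static_cast<uint32_t>(dividend >> 31);
  int32_t bias = static_cast<int32_t>(sign_mask >> (32 - shift));
  return (dividend + bias) >> shift;  // arithmetic shift truncates toward 0
}

int main() {
  assert(DivByPowerOf2(7, 2) == 7 / 4);    //  1
  assert(DivByPowerOf2(-7, 2) == -7 / 4);  // -1, not the floored -2
  assert(DivByPowerOf2(-8, 2) == -8 / 4);  // -2, exact
}
```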
| 1356 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 1355 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
| 1357 Register dividend = ToRegister(instr->dividend()); | 1356 Register dividend = ToRegister(instr->dividend()); |
| 1358 int32_t divisor = instr->divisor(); | 1357 int32_t divisor = instr->divisor(); |
| 1359 Register result = ToRegister(instr->result()); | 1358 Register result = ToRegister(instr->result()); |
| 1360 DCHECK(!dividend.is(result)); | 1359 DCHECK(!dividend.is(result)); |
| 1361 | 1360 |
| 1362 if (divisor == 0) { | 1361 if (divisor == 0) { |
| 1363 DeoptimizeIf(al, instr->environment()); | 1362 DeoptimizeIf(al, instr); |
| 1364 return; | 1363 return; |
| 1365 } | 1364 } |
| 1366 | 1365 |
| 1367 // Check for (0 / -x) that will produce negative zero. | 1366 // Check for (0 / -x) that will produce negative zero. |
| 1368 HDiv* hdiv = instr->hydrogen(); | 1367 HDiv* hdiv = instr->hydrogen(); |
| 1369 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1368 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1370 __ cmp(dividend, Operand::Zero()); | 1369 __ cmp(dividend, Operand::Zero()); |
| 1371 DeoptimizeIf(eq, instr->environment()); | 1370 DeoptimizeIf(eq, instr); |
| 1372 } | 1371 } |
| 1373 | 1372 |
| 1374 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1373 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1375 if (divisor < 0) __ rsb(result, result, Operand::Zero()); | 1374 if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
| 1376 | 1375 |
| 1377 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1376 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
| 1378 __ mov(ip, Operand(divisor)); | 1377 __ mov(ip, Operand(divisor)); |
| 1379 __ smull(scratch0(), ip, result, ip); | 1378 __ smull(scratch0(), ip, result, ip); |
| 1380 __ sub(scratch0(), scratch0(), dividend, SetCC); | 1379 __ sub(scratch0(), scratch0(), dividend, SetCC); |
| 1381 DeoptimizeIf(ne, instr->environment()); | 1380 DeoptimizeIf(ne, instr); |
| 1382 } | 1381 } |
| 1383 } | 1382 } |
| 1384 | 1383 |
| 1385 | 1384 |
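When not every use truncates, DoDivByConstI above re-multiplies the quotient by the divisor and deoptimizes if that fails to reproduce the dividend, i.e. the division was inexact and an int32 result would be wrong. A small sketch of that check (names illustrative):

```cpp
#include <cassert>

// Exactness check behind the smull / sub(SetCC) / DeoptimizeIf(ne, instr)
// tail above: a truncating quotient is only acceptable when it divides the
// dividend exactly, otherwise the optimized code bails out.
bool DivIsExact(int dividend, int divisor, int* quotient) {
  *quotient = dividend / divisor;          // stands in for TruncatingDiv + rsb
  return *quotient * divisor == dividend;  // ne -> deoptimize
}

int main() {
  int q;
  assert(DivIsExact(8, 4, &q) && q == 2);  // exact: keep the fast result
  assert(!DivIsExact(7, 4, &q));           // inexact: would deoptimize
}
```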
| 1386 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 1385 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
| 1387 void LCodeGen::DoDivI(LDivI* instr) { | 1386 void LCodeGen::DoDivI(LDivI* instr) { |
| 1388 HBinaryOperation* hdiv = instr->hydrogen(); | 1387 HBinaryOperation* hdiv = instr->hydrogen(); |
| 1389 Register dividend = ToRegister(instr->dividend()); | 1388 Register dividend = ToRegister(instr->dividend()); |
| 1390 Register divisor = ToRegister(instr->divisor()); | 1389 Register divisor = ToRegister(instr->divisor()); |
| 1391 Register result = ToRegister(instr->result()); | 1390 Register result = ToRegister(instr->result()); |
| 1392 | 1391 |
| 1393 // Check for x / 0. | 1392 // Check for x / 0. |
| 1394 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1393 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1395 __ cmp(divisor, Operand::Zero()); | 1394 __ cmp(divisor, Operand::Zero()); |
| 1396 DeoptimizeIf(eq, instr->environment()); | 1395 DeoptimizeIf(eq, instr); |
| 1397 } | 1396 } |
| 1398 | 1397 |
| 1399 // Check for (0 / -x) that will produce negative zero. | 1398 // Check for (0 / -x) that will produce negative zero. |
| 1400 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1399 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1401 Label positive; | 1400 Label positive; |
| 1402 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { | 1401 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1403 // Do the test only if it hadn't be done above. | 1402 // Do the test only if it hadn't be done above. |
| 1404 __ cmp(divisor, Operand::Zero()); | 1403 __ cmp(divisor, Operand::Zero()); |
| 1405 } | 1404 } |
| 1406 __ b(pl, &positive); | 1405 __ b(pl, &positive); |
| 1407 __ cmp(dividend, Operand::Zero()); | 1406 __ cmp(dividend, Operand::Zero()); |
| 1408 DeoptimizeIf(eq, instr->environment()); | 1407 DeoptimizeIf(eq, instr); |
| 1409 __ bind(&positive); | 1408 __ bind(&positive); |
| 1410 } | 1409 } |
| 1411 | 1410 |
| 1412 // Check for (kMinInt / -1). | 1411 // Check for (kMinInt / -1). |
| 1413 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1412 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
| 1414 (!CpuFeatures::IsSupported(SUDIV) || | 1413 (!CpuFeatures::IsSupported(SUDIV) || |
| 1415 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { | 1414 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { |
| 1416 // We don't need to check for overflow when truncating with sdiv | 1415 // We don't need to check for overflow when truncating with sdiv |
| 1417 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. | 1416 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. |
| 1418 __ cmp(dividend, Operand(kMinInt)); | 1417 __ cmp(dividend, Operand(kMinInt)); |
| 1419 __ cmp(divisor, Operand(-1), eq); | 1418 __ cmp(divisor, Operand(-1), eq); |
| 1420 DeoptimizeIf(eq, instr->environment()); | 1419 DeoptimizeIf(eq, instr); |
| 1421 } | 1420 } |
| 1422 | 1421 |
| 1423 if (CpuFeatures::IsSupported(SUDIV)) { | 1422 if (CpuFeatures::IsSupported(SUDIV)) { |
| 1424 CpuFeatureScope scope(masm(), SUDIV); | 1423 CpuFeatureScope scope(masm(), SUDIV); |
| 1425 __ sdiv(result, dividend, divisor); | 1424 __ sdiv(result, dividend, divisor); |
| 1426 } else { | 1425 } else { |
| 1427 DoubleRegister vleft = ToDoubleRegister(instr->temp()); | 1426 DoubleRegister vleft = ToDoubleRegister(instr->temp()); |
| 1428 DoubleRegister vright = double_scratch0(); | 1427 DoubleRegister vright = double_scratch0(); |
| 1429 __ vmov(double_scratch0().low(), dividend); | 1428 __ vmov(double_scratch0().low(), dividend); |
| 1430 __ vcvt_f64_s32(vleft, double_scratch0().low()); | 1429 __ vcvt_f64_s32(vleft, double_scratch0().low()); |
| 1431 __ vmov(double_scratch0().low(), divisor); | 1430 __ vmov(double_scratch0().low(), divisor); |
| 1432 __ vcvt_f64_s32(vright, double_scratch0().low()); | 1431 __ vcvt_f64_s32(vright, double_scratch0().low()); |
| 1433 __ vdiv(vleft, vleft, vright); // vleft now contains the result. | 1432 __ vdiv(vleft, vleft, vright); // vleft now contains the result. |
| 1434 __ vcvt_s32_f64(double_scratch0().low(), vleft); | 1433 __ vcvt_s32_f64(double_scratch0().low(), vleft); |
| 1435 __ vmov(result, double_scratch0().low()); | 1434 __ vmov(result, double_scratch0().low()); |
| 1436 } | 1435 } |
| 1437 | 1436 |
| 1438 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1437 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1439 // Compute remainder and deopt if it's not zero. | 1438 // Compute remainder and deopt if it's not zero. |
| 1440 Register remainder = scratch0(); | 1439 Register remainder = scratch0(); |
| 1441 __ Mls(remainder, result, divisor, dividend); | 1440 __ Mls(remainder, result, divisor, dividend); |
| 1442 __ cmp(remainder, Operand::Zero()); | 1441 __ cmp(remainder, Operand::Zero()); |
| 1443 DeoptimizeIf(ne, instr->environment()); | 1442 DeoptimizeIf(ne, instr); |
| 1444 } | 1443 } |
| 1445 } | 1444 } |
| 1446 | 1445 |
| 1447 | 1446 |
| 1448 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { | 1447 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { |
| 1449 DwVfpRegister addend = ToDoubleRegister(instr->addend()); | 1448 DwVfpRegister addend = ToDoubleRegister(instr->addend()); |
| 1450 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); | 1449 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); |
| 1451 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); | 1450 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); |
| 1452 | 1451 |
| 1453 // This is computed in-place. | 1452 // This is computed in-place. |
| (...skipping 30 matching lines...) |
| 1484 // can simply do an arithmetic right shift. | 1483 // can simply do an arithmetic right shift. |
| 1485 int32_t shift = WhichPowerOf2Abs(divisor); | 1484 int32_t shift = WhichPowerOf2Abs(divisor); |
| 1486 if (divisor > 1) { | 1485 if (divisor > 1) { |
| 1487 __ mov(result, Operand(dividend, ASR, shift)); | 1486 __ mov(result, Operand(dividend, ASR, shift)); |
| 1488 return; | 1487 return; |
| 1489 } | 1488 } |
| 1490 | 1489 |
| 1491 // If the divisor is negative, we have to negate and handle edge cases. | 1490 // If the divisor is negative, we have to negate and handle edge cases. |
| 1492 __ rsb(result, dividend, Operand::Zero(), SetCC); | 1491 __ rsb(result, dividend, Operand::Zero(), SetCC); |
| 1493 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1492 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1494 DeoptimizeIf(eq, instr->environment()); | 1493 DeoptimizeIf(eq, instr); |
| 1495 } | 1494 } |
| 1496 | 1495 |
| 1497 // Dividing by -1 is basically negation, unless we overflow. | 1496 // Dividing by -1 is basically negation, unless we overflow. |
| 1498 if (divisor == -1) { | 1497 if (divisor == -1) { |
| 1499 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1498 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1500 DeoptimizeIf(vs, instr->environment()); | 1499 DeoptimizeIf(vs, instr); |
| 1501 } | 1500 } |
| 1502 return; | 1501 return; |
| 1503 } | 1502 } |
| 1504 | 1503 |
| 1505 // If the negation could not overflow, simply shifting is OK. | 1504 // If the negation could not overflow, simply shifting is OK. |
| 1506 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1505 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1507 __ mov(result, Operand(result, ASR, shift)); | 1506 __ mov(result, Operand(result, ASR, shift)); |
| 1508 return; | 1507 return; |
| 1509 } | 1508 } |
| 1510 | 1509 |
| 1511 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); | 1510 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); |
| 1512 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); | 1511 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); |
| 1513 } | 1512 } |
| 1514 | 1513 |
| 1515 | 1514 |
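The negate-then-shift tail above (its function header falls inside the elided span; given the DoFlooringDivByConstI that follows, this is the flooring division by a power-of-two constant) relies on floor(x / -2^k) == (-x) >> k with an arithmetic shift, and the only step that can overflow is negating kMinInt, hence the `vs` deoptimization. A sketch of that identity, assuming arithmetic right shifts of negative values:

```cpp
#include <cassert>
#include <cstdint>

// floor(x / -2^k) computed as arithmetic_shift(-x, k): the rsb supplies the
// negation (deopting on overflow for x == kMinInt) and the ASR supplies the
// rounding toward minus infinity. Illustrative helper, not the V8 code.
int32_t FlooringDivByNegPowerOf2(int32_t x, int k) {
  assert(x != INT32_MIN);  // the overflow case the generated code deopts on
  return (-x) >> k;        // arithmetic shift rounds toward -infinity
}

int main() {
  assert(FlooringDivByNegPowerOf2(7, 2) == -2);  // floor( 7 / -4) == -2
  assert(FlooringDivByNegPowerOf2(-7, 2) == 1);  // floor(-7 / -4) ==  1
  assert(FlooringDivByNegPowerOf2(8, 2) == -2);  // exact case
}
```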
| 1516 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 1515 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
| 1517 Register dividend = ToRegister(instr->dividend()); | 1516 Register dividend = ToRegister(instr->dividend()); |
| 1518 int32_t divisor = instr->divisor(); | 1517 int32_t divisor = instr->divisor(); |
| 1519 Register result = ToRegister(instr->result()); | 1518 Register result = ToRegister(instr->result()); |
| 1520 DCHECK(!dividend.is(result)); | 1519 DCHECK(!dividend.is(result)); |
| 1521 | 1520 |
| 1522 if (divisor == 0) { | 1521 if (divisor == 0) { |
| 1523 DeoptimizeIf(al, instr->environment()); | 1522 DeoptimizeIf(al, instr); |
| 1524 return; | 1523 return; |
| 1525 } | 1524 } |
| 1526 | 1525 |
| 1527 // Check for (0 / -x) that will produce negative zero. | 1526 // Check for (0 / -x) that will produce negative zero. |
| 1528 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1527 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
| 1529 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1528 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1530 __ cmp(dividend, Operand::Zero()); | 1529 __ cmp(dividend, Operand::Zero()); |
| 1531 DeoptimizeIf(eq, instr->environment()); | 1530 DeoptimizeIf(eq, instr); |
| 1532 } | 1531 } |
| 1533 | 1532 |
| 1534 // Easy case: We need no dynamic check for the dividend and the flooring | 1533 // Easy case: We need no dynamic check for the dividend and the flooring |
| 1535 // division is the same as the truncating division. | 1534 // division is the same as the truncating division. |
| 1536 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1535 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
| 1537 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1536 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
| 1538 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1537 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1539 if (divisor < 0) __ rsb(result, result, Operand::Zero()); | 1538 if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
| 1540 return; | 1539 return; |
| 1541 } | 1540 } |
| (...skipping 20 matching lines...) |
| 1562 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. | 1561 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. |
| 1563 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { | 1562 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { |
| 1564 HBinaryOperation* hdiv = instr->hydrogen(); | 1563 HBinaryOperation* hdiv = instr->hydrogen(); |
| 1565 Register left = ToRegister(instr->dividend()); | 1564 Register left = ToRegister(instr->dividend()); |
| 1566 Register right = ToRegister(instr->divisor()); | 1565 Register right = ToRegister(instr->divisor()); |
| 1567 Register result = ToRegister(instr->result()); | 1566 Register result = ToRegister(instr->result()); |
| 1568 | 1567 |
| 1569 // Check for x / 0. | 1568 // Check for x / 0. |
| 1570 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1569 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1571 __ cmp(right, Operand::Zero()); | 1570 __ cmp(right, Operand::Zero()); |
| 1572 DeoptimizeIf(eq, instr->environment()); | 1571 DeoptimizeIf(eq, instr); |
| 1573 } | 1572 } |
| 1574 | 1573 |
| 1575 // Check for (0 / -x) that will produce negative zero. | 1574 // Check for (0 / -x) that will produce negative zero. |
| 1576 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1575 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1577 Label positive; | 1576 Label positive; |
| 1578 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { | 1577 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1579 // Do the test only if it hadn't be done above. | 1578 // Do the test only if it hadn't be done above. |
| 1580 __ cmp(right, Operand::Zero()); | 1579 __ cmp(right, Operand::Zero()); |
| 1581 } | 1580 } |
| 1582 __ b(pl, &positive); | 1581 __ b(pl, &positive); |
| 1583 __ cmp(left, Operand::Zero()); | 1582 __ cmp(left, Operand::Zero()); |
| 1584 DeoptimizeIf(eq, instr->environment()); | 1583 DeoptimizeIf(eq, instr); |
| 1585 __ bind(&positive); | 1584 __ bind(&positive); |
| 1586 } | 1585 } |
| 1587 | 1586 |
| 1588 // Check for (kMinInt / -1). | 1587 // Check for (kMinInt / -1). |
| 1589 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1588 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
| 1590 (!CpuFeatures::IsSupported(SUDIV) || | 1589 (!CpuFeatures::IsSupported(SUDIV) || |
| 1591 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { | 1590 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { |
| 1592 // We don't need to check for overflow when truncating with sdiv | 1591 // We don't need to check for overflow when truncating with sdiv |
| 1593 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. | 1592 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. |
| 1594 __ cmp(left, Operand(kMinInt)); | 1593 __ cmp(left, Operand(kMinInt)); |
| 1595 __ cmp(right, Operand(-1), eq); | 1594 __ cmp(right, Operand(-1), eq); |
| 1596 DeoptimizeIf(eq, instr->environment()); | 1595 DeoptimizeIf(eq, instr); |
| 1597 } | 1596 } |
| 1598 | 1597 |
| 1599 if (CpuFeatures::IsSupported(SUDIV)) { | 1598 if (CpuFeatures::IsSupported(SUDIV)) { |
| 1600 CpuFeatureScope scope(masm(), SUDIV); | 1599 CpuFeatureScope scope(masm(), SUDIV); |
| 1601 __ sdiv(result, left, right); | 1600 __ sdiv(result, left, right); |
| 1602 } else { | 1601 } else { |
| 1603 DoubleRegister vleft = ToDoubleRegister(instr->temp()); | 1602 DoubleRegister vleft = ToDoubleRegister(instr->temp()); |
| 1604 DoubleRegister vright = double_scratch0(); | 1603 DoubleRegister vright = double_scratch0(); |
| 1605 __ vmov(double_scratch0().low(), left); | 1604 __ vmov(double_scratch0().low(), left); |
| 1606 __ vcvt_f64_s32(vleft, double_scratch0().low()); | 1605 __ vcvt_f64_s32(vleft, double_scratch0().low()); |
| (...skipping 25 matching lines...) |
| 1632 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 1631 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 1633 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1632 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1634 | 1633 |
| 1635 if (right_op->IsConstantOperand()) { | 1634 if (right_op->IsConstantOperand()) { |
| 1636 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | 1635 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); |
| 1637 | 1636 |
| 1638 if (bailout_on_minus_zero && (constant < 0)) { | 1637 if (bailout_on_minus_zero && (constant < 0)) { |
| 1639 // The case of a null constant will be handled separately. | 1638 // The case of a null constant will be handled separately. |
| 1640 // If constant is negative and left is null, the result should be -0. | 1639 // If constant is negative and left is null, the result should be -0. |
| 1641 __ cmp(left, Operand::Zero()); | 1640 __ cmp(left, Operand::Zero()); |
| 1642 DeoptimizeIf(eq, instr->environment()); | 1641 DeoptimizeIf(eq, instr); |
| 1643 } | 1642 } |
| 1644 | 1643 |
| 1645 switch (constant) { | 1644 switch (constant) { |
| 1646 case -1: | 1645 case -1: |
| 1647 if (overflow) { | 1646 if (overflow) { |
| 1648 __ rsb(result, left, Operand::Zero(), SetCC); | 1647 __ rsb(result, left, Operand::Zero(), SetCC); |
| 1649 DeoptimizeIf(vs, instr->environment()); | 1648 DeoptimizeIf(vs, instr); |
| 1650 } else { | 1649 } else { |
| 1651 __ rsb(result, left, Operand::Zero()); | 1650 __ rsb(result, left, Operand::Zero()); |
| 1652 } | 1651 } |
| 1653 break; | 1652 break; |
| 1654 case 0: | 1653 case 0: |
| 1655 if (bailout_on_minus_zero) { | 1654 if (bailout_on_minus_zero) { |
| 1656 // If left is strictly negative and the constant is null, the | 1655 // If left is strictly negative and the constant is null, the |
| 1657 // result is -0. Deoptimize if required, otherwise return 0. | 1656 // result is -0. Deoptimize if required, otherwise return 0. |
| 1658 __ cmp(left, Operand::Zero()); | 1657 __ cmp(left, Operand::Zero()); |
| 1659 DeoptimizeIf(mi, instr->environment()); | 1658 DeoptimizeIf(mi, instr); |
| 1660 } | 1659 } |
| 1661 __ mov(result, Operand::Zero()); | 1660 __ mov(result, Operand::Zero()); |
| 1662 break; | 1661 break; |
| 1663 case 1: | 1662 case 1: |
| 1664 __ Move(result, left); | 1663 __ Move(result, left); |
| 1665 break; | 1664 break; |
| 1666 default: | 1665 default: |
| 1667 // Multiplying by powers of two and powers of two plus or minus | 1666 // Multiplying by powers of two and powers of two plus or minus |
| 1668 // one can be done faster with shifted operands. | 1667 // one can be done faster with shifted operands. |
| 1669 // For other constants we emit standard code. | 1668 // For other constants we emit standard code. |
| (...skipping 29 matching lines...) Expand all Loading... |
| 1699 if (overflow) { | 1698 if (overflow) { |
| 1700 Register scratch = scratch0(); | 1699 Register scratch = scratch0(); |
| 1701 // scratch:result = left * right. | 1700 // scratch:result = left * right. |
| 1702 if (instr->hydrogen()->representation().IsSmi()) { | 1701 if (instr->hydrogen()->representation().IsSmi()) { |
| 1703 __ SmiUntag(result, left); | 1702 __ SmiUntag(result, left); |
| 1704 __ smull(result, scratch, result, right); | 1703 __ smull(result, scratch, result, right); |
| 1705 } else { | 1704 } else { |
| 1706 __ smull(result, scratch, left, right); | 1705 __ smull(result, scratch, left, right); |
| 1707 } | 1706 } |
| 1708 __ cmp(scratch, Operand(result, ASR, 31)); | 1707 __ cmp(scratch, Operand(result, ASR, 31)); |
| 1709 DeoptimizeIf(ne, instr->environment()); | 1708 DeoptimizeIf(ne, instr); |
| 1710 } else { | 1709 } else { |
| 1711 if (instr->hydrogen()->representation().IsSmi()) { | 1710 if (instr->hydrogen()->representation().IsSmi()) { |
| 1712 __ SmiUntag(result, left); | 1711 __ SmiUntag(result, left); |
| 1713 __ mul(result, result, right); | 1712 __ mul(result, result, right); |
| 1714 } else { | 1713 } else { |
| 1715 __ mul(result, left, right); | 1714 __ mul(result, left, right); |
| 1716 } | 1715 } |
| 1717 } | 1716 } |
| 1718 | 1717 |
| 1719 if (bailout_on_minus_zero) { | 1718 if (bailout_on_minus_zero) { |
| 1720 Label done; | 1719 Label done; |
| 1721 __ teq(left, Operand(right)); | 1720 __ teq(left, Operand(right)); |
| 1722 __ b(pl, &done); | 1721 __ b(pl, &done); |
| 1723 // Bail out if the result is minus zero. | 1722 // Bail out if the result is minus zero. |
| 1724 __ cmp(result, Operand::Zero()); | 1723 __ cmp(result, Operand::Zero()); |
| 1725 DeoptimizeIf(eq, instr->environment()); | 1724 DeoptimizeIf(eq, instr); |
| 1726 __ bind(&done); | 1725 __ bind(&done); |
| 1727 } | 1726 } |
| 1728 } | 1727 } |
| 1729 } | 1728 } |
| 1730 | 1729 |
| 1731 | 1730 |
| 1732 void LCodeGen::DoBitI(LBitI* instr) { | 1731 void LCodeGen::DoBitI(LBitI* instr) { |
| 1733 LOperand* left_op = instr->left(); | 1732 LOperand* left_op = instr->left(); |
| 1734 LOperand* right_op = instr->right(); | 1733 LOperand* right_op = instr->right(); |
| 1735 DCHECK(left_op->IsRegister()); | 1734 DCHECK(left_op->IsRegister()); |
| (...skipping 42 matching lines...) |
| 1778 switch (instr->op()) { | 1777 switch (instr->op()) { |
| 1779 case Token::ROR: | 1778 case Token::ROR: |
| 1780 __ mov(result, Operand(left, ROR, scratch)); | 1779 __ mov(result, Operand(left, ROR, scratch)); |
| 1781 break; | 1780 break; |
| 1782 case Token::SAR: | 1781 case Token::SAR: |
| 1783 __ mov(result, Operand(left, ASR, scratch)); | 1782 __ mov(result, Operand(left, ASR, scratch)); |
| 1784 break; | 1783 break; |
| 1785 case Token::SHR: | 1784 case Token::SHR: |
| 1786 if (instr->can_deopt()) { | 1785 if (instr->can_deopt()) { |
| 1787 __ mov(result, Operand(left, LSR, scratch), SetCC); | 1786 __ mov(result, Operand(left, LSR, scratch), SetCC); |
| 1788 DeoptimizeIf(mi, instr->environment()); | 1787 DeoptimizeIf(mi, instr); |
| 1789 } else { | 1788 } else { |
| 1790 __ mov(result, Operand(left, LSR, scratch)); | 1789 __ mov(result, Operand(left, LSR, scratch)); |
| 1791 } | 1790 } |
| 1792 break; | 1791 break; |
| 1793 case Token::SHL: | 1792 case Token::SHL: |
| 1794 __ mov(result, Operand(left, LSL, scratch)); | 1793 __ mov(result, Operand(left, LSL, scratch)); |
| 1795 break; | 1794 break; |
| 1796 default: | 1795 default: |
| 1797 UNREACHABLE(); | 1796 UNREACHABLE(); |
| 1798 break; | 1797 break; |
| (...skipping 16 matching lines...) |
| 1815 } else { | 1814 } else { |
| 1816 __ Move(result, left); | 1815 __ Move(result, left); |
| 1817 } | 1816 } |
| 1818 break; | 1817 break; |
| 1819 case Token::SHR: | 1818 case Token::SHR: |
| 1820 if (shift_count != 0) { | 1819 if (shift_count != 0) { |
| 1821 __ mov(result, Operand(left, LSR, shift_count)); | 1820 __ mov(result, Operand(left, LSR, shift_count)); |
| 1822 } else { | 1821 } else { |
| 1823 if (instr->can_deopt()) { | 1822 if (instr->can_deopt()) { |
| 1824 __ tst(left, Operand(0x80000000)); | 1823 __ tst(left, Operand(0x80000000)); |
| 1825 DeoptimizeIf(ne, instr->environment()); | 1824 DeoptimizeIf(ne, instr); |
| 1826 } | 1825 } |
| 1827 __ Move(result, left); | 1826 __ Move(result, left); |
| 1828 } | 1827 } |
| 1829 break; | 1828 break; |
| 1830 case Token::SHL: | 1829 case Token::SHL: |
| 1831 if (shift_count != 0) { | 1830 if (shift_count != 0) { |
| 1832 if (instr->hydrogen_value()->representation().IsSmi() && | 1831 if (instr->hydrogen_value()->representation().IsSmi() && |
| 1833 instr->can_deopt()) { | 1832 instr->can_deopt()) { |
| 1834 if (shift_count != 1) { | 1833 if (shift_count != 1) { |
| 1835 __ mov(result, Operand(left, LSL, shift_count - 1)); | 1834 __ mov(result, Operand(left, LSL, shift_count - 1)); |
| 1836 __ SmiTag(result, result, SetCC); | 1835 __ SmiTag(result, result, SetCC); |
| 1837 } else { | 1836 } else { |
| 1838 __ SmiTag(result, left, SetCC); | 1837 __ SmiTag(result, left, SetCC); |
| 1839 } | 1838 } |
| 1840 DeoptimizeIf(vs, instr->environment()); | 1839 DeoptimizeIf(vs, instr); |
| 1841 } else { | 1840 } else { |
| 1842 __ mov(result, Operand(left, LSL, shift_count)); | 1841 __ mov(result, Operand(left, LSL, shift_count)); |
| 1843 } | 1842 } |
| 1844 } else { | 1843 } else { |
| 1845 __ Move(result, left); | 1844 __ Move(result, left); |
| 1846 } | 1845 } |
| 1847 break; | 1846 break; |
| 1848 default: | 1847 default: |
| 1849 UNREACHABLE(); | 1848 UNREACHABLE(); |
| 1850 break; | 1849 break; |
| (...skipping 11 matching lines...) |
| 1862 | 1861 |
| 1863 if (right->IsStackSlot()) { | 1862 if (right->IsStackSlot()) { |
| 1864 Register right_reg = EmitLoadRegister(right, ip); | 1863 Register right_reg = EmitLoadRegister(right, ip); |
| 1865 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 1864 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
| 1866 } else { | 1865 } else { |
| 1867 DCHECK(right->IsRegister() || right->IsConstantOperand()); | 1866 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
| 1868 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 1867 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
| 1869 } | 1868 } |
| 1870 | 1869 |
| 1871 if (can_overflow) { | 1870 if (can_overflow) { |
| 1872 DeoptimizeIf(vs, instr->environment()); | 1871 DeoptimizeIf(vs, instr); |
| 1873 } | 1872 } |
| 1874 } | 1873 } |
| 1875 | 1874 |
| 1876 | 1875 |
| 1877 void LCodeGen::DoRSubI(LRSubI* instr) { | 1876 void LCodeGen::DoRSubI(LRSubI* instr) { |
| 1878 LOperand* left = instr->left(); | 1877 LOperand* left = instr->left(); |
| 1879 LOperand* right = instr->right(); | 1878 LOperand* right = instr->right(); |
| 1880 LOperand* result = instr->result(); | 1879 LOperand* result = instr->result(); |
| 1881 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1880 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1882 SBit set_cond = can_overflow ? SetCC : LeaveCC; | 1881 SBit set_cond = can_overflow ? SetCC : LeaveCC; |
| 1883 | 1882 |
| 1884 if (right->IsStackSlot()) { | 1883 if (right->IsStackSlot()) { |
| 1885 Register right_reg = EmitLoadRegister(right, ip); | 1884 Register right_reg = EmitLoadRegister(right, ip); |
| 1886 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 1885 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
| 1887 } else { | 1886 } else { |
| 1888 DCHECK(right->IsRegister() || right->IsConstantOperand()); | 1887 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
| 1889 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 1888 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
| 1890 } | 1889 } |
| 1891 | 1890 |
| 1892 if (can_overflow) { | 1891 if (can_overflow) { |
| 1893 DeoptimizeIf(vs, instr->environment()); | 1892 DeoptimizeIf(vs, instr); |
| 1894 } | 1893 } |
| 1895 } | 1894 } |
| 1896 | 1895 |
| 1897 | 1896 |
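DoSubI and DoRSubI above, and DoAddI further down, all follow the same pattern: emit the ALU op with SetCC only when overflow is possible and then DeoptimizeIf(vs, instr) on the V flag. A portable sketch of what that flag check corresponds to, using the GCC/Clang overflow builtins:

```cpp
#include <cassert>
#include <cstdint>

// Equivalent of the SetCC + DeoptimizeIf(vs, instr) pattern: perform the
// 32-bit signed operation and report whether it overflowed.
bool AddWouldDeopt(int32_t a, int32_t b, int32_t* out) {
  return __builtin_add_overflow(a, b, out);  // true <=> V flag would be set
}

bool SubWouldDeopt(int32_t a, int32_t b, int32_t* out) {
  return __builtin_sub_overflow(a, b, out);
}

int main() {
  int32_t r;
  assert(!AddWouldDeopt(1, 2, &r) && r == 3);  // vc: keep the result
  assert(AddWouldDeopt(INT32_MAX, 1, &r));     // vs: deoptimize
  assert(SubWouldDeopt(INT32_MIN, 1, &r));     // vs: deoptimize
}
```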
| 1898 void LCodeGen::DoConstantI(LConstantI* instr) { | 1897 void LCodeGen::DoConstantI(LConstantI* instr) { |
| 1899 __ mov(ToRegister(instr->result()), Operand(instr->value())); | 1898 __ mov(ToRegister(instr->result()), Operand(instr->value())); |
| 1900 } | 1899 } |
| 1901 | 1900 |
| 1902 | 1901 |
| 1903 void LCodeGen::DoConstantS(LConstantS* instr) { | 1902 void LCodeGen::DoConstantS(LConstantS* instr) { |
| (...skipping 33 matching lines...) |
| 1937 Register result = ToRegister(instr->result()); | 1936 Register result = ToRegister(instr->result()); |
| 1938 Register scratch = ToRegister(instr->temp()); | 1937 Register scratch = ToRegister(instr->temp()); |
| 1939 Smi* index = instr->index(); | 1938 Smi* index = instr->index(); |
| 1940 Label runtime, done; | 1939 Label runtime, done; |
| 1941 DCHECK(object.is(result)); | 1940 DCHECK(object.is(result)); |
| 1942 DCHECK(object.is(r0)); | 1941 DCHECK(object.is(r0)); |
| 1943 DCHECK(!scratch.is(scratch0())); | 1942 DCHECK(!scratch.is(scratch0())); |
| 1944 DCHECK(!scratch.is(object)); | 1943 DCHECK(!scratch.is(object)); |
| 1945 | 1944 |
| 1946 __ SmiTst(object); | 1945 __ SmiTst(object); |
| 1947 DeoptimizeIf(eq, instr->environment()); | 1946 DeoptimizeIf(eq, instr); |
| 1948 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); | 1947 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); |
| 1949 DeoptimizeIf(ne, instr->environment()); | 1948 DeoptimizeIf(ne, instr); |
| 1950 | 1949 |
| 1951 if (index->value() == 0) { | 1950 if (index->value() == 0) { |
| 1952 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); | 1951 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); |
| 1953 } else { | 1952 } else { |
| 1954 if (index->value() < JSDate::kFirstUncachedField) { | 1953 if (index->value() < JSDate::kFirstUncachedField) { |
| 1955 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | 1954 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
| 1956 __ mov(scratch, Operand(stamp)); | 1955 __ mov(scratch, Operand(stamp)); |
| 1957 __ ldr(scratch, MemOperand(scratch)); | 1956 __ ldr(scratch, MemOperand(scratch)); |
| 1958 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); | 1957 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); |
| 1959 __ cmp(scratch, scratch0()); | 1958 __ cmp(scratch, scratch0()); |
| (...skipping 96 matching lines...) |
| 2056 | 2055 |
| 2057 if (right->IsStackSlot()) { | 2056 if (right->IsStackSlot()) { |
| 2058 Register right_reg = EmitLoadRegister(right, ip); | 2057 Register right_reg = EmitLoadRegister(right, ip); |
| 2059 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 2058 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
| 2060 } else { | 2059 } else { |
| 2061 DCHECK(right->IsRegister() || right->IsConstantOperand()); | 2060 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
| 2062 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 2061 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
| 2063 } | 2062 } |
| 2064 | 2063 |
| 2065 if (can_overflow) { | 2064 if (can_overflow) { |
| 2066 DeoptimizeIf(vs, instr->environment()); | 2065 DeoptimizeIf(vs, instr); |
| 2067 } | 2066 } |
| 2068 } | 2067 } |
| 2069 | 2068 |
| 2070 | 2069 |
| 2071 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 2070 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
| 2072 LOperand* left = instr->left(); | 2071 LOperand* left = instr->left(); |
| 2073 LOperand* right = instr->right(); | 2072 LOperand* right = instr->right(); |
| 2074 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 2073 HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
| 2075 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { | 2074 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { |
| 2076 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; | 2075 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; |
| (...skipping 205 matching lines...) |
| 2282 } | 2281 } |
| 2283 | 2282 |
| 2284 if (expected.Contains(ToBooleanStub::SMI)) { | 2283 if (expected.Contains(ToBooleanStub::SMI)) { |
| 2285 // Smis: 0 -> false, all other -> true. | 2284 // Smis: 0 -> false, all other -> true. |
| 2286 __ cmp(reg, Operand::Zero()); | 2285 __ cmp(reg, Operand::Zero()); |
| 2287 __ b(eq, instr->FalseLabel(chunk_)); | 2286 __ b(eq, instr->FalseLabel(chunk_)); |
| 2288 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | 2287 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); |
| 2289 } else if (expected.NeedsMap()) { | 2288 } else if (expected.NeedsMap()) { |
| 2290 // If we need a map later and have a Smi -> deopt. | 2289 // If we need a map later and have a Smi -> deopt. |
| 2291 __ SmiTst(reg); | 2290 __ SmiTst(reg); |
| 2292 DeoptimizeIf(eq, instr->environment()); | 2291 DeoptimizeIf(eq, instr); |
| 2293 } | 2292 } |
| 2294 | 2293 |
| 2295 const Register map = scratch0(); | 2294 const Register map = scratch0(); |
| 2296 if (expected.NeedsMap()) { | 2295 if (expected.NeedsMap()) { |
| 2297 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset)); | 2296 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 2298 | 2297 |
| 2299 if (expected.CanBeUndetectable()) { | 2298 if (expected.CanBeUndetectable()) { |
| 2300 // Undetectable -> false. | 2299 // Undetectable -> false. |
| 2301 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); | 2300 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 2302 __ tst(ip, Operand(1 << Map::kIsUndetectable)); | 2301 __ tst(ip, Operand(1 << Map::kIsUndetectable)); |
| (...skipping 35 matching lines...) |
| 2338 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); | 2337 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); |
| 2339 __ cmp(r0, r0, vs); // NaN -> false. | 2338 __ cmp(r0, r0, vs); // NaN -> false. |
| 2340 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false. | 2339 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false. |
| 2341 __ b(instr->TrueLabel(chunk_)); | 2340 __ b(instr->TrueLabel(chunk_)); |
| 2342 __ bind(&not_heap_number); | 2341 __ bind(&not_heap_number); |
| 2343 } | 2342 } |
| 2344 | 2343 |
| 2345 if (!expected.IsGeneric()) { | 2344 if (!expected.IsGeneric()) { |
| 2346 // We've seen something for the first time -> deopt. | 2345 // We've seen something for the first time -> deopt. |
| 2347 // This can only happen if we are not generic already. | 2346 // This can only happen if we are not generic already. |
| 2348 DeoptimizeIf(al, instr->environment()); | 2347 DeoptimizeIf(al, instr); |
| 2349 } | 2348 } |
| 2350 } | 2349 } |
| 2351 } | 2350 } |
| 2352 } | 2351 } |
| 2353 | 2352 |
| 2354 | 2353 |
| 2355 void LCodeGen::EmitGoto(int block) { | 2354 void LCodeGen::EmitGoto(int block) { |
| 2356 if (!IsNextEmittedBlock(block)) { | 2355 if (!IsNextEmittedBlock(block)) { |
| 2357 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2356 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); |
| 2358 } | 2357 } |
| (...skipping 624 matching lines...) |
| 2983 } | 2982 } |
| 2984 | 2983 |
| 2985 | 2984 |
| 2986 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 2985 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
| 2987 Register result = ToRegister(instr->result()); | 2986 Register result = ToRegister(instr->result()); |
| 2988 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); | 2987 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); |
| 2989 __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); | 2988 __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); |
| 2990 if (instr->hydrogen()->RequiresHoleCheck()) { | 2989 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 2991 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 2990 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 2992 __ cmp(result, ip); | 2991 __ cmp(result, ip); |
| 2993 DeoptimizeIf(eq, instr->environment()); | 2992 DeoptimizeIf(eq, instr); |
| 2994 } | 2993 } |
| 2995 } | 2994 } |
| 2996 | 2995 |
| 2997 | 2996 |
| 2998 template <class T> | 2997 template <class T> |
| 2999 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | 2998 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { |
| 3000 DCHECK(FLAG_vector_ics); | 2999 DCHECK(FLAG_vector_ics); |
| 3001 Register vector = ToRegister(instr->temp_vector()); | 3000 Register vector = ToRegister(instr->temp_vector()); |
| 3002 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister())); | 3001 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister())); |
| 3003 __ Move(vector, instr->hydrogen()->feedback_vector()); | 3002 __ Move(vector, instr->hydrogen()->feedback_vector()); |
| (...skipping 29 matching lines...) |
| 3033 | 3032 |
| 3034 // If the cell we are storing to contains the hole it could have | 3033 // If the cell we are storing to contains the hole it could have |
| 3035 // been deleted from the property dictionary. In that case, we need | 3034 // been deleted from the property dictionary. In that case, we need |
| 3036 // to update the property details in the property dictionary to mark | 3035 // to update the property details in the property dictionary to mark |
| 3037 // it as no longer deleted. | 3036 // it as no longer deleted. |
| 3038 if (instr->hydrogen()->RequiresHoleCheck()) { | 3037 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3039 // We use a temp to check the payload (CompareRoot might clobber ip). | 3038 // We use a temp to check the payload (CompareRoot might clobber ip). |
| 3040 Register payload = ToRegister(instr->temp()); | 3039 Register payload = ToRegister(instr->temp()); |
| 3041 __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); | 3040 __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); |
| 3042 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); | 3041 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); |
| 3043 DeoptimizeIf(eq, instr->environment()); | 3042 DeoptimizeIf(eq, instr); |
| 3044 } | 3043 } |
| 3045 | 3044 |
| 3046 // Store the value. | 3045 // Store the value. |
| 3047 __ str(value, FieldMemOperand(cell, Cell::kValueOffset)); | 3046 __ str(value, FieldMemOperand(cell, Cell::kValueOffset)); |
| 3048 // Cells are always rescanned, so no write barrier here. | 3047 // Cells are always rescanned, so no write barrier here. |
| 3049 } | 3048 } |
| 3050 | 3049 |
| 3051 | 3050 |
| 3052 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 3051 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
| 3053 Register context = ToRegister(instr->context()); | 3052 Register context = ToRegister(instr->context()); |
| 3054 Register result = ToRegister(instr->result()); | 3053 Register result = ToRegister(instr->result()); |
| 3055 __ ldr(result, ContextOperand(context, instr->slot_index())); | 3054 __ ldr(result, ContextOperand(context, instr->slot_index())); |
| 3056 if (instr->hydrogen()->RequiresHoleCheck()) { | 3055 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3057 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3056 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 3058 __ cmp(result, ip); | 3057 __ cmp(result, ip); |
| 3059 if (instr->hydrogen()->DeoptimizesOnHole()) { | 3058 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 3060 DeoptimizeIf(eq, instr->environment()); | 3059 DeoptimizeIf(eq, instr); |
| 3061 } else { | 3060 } else { |
| 3062 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); | 3061 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); |
| 3063 } | 3062 } |
| 3064 } | 3063 } |
| 3065 } | 3064 } |
| 3066 | 3065 |
| 3067 | 3066 |
| 3068 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | 3067 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
| 3069 Register context = ToRegister(instr->context()); | 3068 Register context = ToRegister(instr->context()); |
| 3070 Register value = ToRegister(instr->value()); | 3069 Register value = ToRegister(instr->value()); |
| 3071 Register scratch = scratch0(); | 3070 Register scratch = scratch0(); |
| 3072 MemOperand target = ContextOperand(context, instr->slot_index()); | 3071 MemOperand target = ContextOperand(context, instr->slot_index()); |
| 3073 | 3072 |
| 3074 Label skip_assignment; | 3073 Label skip_assignment; |
| 3075 | 3074 |
| 3076 if (instr->hydrogen()->RequiresHoleCheck()) { | 3075 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3077 __ ldr(scratch, target); | 3076 __ ldr(scratch, target); |
| 3078 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3077 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 3079 __ cmp(scratch, ip); | 3078 __ cmp(scratch, ip); |
| 3080 if (instr->hydrogen()->DeoptimizesOnHole()) { | 3079 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 3081 DeoptimizeIf(eq, instr->environment()); | 3080 DeoptimizeIf(eq, instr); |
| 3082 } else { | 3081 } else { |
| 3083 __ b(ne, &skip_assignment); | 3082 __ b(ne, &skip_assignment); |
| 3084 } | 3083 } |
| 3085 } | 3084 } |
| 3086 | 3085 |
| 3087 __ str(value, target); | 3086 __ str(value, target); |
| 3088 if (instr->hydrogen()->NeedsWriteBarrier()) { | 3087 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 3089 SmiCheck check_needed = | 3088 SmiCheck check_needed = |
| 3090 instr->hydrogen()->value()->type().IsHeapObject() | 3089 instr->hydrogen()->value()->type().IsHeapObject() |
| 3091 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 3090 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| (...skipping 59 matching lines...) |
| 3151 Register function = ToRegister(instr->function()); | 3150 Register function = ToRegister(instr->function()); |
| 3152 Register result = ToRegister(instr->result()); | 3151 Register result = ToRegister(instr->result()); |
| 3153 | 3152 |
| 3154 // Get the prototype or initial map from the function. | 3153 // Get the prototype or initial map from the function. |
| 3155 __ ldr(result, | 3154 __ ldr(result, |
| 3156 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 3155 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 3157 | 3156 |
| 3158 // Check that the function has a prototype or an initial map. | 3157 // Check that the function has a prototype or an initial map. |
| 3159 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3158 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 3160 __ cmp(result, ip); | 3159 __ cmp(result, ip); |
| 3161 DeoptimizeIf(eq, instr->environment()); | 3160 DeoptimizeIf(eq, instr); |
| 3162 | 3161 |
| 3163 // If the function does not have an initial map, we're done. | 3162 // If the function does not have an initial map, we're done. |
| 3164 Label done; | 3163 Label done; |
| 3165 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); | 3164 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); |
| 3166 __ b(ne, &done); | 3165 __ b(ne, &done); |
| 3167 | 3166 |
| 3168 // Get the prototype from the initial map. | 3167 // Get the prototype from the initial map. |
| 3169 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 3168 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
| 3170 | 3169 |
| 3171 // All done. | 3170 // All done. |
| (...skipping 105 matching lines...) |
| 3277 break; | 3276 break; |
| 3278 case EXTERNAL_INT32_ELEMENTS: | 3277 case EXTERNAL_INT32_ELEMENTS: |
| 3279 case INT32_ELEMENTS: | 3278 case INT32_ELEMENTS: |
| 3280 __ ldr(result, mem_operand); | 3279 __ ldr(result, mem_operand); |
| 3281 break; | 3280 break; |
| 3282 case EXTERNAL_UINT32_ELEMENTS: | 3281 case EXTERNAL_UINT32_ELEMENTS: |
| 3283 case UINT32_ELEMENTS: | 3282 case UINT32_ELEMENTS: |
| 3284 __ ldr(result, mem_operand); | 3283 __ ldr(result, mem_operand); |
| 3285 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 3284 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
| 3286 __ cmp(result, Operand(0x80000000)); | 3285 __ cmp(result, Operand(0x80000000)); |
| 3287 DeoptimizeIf(cs, instr->environment()); | 3286 DeoptimizeIf(cs, instr); |
| 3288 } | 3287 } |
| 3289 break; | 3288 break; |
| 3290 case FLOAT32_ELEMENTS: | 3289 case FLOAT32_ELEMENTS: |
| 3291 case FLOAT64_ELEMENTS: | 3290 case FLOAT64_ELEMENTS: |
| 3292 case EXTERNAL_FLOAT32_ELEMENTS: | 3291 case EXTERNAL_FLOAT32_ELEMENTS: |
| 3293 case EXTERNAL_FLOAT64_ELEMENTS: | 3292 case EXTERNAL_FLOAT64_ELEMENTS: |
| 3294 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3293 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 3295 case FAST_HOLEY_ELEMENTS: | 3294 case FAST_HOLEY_ELEMENTS: |
| 3296 case FAST_HOLEY_SMI_ELEMENTS: | 3295 case FAST_HOLEY_SMI_ELEMENTS: |
| 3297 case FAST_DOUBLE_ELEMENTS: | 3296 case FAST_DOUBLE_ELEMENTS: |
| (...skipping 32 matching lines...) |
| 3330 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | 3329 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) |
| 3331 ? (element_size_shift - kSmiTagSize) : element_size_shift; | 3330 ? (element_size_shift - kSmiTagSize) : element_size_shift; |
| 3332 __ add(scratch, scratch, Operand(key, LSL, shift_size)); | 3331 __ add(scratch, scratch, Operand(key, LSL, shift_size)); |
| 3333 } | 3332 } |
| 3334 | 3333 |
| 3335 __ vldr(result, scratch, 0); | 3334 __ vldr(result, scratch, 0); |
| 3336 | 3335 |
| 3337 if (instr->hydrogen()->RequiresHoleCheck()) { | 3336 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3338 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); | 3337 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); |
| 3339 __ cmp(scratch, Operand(kHoleNanUpper32)); | 3338 __ cmp(scratch, Operand(kHoleNanUpper32)); |
| 3340 DeoptimizeIf(eq, instr->environment()); | 3339 DeoptimizeIf(eq, instr); |
| 3341 } | 3340 } |
| 3342 } | 3341 } |
| 3343 | 3342 |
| 3344 | 3343 |
| 3345 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3344 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
| 3346 Register elements = ToRegister(instr->elements()); | 3345 Register elements = ToRegister(instr->elements()); |
| 3347 Register result = ToRegister(instr->result()); | 3346 Register result = ToRegister(instr->result()); |
| 3348 Register scratch = scratch0(); | 3347 Register scratch = scratch0(); |
| 3349 Register store_base = scratch; | 3348 Register store_base = scratch; |
| 3350 int offset = instr->base_offset(); | 3349 int offset = instr->base_offset(); |
| (...skipping 13 matching lines...) |
| 3364 } else { | 3363 } else { |
| 3365 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); | 3364 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); |
| 3366 } | 3365 } |
| 3367 } | 3366 } |
| 3368 __ ldr(result, MemOperand(store_base, offset)); | 3367 __ ldr(result, MemOperand(store_base, offset)); |
| 3369 | 3368 |
| 3370 // Check for the hole value. | 3369 // Check for the hole value. |
| 3371 if (instr->hydrogen()->RequiresHoleCheck()) { | 3370 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3372 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | 3371 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { |
| 3373 __ SmiTst(result); | 3372 __ SmiTst(result); |
| 3374 DeoptimizeIf(ne, instr->environment()); | 3373 DeoptimizeIf(ne, instr); |
| 3375 } else { | 3374 } else { |
| 3376 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 3375 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); |
| 3377 __ cmp(result, scratch); | 3376 __ cmp(result, scratch); |
| 3378 DeoptimizeIf(eq, instr->environment()); | 3377 DeoptimizeIf(eq, instr); |
| 3379 } | 3378 } |
| 3380 } | 3379 } |
| 3381 } | 3380 } |
| 3382 | 3381 |
| 3383 | 3382 |
| 3384 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | 3383 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { |
| 3385 if (instr->is_typed_elements()) { | 3384 if (instr->is_typed_elements()) { |
| 3386 DoLoadKeyedExternalArray(instr); | 3385 DoLoadKeyedExternalArray(instr); |
| 3387 } else if (instr->hydrogen()->representation().IsDouble()) { | 3386 } else if (instr->hydrogen()->representation().IsDouble()) { |
| 3388 DoLoadKeyedFixedDoubleArray(instr); | 3387 DoLoadKeyedFixedDoubleArray(instr); |
| (...skipping 121 matching lines...) |
| 3510 // Normal function. Replace undefined or null with global receiver. | 3509 // Normal function. Replace undefined or null with global receiver. |
| 3511 __ LoadRoot(scratch, Heap::kNullValueRootIndex); | 3510 __ LoadRoot(scratch, Heap::kNullValueRootIndex); |
| 3512 __ cmp(receiver, scratch); | 3511 __ cmp(receiver, scratch); |
| 3513 __ b(eq, &global_object); | 3512 __ b(eq, &global_object); |
| 3514 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 3513 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
| 3515 __ cmp(receiver, scratch); | 3514 __ cmp(receiver, scratch); |
| 3516 __ b(eq, &global_object); | 3515 __ b(eq, &global_object); |
| 3517 | 3516 |
| 3518 // Deoptimize if the receiver is not a JS object. | 3517 // Deoptimize if the receiver is not a JS object. |
| 3519 __ SmiTst(receiver); | 3518 __ SmiTst(receiver); |
| 3520 DeoptimizeIf(eq, instr->environment()); | 3519 DeoptimizeIf(eq, instr); |
| 3521 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); | 3520 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); |
| 3522 DeoptimizeIf(lt, instr->environment()); | 3521 DeoptimizeIf(lt, instr); |
| 3523 | 3522 |
| 3524 __ b(&result_in_receiver); | 3523 __ b(&result_in_receiver); |
| 3525 __ bind(&global_object); | 3524 __ bind(&global_object); |
| 3526 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); | 3525 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); |
| 3527 __ ldr(result, | 3526 __ ldr(result, |
| 3528 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); | 3527 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); |
| 3529 __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); | 3528 __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); |
| 3530 | 3529 |
| 3531 if (result.is(receiver)) { | 3530 if (result.is(receiver)) { |
| 3532 __ bind(&result_in_receiver); | 3531 __ bind(&result_in_receiver); |
| (...skipping 14 matching lines...) |
| 3547 Register elements = ToRegister(instr->elements()); | 3546 Register elements = ToRegister(instr->elements()); |
| 3548 Register scratch = scratch0(); | 3547 Register scratch = scratch0(); |
| 3549 DCHECK(receiver.is(r0)); // Used for parameter count. | 3548 DCHECK(receiver.is(r0)); // Used for parameter count. |
| 3550 DCHECK(function.is(r1)); // Required by InvokeFunction. | 3549 DCHECK(function.is(r1)); // Required by InvokeFunction. |
| 3551 DCHECK(ToRegister(instr->result()).is(r0)); | 3550 DCHECK(ToRegister(instr->result()).is(r0)); |
| 3552 | 3551 |
| 3553 // Copy the arguments to this function possibly from the | 3552 // Copy the arguments to this function possibly from the |
| 3554 // adaptor frame below it. | 3553 // adaptor frame below it. |
| 3555 const uint32_t kArgumentsLimit = 1 * KB; | 3554 const uint32_t kArgumentsLimit = 1 * KB; |
| 3556 __ cmp(length, Operand(kArgumentsLimit)); | 3555 __ cmp(length, Operand(kArgumentsLimit)); |
| 3557 DeoptimizeIf(hi, instr->environment()); | 3556 DeoptimizeIf(hi, instr); |
| 3558 | 3557 |
| 3559 // Push the receiver and use the register to keep the original | 3558 // Push the receiver and use the register to keep the original |
| 3560 // number of arguments. | 3559 // number of arguments. |
| 3561 __ push(receiver); | 3560 __ push(receiver); |
| 3562 __ mov(receiver, length); | 3561 __ mov(receiver, length); |
| 3563 // The arguments are at a one pointer size offset from elements. | 3562 // The arguments are at a one pointer size offset from elements. |
| 3564 __ add(elements, elements, Operand(1 * kPointerSize)); | 3563 __ add(elements, elements, Operand(1 * kPointerSize)); |
| 3565 | 3564 |
| 3566 // Loop through the arguments pushing them onto the execution | 3565 // Loop through the arguments pushing them onto the execution |
| 3567 // stack. | 3566 // stack. |
| (...skipping 109 matching lines...) |
| 3677 DCHECK(instr->context() != NULL); | 3676 DCHECK(instr->context() != NULL); |
| 3678 DCHECK(ToRegister(instr->context()).is(cp)); | 3677 DCHECK(ToRegister(instr->context()).is(cp)); |
| 3679 Register input = ToRegister(instr->value()); | 3678 Register input = ToRegister(instr->value()); |
| 3680 Register result = ToRegister(instr->result()); | 3679 Register result = ToRegister(instr->result()); |
| 3681 Register scratch = scratch0(); | 3680 Register scratch = scratch0(); |
| 3682 | 3681 |
| 3683 // Deoptimize if not a heap number. | 3682 // Deoptimize if not a heap number. |
| 3684 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 3683 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 3685 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 3684 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 3686 __ cmp(scratch, Operand(ip)); | 3685 __ cmp(scratch, Operand(ip)); |
| 3687 DeoptimizeIf(ne, instr->environment()); | 3686 DeoptimizeIf(ne, instr); |
| 3688 | 3687 |
| 3689 Label done; | 3688 Label done; |
| 3690 Register exponent = scratch0(); | 3689 Register exponent = scratch0(); |
| 3691 scratch = no_reg; | 3690 scratch = no_reg; |
| 3692 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | 3691 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
| 3693 // Check the sign of the argument. If the argument is positive, just | 3692 // Check the sign of the argument. If the argument is positive, just |
| 3694 // return it. | 3693 // return it. |
| 3695 __ tst(exponent, Operand(HeapNumber::kSignMask)); | 3694 __ tst(exponent, Operand(HeapNumber::kSignMask)); |
| 3696 // Move the input to the result if necessary. | 3695 // Move the input to the result if necessary. |
| 3697 __ Move(result, input); | 3696 __ Move(result, input); |
| (...skipping 47 matching lines...) |
| 3745 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { | 3744 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { |
| 3746 Register input = ToRegister(instr->value()); | 3745 Register input = ToRegister(instr->value()); |
| 3747 Register result = ToRegister(instr->result()); | 3746 Register result = ToRegister(instr->result()); |
| 3748 __ cmp(input, Operand::Zero()); | 3747 __ cmp(input, Operand::Zero()); |
| 3749 __ Move(result, input, pl); | 3748 __ Move(result, input, pl); |
| 3750 // We can make rsb conditional because the previous cmp instruction | 3749 // We can make rsb conditional because the previous cmp instruction |
| 3751 // will clear the V (overflow) flag and rsb won't set this flag | 3750 // will clear the V (overflow) flag and rsb won't set this flag |
| 3752 // if input is positive. | 3751 // if input is positive. |
| 3753 __ rsb(result, input, Operand::Zero(), SetCC, mi); | 3752 __ rsb(result, input, Operand::Zero(), SetCC, mi); |
| 3754 // Deoptimize on overflow. | 3753 // Deoptimize on overflow. |
| 3755 DeoptimizeIf(vs, instr->environment()); | 3754 DeoptimizeIf(vs, instr); |
| 3756 } | 3755 } |
| 3757 | 3756 |
| 3758 | 3757 |
| 3759 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3758 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
| 3760 // Class for deferred case. | 3759 // Class for deferred case. |
| 3761 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { | 3760 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { |
| 3762 public: | 3761 public: |
| 3763 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) | 3762 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) |
| 3764 : LDeferredCode(codegen), instr_(instr) { } | 3763 : LDeferredCode(codegen), instr_(instr) { } |
| 3765 virtual void Generate() OVERRIDE { | 3764 virtual void Generate() OVERRIDE { |
| (...skipping 25 matching lines...) |
| 3791 } | 3790 } |
| 3792 | 3791 |
| 3793 | 3792 |
| 3794 void LCodeGen::DoMathFloor(LMathFloor* instr) { | 3793 void LCodeGen::DoMathFloor(LMathFloor* instr) { |
| 3795 DwVfpRegister input = ToDoubleRegister(instr->value()); | 3794 DwVfpRegister input = ToDoubleRegister(instr->value()); |
| 3796 Register result = ToRegister(instr->result()); | 3795 Register result = ToRegister(instr->result()); |
| 3797 Register input_high = scratch0(); | 3796 Register input_high = scratch0(); |
| 3798 Label done, exact; | 3797 Label done, exact; |
| 3799 | 3798 |
| 3800 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); | 3799 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); |
| 3801 DeoptimizeIf(al, instr->environment()); | 3800 DeoptimizeIf(al, instr); |
| 3802 | 3801 |
| 3803 __ bind(&exact); | 3802 __ bind(&exact); |
| 3804 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3803 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3805 // Test for -0. | 3804 // Test for -0. |
| 3806 __ cmp(result, Operand::Zero()); | 3805 __ cmp(result, Operand::Zero()); |
| 3807 __ b(ne, &done); | 3806 __ b(ne, &done); |
| 3808 __ cmp(input_high, Operand::Zero()); | 3807 __ cmp(input_high, Operand::Zero()); |
| 3809 DeoptimizeIf(mi, instr->environment()); | 3808 DeoptimizeIf(mi, instr); |
| 3810 } | 3809 } |
| 3811 __ bind(&done); | 3810 __ bind(&done); |
| 3812 } | 3811 } |
| 3813 | 3812 |
| 3814 | 3813 |
| 3815 void LCodeGen::DoMathRound(LMathRound* instr) { | 3814 void LCodeGen::DoMathRound(LMathRound* instr) { |
| 3816 DwVfpRegister input = ToDoubleRegister(instr->value()); | 3815 DwVfpRegister input = ToDoubleRegister(instr->value()); |
| 3817 Register result = ToRegister(instr->result()); | 3816 Register result = ToRegister(instr->result()); |
| 3818 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 3817 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); |
| 3819 DwVfpRegister input_plus_dot_five = double_scratch1; | 3818 DwVfpRegister input_plus_dot_five = double_scratch1; |
| 3820 Register input_high = scratch0(); | 3819 Register input_high = scratch0(); |
| 3821 DwVfpRegister dot_five = double_scratch0(); | 3820 DwVfpRegister dot_five = double_scratch0(); |
| 3822 Label convert, done; | 3821 Label convert, done; |
| 3823 | 3822 |
| 3824 __ Vmov(dot_five, 0.5, scratch0()); | 3823 __ Vmov(dot_five, 0.5, scratch0()); |
| 3825 __ vabs(double_scratch1, input); | 3824 __ vabs(double_scratch1, input); |
| 3826 __ VFPCompareAndSetFlags(double_scratch1, dot_five); | 3825 __ VFPCompareAndSetFlags(double_scratch1, dot_five); |
| 3827 // If input is in [-0.5, -0], the result is -0. | 3826 // If input is in [-0.5, -0], the result is -0. |
| 3828 // If input is in [+0, +0.5[, the result is +0. | 3827 // If input is in [+0, +0.5[, the result is +0. |
| 3829 // If the input is +0.5, the result is 1. | 3828 // If the input is +0.5, the result is 1. |
| 3830 __ b(hi, &convert); // Out of [-0.5, +0.5]. | 3829 __ b(hi, &convert); // Out of [-0.5, +0.5]. |
| 3831 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3830 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3832 __ VmovHigh(input_high, input); | 3831 __ VmovHigh(input_high, input); |
| 3833 __ cmp(input_high, Operand::Zero()); | 3832 __ cmp(input_high, Operand::Zero()); |
| 3834 DeoptimizeIf(mi, instr->environment()); // [-0.5, -0]. | 3833 DeoptimizeIf(mi, instr); // [-0.5, -0]. |
| 3835 } | 3834 } |
| 3836 __ VFPCompareAndSetFlags(input, dot_five); | 3835 __ VFPCompareAndSetFlags(input, dot_five); |
| 3837 __ mov(result, Operand(1), LeaveCC, eq); // +0.5. | 3836 __ mov(result, Operand(1), LeaveCC, eq); // +0.5. |
| 3838 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on | 3837 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on |
| 3839 // flag kBailoutOnMinusZero. | 3838 // flag kBailoutOnMinusZero. |
| 3840 __ mov(result, Operand::Zero(), LeaveCC, ne); | 3839 __ mov(result, Operand::Zero(), LeaveCC, ne); |
| 3841 __ b(&done); | 3840 __ b(&done); |
| 3842 | 3841 |
| 3843 __ bind(&convert); | 3842 __ bind(&convert); |
| 3844 __ vadd(input_plus_dot_five, input, dot_five); | 3843 __ vadd(input_plus_dot_five, input, dot_five); |
| 3845 // Reuse dot_five (double_scratch0) as we no longer need this value. | 3844 // Reuse dot_five (double_scratch0) as we no longer need this value. |
| 3846 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), | 3845 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), |
| 3847 &done, &done); | 3846 &done, &done); |
| 3848 DeoptimizeIf(al, instr->environment()); | 3847 DeoptimizeIf(al, instr); |
| 3849 __ bind(&done); | 3848 __ bind(&done); |
| 3850 } | 3849 } |
| 3851 | 3850 |
| 3852 | 3851 |
| 3853 void LCodeGen::DoMathFround(LMathFround* instr) { | 3852 void LCodeGen::DoMathFround(LMathFround* instr) { |
| 3854 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); | 3853 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); |
| 3855 DwVfpRegister output_reg = ToDoubleRegister(instr->result()); | 3854 DwVfpRegister output_reg = ToDoubleRegister(instr->result()); |
| 3856 LowDwVfpRegister scratch = double_scratch0(); | 3855 LowDwVfpRegister scratch = double_scratch0(); |
| 3857 __ vcvt_f32_f64(scratch.low(), input_reg); | 3856 __ vcvt_f32_f64(scratch.low(), input_reg); |
| 3858 __ vcvt_f64_f32(output_reg, scratch.low()); | 3857 __ vcvt_f64_f32(output_reg, scratch.low()); |
| (...skipping 43 matching lines...) |
| 3902 if (exponent_type.IsSmi()) { | 3901 if (exponent_type.IsSmi()) { |
| 3903 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3902 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3904 __ CallStub(&stub); | 3903 __ CallStub(&stub); |
| 3905 } else if (exponent_type.IsTagged()) { | 3904 } else if (exponent_type.IsTagged()) { |
| 3906 Label no_deopt; | 3905 Label no_deopt; |
| 3907 __ JumpIfSmi(tagged_exponent, &no_deopt); | 3906 __ JumpIfSmi(tagged_exponent, &no_deopt); |
| 3908 DCHECK(!r6.is(tagged_exponent)); | 3907 DCHECK(!r6.is(tagged_exponent)); |
| 3909 __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); | 3908 __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); |
| 3910 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 3909 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 3911 __ cmp(r6, Operand(ip)); | 3910 __ cmp(r6, Operand(ip)); |
| 3912 DeoptimizeIf(ne, instr->environment()); | 3911 DeoptimizeIf(ne, instr); |
| 3913 __ bind(&no_deopt); | 3912 __ bind(&no_deopt); |
| 3914 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3913 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3915 __ CallStub(&stub); | 3914 __ CallStub(&stub); |
| 3916 } else if (exponent_type.IsInteger32()) { | 3915 } else if (exponent_type.IsInteger32()) { |
| 3917 MathPowStub stub(isolate(), MathPowStub::INTEGER); | 3916 MathPowStub stub(isolate(), MathPowStub::INTEGER); |
| 3918 __ CallStub(&stub); | 3917 __ CallStub(&stub); |
| 3919 } else { | 3918 } else { |
| 3920 DCHECK(exponent_type.IsDouble()); | 3919 DCHECK(exponent_type.IsDouble()); |
| 3921 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 3920 MathPowStub stub(isolate(), MathPowStub::DOUBLE); |
| 3922 __ CallStub(&stub); | 3921 __ CallStub(&stub); |
| (...skipping 331 matching lines...) |
| 4254 Register index = ToRegister(instr->index()); | 4253 Register index = ToRegister(instr->index()); |
| 4255 Operand length = ToOperand(instr->length()); | 4254 Operand length = ToOperand(instr->length()); |
| 4256 __ cmp(index, length); | 4255 __ cmp(index, length); |
| 4257 } | 4256 } |
| 4258 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 4257 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
| 4259 Label done; | 4258 Label done; |
| 4260 __ b(NegateCondition(cc), &done); | 4259 __ b(NegateCondition(cc), &done); |
| 4261 __ stop("eliminated bounds check failed"); | 4260 __ stop("eliminated bounds check failed"); |
| 4262 __ bind(&done); | 4261 __ bind(&done); |
| 4263 } else { | 4262 } else { |
| 4264 DeoptimizeIf(cc, instr->environment()); | 4263 DeoptimizeIf(cc, instr); |
| 4265 } | 4264 } |
| 4266 } | 4265 } |
| 4267 | 4266 |
| 4268 | 4267 |
| 4269 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4268 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
| 4270 Register external_pointer = ToRegister(instr->elements()); | 4269 Register external_pointer = ToRegister(instr->elements()); |
| 4271 Register key = no_reg; | 4270 Register key = no_reg; |
| 4272 ElementsKind elements_kind = instr->elements_kind(); | 4271 ElementsKind elements_kind = instr->elements_kind(); |
| 4273 bool key_is_constant = instr->key()->IsConstantOperand(); | 4272 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 4274 int constant_key = 0; | 4273 int constant_key = 0; |
| (...skipping 227 matching lines...) |
| 4502 } | 4501 } |
| 4503 __ bind(&not_applicable); | 4502 __ bind(&not_applicable); |
| 4504 } | 4503 } |
| 4505 | 4504 |
| 4506 | 4505 |
| 4507 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4506 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
| 4508 Register object = ToRegister(instr->object()); | 4507 Register object = ToRegister(instr->object()); |
| 4509 Register temp = ToRegister(instr->temp()); | 4508 Register temp = ToRegister(instr->temp()); |
| 4510 Label no_memento_found; | 4509 Label no_memento_found; |
| 4511 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); | 4510 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); |
| 4512 DeoptimizeIf(eq, instr->environment()); | 4511 DeoptimizeIf(eq, instr); |
| 4513 __ bind(&no_memento_found); | 4512 __ bind(&no_memento_found); |
| 4514 } | 4513 } |
| 4515 | 4514 |
| 4516 | 4515 |
| 4517 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4516 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
| 4518 DCHECK(ToRegister(instr->context()).is(cp)); | 4517 DCHECK(ToRegister(instr->context()).is(cp)); |
| 4519 DCHECK(ToRegister(instr->left()).is(r1)); | 4518 DCHECK(ToRegister(instr->left()).is(r1)); |
| 4520 DCHECK(ToRegister(instr->right()).is(r0)); | 4519 DCHECK(ToRegister(instr->right()).is(r0)); |
| 4521 StringAddStub stub(isolate(), | 4520 StringAddStub stub(isolate(), |
| 4522 instr->hydrogen()->flags(), | 4521 instr->hydrogen()->flags(), |
| (...skipping 313 matching lines...) |
| 4836 } | 4835 } |
| 4837 | 4836 |
| 4838 | 4837 |
| 4839 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4838 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 4840 HChange* hchange = instr->hydrogen(); | 4839 HChange* hchange = instr->hydrogen(); |
| 4841 Register input = ToRegister(instr->value()); | 4840 Register input = ToRegister(instr->value()); |
| 4842 Register output = ToRegister(instr->result()); | 4841 Register output = ToRegister(instr->result()); |
| 4843 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4842 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4844 hchange->value()->CheckFlag(HValue::kUint32)) { | 4843 hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4845 __ tst(input, Operand(0xc0000000)); | 4844 __ tst(input, Operand(0xc0000000)); |
| 4846 DeoptimizeIf(ne, instr->environment()); | 4845 DeoptimizeIf(ne, instr); |
| 4847 } | 4846 } |
| 4848 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4847 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4849 !hchange->value()->CheckFlag(HValue::kUint32)) { | 4848 !hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4850 __ SmiTag(output, input, SetCC); | 4849 __ SmiTag(output, input, SetCC); |
| 4851 DeoptimizeIf(vs, instr->environment()); | 4850 DeoptimizeIf(vs, instr); |
| 4852 } else { | 4851 } else { |
| 4853 __ SmiTag(output, input); | 4852 __ SmiTag(output, input); |
| 4854 } | 4853 } |
| 4855 } | 4854 } |
| 4856 | 4855 |
| 4857 | 4856 |
| 4858 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4857 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 4859 Register input = ToRegister(instr->value()); | 4858 Register input = ToRegister(instr->value()); |
| 4860 Register result = ToRegister(instr->result()); | 4859 Register result = ToRegister(instr->result()); |
| 4861 if (instr->needs_check()) { | 4860 if (instr->needs_check()) { |
| 4862 STATIC_ASSERT(kHeapObjectTag == 1); | 4861 STATIC_ASSERT(kHeapObjectTag == 1); |
| 4863 // If the input is a HeapObject, SmiUntag will set the carry flag. | 4862 // If the input is a HeapObject, SmiUntag will set the carry flag. |
| 4864 __ SmiUntag(result, input, SetCC); | 4863 __ SmiUntag(result, input, SetCC); |
| 4865 DeoptimizeIf(cs, instr->environment()); | 4864 DeoptimizeIf(cs, instr); |
| 4866 } else { | 4865 } else { |
| 4867 __ SmiUntag(result, input); | 4866 __ SmiUntag(result, input); |
| 4868 } | 4867 } |
| 4869 } | 4868 } |
| 4870 | 4869 |
| 4871 | 4870 |
| 4872 void LCodeGen::EmitNumberUntagD(Register input_reg, | 4871 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, |
| 4873 DwVfpRegister result_reg, | 4872 DwVfpRegister result_reg, |
| 4874 bool can_convert_undefined_to_nan, | |
| 4875 bool deoptimize_on_minus_zero, | |
| 4876 LEnvironment* env, | |
| 4877 NumberUntagDMode mode) { | 4873 NumberUntagDMode mode) { |
| 4874 bool can_convert_undefined_to_nan = |
| 4875 instr->hydrogen()->can_convert_undefined_to_nan(); |
| 4876 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); |
| 4877 |
| 4878 Register scratch = scratch0(); | 4878 Register scratch = scratch0(); |
| 4879 SwVfpRegister flt_scratch = double_scratch0().low(); | 4879 SwVfpRegister flt_scratch = double_scratch0().low(); |
| 4880 DCHECK(!result_reg.is(double_scratch0())); | 4880 DCHECK(!result_reg.is(double_scratch0())); |
| 4881 Label convert, load_smi, done; | 4881 Label convert, load_smi, done; |
| 4882 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4882 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
| 4883 // Smi check. | 4883 // Smi check. |
| 4884 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 4884 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); |
| 4885 // Heap number map check. | 4885 // Heap number map check. |
| 4886 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4886 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 4887 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 4887 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 4888 __ cmp(scratch, Operand(ip)); | 4888 __ cmp(scratch, Operand(ip)); |
| 4889 if (can_convert_undefined_to_nan) { | 4889 if (can_convert_undefined_to_nan) { |
| 4890 __ b(ne, &convert); | 4890 __ b(ne, &convert); |
| 4891 } else { | 4891 } else { |
| 4892 DeoptimizeIf(ne, env); | 4892 DeoptimizeIf(ne, instr); |
| 4893 } | 4893 } |
| 4894 // load heap number | 4894 // load heap number |
| 4895 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); | 4895 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); |
| 4896 if (deoptimize_on_minus_zero) { | 4896 if (deoptimize_on_minus_zero) { |
| 4897 __ VmovLow(scratch, result_reg); | 4897 __ VmovLow(scratch, result_reg); |
| 4898 __ cmp(scratch, Operand::Zero()); | 4898 __ cmp(scratch, Operand::Zero()); |
| 4899 __ b(ne, &done); | 4899 __ b(ne, &done); |
| 4900 __ VmovHigh(scratch, result_reg); | 4900 __ VmovHigh(scratch, result_reg); |
| 4901 __ cmp(scratch, Operand(HeapNumber::kSignMask)); | 4901 __ cmp(scratch, Operand(HeapNumber::kSignMask)); |
| 4902 DeoptimizeIf(eq, env); | 4902 DeoptimizeIf(eq, instr); |
| 4903 } | 4903 } |
| 4904 __ jmp(&done); | 4904 __ jmp(&done); |
| 4905 if (can_convert_undefined_to_nan) { | 4905 if (can_convert_undefined_to_nan) { |
| 4906 __ bind(&convert); | 4906 __ bind(&convert); |
| 4907 // Convert undefined (and hole) to NaN. | 4907 // Convert undefined (and hole) to NaN. |
| 4908 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 4908 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| 4909 __ cmp(input_reg, Operand(ip)); | 4909 __ cmp(input_reg, Operand(ip)); |
| 4910 DeoptimizeIf(ne, env); | 4910 DeoptimizeIf(ne, instr); |
| 4911 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4911 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
| 4912 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); | 4912 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); |
| 4913 __ jmp(&done); | 4913 __ jmp(&done); |
| 4914 } | 4914 } |
| 4915 } else { | 4915 } else { |
| 4916 __ SmiUntag(scratch, input_reg); | 4916 __ SmiUntag(scratch, input_reg); |
| 4917 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4917 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
| 4918 } | 4918 } |
| 4919 // Smi to double register conversion | 4919 // Smi to double register conversion |
| 4920 __ bind(&load_smi); | 4920 __ bind(&load_smi); |
| (...skipping 48 matching lines...) |
| 4969 __ LoadRoot(ip, Heap::kTrueValueRootIndex); | 4969 __ LoadRoot(ip, Heap::kTrueValueRootIndex); |
| 4970 __ cmp(scratch2, Operand(ip)); | 4970 __ cmp(scratch2, Operand(ip)); |
| 4971 __ b(ne, &check_false); | 4971 __ b(ne, &check_false); |
| 4972 __ mov(input_reg, Operand(1)); | 4972 __ mov(input_reg, Operand(1)); |
| 4973 __ b(&done); | 4973 __ b(&done); |
| 4974 | 4974 |
| 4975 __ bind(&check_false); | 4975 __ bind(&check_false); |
| 4976 __ LoadRoot(ip, Heap::kFalseValueRootIndex); | 4976 __ LoadRoot(ip, Heap::kFalseValueRootIndex); |
| 4977 __ cmp(scratch2, Operand(ip)); | 4977 __ cmp(scratch2, Operand(ip)); |
| 4978 __ RecordComment("Deferred TaggedToI: cannot truncate"); | 4978 __ RecordComment("Deferred TaggedToI: cannot truncate"); |
| 4979 DeoptimizeIf(ne, instr->environment()); | 4979 DeoptimizeIf(ne, instr); |
| 4980 __ mov(input_reg, Operand::Zero()); | 4980 __ mov(input_reg, Operand::Zero()); |
| 4981 } else { | 4981 } else { |
| 4982 __ RecordComment("Deferred TaggedToI: not a heap number"); | 4982 __ RecordComment("Deferred TaggedToI: not a heap number"); |
| 4983 DeoptimizeIf(ne, instr->environment()); | 4983 DeoptimizeIf(ne, instr); |
| 4984 | 4984 |
| 4985 __ sub(ip, scratch2, Operand(kHeapObjectTag)); | 4985 __ sub(ip, scratch2, Operand(kHeapObjectTag)); |
| 4986 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); | 4986 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); |
| 4987 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); | 4987 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); |
| 4988 __ RecordComment("Deferred TaggedToI: lost precision or NaN"); | 4988 __ RecordComment("Deferred TaggedToI: lost precision or NaN"); |
| 4989 DeoptimizeIf(ne, instr->environment()); | 4989 DeoptimizeIf(ne, instr); |
| 4990 | 4990 |
| 4991 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 4991 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 4992 __ cmp(input_reg, Operand::Zero()); | 4992 __ cmp(input_reg, Operand::Zero()); |
| 4993 __ b(ne, &done); | 4993 __ b(ne, &done); |
| 4994 __ VmovHigh(scratch1, double_scratch2); | 4994 __ VmovHigh(scratch1, double_scratch2); |
| 4995 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 4995 __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
| 4996 __ RecordComment("Deferred TaggedToI: minus zero"); | 4996 __ RecordComment("Deferred TaggedToI: minus zero"); |
| 4997 DeoptimizeIf(ne, instr->environment()); | 4997 DeoptimizeIf(ne, instr); |
| 4998 } | 4998 } |
| 4999 } | 4999 } |
| 5000 __ bind(&done); | 5000 __ bind(&done); |
| 5001 } | 5001 } |
| 5002 | 5002 |
| 5003 | 5003 |
| 5004 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 5004 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| 5005 class DeferredTaggedToI FINAL : public LDeferredCode { | 5005 class DeferredTaggedToI FINAL : public LDeferredCode { |
| 5006 public: | 5006 public: |
| 5007 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 5007 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
| (...skipping 34 matching lines...) |
| 5042 LOperand* result = instr->result(); | 5042 LOperand* result = instr->result(); |
| 5043 DCHECK(result->IsDoubleRegister()); | 5043 DCHECK(result->IsDoubleRegister()); |
| 5044 | 5044 |
| 5045 Register input_reg = ToRegister(input); | 5045 Register input_reg = ToRegister(input); |
| 5046 DwVfpRegister result_reg = ToDoubleRegister(result); | 5046 DwVfpRegister result_reg = ToDoubleRegister(result); |
| 5047 | 5047 |
| 5048 HValue* value = instr->hydrogen()->value(); | 5048 HValue* value = instr->hydrogen()->value(); |
| 5049 NumberUntagDMode mode = value->representation().IsSmi() | 5049 NumberUntagDMode mode = value->representation().IsSmi() |
| 5050 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; | 5050 ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED; |
| 5051 | 5051 |
| 5052 EmitNumberUntagD(input_reg, result_reg, | 5052 EmitNumberUntagD(instr, input_reg, result_reg, mode); |
| 5053 instr->hydrogen()->can_convert_undefined_to_nan(), | |
| 5054 instr->hydrogen()->deoptimize_on_minus_zero(), | |
| 5055 instr->environment(), | |
| 5056 mode); | |
| 5057 } | 5053 } |
| 5058 | 5054 |
| 5059 | 5055 |
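DoNumberUntagD above shows the matching helper-side change: EmitNumberUntagD now takes the LNumberUntagD instruction and reads the undefined-to-NaN and minus-zero flags from instr->hydrogen() itself, so the call shrinks from six arguments to four. A sketch of just that call, restating the hunk above:

    // Before: flags and environment were forwarded one by one.
    EmitNumberUntagD(input_reg, result_reg,
                     instr->hydrogen()->can_convert_undefined_to_nan(),
                     instr->hydrogen()->deoptimize_on_minus_zero(),
                     instr->environment(), mode);
    // After: the instruction carries everything the helper needs.
    EmitNumberUntagD(instr, input_reg, result_reg, mode);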
| 5060 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { | 5056 void LCodeGen::DoDoubleToI(LDoubleToI* instr) { |
| 5061 Register result_reg = ToRegister(instr->result()); | 5057 Register result_reg = ToRegister(instr->result()); |
| 5062 Register scratch1 = scratch0(); | 5058 Register scratch1 = scratch0(); |
| 5063 DwVfpRegister double_input = ToDoubleRegister(instr->value()); | 5059 DwVfpRegister double_input = ToDoubleRegister(instr->value()); |
| 5064 LowDwVfpRegister double_scratch = double_scratch0(); | 5060 LowDwVfpRegister double_scratch = double_scratch0(); |
| 5065 | 5061 |
| 5066 if (instr->truncating()) { | 5062 if (instr->truncating()) { |
| 5067 __ TruncateDoubleToI(result_reg, double_input); | 5063 __ TruncateDoubleToI(result_reg, double_input); |
| 5068 } else { | 5064 } else { |
| 5069 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); | 5065 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); |
| 5070 // Deoptimize if the input wasn't a int32 (inside a double). | 5066 // Deoptimize if the input wasn't a int32 (inside a double). |
| 5071 DeoptimizeIf(ne, instr->environment()); | 5067 DeoptimizeIf(ne, instr); |
| 5072 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5068 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5073 Label done; | 5069 Label done; |
| 5074 __ cmp(result_reg, Operand::Zero()); | 5070 __ cmp(result_reg, Operand::Zero()); |
| 5075 __ b(ne, &done); | 5071 __ b(ne, &done); |
| 5076 __ VmovHigh(scratch1, double_input); | 5072 __ VmovHigh(scratch1, double_input); |
| 5077 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 5073 __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
| 5078 DeoptimizeIf(ne, instr->environment()); | 5074 DeoptimizeIf(ne, instr); |
| 5079 __ bind(&done); | 5075 __ bind(&done); |
| 5080 } | 5076 } |
| 5081 } | 5077 } |
| 5082 } | 5078 } |
| 5083 | 5079 |
| 5084 | 5080 |
| 5085 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 5081 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
| 5086 Register result_reg = ToRegister(instr->result()); | 5082 Register result_reg = ToRegister(instr->result()); |
| 5087 Register scratch1 = scratch0(); | 5083 Register scratch1 = scratch0(); |
| 5088 DwVfpRegister double_input = ToDoubleRegister(instr->value()); | 5084 DwVfpRegister double_input = ToDoubleRegister(instr->value()); |
| 5089 LowDwVfpRegister double_scratch = double_scratch0(); | 5085 LowDwVfpRegister double_scratch = double_scratch0(); |
| 5090 | 5086 |
| 5091 if (instr->truncating()) { | 5087 if (instr->truncating()) { |
| 5092 __ TruncateDoubleToI(result_reg, double_input); | 5088 __ TruncateDoubleToI(result_reg, double_input); |
| 5093 } else { | 5089 } else { |
| 5094 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); | 5090 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); |
| 5095 // Deoptimize if the input wasn't a int32 (inside a double). | 5091 // Deoptimize if the input wasn't a int32 (inside a double). |
| 5096 DeoptimizeIf(ne, instr->environment()); | 5092 DeoptimizeIf(ne, instr); |
| 5097 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5093 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5098 Label done; | 5094 Label done; |
| 5099 __ cmp(result_reg, Operand::Zero()); | 5095 __ cmp(result_reg, Operand::Zero()); |
| 5100 __ b(ne, &done); | 5096 __ b(ne, &done); |
| 5101 __ VmovHigh(scratch1, double_input); | 5097 __ VmovHigh(scratch1, double_input); |
| 5102 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 5098 __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
| 5103 DeoptimizeIf(ne, instr->environment()); | 5099 DeoptimizeIf(ne, instr); |
| 5104 __ bind(&done); | 5100 __ bind(&done); |
| 5105 } | 5101 } |
| 5106 } | 5102 } |
| 5107 __ SmiTag(result_reg, SetCC); | 5103 __ SmiTag(result_reg, SetCC); |
| 5108 DeoptimizeIf(vs, instr->environment()); | 5104 DeoptimizeIf(vs, instr); |
| 5109 } | 5105 } |
| 5110 | 5106 |
| 5111 | 5107 |
| 5112 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 5108 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| 5113 LOperand* input = instr->value(); | 5109 LOperand* input = instr->value(); |
| 5114 __ SmiTst(ToRegister(input)); | 5110 __ SmiTst(ToRegister(input)); |
| 5115 DeoptimizeIf(ne, instr->environment()); | 5111 DeoptimizeIf(ne, instr); |
| 5116 } | 5112 } |
| 5117 | 5113 |
| 5118 | 5114 |
| 5119 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 5115 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
| 5120 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 5116 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
| 5121 LOperand* input = instr->value(); | 5117 LOperand* input = instr->value(); |
| 5122 __ SmiTst(ToRegister(input)); | 5118 __ SmiTst(ToRegister(input)); |
| 5123 DeoptimizeIf(eq, instr->environment()); | 5119 DeoptimizeIf(eq, instr); |
| 5124 } | 5120 } |
| 5125 } | 5121 } |
| 5126 | 5122 |
| 5127 | 5123 |
| 5128 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 5124 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
| 5129 Register input = ToRegister(instr->value()); | 5125 Register input = ToRegister(instr->value()); |
| 5130 Register scratch = scratch0(); | 5126 Register scratch = scratch0(); |
| 5131 | 5127 |
| 5132 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 5128 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 5133 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 5129 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
| 5134 | 5130 |
| 5135 if (instr->hydrogen()->is_interval_check()) { | 5131 if (instr->hydrogen()->is_interval_check()) { |
| 5136 InstanceType first; | 5132 InstanceType first; |
| 5137 InstanceType last; | 5133 InstanceType last; |
| 5138 instr->hydrogen()->GetCheckInterval(&first, &last); | 5134 instr->hydrogen()->GetCheckInterval(&first, &last); |
| 5139 | 5135 |
| 5140 __ cmp(scratch, Operand(first)); | 5136 __ cmp(scratch, Operand(first)); |
| 5141 | 5137 |
| 5142 // If there is only one type in the interval check for equality. | 5138 // If there is only one type in the interval check for equality. |
| 5143 if (first == last) { | 5139 if (first == last) { |
| 5144 DeoptimizeIf(ne, instr->environment()); | 5140 DeoptimizeIf(ne, instr); |
| 5145 } else { | 5141 } else { |
| 5146 DeoptimizeIf(lo, instr->environment()); | 5142 DeoptimizeIf(lo, instr); |
| 5147 // Omit check for the last type. | 5143 // Omit check for the last type. |
| 5148 if (last != LAST_TYPE) { | 5144 if (last != LAST_TYPE) { |
| 5149 __ cmp(scratch, Operand(last)); | 5145 __ cmp(scratch, Operand(last)); |
| 5150 DeoptimizeIf(hi, instr->environment()); | 5146 DeoptimizeIf(hi, instr); |
| 5151 } | 5147 } |
| 5152 } | 5148 } |
| 5153 } else { | 5149 } else { |
| 5154 uint8_t mask; | 5150 uint8_t mask; |
| 5155 uint8_t tag; | 5151 uint8_t tag; |
| 5156 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 5152 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
| 5157 | 5153 |
| 5158 if (base::bits::IsPowerOfTwo32(mask)) { | 5154 if (base::bits::IsPowerOfTwo32(mask)) { |
| 5159 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); | 5155 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); |
| 5160 __ tst(scratch, Operand(mask)); | 5156 __ tst(scratch, Operand(mask)); |
| 5161 DeoptimizeIf(tag == 0 ? ne : eq, instr->environment()); | 5157 DeoptimizeIf(tag == 0 ? ne : eq, instr); |
| 5162 } else { | 5158 } else { |
| 5163 __ and_(scratch, scratch, Operand(mask)); | 5159 __ and_(scratch, scratch, Operand(mask)); |
| 5164 __ cmp(scratch, Operand(tag)); | 5160 __ cmp(scratch, Operand(tag)); |
| 5165 DeoptimizeIf(ne, instr->environment()); | 5161 DeoptimizeIf(ne, instr); |
| 5166 } | 5162 } |
| 5167 } | 5163 } |
| 5168 } | 5164 } |
| 5169 | 5165 |
| 5170 | 5166 |
| 5171 void LCodeGen::DoCheckValue(LCheckValue* instr) { | 5167 void LCodeGen::DoCheckValue(LCheckValue* instr) { |
| 5172 Register reg = ToRegister(instr->value()); | 5168 Register reg = ToRegister(instr->value()); |
| 5173 Handle<HeapObject> object = instr->hydrogen()->object().handle(); | 5169 Handle<HeapObject> object = instr->hydrogen()->object().handle(); |
| 5174 AllowDeferredHandleDereference smi_check; | 5170 AllowDeferredHandleDereference smi_check; |
| 5175 if (isolate()->heap()->InNewSpace(*object)) { | 5171 if (isolate()->heap()->InNewSpace(*object)) { |
| 5176 Register reg = ToRegister(instr->value()); | 5172 Register reg = ToRegister(instr->value()); |
| 5177 Handle<Cell> cell = isolate()->factory()->NewCell(object); | 5173 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
| 5178 __ mov(ip, Operand(Handle<Object>(cell))); | 5174 __ mov(ip, Operand(Handle<Object>(cell))); |
| 5179 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); | 5175 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); |
| 5180 __ cmp(reg, ip); | 5176 __ cmp(reg, ip); |
| 5181 } else { | 5177 } else { |
| 5182 __ cmp(reg, Operand(object)); | 5178 __ cmp(reg, Operand(object)); |
| 5183 } | 5179 } |
| 5184 DeoptimizeIf(ne, instr->environment()); | 5180 DeoptimizeIf(ne, instr); |
| 5185 } | 5181 } |
| 5186 | 5182 |
| 5187 | 5183 |
| 5188 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 5184 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
| 5189 { | 5185 { |
| 5190 PushSafepointRegistersScope scope(this); | 5186 PushSafepointRegistersScope scope(this); |
| 5191 __ push(object); | 5187 __ push(object); |
| 5192 __ mov(cp, Operand::Zero()); | 5188 __ mov(cp, Operand::Zero()); |
| 5193 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 5189 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
| 5194 RecordSafepointWithRegisters( | 5190 RecordSafepointWithRegisters( |
| 5195 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 5191 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
| 5196 __ StoreToSafepointRegisterSlot(r0, scratch0()); | 5192 __ StoreToSafepointRegisterSlot(r0, scratch0()); |
| 5197 } | 5193 } |
| 5198 __ tst(scratch0(), Operand(kSmiTagMask)); | 5194 __ tst(scratch0(), Operand(kSmiTagMask)); |
| 5199 DeoptimizeIf(eq, instr->environment()); | 5195 DeoptimizeIf(eq, instr); |
| 5200 } | 5196 } |
| 5201 | 5197 |
| 5202 | 5198 |
| 5203 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 5199 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| 5204 class DeferredCheckMaps FINAL : public LDeferredCode { | 5200 class DeferredCheckMaps FINAL : public LDeferredCode { |
| 5205 public: | 5201 public: |
| 5206 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 5202 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
| 5207 : LDeferredCode(codegen), instr_(instr), object_(object) { | 5203 : LDeferredCode(codegen), instr_(instr), object_(object) { |
| 5208 SetExit(check_maps()); | 5204 SetExit(check_maps()); |
| 5209 } | 5205 } |
| (...skipping 36 matching lines...) |
| 5246 Handle<Map> map = maps->at(i).handle(); | 5242 Handle<Map> map = maps->at(i).handle(); |
| 5247 __ CompareMap(map_reg, map, &success); | 5243 __ CompareMap(map_reg, map, &success); |
| 5248 __ b(eq, &success); | 5244 __ b(eq, &success); |
| 5249 } | 5245 } |
| 5250 | 5246 |
| 5251 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 5247 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
| 5252 __ CompareMap(map_reg, map, &success); | 5248 __ CompareMap(map_reg, map, &success); |
| 5253 if (instr->hydrogen()->HasMigrationTarget()) { | 5249 if (instr->hydrogen()->HasMigrationTarget()) { |
| 5254 __ b(ne, deferred->entry()); | 5250 __ b(ne, deferred->entry()); |
| 5255 } else { | 5251 } else { |
| 5256 DeoptimizeIf(ne, instr->environment()); | 5252 DeoptimizeIf(ne, instr); |
| 5257 } | 5253 } |
| 5258 | 5254 |
| 5259 __ bind(&success); | 5255 __ bind(&success); |
| 5260 } | 5256 } |
| 5261 | 5257 |
| 5262 | 5258 |
| 5263 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5259 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 5264 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5260 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| 5265 Register result_reg = ToRegister(instr->result()); | 5261 Register result_reg = ToRegister(instr->result()); |
| 5266 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); | 5262 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); |
| (...skipping 18 matching lines...) |
| 5285 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); | 5281 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); |
| 5286 | 5282 |
| 5287 // Check for heap number | 5283 // Check for heap number |
| 5288 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5284 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 5289 __ cmp(scratch, Operand(factory()->heap_number_map())); | 5285 __ cmp(scratch, Operand(factory()->heap_number_map())); |
| 5290 __ b(eq, &heap_number); | 5286 __ b(eq, &heap_number); |
| 5291 | 5287 |
| 5292 // Check for undefined. Undefined is converted to zero for clamping | 5288 // Check for undefined. Undefined is converted to zero for clamping |
| 5293 // conversions. | 5289 // conversions. |
| 5294 __ cmp(input_reg, Operand(factory()->undefined_value())); | 5290 __ cmp(input_reg, Operand(factory()->undefined_value())); |
| 5295 DeoptimizeIf(ne, instr->environment()); | 5291 DeoptimizeIf(ne, instr); |
| 5296 __ mov(result_reg, Operand::Zero()); | 5292 __ mov(result_reg, Operand::Zero()); |
| 5297 __ jmp(&done); | 5293 __ jmp(&done); |
| 5298 | 5294 |
| 5299 // Heap number | 5295 // Heap number |
| 5300 __ bind(&heap_number); | 5296 __ bind(&heap_number); |
| 5301 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 5297 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 5302 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); | 5298 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); |
| 5303 __ jmp(&done); | 5299 __ jmp(&done); |
| 5304 | 5300 |
| 5305 // smi | 5301 // smi |
| (...skipping 357 matching lines...) |
| 5663 Deoptimizer::BailoutType type = instr->hydrogen()->type(); | 5659 Deoptimizer::BailoutType type = instr->hydrogen()->type(); |
| 5664 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the | 5660 // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the |
| 5665 // needed return address), even though the implementation of LAZY and EAGER is | 5661 // needed return address), even though the implementation of LAZY and EAGER is |
| 5666 // now identical. When LAZY is eventually completely folded into EAGER, remove | 5662 // now identical. When LAZY is eventually completely folded into EAGER, remove |
| 5667 // the special case below. | 5663 // the special case below. |
| 5668 if (info()->IsStub() && type == Deoptimizer::EAGER) { | 5664 if (info()->IsStub() && type == Deoptimizer::EAGER) { |
| 5669 type = Deoptimizer::LAZY; | 5665 type = Deoptimizer::LAZY; |
| 5670 } | 5666 } |
| 5671 | 5667 |
| 5672 Comment(";;; deoptimize: %s", instr->hydrogen()->reason()); | 5668 Comment(";;; deoptimize: %s", instr->hydrogen()->reason()); |
| 5673 DeoptimizeIf(al, instr->environment(), type); | 5669 DeoptimizeIf(al, instr, type); |
| 5674 } | 5670 } |
| 5675 | 5671 |
| 5676 | 5672 |
| 5677 void LCodeGen::DoDummy(LDummy* instr) { | 5673 void LCodeGen::DoDummy(LDummy* instr) { |
| 5678 // Nothing to see here, move on! | 5674 // Nothing to see here, move on! |
| 5679 } | 5675 } |
| 5680 | 5676 |
| 5681 | 5677 |
| 5682 void LCodeGen::DoDummyUse(LDummyUse* instr) { | 5678 void LCodeGen::DoDummyUse(LDummyUse* instr) { |
| 5683 // Nothing to see here, move on! | 5679 // Nothing to see here, move on! |
| (...skipping 72 matching lines...) |
| 5756 DCHECK(!environment->HasBeenRegistered()); | 5752 DCHECK(!environment->HasBeenRegistered()); |
| 5757 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 5753 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 5758 | 5754 |
| 5759 GenerateOsrPrologue(); | 5755 GenerateOsrPrologue(); |
| 5760 } | 5756 } |
| 5761 | 5757 |
| 5762 | 5758 |
| 5763 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 5759 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
| 5764 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 5760 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| 5765 __ cmp(r0, ip); | 5761 __ cmp(r0, ip); |
| 5766 DeoptimizeIf(eq, instr->environment()); | 5762 DeoptimizeIf(eq, instr); |
| 5767 | 5763 |
| 5768 Register null_value = r5; | 5764 Register null_value = r5; |
| 5769 __ LoadRoot(null_value, Heap::kNullValueRootIndex); | 5765 __ LoadRoot(null_value, Heap::kNullValueRootIndex); |
| 5770 __ cmp(r0, null_value); | 5766 __ cmp(r0, null_value); |
| 5771 DeoptimizeIf(eq, instr->environment()); | 5767 DeoptimizeIf(eq, instr); |
| 5772 | 5768 |
| 5773 __ SmiTst(r0); | 5769 __ SmiTst(r0); |
| 5774 DeoptimizeIf(eq, instr->environment()); | 5770 DeoptimizeIf(eq, instr); |
| 5775 | 5771 |
| 5776 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | 5772 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
| 5777 __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE); | 5773 __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE); |
| 5778 DeoptimizeIf(le, instr->environment()); | 5774 DeoptimizeIf(le, instr); |
| 5779 | 5775 |
| 5780 Label use_cache, call_runtime; | 5776 Label use_cache, call_runtime; |
| 5781 __ CheckEnumCache(null_value, &call_runtime); | 5777 __ CheckEnumCache(null_value, &call_runtime); |
| 5782 | 5778 |
| 5783 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); | 5779 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| 5784 __ b(&use_cache); | 5780 __ b(&use_cache); |
| 5785 | 5781 |
| 5786 // Get the set of properties to enumerate. | 5782 // Get the set of properties to enumerate. |
| 5787 __ bind(&call_runtime); | 5783 __ bind(&call_runtime); |
| 5788 __ push(r0); | 5784 __ push(r0); |
| 5789 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | 5785 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
| 5790 | 5786 |
| 5791 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); | 5787 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| 5792 __ LoadRoot(ip, Heap::kMetaMapRootIndex); | 5788 __ LoadRoot(ip, Heap::kMetaMapRootIndex); |
| 5793 __ cmp(r1, ip); | 5789 __ cmp(r1, ip); |
| 5794 DeoptimizeIf(ne, instr->environment()); | 5790 DeoptimizeIf(ne, instr); |
| 5795 __ bind(&use_cache); | 5791 __ bind(&use_cache); |
| 5796 } | 5792 } |
| 5797 | 5793 |
| 5798 | 5794 |
| 5799 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { | 5795 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
| 5800 Register map = ToRegister(instr->map()); | 5796 Register map = ToRegister(instr->map()); |
| 5801 Register result = ToRegister(instr->result()); | 5797 Register result = ToRegister(instr->result()); |
| 5802 Label load_cache, done; | 5798 Label load_cache, done; |
| 5803 __ EnumLength(result, map); | 5799 __ EnumLength(result, map); |
| 5804 __ cmp(result, Operand(Smi::FromInt(0))); | 5800 __ cmp(result, Operand(Smi::FromInt(0))); |
| 5805 __ b(ne, &load_cache); | 5801 __ b(ne, &load_cache); |
| 5806 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); | 5802 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); |
| 5807 __ jmp(&done); | 5803 __ jmp(&done); |
| 5808 | 5804 |
| 5809 __ bind(&load_cache); | 5805 __ bind(&load_cache); |
| 5810 __ LoadInstanceDescriptors(map, result); | 5806 __ LoadInstanceDescriptors(map, result); |
| 5811 __ ldr(result, | 5807 __ ldr(result, |
| 5812 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 5808 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
| 5813 __ ldr(result, | 5809 __ ldr(result, |
| 5814 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 5810 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
| 5815 __ cmp(result, Operand::Zero()); | 5811 __ cmp(result, Operand::Zero()); |
| 5816 DeoptimizeIf(eq, instr->environment()); | 5812 DeoptimizeIf(eq, instr); |
| 5817 | 5813 |
| 5818 __ bind(&done); | 5814 __ bind(&done); |
| 5819 } | 5815 } |
| 5820 | 5816 |
| 5821 | 5817 |
| 5822 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5818 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
| 5823 Register object = ToRegister(instr->value()); | 5819 Register object = ToRegister(instr->value()); |
| 5824 Register map = ToRegister(instr->map()); | 5820 Register map = ToRegister(instr->map()); |
| 5825 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 5821 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5826 __ cmp(map, scratch0()); | 5822 __ cmp(map, scratch0()); |
| 5827 DeoptimizeIf(ne, instr->environment()); | 5823 DeoptimizeIf(ne, instr); |
| 5828 } | 5824 } |
| 5829 | 5825 |
| 5830 | 5826 |
| 5831 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 5827 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
| 5832 Register result, | 5828 Register result, |
| 5833 Register object, | 5829 Register object, |
| 5834 Register index) { | 5830 Register index) { |
| 5835 PushSafepointRegistersScope scope(this); | 5831 PushSafepointRegistersScope scope(this); |
| 5836 __ Push(object); | 5832 __ Push(object); |
| 5837 __ Push(index); | 5833 __ Push(index); |
| (...skipping 76 matching lines...) |
| 5914 __ Push(scope_info); | 5910 __ Push(scope_info); |
| 5915 __ push(ToRegister(instr->function())); | 5911 __ push(ToRegister(instr->function())); |
| 5916 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 5912 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
| 5917 RecordSafepoint(Safepoint::kNoLazyDeopt); | 5913 RecordSafepoint(Safepoint::kNoLazyDeopt); |
| 5918 } | 5914 } |
| 5919 | 5915 |
| 5920 | 5916 |
| 5921 #undef __ | 5917 #undef __ |
| 5922 | 5918 |
| 5923 } } // namespace v8::internal | 5919 } } // namespace v8::internal |
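
Note on the call-site pattern: every hunk above makes the same mechanical change, replacing DeoptimizeIf(cond, instr->environment()) with DeoptimizeIf(cond, instr). The sketch below is not the V8 implementation; it is a minimal, compilable illustration of that shape, assuming the new overload simply pulls the environment out of the instruction. All types here (LEnvironment, LInstruction, Condition, LCodeGen) are placeholder stand-ins for the real classes.

    // Minimal sketch of the refactoring pattern seen at each call site.
    // Types are placeholders, not the actual V8 declarations.
    struct LEnvironment {};

    struct LInstruction {
      // Stand-in accessor mirroring instr->environment() from the diff.
      LEnvironment* environment() { return &env_; }
      LEnvironment env_;
    };

    enum Condition { eq, ne, le, al };

    class LCodeGen {
     public:
      // Old shape: every caller spelled out instr->environment() itself.
      void DeoptimizeIfWithEnvironment(Condition condition,
                                       LEnvironment* environment) {
        // ... register the environment and emit the conditional deopt branch ...
        (void)condition;
        (void)environment;
      }

      // New shape (assumed): the caller passes the instruction and the
      // environment lookup happens once, here, instead of at every call site.
      void DeoptimizeIf(Condition condition, LInstruction* instr) {
        LEnvironment* environment = instr->environment();
        DeoptimizeIfWithEnvironment(condition, environment);
      }
    };

    int main() {
      LCodeGen cgen;
      LInstruction instr;
      // Before this patch a call site read: DeoptimizeIf(ne, instr->environment());
      // After this patch it reads:
      cgen.DeoptimizeIf(ne, &instr);
      return 0;
    }

The practical effect, as the hunks show, is purely at the call sites: the argument list shrinks and the environment plumbing is centralized, with no change to the emitted deoptimization code.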