| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/arm/lithium-codegen-arm.h" | 7 #include "src/arm/lithium-codegen-arm.h" |
| 8 #include "src/arm/lithium-gap-resolver-arm.h" | 8 #include "src/arm/lithium-gap-resolver-arm.h" |
| 9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
| 10 #include "src/code-factory.h" | 10 #include "src/code-factory.h" |
| (...skipping 823 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 834 int pc_offset = masm()->pc_offset(); | 834 int pc_offset = masm()->pc_offset(); |
| 835 environment->Register(deoptimization_index, | 835 environment->Register(deoptimization_index, |
| 836 translation.index(), | 836 translation.index(), |
| 837 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 837 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
| 838 deoptimizations_.Add(environment, zone()); | 838 deoptimizations_.Add(environment, zone()); |
| 839 } | 839 } |
| 840 } | 840 } |
| 841 | 841 |
| 842 | 842 |
| 843 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 843 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, |
| 844 const char* detail, | 844 Deoptimizer::DeoptReason deopt_reason, |
| 845 Deoptimizer::BailoutType bailout_type) { | 845 Deoptimizer::BailoutType bailout_type) { |
| 846 LEnvironment* environment = instr->environment(); | 846 LEnvironment* environment = instr->environment(); |
| 847 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 847 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 848 DCHECK(environment->HasBeenRegistered()); | 848 DCHECK(environment->HasBeenRegistered()); |
| 849 int id = environment->deoptimization_index(); | 849 int id = environment->deoptimization_index(); |
| 850 DCHECK(info()->IsOptimizing() || info()->IsStub()); | 850 DCHECK(info()->IsOptimizing() || info()->IsStub()); |
| 851 Address entry = | 851 Address entry = |
| 852 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 852 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
| 853 if (entry == NULL) { | 853 if (entry == NULL) { |
| 854 Abort(kBailoutWasNotPrepared); | 854 Abort(kBailoutWasNotPrepared); |
| (...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 888 condition = ne; | 888 condition = ne; |
| 889 __ cmp(scratch, Operand::Zero()); | 889 __ cmp(scratch, Operand::Zero()); |
| 890 } | 890 } |
| 891 } | 891 } |
| 892 | 892 |
| 893 if (info()->ShouldTrapOnDeopt()) { | 893 if (info()->ShouldTrapOnDeopt()) { |
| 894 __ stop("trap_on_deopt", condition); | 894 __ stop("trap_on_deopt", condition); |
| 895 } | 895 } |
| 896 | 896 |
| 897 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), | 897 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), |
| 898 instr->Mnemonic(), detail); | 898 instr->Mnemonic(), deopt_reason); |
| 899 DCHECK(info()->IsStub() || frame_is_built_); | 899 DCHECK(info()->IsStub() || frame_is_built_); |
| 900 // Go through jump table if we need to handle condition, build frame, or | 900 // Go through jump table if we need to handle condition, build frame, or |
| 901 // restore caller doubles. | 901 // restore caller doubles. |
| 902 if (condition == al && frame_is_built_ && | 902 if (condition == al && frame_is_built_ && |
| 903 !info()->saves_caller_doubles()) { | 903 !info()->saves_caller_doubles()) { |
| 904 DeoptComment(reason); | 904 DeoptComment(reason); |
| 905 __ Call(entry, RelocInfo::RUNTIME_ENTRY); | 905 __ Call(entry, RelocInfo::RUNTIME_ENTRY); |
| 906 } else { | 906 } else { |
| 907 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, | 907 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, |
| 908 !frame_is_built_); | 908 !frame_is_built_); |
| 909 // We often have several deopts to the same entry, reuse the last | 909 // We often have several deopts to the same entry, reuse the last |
| 910 // jump entry if this is the case. | 910 // jump entry if this is the case. |
| 911 if (jump_table_.is_empty() || | 911 if (jump_table_.is_empty() || |
| 912 !table_entry.IsEquivalentTo(jump_table_.last())) { | 912 !table_entry.IsEquivalentTo(jump_table_.last())) { |
| 913 jump_table_.Add(table_entry, zone()); | 913 jump_table_.Add(table_entry, zone()); |
| 914 } | 914 } |
| 915 __ b(condition, &jump_table_.last().label); | 915 __ b(condition, &jump_table_.last().label); |
| 916 } | 916 } |
| 917 } | 917 } |
| 918 | 918 |
| 919 | 919 |
| 920 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 920 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, |
| 921 const char* detail) { | 921 Deoptimizer::DeoptReason deopt_reason) { |
| 922 Deoptimizer::BailoutType bailout_type = info()->IsStub() | 922 Deoptimizer::BailoutType bailout_type = info()->IsStub() |
| 923 ? Deoptimizer::LAZY | 923 ? Deoptimizer::LAZY |
| 924 : Deoptimizer::EAGER; | 924 : Deoptimizer::EAGER; |
| 925 DeoptimizeIf(condition, instr, detail, bailout_type); | 925 DeoptimizeIf(condition, instr, deopt_reason, bailout_type); |
| 926 } | 926 } |
| 927 | 927 |
| 928 | 928 |
| 929 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 929 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
| 930 int length = deoptimizations_.length(); | 930 int length = deoptimizations_.length(); |
| 931 if (length == 0) return; | 931 if (length == 0) return; |
| 932 Handle<DeoptimizationInputData> data = | 932 Handle<DeoptimizationInputData> data = |
| 933 DeoptimizationInputData::New(isolate(), length, TENURED); | 933 DeoptimizationInputData::New(isolate(), length, TENURED); |
| 934 | 934 |
| 935 Handle<ByteArray> translations = | 935 Handle<ByteArray> translations = |
| (...skipping 215 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1151 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1151 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1152 Label dividend_is_not_negative, done; | 1152 Label dividend_is_not_negative, done; |
| 1153 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 1153 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
| 1154 __ cmp(dividend, Operand::Zero()); | 1154 __ cmp(dividend, Operand::Zero()); |
| 1155 __ b(pl, &dividend_is_not_negative); | 1155 __ b(pl, &dividend_is_not_negative); |
| 1156 // Note that this is correct even for kMinInt operands. | 1156 // Note that this is correct even for kMinInt operands. |
| 1157 __ rsb(dividend, dividend, Operand::Zero()); | 1157 __ rsb(dividend, dividend, Operand::Zero()); |
| 1158 __ and_(dividend, dividend, Operand(mask)); | 1158 __ and_(dividend, dividend, Operand(mask)); |
| 1159 __ rsb(dividend, dividend, Operand::Zero(), SetCC); | 1159 __ rsb(dividend, dividend, Operand::Zero(), SetCC); |
| 1160 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1160 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1161 DeoptimizeIf(eq, instr, "minus zero"); | 1161 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
| 1162 } | 1162 } |
| 1163 __ b(&done); | 1163 __ b(&done); |
| 1164 } | 1164 } |
| 1165 | 1165 |
| 1166 __ bind(&dividend_is_not_negative); | 1166 __ bind(&dividend_is_not_negative); |
| 1167 __ and_(dividend, dividend, Operand(mask)); | 1167 __ and_(dividend, dividend, Operand(mask)); |
| 1168 __ bind(&done); | 1168 __ bind(&done); |
| 1169 } | 1169 } |
| 1170 | 1170 |
| 1171 | 1171 |
| 1172 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 1172 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
| 1173 Register dividend = ToRegister(instr->dividend()); | 1173 Register dividend = ToRegister(instr->dividend()); |
| 1174 int32_t divisor = instr->divisor(); | 1174 int32_t divisor = instr->divisor(); |
| 1175 Register result = ToRegister(instr->result()); | 1175 Register result = ToRegister(instr->result()); |
| 1176 DCHECK(!dividend.is(result)); | 1176 DCHECK(!dividend.is(result)); |
| 1177 | 1177 |
| 1178 if (divisor == 0) { | 1178 if (divisor == 0) { |
| 1179 DeoptimizeIf(al, instr, "division by zero"); | 1179 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); |
| 1180 return; | 1180 return; |
| 1181 } | 1181 } |
| 1182 | 1182 |
| 1183 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1183 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1184 __ mov(ip, Operand(Abs(divisor))); | 1184 __ mov(ip, Operand(Abs(divisor))); |
| 1185 __ smull(result, ip, result, ip); | 1185 __ smull(result, ip, result, ip); |
| 1186 __ sub(result, dividend, result, SetCC); | 1186 __ sub(result, dividend, result, SetCC); |
| 1187 | 1187 |
| 1188 // Check for negative zero. | 1188 // Check for negative zero. |
| 1189 HMod* hmod = instr->hydrogen(); | 1189 HMod* hmod = instr->hydrogen(); |
| 1190 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1190 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1191 Label remainder_not_zero; | 1191 Label remainder_not_zero; |
| 1192 __ b(ne, &remainder_not_zero); | 1192 __ b(ne, &remainder_not_zero); |
| 1193 __ cmp(dividend, Operand::Zero()); | 1193 __ cmp(dividend, Operand::Zero()); |
| 1194 DeoptimizeIf(lt, instr, "minus zero"); | 1194 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); |
| 1195 __ bind(&remainder_not_zero); | 1195 __ bind(&remainder_not_zero); |
| 1196 } | 1196 } |
| 1197 } | 1197 } |
| 1198 | 1198 |
| 1199 | 1199 |
| 1200 void LCodeGen::DoModI(LModI* instr) { | 1200 void LCodeGen::DoModI(LModI* instr) { |
| 1201 HMod* hmod = instr->hydrogen(); | 1201 HMod* hmod = instr->hydrogen(); |
| 1202 if (CpuFeatures::IsSupported(SUDIV)) { | 1202 if (CpuFeatures::IsSupported(SUDIV)) { |
| 1203 CpuFeatureScope scope(masm(), SUDIV); | 1203 CpuFeatureScope scope(masm(), SUDIV); |
| 1204 | 1204 |
| 1205 Register left_reg = ToRegister(instr->left()); | 1205 Register left_reg = ToRegister(instr->left()); |
| 1206 Register right_reg = ToRegister(instr->right()); | 1206 Register right_reg = ToRegister(instr->right()); |
| 1207 Register result_reg = ToRegister(instr->result()); | 1207 Register result_reg = ToRegister(instr->result()); |
| 1208 | 1208 |
| 1209 Label done; | 1209 Label done; |
| 1210 // Check for x % 0, sdiv might signal an exception. We have to deopt in this | 1210 // Check for x % 0, sdiv might signal an exception. We have to deopt in this |
| 1211 // case because we can't return a NaN. | 1211 // case because we can't return a NaN. |
| 1212 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1212 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1213 __ cmp(right_reg, Operand::Zero()); | 1213 __ cmp(right_reg, Operand::Zero()); |
| 1214 DeoptimizeIf(eq, instr, "division by zero"); | 1214 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); |
| 1215 } | 1215 } |
| 1216 | 1216 |
| 1217 // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we | 1217 // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we |
| 1218 // want. We have to deopt if we care about -0, because we can't return that. | 1218 // want. We have to deopt if we care about -0, because we can't return that. |
| 1219 if (hmod->CheckFlag(HValue::kCanOverflow)) { | 1219 if (hmod->CheckFlag(HValue::kCanOverflow)) { |
| 1220 Label no_overflow_possible; | 1220 Label no_overflow_possible; |
| 1221 __ cmp(left_reg, Operand(kMinInt)); | 1221 __ cmp(left_reg, Operand(kMinInt)); |
| 1222 __ b(ne, &no_overflow_possible); | 1222 __ b(ne, &no_overflow_possible); |
| 1223 __ cmp(right_reg, Operand(-1)); | 1223 __ cmp(right_reg, Operand(-1)); |
| 1224 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1224 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1225 DeoptimizeIf(eq, instr, "minus zero"); | 1225 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
| 1226 } else { | 1226 } else { |
| 1227 __ b(ne, &no_overflow_possible); | 1227 __ b(ne, &no_overflow_possible); |
| 1228 __ mov(result_reg, Operand::Zero()); | 1228 __ mov(result_reg, Operand::Zero()); |
| 1229 __ jmp(&done); | 1229 __ jmp(&done); |
| 1230 } | 1230 } |
| 1231 __ bind(&no_overflow_possible); | 1231 __ bind(&no_overflow_possible); |
| 1232 } | 1232 } |
| 1233 | 1233 |
| 1234 // For 'r3 = r1 % r2' we can have the following ARM code: | 1234 // For 'r3 = r1 % r2' we can have the following ARM code: |
| 1235 // sdiv r3, r1, r2 | 1235 // sdiv r3, r1, r2 |
| 1236 // mls r3, r3, r2, r1 | 1236 // mls r3, r3, r2, r1 |
| 1237 | 1237 |
| 1238 __ sdiv(result_reg, left_reg, right_reg); | 1238 __ sdiv(result_reg, left_reg, right_reg); |
| 1239 __ Mls(result_reg, result_reg, right_reg, left_reg); | 1239 __ Mls(result_reg, result_reg, right_reg, left_reg); |
| 1240 | 1240 |
| 1241 // If we care about -0, test if the dividend is <0 and the result is 0. | 1241 // If we care about -0, test if the dividend is <0 and the result is 0. |
| 1242 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1242 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1243 __ cmp(result_reg, Operand::Zero()); | 1243 __ cmp(result_reg, Operand::Zero()); |
| 1244 __ b(ne, &done); | 1244 __ b(ne, &done); |
| 1245 __ cmp(left_reg, Operand::Zero()); | 1245 __ cmp(left_reg, Operand::Zero()); |
| 1246 DeoptimizeIf(lt, instr, "minus zero"); | 1246 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); |
| 1247 } | 1247 } |
| 1248 __ bind(&done); | 1248 __ bind(&done); |
| 1249 | 1249 |
| 1250 } else { | 1250 } else { |
| 1251 // General case, without any SDIV support. | 1251 // General case, without any SDIV support. |
| 1252 Register left_reg = ToRegister(instr->left()); | 1252 Register left_reg = ToRegister(instr->left()); |
| 1253 Register right_reg = ToRegister(instr->right()); | 1253 Register right_reg = ToRegister(instr->right()); |
| 1254 Register result_reg = ToRegister(instr->result()); | 1254 Register result_reg = ToRegister(instr->result()); |
| 1255 Register scratch = scratch0(); | 1255 Register scratch = scratch0(); |
| 1256 DCHECK(!scratch.is(left_reg)); | 1256 DCHECK(!scratch.is(left_reg)); |
| 1257 DCHECK(!scratch.is(right_reg)); | 1257 DCHECK(!scratch.is(right_reg)); |
| 1258 DCHECK(!scratch.is(result_reg)); | 1258 DCHECK(!scratch.is(result_reg)); |
| 1259 DwVfpRegister dividend = ToDoubleRegister(instr->temp()); | 1259 DwVfpRegister dividend = ToDoubleRegister(instr->temp()); |
| 1260 DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); | 1260 DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); |
| 1261 DCHECK(!divisor.is(dividend)); | 1261 DCHECK(!divisor.is(dividend)); |
| 1262 LowDwVfpRegister quotient = double_scratch0(); | 1262 LowDwVfpRegister quotient = double_scratch0(); |
| 1263 DCHECK(!quotient.is(dividend)); | 1263 DCHECK(!quotient.is(dividend)); |
| 1264 DCHECK(!quotient.is(divisor)); | 1264 DCHECK(!quotient.is(divisor)); |
| 1265 | 1265 |
| 1266 Label done; | 1266 Label done; |
| 1267 // Check for x % 0, we have to deopt in this case because we can't return a | 1267 // Check for x % 0, we have to deopt in this case because we can't return a |
| 1268 // NaN. | 1268 // NaN. |
| 1269 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1269 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1270 __ cmp(right_reg, Operand::Zero()); | 1270 __ cmp(right_reg, Operand::Zero()); |
| 1271 DeoptimizeIf(eq, instr, "division by zero"); | 1271 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); |
| 1272 } | 1272 } |
| 1273 | 1273 |
| 1274 __ Move(result_reg, left_reg); | 1274 __ Move(result_reg, left_reg); |
| 1275 // Load the arguments in VFP registers. The divisor value is preloaded | 1275 // Load the arguments in VFP registers. The divisor value is preloaded |
| 1276 // before. Be careful that 'right_reg' is only live on entry. | 1276 // before. Be careful that 'right_reg' is only live on entry. |
| 1277 // TODO(svenpanne) The last comments seems to be wrong nowadays. | 1277 // TODO(svenpanne) The last comments seems to be wrong nowadays. |
| 1278 __ vmov(double_scratch0().low(), left_reg); | 1278 __ vmov(double_scratch0().low(), left_reg); |
| 1279 __ vcvt_f64_s32(dividend, double_scratch0().low()); | 1279 __ vcvt_f64_s32(dividend, double_scratch0().low()); |
| 1280 __ vmov(double_scratch0().low(), right_reg); | 1280 __ vmov(double_scratch0().low(), right_reg); |
| 1281 __ vcvt_f64_s32(divisor, double_scratch0().low()); | 1281 __ vcvt_f64_s32(divisor, double_scratch0().low()); |
| 1282 | 1282 |
| 1283 // We do not care about the sign of the divisor. Note that we still handle | 1283 // We do not care about the sign of the divisor. Note that we still handle |
| 1284 // the kMinInt % -1 case correctly, though. | 1284 // the kMinInt % -1 case correctly, though. |
| 1285 __ vabs(divisor, divisor); | 1285 __ vabs(divisor, divisor); |
| 1286 // Compute the quotient and round it to a 32bit integer. | 1286 // Compute the quotient and round it to a 32bit integer. |
| 1287 __ vdiv(quotient, dividend, divisor); | 1287 __ vdiv(quotient, dividend, divisor); |
| 1288 __ vcvt_s32_f64(quotient.low(), quotient); | 1288 __ vcvt_s32_f64(quotient.low(), quotient); |
| 1289 __ vcvt_f64_s32(quotient, quotient.low()); | 1289 __ vcvt_f64_s32(quotient, quotient.low()); |
| 1290 | 1290 |
| 1291 // Compute the remainder in result. | 1291 // Compute the remainder in result. |
| 1292 __ vmul(double_scratch0(), divisor, quotient); | 1292 __ vmul(double_scratch0(), divisor, quotient); |
| 1293 __ vcvt_s32_f64(double_scratch0().low(), double_scratch0()); | 1293 __ vcvt_s32_f64(double_scratch0().low(), double_scratch0()); |
| 1294 __ vmov(scratch, double_scratch0().low()); | 1294 __ vmov(scratch, double_scratch0().low()); |
| 1295 __ sub(result_reg, left_reg, scratch, SetCC); | 1295 __ sub(result_reg, left_reg, scratch, SetCC); |
| 1296 | 1296 |
| 1297 // If we care about -0, test if the dividend is <0 and the result is 0. | 1297 // If we care about -0, test if the dividend is <0 and the result is 0. |
| 1298 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1298 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1299 __ b(ne, &done); | 1299 __ b(ne, &done); |
| 1300 __ cmp(left_reg, Operand::Zero()); | 1300 __ cmp(left_reg, Operand::Zero()); |
| 1301 DeoptimizeIf(mi, instr, "minus zero"); | 1301 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); |
| 1302 } | 1302 } |
| 1303 __ bind(&done); | 1303 __ bind(&done); |
| 1304 } | 1304 } |
| 1305 } | 1305 } |
| 1306 | 1306 |
| 1307 | 1307 |
| 1308 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1308 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
| 1309 Register dividend = ToRegister(instr->dividend()); | 1309 Register dividend = ToRegister(instr->dividend()); |
| 1310 int32_t divisor = instr->divisor(); | 1310 int32_t divisor = instr->divisor(); |
| 1311 Register result = ToRegister(instr->result()); | 1311 Register result = ToRegister(instr->result()); |
| 1312 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 1312 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
| 1313 DCHECK(!result.is(dividend)); | 1313 DCHECK(!result.is(dividend)); |
| 1314 | 1314 |
| 1315 // Check for (0 / -x) that will produce negative zero. | 1315 // Check for (0 / -x) that will produce negative zero. |
| 1316 HDiv* hdiv = instr->hydrogen(); | 1316 HDiv* hdiv = instr->hydrogen(); |
| 1317 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1317 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1318 __ cmp(dividend, Operand::Zero()); | 1318 __ cmp(dividend, Operand::Zero()); |
| 1319 DeoptimizeIf(eq, instr, "minus zero"); | 1319 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
| 1320 } | 1320 } |
| 1321 // Check for (kMinInt / -1). | 1321 // Check for (kMinInt / -1). |
| 1322 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1322 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
| 1323 __ cmp(dividend, Operand(kMinInt)); | 1323 __ cmp(dividend, Operand(kMinInt)); |
| 1324 DeoptimizeIf(eq, instr, "overflow"); | 1324 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); |
| 1325 } | 1325 } |
| 1326 // Deoptimize if remainder will not be 0. | 1326 // Deoptimize if remainder will not be 0. |
| 1327 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | 1327 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
| 1328 divisor != 1 && divisor != -1) { | 1328 divisor != 1 && divisor != -1) { |
| 1329 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1329 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
| 1330 __ tst(dividend, Operand(mask)); | 1330 __ tst(dividend, Operand(mask)); |
| 1331 DeoptimizeIf(ne, instr, "lost precision"); | 1331 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); |
| 1332 } | 1332 } |
| 1333 | 1333 |
| 1334 if (divisor == -1) { // Nice shortcut, not needed for correctness. | 1334 if (divisor == -1) { // Nice shortcut, not needed for correctness. |
| 1335 __ rsb(result, dividend, Operand(0)); | 1335 __ rsb(result, dividend, Operand(0)); |
| 1336 return; | 1336 return; |
| 1337 } | 1337 } |
| 1338 int32_t shift = WhichPowerOf2Abs(divisor); | 1338 int32_t shift = WhichPowerOf2Abs(divisor); |
| 1339 if (shift == 0) { | 1339 if (shift == 0) { |
| 1340 __ mov(result, dividend); | 1340 __ mov(result, dividend); |
| 1341 } else if (shift == 1) { | 1341 } else if (shift == 1) { |
| 1342 __ add(result, dividend, Operand(dividend, LSR, 31)); | 1342 __ add(result, dividend, Operand(dividend, LSR, 31)); |
| 1343 } else { | 1343 } else { |
| 1344 __ mov(result, Operand(dividend, ASR, 31)); | 1344 __ mov(result, Operand(dividend, ASR, 31)); |
| 1345 __ add(result, dividend, Operand(result, LSR, 32 - shift)); | 1345 __ add(result, dividend, Operand(result, LSR, 32 - shift)); |
| 1346 } | 1346 } |
| 1347 if (shift > 0) __ mov(result, Operand(result, ASR, shift)); | 1347 if (shift > 0) __ mov(result, Operand(result, ASR, shift)); |
| 1348 if (divisor < 0) __ rsb(result, result, Operand(0)); | 1348 if (divisor < 0) __ rsb(result, result, Operand(0)); |
| 1349 } | 1349 } |
| 1350 | 1350 |
| 1351 | 1351 |
| 1352 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 1352 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
| 1353 Register dividend = ToRegister(instr->dividend()); | 1353 Register dividend = ToRegister(instr->dividend()); |
| 1354 int32_t divisor = instr->divisor(); | 1354 int32_t divisor = instr->divisor(); |
| 1355 Register result = ToRegister(instr->result()); | 1355 Register result = ToRegister(instr->result()); |
| 1356 DCHECK(!dividend.is(result)); | 1356 DCHECK(!dividend.is(result)); |
| 1357 | 1357 |
| 1358 if (divisor == 0) { | 1358 if (divisor == 0) { |
| 1359 DeoptimizeIf(al, instr, "division by zero"); | 1359 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); |
| 1360 return; | 1360 return; |
| 1361 } | 1361 } |
| 1362 | 1362 |
| 1363 // Check for (0 / -x) that will produce negative zero. | 1363 // Check for (0 / -x) that will produce negative zero. |
| 1364 HDiv* hdiv = instr->hydrogen(); | 1364 HDiv* hdiv = instr->hydrogen(); |
| 1365 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1365 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1366 __ cmp(dividend, Operand::Zero()); | 1366 __ cmp(dividend, Operand::Zero()); |
| 1367 DeoptimizeIf(eq, instr, "minus zero"); | 1367 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
| 1368 } | 1368 } |
| 1369 | 1369 |
| 1370 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1370 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1371 if (divisor < 0) __ rsb(result, result, Operand::Zero()); | 1371 if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
| 1372 | 1372 |
| 1373 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1373 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
| 1374 __ mov(ip, Operand(divisor)); | 1374 __ mov(ip, Operand(divisor)); |
| 1375 __ smull(scratch0(), ip, result, ip); | 1375 __ smull(scratch0(), ip, result, ip); |
| 1376 __ sub(scratch0(), scratch0(), dividend, SetCC); | 1376 __ sub(scratch0(), scratch0(), dividend, SetCC); |
| 1377 DeoptimizeIf(ne, instr, "lost precision"); | 1377 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); |
| 1378 } | 1378 } |
| 1379 } | 1379 } |
| 1380 | 1380 |
| 1381 | 1381 |
| 1382 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 1382 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
| 1383 void LCodeGen::DoDivI(LDivI* instr) { | 1383 void LCodeGen::DoDivI(LDivI* instr) { |
| 1384 HBinaryOperation* hdiv = instr->hydrogen(); | 1384 HBinaryOperation* hdiv = instr->hydrogen(); |
| 1385 Register dividend = ToRegister(instr->dividend()); | 1385 Register dividend = ToRegister(instr->dividend()); |
| 1386 Register divisor = ToRegister(instr->divisor()); | 1386 Register divisor = ToRegister(instr->divisor()); |
| 1387 Register result = ToRegister(instr->result()); | 1387 Register result = ToRegister(instr->result()); |
| 1388 | 1388 |
| 1389 // Check for x / 0. | 1389 // Check for x / 0. |
| 1390 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1390 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1391 __ cmp(divisor, Operand::Zero()); | 1391 __ cmp(divisor, Operand::Zero()); |
| 1392 DeoptimizeIf(eq, instr, "division by zero"); | 1392 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); |
| 1393 } | 1393 } |
| 1394 | 1394 |
| 1395 // Check for (0 / -x) that will produce negative zero. | 1395 // Check for (0 / -x) that will produce negative zero. |
| 1396 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1396 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1397 Label positive; | 1397 Label positive; |
| 1398 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { | 1398 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1399 // Do the test only if it hadn't be done above. | 1399 // Do the test only if it hadn't be done above. |
| 1400 __ cmp(divisor, Operand::Zero()); | 1400 __ cmp(divisor, Operand::Zero()); |
| 1401 } | 1401 } |
| 1402 __ b(pl, &positive); | 1402 __ b(pl, &positive); |
| 1403 __ cmp(dividend, Operand::Zero()); | 1403 __ cmp(dividend, Operand::Zero()); |
| 1404 DeoptimizeIf(eq, instr, "minus zero"); | 1404 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
| 1405 __ bind(&positive); | 1405 __ bind(&positive); |
| 1406 } | 1406 } |
| 1407 | 1407 |
| 1408 // Check for (kMinInt / -1). | 1408 // Check for (kMinInt / -1). |
| 1409 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1409 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
| 1410 (!CpuFeatures::IsSupported(SUDIV) || | 1410 (!CpuFeatures::IsSupported(SUDIV) || |
| 1411 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { | 1411 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { |
| 1412 // We don't need to check for overflow when truncating with sdiv | 1412 // We don't need to check for overflow when truncating with sdiv |
| 1413 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. | 1413 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. |
| 1414 __ cmp(dividend, Operand(kMinInt)); | 1414 __ cmp(dividend, Operand(kMinInt)); |
| 1415 __ cmp(divisor, Operand(-1), eq); | 1415 __ cmp(divisor, Operand(-1), eq); |
| 1416 DeoptimizeIf(eq, instr, "overflow"); | 1416 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); |
| 1417 } | 1417 } |
| 1418 | 1418 |
| 1419 if (CpuFeatures::IsSupported(SUDIV)) { | 1419 if (CpuFeatures::IsSupported(SUDIV)) { |
| 1420 CpuFeatureScope scope(masm(), SUDIV); | 1420 CpuFeatureScope scope(masm(), SUDIV); |
| 1421 __ sdiv(result, dividend, divisor); | 1421 __ sdiv(result, dividend, divisor); |
| 1422 } else { | 1422 } else { |
| 1423 DoubleRegister vleft = ToDoubleRegister(instr->temp()); | 1423 DoubleRegister vleft = ToDoubleRegister(instr->temp()); |
| 1424 DoubleRegister vright = double_scratch0(); | 1424 DoubleRegister vright = double_scratch0(); |
| 1425 __ vmov(double_scratch0().low(), dividend); | 1425 __ vmov(double_scratch0().low(), dividend); |
| 1426 __ vcvt_f64_s32(vleft, double_scratch0().low()); | 1426 __ vcvt_f64_s32(vleft, double_scratch0().low()); |
| 1427 __ vmov(double_scratch0().low(), divisor); | 1427 __ vmov(double_scratch0().low(), divisor); |
| 1428 __ vcvt_f64_s32(vright, double_scratch0().low()); | 1428 __ vcvt_f64_s32(vright, double_scratch0().low()); |
| 1429 __ vdiv(vleft, vleft, vright); // vleft now contains the result. | 1429 __ vdiv(vleft, vleft, vright); // vleft now contains the result. |
| 1430 __ vcvt_s32_f64(double_scratch0().low(), vleft); | 1430 __ vcvt_s32_f64(double_scratch0().low(), vleft); |
| 1431 __ vmov(result, double_scratch0().low()); | 1431 __ vmov(result, double_scratch0().low()); |
| 1432 } | 1432 } |
| 1433 | 1433 |
| 1434 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1434 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
| 1435 // Compute remainder and deopt if it's not zero. | 1435 // Compute remainder and deopt if it's not zero. |
| 1436 Register remainder = scratch0(); | 1436 Register remainder = scratch0(); |
| 1437 __ Mls(remainder, result, divisor, dividend); | 1437 __ Mls(remainder, result, divisor, dividend); |
| 1438 __ cmp(remainder, Operand::Zero()); | 1438 __ cmp(remainder, Operand::Zero()); |
| 1439 DeoptimizeIf(ne, instr, "lost precision"); | 1439 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); |
| 1440 } | 1440 } |
| 1441 } | 1441 } |
| 1442 | 1442 |
| 1443 | 1443 |
| 1444 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { | 1444 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { |
| 1445 DwVfpRegister addend = ToDoubleRegister(instr->addend()); | 1445 DwVfpRegister addend = ToDoubleRegister(instr->addend()); |
| 1446 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); | 1446 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); |
| 1447 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); | 1447 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); |
| 1448 | 1448 |
| 1449 // This is computed in-place. | 1449 // This is computed in-place. |
| (...skipping 30 matching lines...) Expand all Loading... |
| 1480 // can simply do an arithmetic right shift. | 1480 // can simply do an arithmetic right shift. |
| 1481 int32_t shift = WhichPowerOf2Abs(divisor); | 1481 int32_t shift = WhichPowerOf2Abs(divisor); |
| 1482 if (divisor > 1) { | 1482 if (divisor > 1) { |
| 1483 __ mov(result, Operand(dividend, ASR, shift)); | 1483 __ mov(result, Operand(dividend, ASR, shift)); |
| 1484 return; | 1484 return; |
| 1485 } | 1485 } |
| 1486 | 1486 |
| 1487 // If the divisor is negative, we have to negate and handle edge cases. | 1487 // If the divisor is negative, we have to negate and handle edge cases. |
| 1488 __ rsb(result, dividend, Operand::Zero(), SetCC); | 1488 __ rsb(result, dividend, Operand::Zero(), SetCC); |
| 1489 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1489 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1490 DeoptimizeIf(eq, instr, "minus zero"); | 1490 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
| 1491 } | 1491 } |
| 1492 | 1492 |
| 1493 // Dividing by -1 is basically negation, unless we overflow. | 1493 // Dividing by -1 is basically negation, unless we overflow. |
| 1494 if (divisor == -1) { | 1494 if (divisor == -1) { |
| 1495 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1495 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1496 DeoptimizeIf(vs, instr, "overflow"); | 1496 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
| 1497 } | 1497 } |
| 1498 return; | 1498 return; |
| 1499 } | 1499 } |
| 1500 | 1500 |
| 1501 // If the negation could not overflow, simply shifting is OK. | 1501 // If the negation could not overflow, simply shifting is OK. |
| 1502 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1502 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
| 1503 __ mov(result, Operand(result, ASR, shift)); | 1503 __ mov(result, Operand(result, ASR, shift)); |
| 1504 return; | 1504 return; |
| 1505 } | 1505 } |
| 1506 | 1506 |
| 1507 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); | 1507 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); |
| 1508 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); | 1508 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); |
| 1509 } | 1509 } |
| 1510 | 1510 |
| 1511 | 1511 |
| 1512 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 1512 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
| 1513 Register dividend = ToRegister(instr->dividend()); | 1513 Register dividend = ToRegister(instr->dividend()); |
| 1514 int32_t divisor = instr->divisor(); | 1514 int32_t divisor = instr->divisor(); |
| 1515 Register result = ToRegister(instr->result()); | 1515 Register result = ToRegister(instr->result()); |
| 1516 DCHECK(!dividend.is(result)); | 1516 DCHECK(!dividend.is(result)); |
| 1517 | 1517 |
| 1518 if (divisor == 0) { | 1518 if (divisor == 0) { |
| 1519 DeoptimizeIf(al, instr, "division by zero"); | 1519 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); |
| 1520 return; | 1520 return; |
| 1521 } | 1521 } |
| 1522 | 1522 |
| 1523 // Check for (0 / -x) that will produce negative zero. | 1523 // Check for (0 / -x) that will produce negative zero. |
| 1524 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1524 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
| 1525 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1525 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
| 1526 __ cmp(dividend, Operand::Zero()); | 1526 __ cmp(dividend, Operand::Zero()); |
| 1527 DeoptimizeIf(eq, instr, "minus zero"); | 1527 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
| 1528 } | 1528 } |
| 1529 | 1529 |
| 1530 // Easy case: We need no dynamic check for the dividend and the flooring | 1530 // Easy case: We need no dynamic check for the dividend and the flooring |
| 1531 // division is the same as the truncating division. | 1531 // division is the same as the truncating division. |
| 1532 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1532 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
| 1533 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1533 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
| 1534 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1534 __ TruncatingDiv(result, dividend, Abs(divisor)); |
| 1535 if (divisor < 0) __ rsb(result, result, Operand::Zero()); | 1535 if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
| 1536 return; | 1536 return; |
| 1537 } | 1537 } |
| (...skipping 20 matching lines...) Expand all Loading... |
| 1558 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. | 1558 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. |
| 1559 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { | 1559 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { |
| 1560 HBinaryOperation* hdiv = instr->hydrogen(); | 1560 HBinaryOperation* hdiv = instr->hydrogen(); |
| 1561 Register left = ToRegister(instr->dividend()); | 1561 Register left = ToRegister(instr->dividend()); |
| 1562 Register right = ToRegister(instr->divisor()); | 1562 Register right = ToRegister(instr->divisor()); |
| 1563 Register result = ToRegister(instr->result()); | 1563 Register result = ToRegister(instr->result()); |
| 1564 | 1564 |
| 1565 // Check for x / 0. | 1565 // Check for x / 0. |
| 1566 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1566 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1567 __ cmp(right, Operand::Zero()); | 1567 __ cmp(right, Operand::Zero()); |
| 1568 DeoptimizeIf(eq, instr, "division by zero"); | 1568 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); |
| 1569 } | 1569 } |
| 1570 | 1570 |
| 1571 // Check for (0 / -x) that will produce negative zero. | 1571 // Check for (0 / -x) that will produce negative zero. |
| 1572 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1572 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 1573 Label positive; | 1573 Label positive; |
| 1574 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { | 1574 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { |
| 1575 // Do the test only if it hadn't been done above. | 1575 // Do the test only if it hadn't been done above. |
| 1576 __ cmp(right, Operand::Zero()); | 1576 __ cmp(right, Operand::Zero()); |
| 1577 } | 1577 } |
| 1578 __ b(pl, &positive); | 1578 __ b(pl, &positive); |
| 1579 __ cmp(left, Operand::Zero()); | 1579 __ cmp(left, Operand::Zero()); |
| 1580 DeoptimizeIf(eq, instr, "minus zero"); | 1580 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
| 1581 __ bind(&positive); | 1581 __ bind(&positive); |
| 1582 } | 1582 } |
| 1583 | 1583 |
| 1584 // Check for (kMinInt / -1). | 1584 // Check for (kMinInt / -1). |
| 1585 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1585 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
| 1586 (!CpuFeatures::IsSupported(SUDIV) || | 1586 (!CpuFeatures::IsSupported(SUDIV) || |
| 1587 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { | 1587 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { |
| 1588 // We don't need to check for overflow when truncating with sdiv | 1588 // We don't need to check for overflow when truncating with sdiv |
| 1589 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. | 1589 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. |
| 1590 __ cmp(left, Operand(kMinInt)); | 1590 __ cmp(left, Operand(kMinInt)); |
| 1591 __ cmp(right, Operand(-1), eq); | 1591 __ cmp(right, Operand(-1), eq); |
| 1592 DeoptimizeIf(eq, instr, "overflow"); | 1592 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); |
| 1593 } | 1593 } |
| 1594 | 1594 |
| 1595 if (CpuFeatures::IsSupported(SUDIV)) { | 1595 if (CpuFeatures::IsSupported(SUDIV)) { |
| 1596 CpuFeatureScope scope(masm(), SUDIV); | 1596 CpuFeatureScope scope(masm(), SUDIV); |
| 1597 __ sdiv(result, left, right); | 1597 __ sdiv(result, left, right); |
| 1598 } else { | 1598 } else { |
| 1599 DoubleRegister vleft = ToDoubleRegister(instr->temp()); | 1599 DoubleRegister vleft = ToDoubleRegister(instr->temp()); |
| 1600 DoubleRegister vright = double_scratch0(); | 1600 DoubleRegister vright = double_scratch0(); |
| 1601 __ vmov(double_scratch0().low(), left); | 1601 __ vmov(double_scratch0().low(), left); |
| 1602 __ vcvt_f64_s32(vleft, double_scratch0().low()); | 1602 __ vcvt_f64_s32(vleft, double_scratch0().low()); |
| (...skipping 25 matching lines...) Expand all Loading... |
| 1628 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 1628 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
| 1629 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1629 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1630 | 1630 |
| 1631 if (right_op->IsConstantOperand()) { | 1631 if (right_op->IsConstantOperand()) { |
| 1632 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | 1632 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); |
| 1633 | 1633 |
| 1634 if (bailout_on_minus_zero && (constant < 0)) { | 1634 if (bailout_on_minus_zero && (constant < 0)) { |
| 1635 // The case of a null constant will be handled separately. | 1635 // The case of a null constant will be handled separately. |
| 1636 // If constant is negative and left is null, the result should be -0. | 1636 // If constant is negative and left is null, the result should be -0. |
| 1637 __ cmp(left, Operand::Zero()); | 1637 __ cmp(left, Operand::Zero()); |
| 1638 DeoptimizeIf(eq, instr, "minus zero"); | 1638 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
| 1639 } | 1639 } |
| 1640 | 1640 |
| 1641 switch (constant) { | 1641 switch (constant) { |
| 1642 case -1: | 1642 case -1: |
| 1643 if (overflow) { | 1643 if (overflow) { |
| 1644 __ rsb(result, left, Operand::Zero(), SetCC); | 1644 __ rsb(result, left, Operand::Zero(), SetCC); |
| 1645 DeoptimizeIf(vs, instr, "overflow"); | 1645 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
| 1646 } else { | 1646 } else { |
| 1647 __ rsb(result, left, Operand::Zero()); | 1647 __ rsb(result, left, Operand::Zero()); |
| 1648 } | 1648 } |
| 1649 break; | 1649 break; |
| 1650 case 0: | 1650 case 0: |
| 1651 if (bailout_on_minus_zero) { | 1651 if (bailout_on_minus_zero) { |
| 1652 // If left is strictly negative and the constant is null, the | 1652 // If left is strictly negative and the constant is null, the |
| 1653 // result is -0. Deoptimize if required, otherwise return 0. | 1653 // result is -0. Deoptimize if required, otherwise return 0. |
| 1654 __ cmp(left, Operand::Zero()); | 1654 __ cmp(left, Operand::Zero()); |
| 1655 DeoptimizeIf(mi, instr, "minus zero"); | 1655 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); |
| 1656 } | 1656 } |
| 1657 __ mov(result, Operand::Zero()); | 1657 __ mov(result, Operand::Zero()); |
| 1658 break; | 1658 break; |
| 1659 case 1: | 1659 case 1: |
| 1660 __ Move(result, left); | 1660 __ Move(result, left); |
| 1661 break; | 1661 break; |
| 1662 default: | 1662 default: |
| 1663 // Multiplying by powers of two and powers of two plus or minus | 1663 // Multiplying by powers of two and powers of two plus or minus |
| 1664 // one can be done faster with shifted operands. | 1664 // one can be done faster with shifted operands. |
| 1665 // For other constants we emit standard code. | 1665 // For other constants we emit standard code. |
| (...skipping 29 matching lines...) Expand all Loading... |
| 1695 if (overflow) { | 1695 if (overflow) { |
| 1696 Register scratch = scratch0(); | 1696 Register scratch = scratch0(); |
| 1697 // scratch:result = left * right. | 1697 // scratch:result = left * right. |
| 1698 if (instr->hydrogen()->representation().IsSmi()) { | 1698 if (instr->hydrogen()->representation().IsSmi()) { |
| 1699 __ SmiUntag(result, left); | 1699 __ SmiUntag(result, left); |
| 1700 __ smull(result, scratch, result, right); | 1700 __ smull(result, scratch, result, right); |
| 1701 } else { | 1701 } else { |
| 1702 __ smull(result, scratch, left, right); | 1702 __ smull(result, scratch, left, right); |
| 1703 } | 1703 } |
| 1704 __ cmp(scratch, Operand(result, ASR, 31)); | 1704 __ cmp(scratch, Operand(result, ASR, 31)); |
| 1705 DeoptimizeIf(ne, instr, "overflow"); | 1705 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); |
| 1706 } else { | 1706 } else { |
| 1707 if (instr->hydrogen()->representation().IsSmi()) { | 1707 if (instr->hydrogen()->representation().IsSmi()) { |
| 1708 __ SmiUntag(result, left); | 1708 __ SmiUntag(result, left); |
| 1709 __ mul(result, result, right); | 1709 __ mul(result, result, right); |
| 1710 } else { | 1710 } else { |
| 1711 __ mul(result, left, right); | 1711 __ mul(result, left, right); |
| 1712 } | 1712 } |
| 1713 } | 1713 } |
| 1714 | 1714 |
| 1715 if (bailout_on_minus_zero) { | 1715 if (bailout_on_minus_zero) { |
| 1716 Label done; | 1716 Label done; |
| 1717 __ teq(left, Operand(right)); | 1717 __ teq(left, Operand(right)); |
| 1718 __ b(pl, &done); | 1718 __ b(pl, &done); |
| 1719 // Bail out if the result is minus zero. | 1719 // Bail out if the result is minus zero. |
| 1720 __ cmp(result, Operand::Zero()); | 1720 __ cmp(result, Operand::Zero()); |
| 1721 DeoptimizeIf(eq, instr, "minus zero"); | 1721 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
| 1722 __ bind(&done); | 1722 __ bind(&done); |
| 1723 } | 1723 } |
| 1724 } | 1724 } |
| 1725 } | 1725 } |
| 1726 | 1726 |
| 1727 | 1727 |
| 1728 void LCodeGen::DoBitI(LBitI* instr) { | 1728 void LCodeGen::DoBitI(LBitI* instr) { |
| 1729 LOperand* left_op = instr->left(); | 1729 LOperand* left_op = instr->left(); |
| 1730 LOperand* right_op = instr->right(); | 1730 LOperand* right_op = instr->right(); |
| 1731 DCHECK(left_op->IsRegister()); | 1731 DCHECK(left_op->IsRegister()); |
| (...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1774 switch (instr->op()) { | 1774 switch (instr->op()) { |
| 1775 case Token::ROR: | 1775 case Token::ROR: |
| 1776 __ mov(result, Operand(left, ROR, scratch)); | 1776 __ mov(result, Operand(left, ROR, scratch)); |
| 1777 break; | 1777 break; |
| 1778 case Token::SAR: | 1778 case Token::SAR: |
| 1779 __ mov(result, Operand(left, ASR, scratch)); | 1779 __ mov(result, Operand(left, ASR, scratch)); |
| 1780 break; | 1780 break; |
| 1781 case Token::SHR: | 1781 case Token::SHR: |
| 1782 if (instr->can_deopt()) { | 1782 if (instr->can_deopt()) { |
| 1783 __ mov(result, Operand(left, LSR, scratch), SetCC); | 1783 __ mov(result, Operand(left, LSR, scratch), SetCC); |
| 1784 DeoptimizeIf(mi, instr, "negative value"); | 1784 DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue); |
| 1785 } else { | 1785 } else { |
| 1786 __ mov(result, Operand(left, LSR, scratch)); | 1786 __ mov(result, Operand(left, LSR, scratch)); |
| 1787 } | 1787 } |
| 1788 break; | 1788 break; |
| 1789 case Token::SHL: | 1789 case Token::SHL: |
| 1790 __ mov(result, Operand(left, LSL, scratch)); | 1790 __ mov(result, Operand(left, LSL, scratch)); |
| 1791 break; | 1791 break; |
| 1792 default: | 1792 default: |
| 1793 UNREACHABLE(); | 1793 UNREACHABLE(); |
| 1794 break; | 1794 break; |
| (...skipping 16 matching lines...) Expand all Loading... |
| 1811 } else { | 1811 } else { |
| 1812 __ Move(result, left); | 1812 __ Move(result, left); |
| 1813 } | 1813 } |
| 1814 break; | 1814 break; |
| 1815 case Token::SHR: | 1815 case Token::SHR: |
| 1816 if (shift_count != 0) { | 1816 if (shift_count != 0) { |
| 1817 __ mov(result, Operand(left, LSR, shift_count)); | 1817 __ mov(result, Operand(left, LSR, shift_count)); |
| 1818 } else { | 1818 } else { |
| 1819 if (instr->can_deopt()) { | 1819 if (instr->can_deopt()) { |
| 1820 __ tst(left, Operand(0x80000000)); | 1820 __ tst(left, Operand(0x80000000)); |
| 1821 DeoptimizeIf(ne, instr, "negative value"); | 1821 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue); |
| 1822 } | 1822 } |
| 1823 __ Move(result, left); | 1823 __ Move(result, left); |
| 1824 } | 1824 } |
| 1825 break; | 1825 break; |
| 1826 case Token::SHL: | 1826 case Token::SHL: |
| 1827 if (shift_count != 0) { | 1827 if (shift_count != 0) { |
| 1828 if (instr->hydrogen_value()->representation().IsSmi() && | 1828 if (instr->hydrogen_value()->representation().IsSmi() && |
| 1829 instr->can_deopt()) { | 1829 instr->can_deopt()) { |
| 1830 if (shift_count != 1) { | 1830 if (shift_count != 1) { |
| 1831 __ mov(result, Operand(left, LSL, shift_count - 1)); | 1831 __ mov(result, Operand(left, LSL, shift_count - 1)); |
| 1832 __ SmiTag(result, result, SetCC); | 1832 __ SmiTag(result, result, SetCC); |
| 1833 } else { | 1833 } else { |
| 1834 __ SmiTag(result, left, SetCC); | 1834 __ SmiTag(result, left, SetCC); |
| 1835 } | 1835 } |
| 1836 DeoptimizeIf(vs, instr, "overflow"); | 1836 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
| 1837 } else { | 1837 } else { |
| 1838 __ mov(result, Operand(left, LSL, shift_count)); | 1838 __ mov(result, Operand(left, LSL, shift_count)); |
| 1839 } | 1839 } |
| 1840 } else { | 1840 } else { |
| 1841 __ Move(result, left); | 1841 __ Move(result, left); |
| 1842 } | 1842 } |
| 1843 break; | 1843 break; |
| 1844 default: | 1844 default: |
| 1845 UNREACHABLE(); | 1845 UNREACHABLE(); |
| 1846 break; | 1846 break; |
| (...skipping 11 matching lines...) Expand all Loading... |
| 1858 | 1858 |
| 1859 if (right->IsStackSlot()) { | 1859 if (right->IsStackSlot()) { |
| 1860 Register right_reg = EmitLoadRegister(right, ip); | 1860 Register right_reg = EmitLoadRegister(right, ip); |
| 1861 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 1861 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
| 1862 } else { | 1862 } else { |
| 1863 DCHECK(right->IsRegister() || right->IsConstantOperand()); | 1863 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
| 1864 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 1864 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
| 1865 } | 1865 } |
| 1866 | 1866 |
| 1867 if (can_overflow) { | 1867 if (can_overflow) { |
| 1868 DeoptimizeIf(vs, instr, "overflow"); | 1868 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
| 1869 } | 1869 } |
| 1870 } | 1870 } |
| 1871 | 1871 |
| 1872 | 1872 |
| 1873 void LCodeGen::DoRSubI(LRSubI* instr) { | 1873 void LCodeGen::DoRSubI(LRSubI* instr) { |
| 1874 LOperand* left = instr->left(); | 1874 LOperand* left = instr->left(); |
| 1875 LOperand* right = instr->right(); | 1875 LOperand* right = instr->right(); |
| 1876 LOperand* result = instr->result(); | 1876 LOperand* result = instr->result(); |
| 1877 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1877 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
| 1878 SBit set_cond = can_overflow ? SetCC : LeaveCC; | 1878 SBit set_cond = can_overflow ? SetCC : LeaveCC; |
| 1879 | 1879 |
| 1880 if (right->IsStackSlot()) { | 1880 if (right->IsStackSlot()) { |
| 1881 Register right_reg = EmitLoadRegister(right, ip); | 1881 Register right_reg = EmitLoadRegister(right, ip); |
| 1882 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 1882 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
| 1883 } else { | 1883 } else { |
| 1884 DCHECK(right->IsRegister() || right->IsConstantOperand()); | 1884 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
| 1885 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 1885 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
| 1886 } | 1886 } |
| 1887 | 1887 |
| 1888 if (can_overflow) { | 1888 if (can_overflow) { |
| 1889 DeoptimizeIf(vs, instr, "overflow"); | 1889 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
| 1890 } | 1890 } |
| 1891 } | 1891 } |
| 1892 | 1892 |
| 1893 | 1893 |
| 1894 void LCodeGen::DoConstantI(LConstantI* instr) { | 1894 void LCodeGen::DoConstantI(LConstantI* instr) { |
| 1895 __ mov(ToRegister(instr->result()), Operand(instr->value())); | 1895 __ mov(ToRegister(instr->result()), Operand(instr->value())); |
| 1896 } | 1896 } |
| 1897 | 1897 |
| 1898 | 1898 |
| 1899 void LCodeGen::DoConstantS(LConstantS* instr) { | 1899 void LCodeGen::DoConstantS(LConstantS* instr) { |
| (...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1947 Register result = ToRegister(instr->result()); | 1947 Register result = ToRegister(instr->result()); |
| 1948 Register scratch = ToRegister(instr->temp()); | 1948 Register scratch = ToRegister(instr->temp()); |
| 1949 Smi* index = instr->index(); | 1949 Smi* index = instr->index(); |
| 1950 Label runtime, done; | 1950 Label runtime, done; |
| 1951 DCHECK(object.is(result)); | 1951 DCHECK(object.is(result)); |
| 1952 DCHECK(object.is(r0)); | 1952 DCHECK(object.is(r0)); |
| 1953 DCHECK(!scratch.is(scratch0())); | 1953 DCHECK(!scratch.is(scratch0())); |
| 1954 DCHECK(!scratch.is(object)); | 1954 DCHECK(!scratch.is(object)); |
| 1955 | 1955 |
| 1956 __ SmiTst(object); | 1956 __ SmiTst(object); |
| 1957 DeoptimizeIf(eq, instr, "Smi"); | 1957 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); |
| 1958 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); | 1958 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); |
| 1959 DeoptimizeIf(ne, instr, "not a date object"); | 1959 DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject); |
| 1960 | 1960 |
| 1961 if (index->value() == 0) { | 1961 if (index->value() == 0) { |
| 1962 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); | 1962 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); |
| 1963 } else { | 1963 } else { |
| 1964 if (index->value() < JSDate::kFirstUncachedField) { | 1964 if (index->value() < JSDate::kFirstUncachedField) { |
| 1965 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | 1965 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
| 1966 __ mov(scratch, Operand(stamp)); | 1966 __ mov(scratch, Operand(stamp)); |
| 1967 __ ldr(scratch, MemOperand(scratch)); | 1967 __ ldr(scratch, MemOperand(scratch)); |
| 1968 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); | 1968 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); |
| 1969 __ cmp(scratch, scratch0()); | 1969 __ cmp(scratch, scratch0()); |
| (...skipping 96 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2066 | 2066 |
| 2067 if (right->IsStackSlot()) { | 2067 if (right->IsStackSlot()) { |
| 2068 Register right_reg = EmitLoadRegister(right, ip); | 2068 Register right_reg = EmitLoadRegister(right, ip); |
| 2069 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 2069 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
| 2070 } else { | 2070 } else { |
| 2071 DCHECK(right->IsRegister() || right->IsConstantOperand()); | 2071 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
| 2072 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 2072 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
| 2073 } | 2073 } |
| 2074 | 2074 |
| 2075 if (can_overflow) { | 2075 if (can_overflow) { |
| 2076 DeoptimizeIf(vs, instr, "overflow"); | 2076 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
| 2077 } | 2077 } |
| 2078 } | 2078 } |
| 2079 | 2079 |
| 2080 | 2080 |
| 2081 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 2081 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
| 2082 LOperand* left = instr->left(); | 2082 LOperand* left = instr->left(); |
| 2083 LOperand* right = instr->right(); | 2083 LOperand* right = instr->right(); |
| 2084 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 2084 HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
| 2085 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { | 2085 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { |
| 2086 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; | 2086 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; |
| (...skipping 205 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2292 } | 2292 } |
| 2293 | 2293 |
| 2294 if (expected.Contains(ToBooleanStub::SMI)) { | 2294 if (expected.Contains(ToBooleanStub::SMI)) { |
| 2295 // Smis: 0 -> false, all other -> true. | 2295 // Smis: 0 -> false, all other -> true. |
| 2296 __ cmp(reg, Operand::Zero()); | 2296 __ cmp(reg, Operand::Zero()); |
| 2297 __ b(eq, instr->FalseLabel(chunk_)); | 2297 __ b(eq, instr->FalseLabel(chunk_)); |
| 2298 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | 2298 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); |
| 2299 } else if (expected.NeedsMap()) { | 2299 } else if (expected.NeedsMap()) { |
| 2300 // If we need a map later and have a Smi -> deopt. | 2300 // If we need a map later and have a Smi -> deopt. |
| 2301 __ SmiTst(reg); | 2301 __ SmiTst(reg); |
| 2302 DeoptimizeIf(eq, instr, "Smi"); | 2302 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); |
| 2303 } | 2303 } |
| 2304 | 2304 |
| 2305 const Register map = scratch0(); | 2305 const Register map = scratch0(); |
| 2306 if (expected.NeedsMap()) { | 2306 if (expected.NeedsMap()) { |
| 2307 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset)); | 2307 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset)); |
| 2308 | 2308 |
| 2309 if (expected.CanBeUndetectable()) { | 2309 if (expected.CanBeUndetectable()) { |
| 2310 // Undetectable -> false. | 2310 // Undetectable -> false. |
| 2311 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); | 2311 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 2312 __ tst(ip, Operand(1 << Map::kIsUndetectable)); | 2312 __ tst(ip, Operand(1 << Map::kIsUndetectable)); |
| (...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2348 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); | 2348 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); |
| 2349 __ cmp(r0, r0, vs); // NaN -> false. | 2349 __ cmp(r0, r0, vs); // NaN -> false. |
| 2350 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false. | 2350 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false. |
| 2351 __ b(instr->TrueLabel(chunk_)); | 2351 __ b(instr->TrueLabel(chunk_)); |
| 2352 __ bind(¬_heap_number); | 2352 __ bind(¬_heap_number); |
| 2353 } | 2353 } |
| 2354 | 2354 |
| 2355 if (!expected.IsGeneric()) { | 2355 if (!expected.IsGeneric()) { |
| 2356 // We've seen something for the first time -> deopt. | 2356 // We've seen something for the first time -> deopt. |
| 2357 // This can only happen if we are not generic already. | 2357 // This can only happen if we are not generic already. |
| 2358 DeoptimizeIf(al, instr, "unexpected object"); | 2358 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject); |
| 2359 } | 2359 } |
| 2360 } | 2360 } |
| 2361 } | 2361 } |
| 2362 } | 2362 } |
| 2363 | 2363 |
| 2364 | 2364 |
| 2365 void LCodeGen::EmitGoto(int block) { | 2365 void LCodeGen::EmitGoto(int block) { |
| 2366 if (!IsNextEmittedBlock(block)) { | 2366 if (!IsNextEmittedBlock(block)) { |
| 2367 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2367 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); |
| 2368 } | 2368 } |
| (...skipping 625 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 2994 } | 2994 } |
| 2995 | 2995 |
| 2996 | 2996 |
| 2997 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 2997 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
| 2998 Register result = ToRegister(instr->result()); | 2998 Register result = ToRegister(instr->result()); |
| 2999 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); | 2999 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); |
| 3000 __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); | 3000 __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); |
| 3001 if (instr->hydrogen()->RequiresHoleCheck()) { | 3001 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3002 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3002 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 3003 __ cmp(result, ip); | 3003 __ cmp(result, ip); |
| 3004 DeoptimizeIf(eq, instr, "hole"); | 3004 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
| 3005 } | 3005 } |
| 3006 } | 3006 } |
| 3007 | 3007 |
| 3008 | 3008 |
| 3009 template <class T> | 3009 template <class T> |
| 3010 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | 3010 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { |
| 3011 DCHECK(FLAG_vector_ics); | 3011 DCHECK(FLAG_vector_ics); |
| 3012 Register vector_register = ToRegister(instr->temp_vector()); | 3012 Register vector_register = ToRegister(instr->temp_vector()); |
| 3013 Register slot_register = VectorLoadICDescriptor::SlotRegister(); | 3013 Register slot_register = VectorLoadICDescriptor::SlotRegister(); |
| 3014 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); | 3014 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3049 | 3049 |
| 3050 // If the cell we are storing to contains the hole it could have | 3050 // If the cell we are storing to contains the hole it could have |
| 3051 // been deleted from the property dictionary. In that case, we need | 3051 // been deleted from the property dictionary. In that case, we need |
| 3052 // to update the property details in the property dictionary to mark | 3052 // to update the property details in the property dictionary to mark |
| 3053 // it as no longer deleted. | 3053 // it as no longer deleted. |
| 3054 if (instr->hydrogen()->RequiresHoleCheck()) { | 3054 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3055 // We use a temp to check the payload (CompareRoot might clobber ip). | 3055 // We use a temp to check the payload (CompareRoot might clobber ip). |
| 3056 Register payload = ToRegister(instr->temp()); | 3056 Register payload = ToRegister(instr->temp()); |
| 3057 __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); | 3057 __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); |
| 3058 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); | 3058 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); |
| 3059 DeoptimizeIf(eq, instr, "hole"); | 3059 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
| 3060 } | 3060 } |
| 3061 | 3061 |
| 3062 // Store the value. | 3062 // Store the value. |
| 3063 __ str(value, FieldMemOperand(cell, Cell::kValueOffset)); | 3063 __ str(value, FieldMemOperand(cell, Cell::kValueOffset)); |
| 3064 // Cells are always rescanned, so no write barrier here. | 3064 // Cells are always rescanned, so no write barrier here. |
| 3065 } | 3065 } |
| 3066 | 3066 |
| 3067 | 3067 |
| 3068 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 3068 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
| 3069 Register context = ToRegister(instr->context()); | 3069 Register context = ToRegister(instr->context()); |
| 3070 Register result = ToRegister(instr->result()); | 3070 Register result = ToRegister(instr->result()); |
| 3071 __ ldr(result, ContextOperand(context, instr->slot_index())); | 3071 __ ldr(result, ContextOperand(context, instr->slot_index())); |
| 3072 if (instr->hydrogen()->RequiresHoleCheck()) { | 3072 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3073 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3073 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 3074 __ cmp(result, ip); | 3074 __ cmp(result, ip); |
| 3075 if (instr->hydrogen()->DeoptimizesOnHole()) { | 3075 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 3076 DeoptimizeIf(eq, instr, "hole"); | 3076 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
| 3077 } else { | 3077 } else { |
| 3078 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); | 3078 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); |
| 3079 } | 3079 } |
| 3080 } | 3080 } |
| 3081 } | 3081 } |
| 3082 | 3082 |
| 3083 | 3083 |
| 3084 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | 3084 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
| 3085 Register context = ToRegister(instr->context()); | 3085 Register context = ToRegister(instr->context()); |
| 3086 Register value = ToRegister(instr->value()); | 3086 Register value = ToRegister(instr->value()); |
| 3087 Register scratch = scratch0(); | 3087 Register scratch = scratch0(); |
| 3088 MemOperand target = ContextOperand(context, instr->slot_index()); | 3088 MemOperand target = ContextOperand(context, instr->slot_index()); |
| 3089 | 3089 |
| 3090 Label skip_assignment; | 3090 Label skip_assignment; |
| 3091 | 3091 |
| 3092 if (instr->hydrogen()->RequiresHoleCheck()) { | 3092 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3093 __ ldr(scratch, target); | 3093 __ ldr(scratch, target); |
| 3094 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3094 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 3095 __ cmp(scratch, ip); | 3095 __ cmp(scratch, ip); |
| 3096 if (instr->hydrogen()->DeoptimizesOnHole()) { | 3096 if (instr->hydrogen()->DeoptimizesOnHole()) { |
| 3097 DeoptimizeIf(eq, instr, "hole"); | 3097 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
| 3098 } else { | 3098 } else { |
| 3099 __ b(ne, &skip_assignment); | 3099 __ b(ne, &skip_assignment); |
| 3100 } | 3100 } |
| 3101 } | 3101 } |
| 3102 | 3102 |
| 3103 __ str(value, target); | 3103 __ str(value, target); |
| 3104 if (instr->hydrogen()->NeedsWriteBarrier()) { | 3104 if (instr->hydrogen()->NeedsWriteBarrier()) { |
| 3105 SmiCheck check_needed = | 3105 SmiCheck check_needed = |
| 3106 instr->hydrogen()->value()->type().IsHeapObject() | 3106 instr->hydrogen()->value()->type().IsHeapObject() |
| 3107 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 3107 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3168 Register function = ToRegister(instr->function()); | 3168 Register function = ToRegister(instr->function()); |
| 3169 Register result = ToRegister(instr->result()); | 3169 Register result = ToRegister(instr->result()); |
| 3170 | 3170 |
| 3171 // Get the prototype or initial map from the function. | 3171 // Get the prototype or initial map from the function. |
| 3172 __ ldr(result, | 3172 __ ldr(result, |
| 3173 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 3173 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
| 3174 | 3174 |
| 3175 // Check that the function has a prototype or an initial map. | 3175 // Check that the function has a prototype or an initial map. |
| 3176 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3176 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 3177 __ cmp(result, ip); | 3177 __ cmp(result, ip); |
| 3178 DeoptimizeIf(eq, instr, "hole"); | 3178 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
| 3179 | 3179 |
| 3180 // If the function does not have an initial map, we're done. | 3180 // If the function does not have an initial map, we're done. |
| 3181 Label done; | 3181 Label done; |
| 3182 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); | 3182 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); |
| 3183 __ b(ne, &done); | 3183 __ b(ne, &done); |
| 3184 | 3184 |
| 3185 // Get the prototype from the initial map. | 3185 // Get the prototype from the initial map. |
| 3186 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 3186 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
| 3187 | 3187 |
| 3188 // All done. | 3188 // All done. |
| (...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3294 break; | 3294 break; |
| 3295 case EXTERNAL_INT32_ELEMENTS: | 3295 case EXTERNAL_INT32_ELEMENTS: |
| 3296 case INT32_ELEMENTS: | 3296 case INT32_ELEMENTS: |
| 3297 __ ldr(result, mem_operand); | 3297 __ ldr(result, mem_operand); |
| 3298 break; | 3298 break; |
| 3299 case EXTERNAL_UINT32_ELEMENTS: | 3299 case EXTERNAL_UINT32_ELEMENTS: |
| 3300 case UINT32_ELEMENTS: | 3300 case UINT32_ELEMENTS: |
| 3301 __ ldr(result, mem_operand); | 3301 __ ldr(result, mem_operand); |
| 3302 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 3302 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
| 3303 __ cmp(result, Operand(0x80000000)); | 3303 __ cmp(result, Operand(0x80000000)); |
| 3304 DeoptimizeIf(cs, instr, "negative value"); | 3304 DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue); |
| 3305 } | 3305 } |
| 3306 break; | 3306 break; |
| 3307 case FLOAT32_ELEMENTS: | 3307 case FLOAT32_ELEMENTS: |
| 3308 case FLOAT64_ELEMENTS: | 3308 case FLOAT64_ELEMENTS: |
| 3309 case EXTERNAL_FLOAT32_ELEMENTS: | 3309 case EXTERNAL_FLOAT32_ELEMENTS: |
| 3310 case EXTERNAL_FLOAT64_ELEMENTS: | 3310 case EXTERNAL_FLOAT64_ELEMENTS: |
| 3311 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3311 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 3312 case FAST_HOLEY_ELEMENTS: | 3312 case FAST_HOLEY_ELEMENTS: |
| 3313 case FAST_HOLEY_SMI_ELEMENTS: | 3313 case FAST_HOLEY_SMI_ELEMENTS: |
| 3314 case FAST_DOUBLE_ELEMENTS: | 3314 case FAST_DOUBLE_ELEMENTS: |
| (...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3347 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | 3347 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) |
| 3348 ? (element_size_shift - kSmiTagSize) : element_size_shift; | 3348 ? (element_size_shift - kSmiTagSize) : element_size_shift; |
| 3349 __ add(scratch, scratch, Operand(key, LSL, shift_size)); | 3349 __ add(scratch, scratch, Operand(key, LSL, shift_size)); |
| 3350 } | 3350 } |
| 3351 | 3351 |
| 3352 __ vldr(result, scratch, 0); | 3352 __ vldr(result, scratch, 0); |
| 3353 | 3353 |
| 3354 if (instr->hydrogen()->RequiresHoleCheck()) { | 3354 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3355 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); | 3355 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); |
| 3356 __ cmp(scratch, Operand(kHoleNanUpper32)); | 3356 __ cmp(scratch, Operand(kHoleNanUpper32)); |
| 3357 DeoptimizeIf(eq, instr, "hole"); | 3357 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
| 3358 } | 3358 } |
| 3359 } | 3359 } |
| 3360 | 3360 |
| 3361 | 3361 |
| 3362 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3362 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
| 3363 Register elements = ToRegister(instr->elements()); | 3363 Register elements = ToRegister(instr->elements()); |
| 3364 Register result = ToRegister(instr->result()); | 3364 Register result = ToRegister(instr->result()); |
| 3365 Register scratch = scratch0(); | 3365 Register scratch = scratch0(); |
| 3366 Register store_base = scratch; | 3366 Register store_base = scratch; |
| 3367 int offset = instr->base_offset(); | 3367 int offset = instr->base_offset(); |
| (...skipping 13 matching lines...) Expand all Loading... |
| 3381 } else { | 3381 } else { |
| 3382 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); | 3382 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); |
| 3383 } | 3383 } |
| 3384 } | 3384 } |
| 3385 __ ldr(result, MemOperand(store_base, offset)); | 3385 __ ldr(result, MemOperand(store_base, offset)); |
| 3386 | 3386 |
| 3387 // Check for the hole value. | 3387 // Check for the hole value. |
| 3388 if (instr->hydrogen()->RequiresHoleCheck()) { | 3388 if (instr->hydrogen()->RequiresHoleCheck()) { |
| 3389 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | 3389 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { |
| 3390 __ SmiTst(result); | 3390 __ SmiTst(result); |
| 3391 DeoptimizeIf(ne, instr, "not a Smi"); | 3391 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi); |
| 3392 } else { | 3392 } else { |
| 3393 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 3393 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); |
| 3394 __ cmp(result, scratch); | 3394 __ cmp(result, scratch); |
| 3395 DeoptimizeIf(eq, instr, "hole"); | 3395 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
| 3396 } | 3396 } |
| 3397 } | 3397 } |
| 3398 } | 3398 } |
| 3399 | 3399 |
| 3400 | 3400 |
| 3401 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | 3401 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { |
| 3402 if (instr->is_typed_elements()) { | 3402 if (instr->is_typed_elements()) { |
| 3403 DoLoadKeyedExternalArray(instr); | 3403 DoLoadKeyedExternalArray(instr); |
| 3404 } else if (instr->hydrogen()->representation().IsDouble()) { | 3404 } else if (instr->hydrogen()->representation().IsDouble()) { |
| 3405 DoLoadKeyedFixedDoubleArray(instr); | 3405 DoLoadKeyedFixedDoubleArray(instr); |
| (...skipping 121 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3527 // Normal function. Replace undefined or null with global receiver. | 3527 // Normal function. Replace undefined or null with global receiver. |
| 3528 __ LoadRoot(scratch, Heap::kNullValueRootIndex); | 3528 __ LoadRoot(scratch, Heap::kNullValueRootIndex); |
| 3529 __ cmp(receiver, scratch); | 3529 __ cmp(receiver, scratch); |
| 3530 __ b(eq, &global_object); | 3530 __ b(eq, &global_object); |
| 3531 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 3531 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
| 3532 __ cmp(receiver, scratch); | 3532 __ cmp(receiver, scratch); |
| 3533 __ b(eq, &global_object); | 3533 __ b(eq, &global_object); |
| 3534 | 3534 |
| 3535 // Deoptimize if the receiver is not a JS object. | 3535 // Deoptimize if the receiver is not a JS object. |
| 3536 __ SmiTst(receiver); | 3536 __ SmiTst(receiver); |
| 3537 DeoptimizeIf(eq, instr, "Smi"); | 3537 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); |
| 3538 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); | 3538 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); |
| 3539 DeoptimizeIf(lt, instr, "not a JavaScript object"); | 3539 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject); |
| 3540 | 3540 |
| 3541 __ b(&result_in_receiver); | 3541 __ b(&result_in_receiver); |
| 3542 __ bind(&global_object); | 3542 __ bind(&global_object); |
| 3543 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); | 3543 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); |
| 3544 __ ldr(result, | 3544 __ ldr(result, |
| 3545 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); | 3545 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); |
| 3546 __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); | 3546 __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); |
| 3547 | 3547 |
| 3548 if (result.is(receiver)) { | 3548 if (result.is(receiver)) { |
| 3549 __ bind(&result_in_receiver); | 3549 __ bind(&result_in_receiver); |
| (...skipping 14 matching lines...) Expand all Loading... |
| 3564 Register elements = ToRegister(instr->elements()); | 3564 Register elements = ToRegister(instr->elements()); |
| 3565 Register scratch = scratch0(); | 3565 Register scratch = scratch0(); |
| 3566 DCHECK(receiver.is(r0)); // Used for parameter count. | 3566 DCHECK(receiver.is(r0)); // Used for parameter count. |
| 3567 DCHECK(function.is(r1)); // Required by InvokeFunction. | 3567 DCHECK(function.is(r1)); // Required by InvokeFunction. |
| 3568 DCHECK(ToRegister(instr->result()).is(r0)); | 3568 DCHECK(ToRegister(instr->result()).is(r0)); |
| 3569 | 3569 |
| 3570 // Copy the arguments to this function possibly from the | 3570 // Copy the arguments to this function possibly from the |
| 3571 // adaptor frame below it. | 3571 // adaptor frame below it. |
| 3572 const uint32_t kArgumentsLimit = 1 * KB; | 3572 const uint32_t kArgumentsLimit = 1 * KB; |
| 3573 __ cmp(length, Operand(kArgumentsLimit)); | 3573 __ cmp(length, Operand(kArgumentsLimit)); |
| 3574 DeoptimizeIf(hi, instr, "too many arguments"); | 3574 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments); |
| 3575 | 3575 |
| 3576 // Push the receiver and use the register to keep the original | 3576 // Push the receiver and use the register to keep the original |
| 3577 // number of arguments. | 3577 // number of arguments. |
| 3578 __ push(receiver); | 3578 __ push(receiver); |
| 3579 __ mov(receiver, length); | 3579 __ mov(receiver, length); |
| 3580 // The arguments are at a one pointer size offset from elements. | 3580 // The arguments are at a one pointer size offset from elements. |
| 3581 __ add(elements, elements, Operand(1 * kPointerSize)); | 3581 __ add(elements, elements, Operand(1 * kPointerSize)); |
| 3582 | 3582 |
| 3583 // Loop through the arguments pushing them onto the execution | 3583 // Loop through the arguments pushing them onto the execution |
| 3584 // stack. | 3584 // stack. |
| (...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3690 DCHECK(instr->context() != NULL); | 3690 DCHECK(instr->context() != NULL); |
| 3691 DCHECK(ToRegister(instr->context()).is(cp)); | 3691 DCHECK(ToRegister(instr->context()).is(cp)); |
| 3692 Register input = ToRegister(instr->value()); | 3692 Register input = ToRegister(instr->value()); |
| 3693 Register result = ToRegister(instr->result()); | 3693 Register result = ToRegister(instr->result()); |
| 3694 Register scratch = scratch0(); | 3694 Register scratch = scratch0(); |
| 3695 | 3695 |
| 3696 // Deoptimize if not a heap number. | 3696 // Deoptimize if not a heap number. |
| 3697 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 3697 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 3698 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 3698 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 3699 __ cmp(scratch, Operand(ip)); | 3699 __ cmp(scratch, Operand(ip)); |
| 3700 DeoptimizeIf(ne, instr, "not a heap number"); | 3700 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); |
| 3701 | 3701 |
| 3702 Label done; | 3702 Label done; |
| 3703 Register exponent = scratch0(); | 3703 Register exponent = scratch0(); |
| 3704 scratch = no_reg; | 3704 scratch = no_reg; |
| 3705 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | 3705 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
| 3706 // Check the sign of the argument. If the argument is positive, just | 3706 // Check the sign of the argument. If the argument is positive, just |
| 3707 // return it. | 3707 // return it. |
| 3708 __ tst(exponent, Operand(HeapNumber::kSignMask)); | 3708 __ tst(exponent, Operand(HeapNumber::kSignMask)); |
| 3709 // Move the input to the result if necessary. | 3709 // Move the input to the result if necessary. |
| 3710 __ Move(result, input); | 3710 __ Move(result, input); |
| (...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3758 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { | 3758 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { |
| 3759 Register input = ToRegister(instr->value()); | 3759 Register input = ToRegister(instr->value()); |
| 3760 Register result = ToRegister(instr->result()); | 3760 Register result = ToRegister(instr->result()); |
| 3761 __ cmp(input, Operand::Zero()); | 3761 __ cmp(input, Operand::Zero()); |
| 3762 __ Move(result, input, pl); | 3762 __ Move(result, input, pl); |
| 3763 // We can make rsb conditional because the previous cmp instruction | 3763 // We can make rsb conditional because the previous cmp instruction |
| 3764 // will clear the V (overflow) flag and rsb won't set this flag | 3764 // will clear the V (overflow) flag and rsb won't set this flag |
| 3765 // if input is positive. | 3765 // if input is positive. |
| 3766 __ rsb(result, input, Operand::Zero(), SetCC, mi); | 3766 __ rsb(result, input, Operand::Zero(), SetCC, mi); |
| 3767 // Deoptimize on overflow. | 3767 // Deoptimize on overflow. |
| 3768 DeoptimizeIf(vs, instr, "overflow"); | 3768 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
| 3769 } | 3769 } |
| 3770 | 3770 |
| 3771 | 3771 |
| 3772 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3772 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
| 3773 // Class for deferred case. | 3773 // Class for deferred case. |
| 3774 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { | 3774 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { |
| 3775 public: | 3775 public: |
| 3776 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) | 3776 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) |
| 3777 : LDeferredCode(codegen), instr_(instr) { } | 3777 : LDeferredCode(codegen), instr_(instr) { } |
| 3778 void Generate() OVERRIDE { | 3778 void Generate() OVERRIDE { |
| (...skipping 26 matching lines...) Expand all Loading... |
| 3805 } | 3805 } |
| 3806 | 3806 |
| 3807 | 3807 |
| 3808 void LCodeGen::DoMathFloor(LMathFloor* instr) { | 3808 void LCodeGen::DoMathFloor(LMathFloor* instr) { |
| 3809 DwVfpRegister input = ToDoubleRegister(instr->value()); | 3809 DwVfpRegister input = ToDoubleRegister(instr->value()); |
| 3810 Register result = ToRegister(instr->result()); | 3810 Register result = ToRegister(instr->result()); |
| 3811 Register input_high = scratch0(); | 3811 Register input_high = scratch0(); |
| 3812 Label done, exact; | 3812 Label done, exact; |
| 3813 | 3813 |
| 3814 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); | 3814 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); |
| 3815 DeoptimizeIf(al, instr, "lost precision or NaN"); | 3815 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN); |
| 3816 | 3816 |
| 3817 __ bind(&exact); | 3817 __ bind(&exact); |
| 3818 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3818 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3819 // Test for -0. | 3819 // Test for -0. |
| 3820 __ cmp(result, Operand::Zero()); | 3820 __ cmp(result, Operand::Zero()); |
| 3821 __ b(ne, &done); | 3821 __ b(ne, &done); |
| 3822 __ cmp(input_high, Operand::Zero()); | 3822 __ cmp(input_high, Operand::Zero()); |
| 3823 DeoptimizeIf(mi, instr, "minus zero"); | 3823 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); |
| 3824 } | 3824 } |
| 3825 __ bind(&done); | 3825 __ bind(&done); |
| 3826 } | 3826 } |
| 3827 | 3827 |
| 3828 | 3828 |
| 3829 void LCodeGen::DoMathRound(LMathRound* instr) { | 3829 void LCodeGen::DoMathRound(LMathRound* instr) { |
| 3830 DwVfpRegister input = ToDoubleRegister(instr->value()); | 3830 DwVfpRegister input = ToDoubleRegister(instr->value()); |
| 3831 Register result = ToRegister(instr->result()); | 3831 Register result = ToRegister(instr->result()); |
| 3832 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 3832 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); |
| 3833 DwVfpRegister input_plus_dot_five = double_scratch1; | 3833 DwVfpRegister input_plus_dot_five = double_scratch1; |
| 3834 Register input_high = scratch0(); | 3834 Register input_high = scratch0(); |
| 3835 DwVfpRegister dot_five = double_scratch0(); | 3835 DwVfpRegister dot_five = double_scratch0(); |
| 3836 Label convert, done; | 3836 Label convert, done; |
| 3837 | 3837 |
| 3838 __ Vmov(dot_five, 0.5, scratch0()); | 3838 __ Vmov(dot_five, 0.5, scratch0()); |
| 3839 __ vabs(double_scratch1, input); | 3839 __ vabs(double_scratch1, input); |
| 3840 __ VFPCompareAndSetFlags(double_scratch1, dot_five); | 3840 __ VFPCompareAndSetFlags(double_scratch1, dot_five); |
| 3841 // If input is in [-0.5, -0], the result is -0. | 3841 // If input is in [-0.5, -0], the result is -0. |
| 3842 // If input is in [+0, +0.5[, the result is +0. | 3842 // If input is in [+0, +0.5[, the result is +0. |
| 3843 // If the input is +0.5, the result is 1. | 3843 // If the input is +0.5, the result is 1. |
| 3844 __ b(hi, &convert); // Out of [-0.5, +0.5]. | 3844 __ b(hi, &convert); // Out of [-0.5, +0.5]. |
| 3845 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3845 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 3846 __ VmovHigh(input_high, input); | 3846 __ VmovHigh(input_high, input); |
| 3847 __ cmp(input_high, Operand::Zero()); | 3847 __ cmp(input_high, Operand::Zero()); |
| 3848 // [-0.5, -0]. | 3848 // [-0.5, -0]. |
| 3849 DeoptimizeIf(mi, instr, "minus zero"); | 3849 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); |
| 3850 } | 3850 } |
| 3851 __ VFPCompareAndSetFlags(input, dot_five); | 3851 __ VFPCompareAndSetFlags(input, dot_five); |
| 3852 __ mov(result, Operand(1), LeaveCC, eq); // +0.5. | 3852 __ mov(result, Operand(1), LeaveCC, eq); // +0.5. |
| 3853 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on | 3853 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on |
| 3854 // flag kBailoutOnMinusZero. | 3854 // flag kBailoutOnMinusZero. |
| 3855 __ mov(result, Operand::Zero(), LeaveCC, ne); | 3855 __ mov(result, Operand::Zero(), LeaveCC, ne); |
| 3856 __ b(&done); | 3856 __ b(&done); |
| 3857 | 3857 |
| 3858 __ bind(&convert); | 3858 __ bind(&convert); |
| 3859 __ vadd(input_plus_dot_five, input, dot_five); | 3859 __ vadd(input_plus_dot_five, input, dot_five); |
| 3860 // Reuse dot_five (double_scratch0) as we no longer need this value. | 3860 // Reuse dot_five (double_scratch0) as we no longer need this value. |
| 3861 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), | 3861 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), |
| 3862 &done, &done); | 3862 &done, &done); |
| 3863 DeoptimizeIf(al, instr, "lost precision or NaN"); | 3863 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN); |
| 3864 __ bind(&done); | 3864 __ bind(&done); |
| 3865 } | 3865 } |
| 3866 | 3866 |
| 3867 | 3867 |
| 3868 void LCodeGen::DoMathFround(LMathFround* instr) { | 3868 void LCodeGen::DoMathFround(LMathFround* instr) { |
| 3869 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); | 3869 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); |
| 3870 DwVfpRegister output_reg = ToDoubleRegister(instr->result()); | 3870 DwVfpRegister output_reg = ToDoubleRegister(instr->result()); |
| 3871 LowDwVfpRegister scratch = double_scratch0(); | 3871 LowDwVfpRegister scratch = double_scratch0(); |
| 3872 __ vcvt_f32_f64(scratch.low(), input_reg); | 3872 __ vcvt_f32_f64(scratch.low(), input_reg); |
| 3873 __ vcvt_f64_f32(output_reg, scratch.low()); | 3873 __ vcvt_f64_f32(output_reg, scratch.low()); |
| (...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3917 if (exponent_type.IsSmi()) { | 3917 if (exponent_type.IsSmi()) { |
| 3918 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3918 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3919 __ CallStub(&stub); | 3919 __ CallStub(&stub); |
| 3920 } else if (exponent_type.IsTagged()) { | 3920 } else if (exponent_type.IsTagged()) { |
| 3921 Label no_deopt; | 3921 Label no_deopt; |
| 3922 __ JumpIfSmi(tagged_exponent, &no_deopt); | 3922 __ JumpIfSmi(tagged_exponent, &no_deopt); |
| 3923 DCHECK(!r6.is(tagged_exponent)); | 3923 DCHECK(!r6.is(tagged_exponent)); |
| 3924 __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); | 3924 __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); |
| 3925 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 3925 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 3926 __ cmp(r6, Operand(ip)); | 3926 __ cmp(r6, Operand(ip)); |
| 3927 DeoptimizeIf(ne, instr, "not a heap number"); | 3927 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); |
| 3928 __ bind(&no_deopt); | 3928 __ bind(&no_deopt); |
| 3929 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3929 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
| 3930 __ CallStub(&stub); | 3930 __ CallStub(&stub); |
| 3931 } else if (exponent_type.IsInteger32()) { | 3931 } else if (exponent_type.IsInteger32()) { |
| 3932 MathPowStub stub(isolate(), MathPowStub::INTEGER); | 3932 MathPowStub stub(isolate(), MathPowStub::INTEGER); |
| 3933 __ CallStub(&stub); | 3933 __ CallStub(&stub); |
| 3934 } else { | 3934 } else { |
| 3935 DCHECK(exponent_type.IsDouble()); | 3935 DCHECK(exponent_type.IsDouble()); |
| 3936 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 3936 MathPowStub stub(isolate(), MathPowStub::DOUBLE); |
| 3937 __ CallStub(&stub); | 3937 __ CallStub(&stub); |
| (...skipping 388 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4326 Register index = ToRegister(instr->index()); | 4326 Register index = ToRegister(instr->index()); |
| 4327 Operand length = ToOperand(instr->length()); | 4327 Operand length = ToOperand(instr->length()); |
| 4328 __ cmp(index, length); | 4328 __ cmp(index, length); |
| 4329 } | 4329 } |
| 4330 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 4330 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
| 4331 Label done; | 4331 Label done; |
| 4332 __ b(NegateCondition(cc), &done); | 4332 __ b(NegateCondition(cc), &done); |
| 4333 __ stop("eliminated bounds check failed"); | 4333 __ stop("eliminated bounds check failed"); |
| 4334 __ bind(&done); | 4334 __ bind(&done); |
| 4335 } else { | 4335 } else { |
| 4336 DeoptimizeIf(cc, instr, "out of bounds"); | 4336 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds); |
| 4337 } | 4337 } |
| 4338 } | 4338 } |
| 4339 | 4339 |
| 4340 | 4340 |
| 4341 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4341 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
| 4342 Register external_pointer = ToRegister(instr->elements()); | 4342 Register external_pointer = ToRegister(instr->elements()); |
| 4343 Register key = no_reg; | 4343 Register key = no_reg; |
| 4344 ElementsKind elements_kind = instr->elements_kind(); | 4344 ElementsKind elements_kind = instr->elements_kind(); |
| 4345 bool key_is_constant = instr->key()->IsConstantOperand(); | 4345 bool key_is_constant = instr->key()->IsConstantOperand(); |
| 4346 int constant_key = 0; | 4346 int constant_key = 0; |
| (...skipping 227 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4574 } | 4574 } |
| 4575 __ bind(¬_applicable); | 4575 __ bind(¬_applicable); |
| 4576 } | 4576 } |
| 4577 | 4577 |
| 4578 | 4578 |
| 4579 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4579 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
| 4580 Register object = ToRegister(instr->object()); | 4580 Register object = ToRegister(instr->object()); |
| 4581 Register temp = ToRegister(instr->temp()); | 4581 Register temp = ToRegister(instr->temp()); |
| 4582 Label no_memento_found; | 4582 Label no_memento_found; |
| 4583 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); | 4583 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); |
| 4584 DeoptimizeIf(eq, instr, "memento found"); | 4584 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound); |
| 4585 __ bind(&no_memento_found); | 4585 __ bind(&no_memento_found); |
| 4586 } | 4586 } |
| 4587 | 4587 |
| 4588 | 4588 |
| 4589 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4589 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
| 4590 DCHECK(ToRegister(instr->context()).is(cp)); | 4590 DCHECK(ToRegister(instr->context()).is(cp)); |
| 4591 DCHECK(ToRegister(instr->left()).is(r1)); | 4591 DCHECK(ToRegister(instr->left()).is(r1)); |
| 4592 DCHECK(ToRegister(instr->right()).is(r0)); | 4592 DCHECK(ToRegister(instr->right()).is(r0)); |
| 4593 StringAddStub stub(isolate(), | 4593 StringAddStub stub(isolate(), |
| 4594 instr->hydrogen()->flags(), | 4594 instr->hydrogen()->flags(), |
| (...skipping 314 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4909 } | 4909 } |
| 4910 | 4910 |
| 4911 | 4911 |
| 4912 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4912 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
| 4913 HChange* hchange = instr->hydrogen(); | 4913 HChange* hchange = instr->hydrogen(); |
| 4914 Register input = ToRegister(instr->value()); | 4914 Register input = ToRegister(instr->value()); |
| 4915 Register output = ToRegister(instr->result()); | 4915 Register output = ToRegister(instr->result()); |
| 4916 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4916 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4917 hchange->value()->CheckFlag(HValue::kUint32)) { | 4917 hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4918 __ tst(input, Operand(0xc0000000)); | 4918 __ tst(input, Operand(0xc0000000)); |
| 4919 DeoptimizeIf(ne, instr, "overflow"); | 4919 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); |
| 4920 } | 4920 } |
| 4921 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4921 if (hchange->CheckFlag(HValue::kCanOverflow) && |
| 4922 !hchange->value()->CheckFlag(HValue::kUint32)) { | 4922 !hchange->value()->CheckFlag(HValue::kUint32)) { |
| 4923 __ SmiTag(output, input, SetCC); | 4923 __ SmiTag(output, input, SetCC); |
| 4924 DeoptimizeIf(vs, instr, "overflow"); | 4924 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
| 4925 } else { | 4925 } else { |
| 4926 __ SmiTag(output, input); | 4926 __ SmiTag(output, input); |
| 4927 } | 4927 } |
| 4928 } | 4928 } |
| 4929 | 4929 |
| 4930 | 4930 |
| 4931 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4931 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
| 4932 Register input = ToRegister(instr->value()); | 4932 Register input = ToRegister(instr->value()); |
| 4933 Register result = ToRegister(instr->result()); | 4933 Register result = ToRegister(instr->result()); |
| 4934 if (instr->needs_check()) { | 4934 if (instr->needs_check()) { |
| 4935 STATIC_ASSERT(kHeapObjectTag == 1); | 4935 STATIC_ASSERT(kHeapObjectTag == 1); |
| 4936 // If the input is a HeapObject, SmiUntag will set the carry flag. | 4936 // If the input is a HeapObject, SmiUntag will set the carry flag. |
| 4937 __ SmiUntag(result, input, SetCC); | 4937 __ SmiUntag(result, input, SetCC); |
| 4938 DeoptimizeIf(cs, instr, "not a Smi"); | 4938 DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi); |
| 4939 } else { | 4939 } else { |
| 4940 __ SmiUntag(result, input); | 4940 __ SmiUntag(result, input); |
| 4941 } | 4941 } |
| 4942 } | 4942 } |
| 4943 | 4943 |
| 4944 | 4944 |
| 4945 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, | 4945 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, |
| 4946 DwVfpRegister result_reg, | 4946 DwVfpRegister result_reg, |
| 4947 NumberUntagDMode mode) { | 4947 NumberUntagDMode mode) { |
| 4948 bool can_convert_undefined_to_nan = | 4948 bool can_convert_undefined_to_nan = |
| 4949 instr->hydrogen()->can_convert_undefined_to_nan(); | 4949 instr->hydrogen()->can_convert_undefined_to_nan(); |
| 4950 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); | 4950 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); |
| 4951 | 4951 |
| 4952 Register scratch = scratch0(); | 4952 Register scratch = scratch0(); |
| 4953 SwVfpRegister flt_scratch = double_scratch0().low(); | 4953 SwVfpRegister flt_scratch = double_scratch0().low(); |
| 4954 DCHECK(!result_reg.is(double_scratch0())); | 4954 DCHECK(!result_reg.is(double_scratch0())); |
| 4955 Label convert, load_smi, done; | 4955 Label convert, load_smi, done; |
| 4956 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4956 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
| 4957 // Smi check. | 4957 // Smi check. |
| 4958 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 4958 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); |
| 4959 // Heap number map check. | 4959 // Heap number map check. |
| 4960 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4960 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 4961 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 4961 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 4962 __ cmp(scratch, Operand(ip)); | 4962 __ cmp(scratch, Operand(ip)); |
| 4963 if (can_convert_undefined_to_nan) { | 4963 if (can_convert_undefined_to_nan) { |
| 4964 __ b(ne, &convert); | 4964 __ b(ne, &convert); |
| 4965 } else { | 4965 } else { |
| 4966 DeoptimizeIf(ne, instr, "not a heap number"); | 4966 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); |
| 4967 } | 4967 } |
| 4968 // load heap number | 4968 // load heap number |
| 4969 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); | 4969 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); |
| 4970 if (deoptimize_on_minus_zero) { | 4970 if (deoptimize_on_minus_zero) { |
| 4971 __ VmovLow(scratch, result_reg); | 4971 __ VmovLow(scratch, result_reg); |
| 4972 __ cmp(scratch, Operand::Zero()); | 4972 __ cmp(scratch, Operand::Zero()); |
| 4973 __ b(ne, &done); | 4973 __ b(ne, &done); |
| 4974 __ VmovHigh(scratch, result_reg); | 4974 __ VmovHigh(scratch, result_reg); |
| 4975 __ cmp(scratch, Operand(HeapNumber::kSignMask)); | 4975 __ cmp(scratch, Operand(HeapNumber::kSignMask)); |
| 4976 DeoptimizeIf(eq, instr, "minus zero"); | 4976 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
| 4977 } | 4977 } |
| 4978 __ jmp(&done); | 4978 __ jmp(&done); |
| 4979 if (can_convert_undefined_to_nan) { | 4979 if (can_convert_undefined_to_nan) { |
| 4980 __ bind(&convert); | 4980 __ bind(&convert); |
| 4981 // Convert undefined (and hole) to NaN. | 4981 // Convert undefined (and hole) to NaN. |
| 4982 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 4982 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| 4983 __ cmp(input_reg, Operand(ip)); | 4983 __ cmp(input_reg, Operand(ip)); |
| 4984 DeoptimizeIf(ne, instr, "not a heap number/undefined"); | 4984 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); |
| 4985 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4985 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
| 4986 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); | 4986 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); |
| 4987 __ jmp(&done); | 4987 __ jmp(&done); |
| 4988 } | 4988 } |
| 4989 } else { | 4989 } else { |
| 4990 __ SmiUntag(scratch, input_reg); | 4990 __ SmiUntag(scratch, input_reg); |
| 4991 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4991 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
| 4992 } | 4992 } |
| 4993 // Smi to double register conversion | 4993 // Smi to double register conversion |
| 4994 __ bind(&load_smi); | 4994 __ bind(&load_smi); |
| (...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5042 __ bind(&check_bools); | 5042 __ bind(&check_bools); |
| 5043 __ LoadRoot(ip, Heap::kTrueValueRootIndex); | 5043 __ LoadRoot(ip, Heap::kTrueValueRootIndex); |
| 5044 __ cmp(scratch2, Operand(ip)); | 5044 __ cmp(scratch2, Operand(ip)); |
| 5045 __ b(ne, &check_false); | 5045 __ b(ne, &check_false); |
| 5046 __ mov(input_reg, Operand(1)); | 5046 __ mov(input_reg, Operand(1)); |
| 5047 __ b(&done); | 5047 __ b(&done); |
| 5048 | 5048 |
| 5049 __ bind(&check_false); | 5049 __ bind(&check_false); |
| 5050 __ LoadRoot(ip, Heap::kFalseValueRootIndex); | 5050 __ LoadRoot(ip, Heap::kFalseValueRootIndex); |
| 5051 __ cmp(scratch2, Operand(ip)); | 5051 __ cmp(scratch2, Operand(ip)); |
| 5052 DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false"); | 5052 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean); |
| 5053 __ mov(input_reg, Operand::Zero()); | 5053 __ mov(input_reg, Operand::Zero()); |
| 5054 } else { | 5054 } else { |
| 5055 DeoptimizeIf(ne, instr, "not a heap number"); | 5055 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); |
| 5056 | 5056 |
| 5057 __ sub(ip, scratch2, Operand(kHeapObjectTag)); | 5057 __ sub(ip, scratch2, Operand(kHeapObjectTag)); |
| 5058 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); | 5058 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); |
| 5059 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); | 5059 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); |
| 5060 DeoptimizeIf(ne, instr, "lost precision or NaN"); | 5060 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); |
| 5061 | 5061 |
| 5062 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5062 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5063 __ cmp(input_reg, Operand::Zero()); | 5063 __ cmp(input_reg, Operand::Zero()); |
| 5064 __ b(ne, &done); | 5064 __ b(ne, &done); |
| 5065 __ VmovHigh(scratch1, double_scratch2); | 5065 __ VmovHigh(scratch1, double_scratch2); |
| 5066 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 5066 __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
| 5067 DeoptimizeIf(ne, instr, "minus zero"); | 5067 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero); |
| 5068 } | 5068 } |
| 5069 } | 5069 } |
| 5070 __ bind(&done); | 5070 __ bind(&done); |
| 5071 } | 5071 } |
| 5072 | 5072 |
| 5073 | 5073 |
| 5074 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 5074 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
| 5075 class DeferredTaggedToI FINAL : public LDeferredCode { | 5075 class DeferredTaggedToI FINAL : public LDeferredCode { |
| 5076 public: | 5076 public: |
| 5077 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 5077 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
| (...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5126 Register result_reg = ToRegister(instr->result()); | 5126 Register result_reg = ToRegister(instr->result()); |
| 5127 Register scratch1 = scratch0(); | 5127 Register scratch1 = scratch0(); |
| 5128 DwVfpRegister double_input = ToDoubleRegister(instr->value()); | 5128 DwVfpRegister double_input = ToDoubleRegister(instr->value()); |
| 5129 LowDwVfpRegister double_scratch = double_scratch0(); | 5129 LowDwVfpRegister double_scratch = double_scratch0(); |
| 5130 | 5130 |
| 5131 if (instr->truncating()) { | 5131 if (instr->truncating()) { |
| 5132 __ TruncateDoubleToI(result_reg, double_input); | 5132 __ TruncateDoubleToI(result_reg, double_input); |
| 5133 } else { | 5133 } else { |
| 5134 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); | 5134 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); |
| 5135 // Deoptimize if the input wasn't a int32 (inside a double). | 5135 // Deoptimize if the input wasn't a int32 (inside a double). |
| 5136 DeoptimizeIf(ne, instr, "lost precision or NaN"); | 5136 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); |
| 5137 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5137 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5138 Label done; | 5138 Label done; |
| 5139 __ cmp(result_reg, Operand::Zero()); | 5139 __ cmp(result_reg, Operand::Zero()); |
| 5140 __ b(ne, &done); | 5140 __ b(ne, &done); |
| 5141 __ VmovHigh(scratch1, double_input); | 5141 __ VmovHigh(scratch1, double_input); |
| 5142 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 5142 __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
| 5143 DeoptimizeIf(ne, instr, "minus zero"); | 5143 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero); |
| 5144 __ bind(&done); | 5144 __ bind(&done); |
| 5145 } | 5145 } |
| 5146 } | 5146 } |
| 5147 } | 5147 } |
| 5148 | 5148 |
| 5149 | 5149 |
| 5150 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 5150 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
| 5151 Register result_reg = ToRegister(instr->result()); | 5151 Register result_reg = ToRegister(instr->result()); |
| 5152 Register scratch1 = scratch0(); | 5152 Register scratch1 = scratch0(); |
| 5153 DwVfpRegister double_input = ToDoubleRegister(instr->value()); | 5153 DwVfpRegister double_input = ToDoubleRegister(instr->value()); |
| 5154 LowDwVfpRegister double_scratch = double_scratch0(); | 5154 LowDwVfpRegister double_scratch = double_scratch0(); |
| 5155 | 5155 |
| 5156 if (instr->truncating()) { | 5156 if (instr->truncating()) { |
| 5157 __ TruncateDoubleToI(result_reg, double_input); | 5157 __ TruncateDoubleToI(result_reg, double_input); |
| 5158 } else { | 5158 } else { |
| 5159 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); | 5159 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); |
| 5160 // Deoptimize if the input wasn't a int32 (inside a double). | 5160 // Deoptimize if the input wasn't a int32 (inside a double). |
| 5161 DeoptimizeIf(ne, instr, "lost precision or NaN"); | 5161 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); |
| 5162 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5162 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
| 5163 Label done; | 5163 Label done; |
| 5164 __ cmp(result_reg, Operand::Zero()); | 5164 __ cmp(result_reg, Operand::Zero()); |
| 5165 __ b(ne, &done); | 5165 __ b(ne, &done); |
| 5166 __ VmovHigh(scratch1, double_input); | 5166 __ VmovHigh(scratch1, double_input); |
| 5167 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 5167 __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
| 5168 DeoptimizeIf(ne, instr, "minus zero"); | 5168 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero); |
| 5169 __ bind(&done); | 5169 __ bind(&done); |
| 5170 } | 5170 } |
| 5171 } | 5171 } |
| 5172 __ SmiTag(result_reg, SetCC); | 5172 __ SmiTag(result_reg, SetCC); |
| 5173 DeoptimizeIf(vs, instr, "overflow"); | 5173 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
| 5174 } | 5174 } |
| 5175 | 5175 |
| 5176 | 5176 |
| 5177 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 5177 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
| 5178 LOperand* input = instr->value(); | 5178 LOperand* input = instr->value(); |
| 5179 __ SmiTst(ToRegister(input)); | 5179 __ SmiTst(ToRegister(input)); |
| 5180 DeoptimizeIf(ne, instr, "not a Smi"); | 5180 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi); |
| 5181 } | 5181 } |
| 5182 | 5182 |
| 5183 | 5183 |
| 5184 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 5184 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
| 5185 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 5185 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
| 5186 LOperand* input = instr->value(); | 5186 LOperand* input = instr->value(); |
| 5187 __ SmiTst(ToRegister(input)); | 5187 __ SmiTst(ToRegister(input)); |
| 5188 DeoptimizeIf(eq, instr, "Smi"); | 5188 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); |
| 5189 } | 5189 } |
| 5190 } | 5190 } |
| 5191 | 5191 |
| 5192 | 5192 |
| 5193 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 5193 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
| 5194 Register input = ToRegister(instr->value()); | 5194 Register input = ToRegister(instr->value()); |
| 5195 Register scratch = scratch0(); | 5195 Register scratch = scratch0(); |
| 5196 | 5196 |
| 5197 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 5197 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
| 5198 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 5198 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
| 5199 | 5199 |
| 5200 if (instr->hydrogen()->is_interval_check()) { | 5200 if (instr->hydrogen()->is_interval_check()) { |
| 5201 InstanceType first; | 5201 InstanceType first; |
| 5202 InstanceType last; | 5202 InstanceType last; |
| 5203 instr->hydrogen()->GetCheckInterval(&first, &last); | 5203 instr->hydrogen()->GetCheckInterval(&first, &last); |
| 5204 | 5204 |
| 5205 __ cmp(scratch, Operand(first)); | 5205 __ cmp(scratch, Operand(first)); |
| 5206 | 5206 |
| 5207 // If there is only one type in the interval check for equality. | 5207 // If there is only one type in the interval check for equality. |
| 5208 if (first == last) { | 5208 if (first == last) { |
| 5209 DeoptimizeIf(ne, instr, "wrong instance type"); | 5209 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); |
| 5210 } else { | 5210 } else { |
| 5211 DeoptimizeIf(lo, instr, "wrong instance type"); | 5211 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType); |
| 5212 // Omit check for the last type. | 5212 // Omit check for the last type. |
| 5213 if (last != LAST_TYPE) { | 5213 if (last != LAST_TYPE) { |
| 5214 __ cmp(scratch, Operand(last)); | 5214 __ cmp(scratch, Operand(last)); |
| 5215 DeoptimizeIf(hi, instr, "wrong instance type"); | 5215 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType); |
| 5216 } | 5216 } |
| 5217 } | 5217 } |
| 5218 } else { | 5218 } else { |
| 5219 uint8_t mask; | 5219 uint8_t mask; |
| 5220 uint8_t tag; | 5220 uint8_t tag; |
| 5221 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 5221 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
| 5222 | 5222 |
| 5223 if (base::bits::IsPowerOfTwo32(mask)) { | 5223 if (base::bits::IsPowerOfTwo32(mask)) { |
| 5224 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); | 5224 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); |
| 5225 __ tst(scratch, Operand(mask)); | 5225 __ tst(scratch, Operand(mask)); |
| 5226 DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type"); | 5226 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType); |
| 5227 } else { | 5227 } else { |
| 5228 __ and_(scratch, scratch, Operand(mask)); | 5228 __ and_(scratch, scratch, Operand(mask)); |
| 5229 __ cmp(scratch, Operand(tag)); | 5229 __ cmp(scratch, Operand(tag)); |
| 5230 DeoptimizeIf(ne, instr, "wrong instance type"); | 5230 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); |
| 5231 } | 5231 } |
| 5232 } | 5232 } |
| 5233 } | 5233 } |
| 5234 | 5234 |
| 5235 | 5235 |
| 5236 void LCodeGen::DoCheckValue(LCheckValue* instr) { | 5236 void LCodeGen::DoCheckValue(LCheckValue* instr) { |
| 5237 Register reg = ToRegister(instr->value()); | 5237 Register reg = ToRegister(instr->value()); |
| 5238 Handle<HeapObject> object = instr->hydrogen()->object().handle(); | 5238 Handle<HeapObject> object = instr->hydrogen()->object().handle(); |
| 5239 AllowDeferredHandleDereference smi_check; | 5239 AllowDeferredHandleDereference smi_check; |
| 5240 if (isolate()->heap()->InNewSpace(*object)) { | 5240 if (isolate()->heap()->InNewSpace(*object)) { |
| 5241 Register reg = ToRegister(instr->value()); | 5241 Register reg = ToRegister(instr->value()); |
| 5242 Handle<Cell> cell = isolate()->factory()->NewCell(object); | 5242 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
| 5243 __ mov(ip, Operand(Handle<Object>(cell))); | 5243 __ mov(ip, Operand(Handle<Object>(cell))); |
| 5244 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); | 5244 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); |
| 5245 __ cmp(reg, ip); | 5245 __ cmp(reg, ip); |
| 5246 } else { | 5246 } else { |
| 5247 __ cmp(reg, Operand(object)); | 5247 __ cmp(reg, Operand(object)); |
| 5248 } | 5248 } |
| 5249 DeoptimizeIf(ne, instr, "value mismatch"); | 5249 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch); |
| 5250 } | 5250 } |
| 5251 | 5251 |
| 5252 | 5252 |
| 5253 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 5253 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
| 5254 { | 5254 { |
| 5255 PushSafepointRegistersScope scope(this); | 5255 PushSafepointRegistersScope scope(this); |
| 5256 __ push(object); | 5256 __ push(object); |
| 5257 __ mov(cp, Operand::Zero()); | 5257 __ mov(cp, Operand::Zero()); |
| 5258 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 5258 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
| 5259 RecordSafepointWithRegisters( | 5259 RecordSafepointWithRegisters( |
| 5260 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 5260 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
| 5261 __ StoreToSafepointRegisterSlot(r0, scratch0()); | 5261 __ StoreToSafepointRegisterSlot(r0, scratch0()); |
| 5262 } | 5262 } |
| 5263 __ tst(scratch0(), Operand(kSmiTagMask)); | 5263 __ tst(scratch0(), Operand(kSmiTagMask)); |
| 5264 DeoptimizeIf(eq, instr, "instance migration failed"); | 5264 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed); |
| 5265 } | 5265 } |
| 5266 | 5266 |
| 5267 | 5267 |
| 5268 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 5268 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
| 5269 class DeferredCheckMaps FINAL : public LDeferredCode { | 5269 class DeferredCheckMaps FINAL : public LDeferredCode { |
| 5270 public: | 5270 public: |
| 5271 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 5271 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
| 5272 : LDeferredCode(codegen), instr_(instr), object_(object) { | 5272 : LDeferredCode(codegen), instr_(instr), object_(object) { |
| 5273 SetExit(check_maps()); | 5273 SetExit(check_maps()); |
| 5274 } | 5274 } |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5312 Handle<Map> map = maps->at(i).handle(); | 5312 Handle<Map> map = maps->at(i).handle(); |
| 5313 __ CompareMap(map_reg, map, &success); | 5313 __ CompareMap(map_reg, map, &success); |
| 5314 __ b(eq, &success); | 5314 __ b(eq, &success); |
| 5315 } | 5315 } |
| 5316 | 5316 |
| 5317 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 5317 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
| 5318 __ CompareMap(map_reg, map, &success); | 5318 __ CompareMap(map_reg, map, &success); |
| 5319 if (instr->hydrogen()->HasMigrationTarget()) { | 5319 if (instr->hydrogen()->HasMigrationTarget()) { |
| 5320 __ b(ne, deferred->entry()); | 5320 __ b(ne, deferred->entry()); |
| 5321 } else { | 5321 } else { |
| 5322 DeoptimizeIf(ne, instr, "wrong map"); | 5322 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); |
| 5323 } | 5323 } |
| 5324 | 5324 |
| 5325 __ bind(&success); | 5325 __ bind(&success); |
| 5326 } | 5326 } |
| 5327 | 5327 |
| 5328 | 5328 |
| 5329 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5329 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
| 5330 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5330 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); |
| 5331 Register result_reg = ToRegister(instr->result()); | 5331 Register result_reg = ToRegister(instr->result()); |
| 5332 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); | 5332 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); |
| (...skipping 18 matching lines...) Expand all Loading... |
| 5351 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); | 5351 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); |
| 5352 | 5352 |
| 5353 // Check for heap number | 5353 // Check for heap number |
| 5354 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5354 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
| 5355 __ cmp(scratch, Operand(factory()->heap_number_map())); | 5355 __ cmp(scratch, Operand(factory()->heap_number_map())); |
| 5356 __ b(eq, &heap_number); | 5356 __ b(eq, &heap_number); |
| 5357 | 5357 |
| 5358 // Check for undefined. Undefined is converted to zero for clamping | 5358 // Check for undefined. Undefined is converted to zero for clamping |
| 5359 // conversions. | 5359 // conversions. |
| 5360 __ cmp(input_reg, Operand(factory()->undefined_value())); | 5360 __ cmp(input_reg, Operand(factory()->undefined_value())); |
| 5361 DeoptimizeIf(ne, instr, "not a heap number/undefined"); | 5361 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); |
| 5362 __ mov(result_reg, Operand::Zero()); | 5362 __ mov(result_reg, Operand::Zero()); |
| 5363 __ jmp(&done); | 5363 __ jmp(&done); |
| 5364 | 5364 |
| 5365 // Heap number | 5365 // Heap number |
| 5366 __ bind(&heap_number); | 5366 __ bind(&heap_number); |
| 5367 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 5367 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
| 5368 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); | 5368 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); |
| 5369 __ jmp(&done); | 5369 __ jmp(&done); |
| 5370 | 5370 |
| 5371 // smi | 5371 // smi |
| (...skipping 447 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5819 DCHECK(!environment->HasBeenRegistered()); | 5819 DCHECK(!environment->HasBeenRegistered()); |
| 5820 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 5820 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
| 5821 | 5821 |
| 5822 GenerateOsrPrologue(); | 5822 GenerateOsrPrologue(); |
| 5823 } | 5823 } |
| 5824 | 5824 |
| 5825 | 5825 |
| 5826 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 5826 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
| 5827 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 5827 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| 5828 __ cmp(r0, ip); | 5828 __ cmp(r0, ip); |
| 5829 DeoptimizeIf(eq, instr, "undefined"); | 5829 DeoptimizeIf(eq, instr, Deoptimizer::kUndefined); |
| 5830 | 5830 |
| 5831 Register null_value = r5; | 5831 Register null_value = r5; |
| 5832 __ LoadRoot(null_value, Heap::kNullValueRootIndex); | 5832 __ LoadRoot(null_value, Heap::kNullValueRootIndex); |
| 5833 __ cmp(r0, null_value); | 5833 __ cmp(r0, null_value); |
| 5834 DeoptimizeIf(eq, instr, "null"); | 5834 DeoptimizeIf(eq, instr, Deoptimizer::kNull); |
| 5835 | 5835 |
| 5836 __ SmiTst(r0); | 5836 __ SmiTst(r0); |
| 5837 DeoptimizeIf(eq, instr, "Smi"); | 5837 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); |
| 5838 | 5838 |
| 5839 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | 5839 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
| 5840 __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE); | 5840 __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE); |
| 5841 DeoptimizeIf(le, instr, "wrong instance type"); | 5841 DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType); |
| 5842 | 5842 |
| 5843 Label use_cache, call_runtime; | 5843 Label use_cache, call_runtime; |
| 5844 __ CheckEnumCache(null_value, &call_runtime); | 5844 __ CheckEnumCache(null_value, &call_runtime); |
| 5845 | 5845 |
| 5846 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); | 5846 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| 5847 __ b(&use_cache); | 5847 __ b(&use_cache); |
| 5848 | 5848 |
| 5849 // Get the set of properties to enumerate. | 5849 // Get the set of properties to enumerate. |
| 5850 __ bind(&call_runtime); | 5850 __ bind(&call_runtime); |
| 5851 __ push(r0); | 5851 __ push(r0); |
| 5852 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | 5852 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
| 5853 | 5853 |
| 5854 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); | 5854 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| 5855 __ LoadRoot(ip, Heap::kMetaMapRootIndex); | 5855 __ LoadRoot(ip, Heap::kMetaMapRootIndex); |
| 5856 __ cmp(r1, ip); | 5856 __ cmp(r1, ip); |
| 5857 DeoptimizeIf(ne, instr, "wrong map"); | 5857 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); |
| 5858 __ bind(&use_cache); | 5858 __ bind(&use_cache); |
| 5859 } | 5859 } |
| 5860 | 5860 |
| 5861 | 5861 |
| 5862 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { | 5862 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
| 5863 Register map = ToRegister(instr->map()); | 5863 Register map = ToRegister(instr->map()); |
| 5864 Register result = ToRegister(instr->result()); | 5864 Register result = ToRegister(instr->result()); |
| 5865 Label load_cache, done; | 5865 Label load_cache, done; |
| 5866 __ EnumLength(result, map); | 5866 __ EnumLength(result, map); |
| 5867 __ cmp(result, Operand(Smi::FromInt(0))); | 5867 __ cmp(result, Operand(Smi::FromInt(0))); |
| 5868 __ b(ne, &load_cache); | 5868 __ b(ne, &load_cache); |
| 5869 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); | 5869 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); |
| 5870 __ jmp(&done); | 5870 __ jmp(&done); |
| 5871 | 5871 |
| 5872 __ bind(&load_cache); | 5872 __ bind(&load_cache); |
| 5873 __ LoadInstanceDescriptors(map, result); | 5873 __ LoadInstanceDescriptors(map, result); |
| 5874 __ ldr(result, | 5874 __ ldr(result, |
| 5875 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 5875 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
| 5876 __ ldr(result, | 5876 __ ldr(result, |
| 5877 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 5877 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
| 5878 __ cmp(result, Operand::Zero()); | 5878 __ cmp(result, Operand::Zero()); |
| 5879 DeoptimizeIf(eq, instr, "no cache"); | 5879 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache); |
| 5880 | 5880 |
| 5881 __ bind(&done); | 5881 __ bind(&done); |
| 5882 } | 5882 } |
| 5883 | 5883 |
| 5884 | 5884 |
| 5885 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5885 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
| 5886 Register object = ToRegister(instr->value()); | 5886 Register object = ToRegister(instr->value()); |
| 5887 Register map = ToRegister(instr->map()); | 5887 Register map = ToRegister(instr->map()); |
| 5888 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 5888 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); |
| 5889 __ cmp(map, scratch0()); | 5889 __ cmp(map, scratch0()); |
| 5890 DeoptimizeIf(ne, instr, "wrong map"); | 5890 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); |
| 5891 } | 5891 } |
| 5892 | 5892 |
| 5893 | 5893 |
| 5894 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 5894 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
| 5895 Register result, | 5895 Register result, |
| 5896 Register object, | 5896 Register object, |
| 5897 Register index) { | 5897 Register index) { |
| 5898 PushSafepointRegistersScope scope(this); | 5898 PushSafepointRegistersScope scope(this); |
| 5899 __ Push(object); | 5899 __ Push(object); |
| 5900 __ Push(index); | 5900 __ Push(index); |
| (...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 5978 __ Push(scope_info); | 5978 __ Push(scope_info); |
| 5979 __ push(ToRegister(instr->function())); | 5979 __ push(ToRegister(instr->function())); |
| 5980 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 5980 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
| 5981 RecordSafepoint(Safepoint::kNoLazyDeopt); | 5981 RecordSafepoint(Safepoint::kNoLazyDeopt); |
| 5982 } | 5982 } |
| 5983 | 5983 |
| 5984 | 5984 |
| 5985 #undef __ | 5985 #undef __ |
| 5986 | 5986 |
| 5987 } } // namespace v8::internal | 5987 } } // namespace v8::internal |
| OLD | NEW |