OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/arm/lithium-codegen-arm.h" | 7 #include "src/arm/lithium-codegen-arm.h" |
8 #include "src/arm/lithium-gap-resolver-arm.h" | 8 #include "src/arm/lithium-gap-resolver-arm.h" |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/code-factory.h" | 10 #include "src/code-factory.h" |
(...skipping 822 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
833 int pc_offset = masm()->pc_offset(); | 833 int pc_offset = masm()->pc_offset(); |
834 environment->Register(deoptimization_index, | 834 environment->Register(deoptimization_index, |
835 translation.index(), | 835 translation.index(), |
836 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); | 836 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); |
837 deoptimizations_.Add(environment, zone()); | 837 deoptimizations_.Add(environment, zone()); |
838 } | 838 } |
839 } | 839 } |
840 | 840 |
841 | 841 |
842 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 842 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, |
843 const char* detail, | 843 Deoptimizer::DeoptReason deopt_reason, |
844 Deoptimizer::BailoutType bailout_type) { | 844 Deoptimizer::BailoutType bailout_type) { |
845 LEnvironment* environment = instr->environment(); | 845 LEnvironment* environment = instr->environment(); |
846 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 846 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
847 DCHECK(environment->HasBeenRegistered()); | 847 DCHECK(environment->HasBeenRegistered()); |
848 int id = environment->deoptimization_index(); | 848 int id = environment->deoptimization_index(); |
849 DCHECK(info()->IsOptimizing() || info()->IsStub()); | 849 DCHECK(info()->IsOptimizing() || info()->IsStub()); |
850 Address entry = | 850 Address entry = |
851 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); | 851 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); |
852 if (entry == NULL) { | 852 if (entry == NULL) { |
853 Abort(kBailoutWasNotPrepared); | 853 Abort(kBailoutWasNotPrepared); |
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
887 condition = ne; | 887 condition = ne; |
888 __ cmp(scratch, Operand::Zero()); | 888 __ cmp(scratch, Operand::Zero()); |
889 } | 889 } |
890 } | 890 } |
891 | 891 |
892 if (info()->ShouldTrapOnDeopt()) { | 892 if (info()->ShouldTrapOnDeopt()) { |
893 __ stop("trap_on_deopt", condition); | 893 __ stop("trap_on_deopt", condition); |
894 } | 894 } |
895 | 895 |
896 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), | 896 Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(), |
897 instr->Mnemonic(), detail); | 897 instr->Mnemonic(), deopt_reason); |
898 DCHECK(info()->IsStub() || frame_is_built_); | 898 DCHECK(info()->IsStub() || frame_is_built_); |
899 // Go through jump table if we need to handle condition, build frame, or | 899 // Go through jump table if we need to handle condition, build frame, or |
900 // restore caller doubles. | 900 // restore caller doubles. |
901 if (condition == al && frame_is_built_ && | 901 if (condition == al && frame_is_built_ && |
902 !info()->saves_caller_doubles()) { | 902 !info()->saves_caller_doubles()) { |
903 DeoptComment(reason); | 903 DeoptComment(reason); |
904 __ Call(entry, RelocInfo::RUNTIME_ENTRY); | 904 __ Call(entry, RelocInfo::RUNTIME_ENTRY); |
905 } else { | 905 } else { |
906 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, | 906 Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type, |
907 !frame_is_built_); | 907 !frame_is_built_); |
908 // We often have several deopts to the same entry, reuse the last | 908 // We often have several deopts to the same entry, reuse the last |
909 // jump entry if this is the case. | 909 // jump entry if this is the case. |
910 if (jump_table_.is_empty() || | 910 if (jump_table_.is_empty() || |
911 !table_entry.IsEquivalentTo(jump_table_.last())) { | 911 !table_entry.IsEquivalentTo(jump_table_.last())) { |
912 jump_table_.Add(table_entry, zone()); | 912 jump_table_.Add(table_entry, zone()); |
913 } | 913 } |
914 __ b(condition, &jump_table_.last().label); | 914 __ b(condition, &jump_table_.last().label); |
915 } | 915 } |
916 } | 916 } |
917 | 917 |
918 | 918 |
919 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, | 919 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, |
920 const char* detail) { | 920 Deoptimizer::DeoptReason deopt_reason) { |
921 Deoptimizer::BailoutType bailout_type = info()->IsStub() | 921 Deoptimizer::BailoutType bailout_type = info()->IsStub() |
922 ? Deoptimizer::LAZY | 922 ? Deoptimizer::LAZY |
923 : Deoptimizer::EAGER; | 923 : Deoptimizer::EAGER; |
924 DeoptimizeIf(condition, instr, detail, bailout_type); | 924 DeoptimizeIf(condition, instr, deopt_reason, bailout_type); |
925 } | 925 } |
926 | 926 |
927 | 927 |
928 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { | 928 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { |
929 int length = deoptimizations_.length(); | 929 int length = deoptimizations_.length(); |
930 if (length == 0) return; | 930 if (length == 0) return; |
931 Handle<DeoptimizationInputData> data = | 931 Handle<DeoptimizationInputData> data = |
932 DeoptimizationInputData::New(isolate(), length, TENURED); | 932 DeoptimizationInputData::New(isolate(), length, TENURED); |
933 | 933 |
934 Handle<ByteArray> translations = | 934 Handle<ByteArray> translations = |
(...skipping 215 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1150 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1150 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
1151 Label dividend_is_not_negative, done; | 1151 Label dividend_is_not_negative, done; |
1152 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { | 1152 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { |
1153 __ cmp(dividend, Operand::Zero()); | 1153 __ cmp(dividend, Operand::Zero()); |
1154 __ b(pl, &dividend_is_not_negative); | 1154 __ b(pl, &dividend_is_not_negative); |
1155 // Note that this is correct even for kMinInt operands. | 1155 // Note that this is correct even for kMinInt operands. |
1156 __ rsb(dividend, dividend, Operand::Zero()); | 1156 __ rsb(dividend, dividend, Operand::Zero()); |
1157 __ and_(dividend, dividend, Operand(mask)); | 1157 __ and_(dividend, dividend, Operand(mask)); |
1158 __ rsb(dividend, dividend, Operand::Zero(), SetCC); | 1158 __ rsb(dividend, dividend, Operand::Zero(), SetCC); |
1159 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1159 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1160 DeoptimizeIf(eq, instr, "minus zero"); | 1160 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
1161 } | 1161 } |
1162 __ b(&done); | 1162 __ b(&done); |
1163 } | 1163 } |
1164 | 1164 |
1165 __ bind(&dividend_is_not_negative); | 1165 __ bind(&dividend_is_not_negative); |
1166 __ and_(dividend, dividend, Operand(mask)); | 1166 __ and_(dividend, dividend, Operand(mask)); |
1167 __ bind(&done); | 1167 __ bind(&done); |
1168 } | 1168 } |
1169 | 1169 |
1170 | 1170 |
1171 void LCodeGen::DoModByConstI(LModByConstI* instr) { | 1171 void LCodeGen::DoModByConstI(LModByConstI* instr) { |
1172 Register dividend = ToRegister(instr->dividend()); | 1172 Register dividend = ToRegister(instr->dividend()); |
1173 int32_t divisor = instr->divisor(); | 1173 int32_t divisor = instr->divisor(); |
1174 Register result = ToRegister(instr->result()); | 1174 Register result = ToRegister(instr->result()); |
1175 DCHECK(!dividend.is(result)); | 1175 DCHECK(!dividend.is(result)); |
1176 | 1176 |
1177 if (divisor == 0) { | 1177 if (divisor == 0) { |
1178 DeoptimizeIf(al, instr, "division by zero"); | 1178 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); |
1179 return; | 1179 return; |
1180 } | 1180 } |
1181 | 1181 |
1182 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1182 __ TruncatingDiv(result, dividend, Abs(divisor)); |
1183 __ mov(ip, Operand(Abs(divisor))); | 1183 __ mov(ip, Operand(Abs(divisor))); |
1184 __ smull(result, ip, result, ip); | 1184 __ smull(result, ip, result, ip); |
1185 __ sub(result, dividend, result, SetCC); | 1185 __ sub(result, dividend, result, SetCC); |
1186 | 1186 |
1187 // Check for negative zero. | 1187 // Check for negative zero. |
1188 HMod* hmod = instr->hydrogen(); | 1188 HMod* hmod = instr->hydrogen(); |
1189 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1189 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1190 Label remainder_not_zero; | 1190 Label remainder_not_zero; |
1191 __ b(ne, &remainder_not_zero); | 1191 __ b(ne, &remainder_not_zero); |
1192 __ cmp(dividend, Operand::Zero()); | 1192 __ cmp(dividend, Operand::Zero()); |
1193 DeoptimizeIf(lt, instr, "minus zero"); | 1193 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); |
1194 __ bind(&remainder_not_zero); | 1194 __ bind(&remainder_not_zero); |
1195 } | 1195 } |
1196 } | 1196 } |
1197 | 1197 |
1198 | 1198 |
1199 void LCodeGen::DoModI(LModI* instr) { | 1199 void LCodeGen::DoModI(LModI* instr) { |
1200 HMod* hmod = instr->hydrogen(); | 1200 HMod* hmod = instr->hydrogen(); |
1201 if (CpuFeatures::IsSupported(SUDIV)) { | 1201 if (CpuFeatures::IsSupported(SUDIV)) { |
1202 CpuFeatureScope scope(masm(), SUDIV); | 1202 CpuFeatureScope scope(masm(), SUDIV); |
1203 | 1203 |
1204 Register left_reg = ToRegister(instr->left()); | 1204 Register left_reg = ToRegister(instr->left()); |
1205 Register right_reg = ToRegister(instr->right()); | 1205 Register right_reg = ToRegister(instr->right()); |
1206 Register result_reg = ToRegister(instr->result()); | 1206 Register result_reg = ToRegister(instr->result()); |
1207 | 1207 |
1208 Label done; | 1208 Label done; |
1209 // Check for x % 0, sdiv might signal an exception. We have to deopt in this | 1209 // Check for x % 0, sdiv might signal an exception. We have to deopt in this |
1210 // case because we can't return a NaN. | 1210 // case because we can't return a NaN. |
1211 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1211 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
1212 __ cmp(right_reg, Operand::Zero()); | 1212 __ cmp(right_reg, Operand::Zero()); |
1213 DeoptimizeIf(eq, instr, "division by zero"); | 1213 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); |
1214 } | 1214 } |
1215 | 1215 |
1216 // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we | 1216 // Check for kMinInt % -1, sdiv will return kMinInt, which is not what we |
1217 // want. We have to deopt if we care about -0, because we can't return that. | 1217 // want. We have to deopt if we care about -0, because we can't return that. |
1218 if (hmod->CheckFlag(HValue::kCanOverflow)) { | 1218 if (hmod->CheckFlag(HValue::kCanOverflow)) { |
1219 Label no_overflow_possible; | 1219 Label no_overflow_possible; |
1220 __ cmp(left_reg, Operand(kMinInt)); | 1220 __ cmp(left_reg, Operand(kMinInt)); |
1221 __ b(ne, &no_overflow_possible); | 1221 __ b(ne, &no_overflow_possible); |
1222 __ cmp(right_reg, Operand(-1)); | 1222 __ cmp(right_reg, Operand(-1)); |
1223 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1223 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1224 DeoptimizeIf(eq, instr, "minus zero"); | 1224 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
1225 } else { | 1225 } else { |
1226 __ b(ne, &no_overflow_possible); | 1226 __ b(ne, &no_overflow_possible); |
1227 __ mov(result_reg, Operand::Zero()); | 1227 __ mov(result_reg, Operand::Zero()); |
1228 __ jmp(&done); | 1228 __ jmp(&done); |
1229 } | 1229 } |
1230 __ bind(&no_overflow_possible); | 1230 __ bind(&no_overflow_possible); |
1231 } | 1231 } |
1232 | 1232 |
1233 // For 'r3 = r1 % r2' we can have the following ARM code: | 1233 // For 'r3 = r1 % r2' we can have the following ARM code: |
1234 // sdiv r3, r1, r2 | 1234 // sdiv r3, r1, r2 |
1235 // mls r3, r3, r2, r1 | 1235 // mls r3, r3, r2, r1 |
1236 | 1236 |
1237 __ sdiv(result_reg, left_reg, right_reg); | 1237 __ sdiv(result_reg, left_reg, right_reg); |
1238 __ Mls(result_reg, result_reg, right_reg, left_reg); | 1238 __ Mls(result_reg, result_reg, right_reg, left_reg); |
1239 | 1239 |
1240 // If we care about -0, test if the dividend is <0 and the result is 0. | 1240 // If we care about -0, test if the dividend is <0 and the result is 0. |
1241 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1241 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1242 __ cmp(result_reg, Operand::Zero()); | 1242 __ cmp(result_reg, Operand::Zero()); |
1243 __ b(ne, &done); | 1243 __ b(ne, &done); |
1244 __ cmp(left_reg, Operand::Zero()); | 1244 __ cmp(left_reg, Operand::Zero()); |
1245 DeoptimizeIf(lt, instr, "minus zero"); | 1245 DeoptimizeIf(lt, instr, Deoptimizer::kMinusZero); |
1246 } | 1246 } |
1247 __ bind(&done); | 1247 __ bind(&done); |
1248 | 1248 |
1249 } else { | 1249 } else { |
1250 // General case, without any SDIV support. | 1250 // General case, without any SDIV support. |
1251 Register left_reg = ToRegister(instr->left()); | 1251 Register left_reg = ToRegister(instr->left()); |
1252 Register right_reg = ToRegister(instr->right()); | 1252 Register right_reg = ToRegister(instr->right()); |
1253 Register result_reg = ToRegister(instr->result()); | 1253 Register result_reg = ToRegister(instr->result()); |
1254 Register scratch = scratch0(); | 1254 Register scratch = scratch0(); |
1255 DCHECK(!scratch.is(left_reg)); | 1255 DCHECK(!scratch.is(left_reg)); |
1256 DCHECK(!scratch.is(right_reg)); | 1256 DCHECK(!scratch.is(right_reg)); |
1257 DCHECK(!scratch.is(result_reg)); | 1257 DCHECK(!scratch.is(result_reg)); |
1258 DwVfpRegister dividend = ToDoubleRegister(instr->temp()); | 1258 DwVfpRegister dividend = ToDoubleRegister(instr->temp()); |
1259 DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); | 1259 DwVfpRegister divisor = ToDoubleRegister(instr->temp2()); |
1260 DCHECK(!divisor.is(dividend)); | 1260 DCHECK(!divisor.is(dividend)); |
1261 LowDwVfpRegister quotient = double_scratch0(); | 1261 LowDwVfpRegister quotient = double_scratch0(); |
1262 DCHECK(!quotient.is(dividend)); | 1262 DCHECK(!quotient.is(dividend)); |
1263 DCHECK(!quotient.is(divisor)); | 1263 DCHECK(!quotient.is(divisor)); |
1264 | 1264 |
1265 Label done; | 1265 Label done; |
1266 // Check for x % 0, we have to deopt in this case because we can't return a | 1266 // Check for x % 0, we have to deopt in this case because we can't return a |
1267 // NaN. | 1267 // NaN. |
1268 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { | 1268 if (hmod->CheckFlag(HValue::kCanBeDivByZero)) { |
1269 __ cmp(right_reg, Operand::Zero()); | 1269 __ cmp(right_reg, Operand::Zero()); |
1270 DeoptimizeIf(eq, instr, "division by zero"); | 1270 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); |
1271 } | 1271 } |
1272 | 1272 |
1273 __ Move(result_reg, left_reg); | 1273 __ Move(result_reg, left_reg); |
1274 // Load the arguments in VFP registers. The divisor value is preloaded | 1274 // Load the arguments in VFP registers. The divisor value is preloaded |
1275 // before. Be careful that 'right_reg' is only live on entry. | 1275 // before. Be careful that 'right_reg' is only live on entry. |
1276 // TODO(svenpanne) The last comments seems to be wrong nowadays. | 1276 // TODO(svenpanne) The last comments seems to be wrong nowadays. |
1277 __ vmov(double_scratch0().low(), left_reg); | 1277 __ vmov(double_scratch0().low(), left_reg); |
1278 __ vcvt_f64_s32(dividend, double_scratch0().low()); | 1278 __ vcvt_f64_s32(dividend, double_scratch0().low()); |
1279 __ vmov(double_scratch0().low(), right_reg); | 1279 __ vmov(double_scratch0().low(), right_reg); |
1280 __ vcvt_f64_s32(divisor, double_scratch0().low()); | 1280 __ vcvt_f64_s32(divisor, double_scratch0().low()); |
1281 | 1281 |
1282 // We do not care about the sign of the divisor. Note that we still handle | 1282 // We do not care about the sign of the divisor. Note that we still handle |
1283 // the kMinInt % -1 case correctly, though. | 1283 // the kMinInt % -1 case correctly, though. |
1284 __ vabs(divisor, divisor); | 1284 __ vabs(divisor, divisor); |
1285 // Compute the quotient and round it to a 32bit integer. | 1285 // Compute the quotient and round it to a 32bit integer. |
1286 __ vdiv(quotient, dividend, divisor); | 1286 __ vdiv(quotient, dividend, divisor); |
1287 __ vcvt_s32_f64(quotient.low(), quotient); | 1287 __ vcvt_s32_f64(quotient.low(), quotient); |
1288 __ vcvt_f64_s32(quotient, quotient.low()); | 1288 __ vcvt_f64_s32(quotient, quotient.low()); |
1289 | 1289 |
1290 // Compute the remainder in result. | 1290 // Compute the remainder in result. |
1291 __ vmul(double_scratch0(), divisor, quotient); | 1291 __ vmul(double_scratch0(), divisor, quotient); |
1292 __ vcvt_s32_f64(double_scratch0().low(), double_scratch0()); | 1292 __ vcvt_s32_f64(double_scratch0().low(), double_scratch0()); |
1293 __ vmov(scratch, double_scratch0().low()); | 1293 __ vmov(scratch, double_scratch0().low()); |
1294 __ sub(result_reg, left_reg, scratch, SetCC); | 1294 __ sub(result_reg, left_reg, scratch, SetCC); |
1295 | 1295 |
1296 // If we care about -0, test if the dividend is <0 and the result is 0. | 1296 // If we care about -0, test if the dividend is <0 and the result is 0. |
1297 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1297 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1298 __ b(ne, &done); | 1298 __ b(ne, &done); |
1299 __ cmp(left_reg, Operand::Zero()); | 1299 __ cmp(left_reg, Operand::Zero()); |
1300 DeoptimizeIf(mi, instr, "minus zero"); | 1300 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); |
1301 } | 1301 } |
1302 __ bind(&done); | 1302 __ bind(&done); |
1303 } | 1303 } |
1304 } | 1304 } |
1305 | 1305 |
1306 | 1306 |
1307 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { | 1307 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { |
1308 Register dividend = ToRegister(instr->dividend()); | 1308 Register dividend = ToRegister(instr->dividend()); |
1309 int32_t divisor = instr->divisor(); | 1309 int32_t divisor = instr->divisor(); |
1310 Register result = ToRegister(instr->result()); | 1310 Register result = ToRegister(instr->result()); |
1311 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); | 1311 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); |
1312 DCHECK(!result.is(dividend)); | 1312 DCHECK(!result.is(dividend)); |
1313 | 1313 |
1314 // Check for (0 / -x) that will produce negative zero. | 1314 // Check for (0 / -x) that will produce negative zero. |
1315 HDiv* hdiv = instr->hydrogen(); | 1315 HDiv* hdiv = instr->hydrogen(); |
1316 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1316 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
1317 __ cmp(dividend, Operand::Zero()); | 1317 __ cmp(dividend, Operand::Zero()); |
1318 DeoptimizeIf(eq, instr, "minus zero"); | 1318 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
1319 } | 1319 } |
1320 // Check for (kMinInt / -1). | 1320 // Check for (kMinInt / -1). |
1321 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { | 1321 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { |
1322 __ cmp(dividend, Operand(kMinInt)); | 1322 __ cmp(dividend, Operand(kMinInt)); |
1323 DeoptimizeIf(eq, instr, "overflow"); | 1323 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); |
1324 } | 1324 } |
1325 // Deoptimize if remainder will not be 0. | 1325 // Deoptimize if remainder will not be 0. |
1326 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && | 1326 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && |
1327 divisor != 1 && divisor != -1) { | 1327 divisor != 1 && divisor != -1) { |
1328 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); | 1328 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); |
1329 __ tst(dividend, Operand(mask)); | 1329 __ tst(dividend, Operand(mask)); |
1330 DeoptimizeIf(ne, instr, "lost precision"); | 1330 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); |
1331 } | 1331 } |
1332 | 1332 |
1333 if (divisor == -1) { // Nice shortcut, not needed for correctness. | 1333 if (divisor == -1) { // Nice shortcut, not needed for correctness. |
1334 __ rsb(result, dividend, Operand(0)); | 1334 __ rsb(result, dividend, Operand(0)); |
1335 return; | 1335 return; |
1336 } | 1336 } |
1337 int32_t shift = WhichPowerOf2Abs(divisor); | 1337 int32_t shift = WhichPowerOf2Abs(divisor); |
1338 if (shift == 0) { | 1338 if (shift == 0) { |
1339 __ mov(result, dividend); | 1339 __ mov(result, dividend); |
1340 } else if (shift == 1) { | 1340 } else if (shift == 1) { |
1341 __ add(result, dividend, Operand(dividend, LSR, 31)); | 1341 __ add(result, dividend, Operand(dividend, LSR, 31)); |
1342 } else { | 1342 } else { |
1343 __ mov(result, Operand(dividend, ASR, 31)); | 1343 __ mov(result, Operand(dividend, ASR, 31)); |
1344 __ add(result, dividend, Operand(result, LSR, 32 - shift)); | 1344 __ add(result, dividend, Operand(result, LSR, 32 - shift)); |
1345 } | 1345 } |
1346 if (shift > 0) __ mov(result, Operand(result, ASR, shift)); | 1346 if (shift > 0) __ mov(result, Operand(result, ASR, shift)); |
1347 if (divisor < 0) __ rsb(result, result, Operand(0)); | 1347 if (divisor < 0) __ rsb(result, result, Operand(0)); |
1348 } | 1348 } |
1349 | 1349 |
1350 | 1350 |
1351 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { | 1351 void LCodeGen::DoDivByConstI(LDivByConstI* instr) { |
1352 Register dividend = ToRegister(instr->dividend()); | 1352 Register dividend = ToRegister(instr->dividend()); |
1353 int32_t divisor = instr->divisor(); | 1353 int32_t divisor = instr->divisor(); |
1354 Register result = ToRegister(instr->result()); | 1354 Register result = ToRegister(instr->result()); |
1355 DCHECK(!dividend.is(result)); | 1355 DCHECK(!dividend.is(result)); |
1356 | 1356 |
1357 if (divisor == 0) { | 1357 if (divisor == 0) { |
1358 DeoptimizeIf(al, instr, "division by zero"); | 1358 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); |
1359 return; | 1359 return; |
1360 } | 1360 } |
1361 | 1361 |
1362 // Check for (0 / -x) that will produce negative zero. | 1362 // Check for (0 / -x) that will produce negative zero. |
1363 HDiv* hdiv = instr->hydrogen(); | 1363 HDiv* hdiv = instr->hydrogen(); |
1364 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1364 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
1365 __ cmp(dividend, Operand::Zero()); | 1365 __ cmp(dividend, Operand::Zero()); |
1366 DeoptimizeIf(eq, instr, "minus zero"); | 1366 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
1367 } | 1367 } |
1368 | 1368 |
1369 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1369 __ TruncatingDiv(result, dividend, Abs(divisor)); |
1370 if (divisor < 0) __ rsb(result, result, Operand::Zero()); | 1370 if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
1371 | 1371 |
1372 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { | 1372 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { |
1373 __ mov(ip, Operand(divisor)); | 1373 __ mov(ip, Operand(divisor)); |
1374 __ smull(scratch0(), ip, result, ip); | 1374 __ smull(scratch0(), ip, result, ip); |
1375 __ sub(scratch0(), scratch0(), dividend, SetCC); | 1375 __ sub(scratch0(), scratch0(), dividend, SetCC); |
1376 DeoptimizeIf(ne, instr, "lost precision"); | 1376 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); |
1377 } | 1377 } |
1378 } | 1378 } |
1379 | 1379 |
1380 | 1380 |
1381 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. | 1381 // TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI. |
1382 void LCodeGen::DoDivI(LDivI* instr) { | 1382 void LCodeGen::DoDivI(LDivI* instr) { |
1383 HBinaryOperation* hdiv = instr->hydrogen(); | 1383 HBinaryOperation* hdiv = instr->hydrogen(); |
1384 Register dividend = ToRegister(instr->dividend()); | 1384 Register dividend = ToRegister(instr->dividend()); |
1385 Register divisor = ToRegister(instr->divisor()); | 1385 Register divisor = ToRegister(instr->divisor()); |
1386 Register result = ToRegister(instr->result()); | 1386 Register result = ToRegister(instr->result()); |
1387 | 1387 |
1388 // Check for x / 0. | 1388 // Check for x / 0. |
1389 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1389 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
1390 __ cmp(divisor, Operand::Zero()); | 1390 __ cmp(divisor, Operand::Zero()); |
1391 DeoptimizeIf(eq, instr, "division by zero"); | 1391 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); |
1392 } | 1392 } |
1393 | 1393 |
1394 // Check for (0 / -x) that will produce negative zero. | 1394 // Check for (0 / -x) that will produce negative zero. |
1395 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1395 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1396 Label positive; | 1396 Label positive; |
1397 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { | 1397 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { |
1398 // Do the test only if it hadn't be done above. | 1398 // Do the test only if it hadn't be done above. |
1399 __ cmp(divisor, Operand::Zero()); | 1399 __ cmp(divisor, Operand::Zero()); |
1400 } | 1400 } |
1401 __ b(pl, &positive); | 1401 __ b(pl, &positive); |
1402 __ cmp(dividend, Operand::Zero()); | 1402 __ cmp(dividend, Operand::Zero()); |
1403 DeoptimizeIf(eq, instr, "minus zero"); | 1403 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
1404 __ bind(&positive); | 1404 __ bind(&positive); |
1405 } | 1405 } |
1406 | 1406 |
1407 // Check for (kMinInt / -1). | 1407 // Check for (kMinInt / -1). |
1408 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1408 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
1409 (!CpuFeatures::IsSupported(SUDIV) || | 1409 (!CpuFeatures::IsSupported(SUDIV) || |
1410 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { | 1410 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { |
1411 // We don't need to check for overflow when truncating with sdiv | 1411 // We don't need to check for overflow when truncating with sdiv |
1412 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. | 1412 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. |
1413 __ cmp(dividend, Operand(kMinInt)); | 1413 __ cmp(dividend, Operand(kMinInt)); |
1414 __ cmp(divisor, Operand(-1), eq); | 1414 __ cmp(divisor, Operand(-1), eq); |
1415 DeoptimizeIf(eq, instr, "overflow"); | 1415 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); |
1416 } | 1416 } |
1417 | 1417 |
1418 if (CpuFeatures::IsSupported(SUDIV)) { | 1418 if (CpuFeatures::IsSupported(SUDIV)) { |
1419 CpuFeatureScope scope(masm(), SUDIV); | 1419 CpuFeatureScope scope(masm(), SUDIV); |
1420 __ sdiv(result, dividend, divisor); | 1420 __ sdiv(result, dividend, divisor); |
1421 } else { | 1421 } else { |
1422 DoubleRegister vleft = ToDoubleRegister(instr->temp()); | 1422 DoubleRegister vleft = ToDoubleRegister(instr->temp()); |
1423 DoubleRegister vright = double_scratch0(); | 1423 DoubleRegister vright = double_scratch0(); |
1424 __ vmov(double_scratch0().low(), dividend); | 1424 __ vmov(double_scratch0().low(), dividend); |
1425 __ vcvt_f64_s32(vleft, double_scratch0().low()); | 1425 __ vcvt_f64_s32(vleft, double_scratch0().low()); |
1426 __ vmov(double_scratch0().low(), divisor); | 1426 __ vmov(double_scratch0().low(), divisor); |
1427 __ vcvt_f64_s32(vright, double_scratch0().low()); | 1427 __ vcvt_f64_s32(vright, double_scratch0().low()); |
1428 __ vdiv(vleft, vleft, vright); // vleft now contains the result. | 1428 __ vdiv(vleft, vleft, vright); // vleft now contains the result. |
1429 __ vcvt_s32_f64(double_scratch0().low(), vleft); | 1429 __ vcvt_s32_f64(double_scratch0().low(), vleft); |
1430 __ vmov(result, double_scratch0().low()); | 1430 __ vmov(result, double_scratch0().low()); |
1431 } | 1431 } |
1432 | 1432 |
1433 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { | 1433 if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { |
1434 // Compute remainder and deopt if it's not zero. | 1434 // Compute remainder and deopt if it's not zero. |
1435 Register remainder = scratch0(); | 1435 Register remainder = scratch0(); |
1436 __ Mls(remainder, result, divisor, dividend); | 1436 __ Mls(remainder, result, divisor, dividend); |
1437 __ cmp(remainder, Operand::Zero()); | 1437 __ cmp(remainder, Operand::Zero()); |
1438 DeoptimizeIf(ne, instr, "lost precision"); | 1438 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecision); |
1439 } | 1439 } |
1440 } | 1440 } |
1441 | 1441 |
1442 | 1442 |
1443 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { | 1443 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { |
1444 DwVfpRegister addend = ToDoubleRegister(instr->addend()); | 1444 DwVfpRegister addend = ToDoubleRegister(instr->addend()); |
1445 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); | 1445 DwVfpRegister multiplier = ToDoubleRegister(instr->multiplier()); |
1446 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); | 1446 DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand()); |
1447 | 1447 |
1448 // This is computed in-place. | 1448 // This is computed in-place. |
(...skipping 30 matching lines...) Expand all Loading... |
1479 // can simply do an arithmetic right shift. | 1479 // can simply do an arithmetic right shift. |
1480 int32_t shift = WhichPowerOf2Abs(divisor); | 1480 int32_t shift = WhichPowerOf2Abs(divisor); |
1481 if (divisor > 1) { | 1481 if (divisor > 1) { |
1482 __ mov(result, Operand(dividend, ASR, shift)); | 1482 __ mov(result, Operand(dividend, ASR, shift)); |
1483 return; | 1483 return; |
1484 } | 1484 } |
1485 | 1485 |
1486 // If the divisor is negative, we have to negate and handle edge cases. | 1486 // If the divisor is negative, we have to negate and handle edge cases. |
1487 __ rsb(result, dividend, Operand::Zero(), SetCC); | 1487 __ rsb(result, dividend, Operand::Zero(), SetCC); |
1488 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1488 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1489 DeoptimizeIf(eq, instr, "minus zero"); | 1489 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
1490 } | 1490 } |
1491 | 1491 |
1492 // Dividing by -1 is basically negation, unless we overflow. | 1492 // Dividing by -1 is basically negation, unless we overflow. |
1493 if (divisor == -1) { | 1493 if (divisor == -1) { |
1494 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1494 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
1495 DeoptimizeIf(vs, instr, "overflow"); | 1495 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
1496 } | 1496 } |
1497 return; | 1497 return; |
1498 } | 1498 } |
1499 | 1499 |
1500 // If the negation could not overflow, simply shifting is OK. | 1500 // If the negation could not overflow, simply shifting is OK. |
1501 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { | 1501 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { |
1502 __ mov(result, Operand(result, ASR, shift)); | 1502 __ mov(result, Operand(result, ASR, shift)); |
1503 return; | 1503 return; |
1504 } | 1504 } |
1505 | 1505 |
1506 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); | 1506 __ mov(result, Operand(kMinInt / divisor), LeaveCC, vs); |
1507 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); | 1507 __ mov(result, Operand(result, ASR, shift), LeaveCC, vc); |
1508 } | 1508 } |
1509 | 1509 |
1510 | 1510 |
1511 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { | 1511 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) { |
1512 Register dividend = ToRegister(instr->dividend()); | 1512 Register dividend = ToRegister(instr->dividend()); |
1513 int32_t divisor = instr->divisor(); | 1513 int32_t divisor = instr->divisor(); |
1514 Register result = ToRegister(instr->result()); | 1514 Register result = ToRegister(instr->result()); |
1515 DCHECK(!dividend.is(result)); | 1515 DCHECK(!dividend.is(result)); |
1516 | 1516 |
1517 if (divisor == 0) { | 1517 if (divisor == 0) { |
1518 DeoptimizeIf(al, instr, "division by zero"); | 1518 DeoptimizeIf(al, instr, Deoptimizer::kDivisionByZero); |
1519 return; | 1519 return; |
1520 } | 1520 } |
1521 | 1521 |
1522 // Check for (0 / -x) that will produce negative zero. | 1522 // Check for (0 / -x) that will produce negative zero. |
1523 HMathFloorOfDiv* hdiv = instr->hydrogen(); | 1523 HMathFloorOfDiv* hdiv = instr->hydrogen(); |
1524 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { | 1524 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { |
1525 __ cmp(dividend, Operand::Zero()); | 1525 __ cmp(dividend, Operand::Zero()); |
1526 DeoptimizeIf(eq, instr, "minus zero"); | 1526 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
1527 } | 1527 } |
1528 | 1528 |
1529 // Easy case: We need no dynamic check for the dividend and the flooring | 1529 // Easy case: We need no dynamic check for the dividend and the flooring |
1530 // division is the same as the truncating division. | 1530 // division is the same as the truncating division. |
1531 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || | 1531 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || |
1532 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { | 1532 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { |
1533 __ TruncatingDiv(result, dividend, Abs(divisor)); | 1533 __ TruncatingDiv(result, dividend, Abs(divisor)); |
1534 if (divisor < 0) __ rsb(result, result, Operand::Zero()); | 1534 if (divisor < 0) __ rsb(result, result, Operand::Zero()); |
1535 return; | 1535 return; |
1536 } | 1536 } |
(...skipping 20 matching lines...) Expand all Loading... |
1557 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. | 1557 // TODO(svenpanne) Refactor this to avoid code duplication with DoDivI. |
1558 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { | 1558 void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) { |
1559 HBinaryOperation* hdiv = instr->hydrogen(); | 1559 HBinaryOperation* hdiv = instr->hydrogen(); |
1560 Register left = ToRegister(instr->dividend()); | 1560 Register left = ToRegister(instr->dividend()); |
1561 Register right = ToRegister(instr->divisor()); | 1561 Register right = ToRegister(instr->divisor()); |
1562 Register result = ToRegister(instr->result()); | 1562 Register result = ToRegister(instr->result()); |
1563 | 1563 |
1564 // Check for x / 0. | 1564 // Check for x / 0. |
1565 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { | 1565 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { |
1566 __ cmp(right, Operand::Zero()); | 1566 __ cmp(right, Operand::Zero()); |
1567 DeoptimizeIf(eq, instr, "division by zero"); | 1567 DeoptimizeIf(eq, instr, Deoptimizer::kDivisionByZero); |
1568 } | 1568 } |
1569 | 1569 |
1570 // Check for (0 / -x) that will produce negative zero. | 1570 // Check for (0 / -x) that will produce negative zero. |
1571 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { | 1571 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { |
1572 Label positive; | 1572 Label positive; |
1573 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { | 1573 if (!instr->hydrogen_value()->CheckFlag(HValue::kCanBeDivByZero)) { |
1574 // Do the test only if it hadn't be done above. | 1574 // Do the test only if it hadn't be done above. |
1575 __ cmp(right, Operand::Zero()); | 1575 __ cmp(right, Operand::Zero()); |
1576 } | 1576 } |
1577 __ b(pl, &positive); | 1577 __ b(pl, &positive); |
1578 __ cmp(left, Operand::Zero()); | 1578 __ cmp(left, Operand::Zero()); |
1579 DeoptimizeIf(eq, instr, "minus zero"); | 1579 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
1580 __ bind(&positive); | 1580 __ bind(&positive); |
1581 } | 1581 } |
1582 | 1582 |
1583 // Check for (kMinInt / -1). | 1583 // Check for (kMinInt / -1). |
1584 if (hdiv->CheckFlag(HValue::kCanOverflow) && | 1584 if (hdiv->CheckFlag(HValue::kCanOverflow) && |
1585 (!CpuFeatures::IsSupported(SUDIV) || | 1585 (!CpuFeatures::IsSupported(SUDIV) || |
1586 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { | 1586 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32))) { |
1587 // We don't need to check for overflow when truncating with sdiv | 1587 // We don't need to check for overflow when truncating with sdiv |
1588 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. | 1588 // support because, on ARM, sdiv kMinInt, -1 -> kMinInt. |
1589 __ cmp(left, Operand(kMinInt)); | 1589 __ cmp(left, Operand(kMinInt)); |
1590 __ cmp(right, Operand(-1), eq); | 1590 __ cmp(right, Operand(-1), eq); |
1591 DeoptimizeIf(eq, instr, "overflow"); | 1591 DeoptimizeIf(eq, instr, Deoptimizer::kOverflow); |
1592 } | 1592 } |
1593 | 1593 |
1594 if (CpuFeatures::IsSupported(SUDIV)) { | 1594 if (CpuFeatures::IsSupported(SUDIV)) { |
1595 CpuFeatureScope scope(masm(), SUDIV); | 1595 CpuFeatureScope scope(masm(), SUDIV); |
1596 __ sdiv(result, left, right); | 1596 __ sdiv(result, left, right); |
1597 } else { | 1597 } else { |
1598 DoubleRegister vleft = ToDoubleRegister(instr->temp()); | 1598 DoubleRegister vleft = ToDoubleRegister(instr->temp()); |
1599 DoubleRegister vright = double_scratch0(); | 1599 DoubleRegister vright = double_scratch0(); |
1600 __ vmov(double_scratch0().low(), left); | 1600 __ vmov(double_scratch0().low(), left); |
1601 __ vcvt_f64_s32(vleft, double_scratch0().low()); | 1601 __ vcvt_f64_s32(vleft, double_scratch0().low()); |
(...skipping 25 matching lines...) Expand all Loading... |
1627 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); | 1627 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); |
1628 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1628 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
1629 | 1629 |
1630 if (right_op->IsConstantOperand()) { | 1630 if (right_op->IsConstantOperand()) { |
1631 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); | 1631 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); |
1632 | 1632 |
1633 if (bailout_on_minus_zero && (constant < 0)) { | 1633 if (bailout_on_minus_zero && (constant < 0)) { |
1634 // The case of a null constant will be handled separately. | 1634 // The case of a null constant will be handled separately. |
1635 // If constant is negative and left is null, the result should be -0. | 1635 // If constant is negative and left is null, the result should be -0. |
1636 __ cmp(left, Operand::Zero()); | 1636 __ cmp(left, Operand::Zero()); |
1637 DeoptimizeIf(eq, instr, "minus zero"); | 1637 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
1638 } | 1638 } |
1639 | 1639 |
1640 switch (constant) { | 1640 switch (constant) { |
1641 case -1: | 1641 case -1: |
1642 if (overflow) { | 1642 if (overflow) { |
1643 __ rsb(result, left, Operand::Zero(), SetCC); | 1643 __ rsb(result, left, Operand::Zero(), SetCC); |
1644 DeoptimizeIf(vs, instr, "overflow"); | 1644 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
1645 } else { | 1645 } else { |
1646 __ rsb(result, left, Operand::Zero()); | 1646 __ rsb(result, left, Operand::Zero()); |
1647 } | 1647 } |
1648 break; | 1648 break; |
1649 case 0: | 1649 case 0: |
1650 if (bailout_on_minus_zero) { | 1650 if (bailout_on_minus_zero) { |
1651 // If left is strictly negative and the constant is null, the | 1651 // If left is strictly negative and the constant is null, the |
1652 // result is -0. Deoptimize if required, otherwise return 0. | 1652 // result is -0. Deoptimize if required, otherwise return 0. |
1653 __ cmp(left, Operand::Zero()); | 1653 __ cmp(left, Operand::Zero()); |
1654 DeoptimizeIf(mi, instr, "minus zero"); | 1654 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); |
1655 } | 1655 } |
1656 __ mov(result, Operand::Zero()); | 1656 __ mov(result, Operand::Zero()); |
1657 break; | 1657 break; |
1658 case 1: | 1658 case 1: |
1659 __ Move(result, left); | 1659 __ Move(result, left); |
1660 break; | 1660 break; |
1661 default: | 1661 default: |
1662 // Multiplying by powers of two and powers of two plus or minus | 1662 // Multiplying by powers of two and powers of two plus or minus |
1663 // one can be done faster with shifted operands. | 1663 // one can be done faster with shifted operands. |
1664 // For other constants we emit standard code. | 1664 // For other constants we emit standard code. |
(...skipping 29 matching lines...) Expand all Loading... |
1694 if (overflow) { | 1694 if (overflow) { |
1695 Register scratch = scratch0(); | 1695 Register scratch = scratch0(); |
1696 // scratch:result = left * right. | 1696 // scratch:result = left * right. |
1697 if (instr->hydrogen()->representation().IsSmi()) { | 1697 if (instr->hydrogen()->representation().IsSmi()) { |
1698 __ SmiUntag(result, left); | 1698 __ SmiUntag(result, left); |
1699 __ smull(result, scratch, result, right); | 1699 __ smull(result, scratch, result, right); |
1700 } else { | 1700 } else { |
1701 __ smull(result, scratch, left, right); | 1701 __ smull(result, scratch, left, right); |
1702 } | 1702 } |
1703 __ cmp(scratch, Operand(result, ASR, 31)); | 1703 __ cmp(scratch, Operand(result, ASR, 31)); |
1704 DeoptimizeIf(ne, instr, "overflow"); | 1704 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); |
1705 } else { | 1705 } else { |
1706 if (instr->hydrogen()->representation().IsSmi()) { | 1706 if (instr->hydrogen()->representation().IsSmi()) { |
1707 __ SmiUntag(result, left); | 1707 __ SmiUntag(result, left); |
1708 __ mul(result, result, right); | 1708 __ mul(result, result, right); |
1709 } else { | 1709 } else { |
1710 __ mul(result, left, right); | 1710 __ mul(result, left, right); |
1711 } | 1711 } |
1712 } | 1712 } |
1713 | 1713 |
1714 if (bailout_on_minus_zero) { | 1714 if (bailout_on_minus_zero) { |
1715 Label done; | 1715 Label done; |
1716 __ teq(left, Operand(right)); | 1716 __ teq(left, Operand(right)); |
1717 __ b(pl, &done); | 1717 __ b(pl, &done); |
1718 // Bail out if the result is minus zero. | 1718 // Bail out if the result is minus zero. |
1719 __ cmp(result, Operand::Zero()); | 1719 __ cmp(result, Operand::Zero()); |
1720 DeoptimizeIf(eq, instr, "minus zero"); | 1720 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
1721 __ bind(&done); | 1721 __ bind(&done); |
1722 } | 1722 } |
1723 } | 1723 } |
1724 } | 1724 } |
1725 | 1725 |
1726 | 1726 |
1727 void LCodeGen::DoBitI(LBitI* instr) { | 1727 void LCodeGen::DoBitI(LBitI* instr) { |
1728 LOperand* left_op = instr->left(); | 1728 LOperand* left_op = instr->left(); |
1729 LOperand* right_op = instr->right(); | 1729 LOperand* right_op = instr->right(); |
1730 DCHECK(left_op->IsRegister()); | 1730 DCHECK(left_op->IsRegister()); |
(...skipping 42 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1773 switch (instr->op()) { | 1773 switch (instr->op()) { |
1774 case Token::ROR: | 1774 case Token::ROR: |
1775 __ mov(result, Operand(left, ROR, scratch)); | 1775 __ mov(result, Operand(left, ROR, scratch)); |
1776 break; | 1776 break; |
1777 case Token::SAR: | 1777 case Token::SAR: |
1778 __ mov(result, Operand(left, ASR, scratch)); | 1778 __ mov(result, Operand(left, ASR, scratch)); |
1779 break; | 1779 break; |
1780 case Token::SHR: | 1780 case Token::SHR: |
1781 if (instr->can_deopt()) { | 1781 if (instr->can_deopt()) { |
1782 __ mov(result, Operand(left, LSR, scratch), SetCC); | 1782 __ mov(result, Operand(left, LSR, scratch), SetCC); |
1783 DeoptimizeIf(mi, instr, "negative value"); | 1783 DeoptimizeIf(mi, instr, Deoptimizer::kNegativeValue); |
1784 } else { | 1784 } else { |
1785 __ mov(result, Operand(left, LSR, scratch)); | 1785 __ mov(result, Operand(left, LSR, scratch)); |
1786 } | 1786 } |
1787 break; | 1787 break; |
1788 case Token::SHL: | 1788 case Token::SHL: |
1789 __ mov(result, Operand(left, LSL, scratch)); | 1789 __ mov(result, Operand(left, LSL, scratch)); |
1790 break; | 1790 break; |
1791 default: | 1791 default: |
1792 UNREACHABLE(); | 1792 UNREACHABLE(); |
1793 break; | 1793 break; |
(...skipping 16 matching lines...) Expand all Loading... |
1810 } else { | 1810 } else { |
1811 __ Move(result, left); | 1811 __ Move(result, left); |
1812 } | 1812 } |
1813 break; | 1813 break; |
1814 case Token::SHR: | 1814 case Token::SHR: |
1815 if (shift_count != 0) { | 1815 if (shift_count != 0) { |
1816 __ mov(result, Operand(left, LSR, shift_count)); | 1816 __ mov(result, Operand(left, LSR, shift_count)); |
1817 } else { | 1817 } else { |
1818 if (instr->can_deopt()) { | 1818 if (instr->can_deopt()) { |
1819 __ tst(left, Operand(0x80000000)); | 1819 __ tst(left, Operand(0x80000000)); |
1820 DeoptimizeIf(ne, instr, "negative value"); | 1820 DeoptimizeIf(ne, instr, Deoptimizer::kNegativeValue); |
1821 } | 1821 } |
1822 __ Move(result, left); | 1822 __ Move(result, left); |
1823 } | 1823 } |
1824 break; | 1824 break; |
1825 case Token::SHL: | 1825 case Token::SHL: |
1826 if (shift_count != 0) { | 1826 if (shift_count != 0) { |
1827 if (instr->hydrogen_value()->representation().IsSmi() && | 1827 if (instr->hydrogen_value()->representation().IsSmi() && |
1828 instr->can_deopt()) { | 1828 instr->can_deopt()) { |
1829 if (shift_count != 1) { | 1829 if (shift_count != 1) { |
1830 __ mov(result, Operand(left, LSL, shift_count - 1)); | 1830 __ mov(result, Operand(left, LSL, shift_count - 1)); |
1831 __ SmiTag(result, result, SetCC); | 1831 __ SmiTag(result, result, SetCC); |
1832 } else { | 1832 } else { |
1833 __ SmiTag(result, left, SetCC); | 1833 __ SmiTag(result, left, SetCC); |
1834 } | 1834 } |
1835 DeoptimizeIf(vs, instr, "overflow"); | 1835 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
1836 } else { | 1836 } else { |
1837 __ mov(result, Operand(left, LSL, shift_count)); | 1837 __ mov(result, Operand(left, LSL, shift_count)); |
1838 } | 1838 } |
1839 } else { | 1839 } else { |
1840 __ Move(result, left); | 1840 __ Move(result, left); |
1841 } | 1841 } |
1842 break; | 1842 break; |
1843 default: | 1843 default: |
1844 UNREACHABLE(); | 1844 UNREACHABLE(); |
1845 break; | 1845 break; |
(...skipping 11 matching lines...) Expand all Loading... |
1857 | 1857 |
1858 if (right->IsStackSlot()) { | 1858 if (right->IsStackSlot()) { |
1859 Register right_reg = EmitLoadRegister(right, ip); | 1859 Register right_reg = EmitLoadRegister(right, ip); |
1860 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 1860 __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
1861 } else { | 1861 } else { |
1862 DCHECK(right->IsRegister() || right->IsConstantOperand()); | 1862 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
1863 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 1863 __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
1864 } | 1864 } |
1865 | 1865 |
1866 if (can_overflow) { | 1866 if (can_overflow) { |
1867 DeoptimizeIf(vs, instr, "overflow"); | 1867 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
1868 } | 1868 } |
1869 } | 1869 } |
1870 | 1870 |
1871 | 1871 |
1872 void LCodeGen::DoRSubI(LRSubI* instr) { | 1872 void LCodeGen::DoRSubI(LRSubI* instr) { |
1873 LOperand* left = instr->left(); | 1873 LOperand* left = instr->left(); |
1874 LOperand* right = instr->right(); | 1874 LOperand* right = instr->right(); |
1875 LOperand* result = instr->result(); | 1875 LOperand* result = instr->result(); |
1876 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); | 1876 bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); |
1877 SBit set_cond = can_overflow ? SetCC : LeaveCC; | 1877 SBit set_cond = can_overflow ? SetCC : LeaveCC; |
1878 | 1878 |
1879 if (right->IsStackSlot()) { | 1879 if (right->IsStackSlot()) { |
1880 Register right_reg = EmitLoadRegister(right, ip); | 1880 Register right_reg = EmitLoadRegister(right, ip); |
1881 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 1881 __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
1882 } else { | 1882 } else { |
1883 DCHECK(right->IsRegister() || right->IsConstantOperand()); | 1883 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
1884 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 1884 __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
1885 } | 1885 } |
1886 | 1886 |
1887 if (can_overflow) { | 1887 if (can_overflow) { |
1888 DeoptimizeIf(vs, instr, "overflow"); | 1888 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
1889 } | 1889 } |
1890 } | 1890 } |
1891 | 1891 |
1892 | 1892 |
1893 void LCodeGen::DoConstantI(LConstantI* instr) { | 1893 void LCodeGen::DoConstantI(LConstantI* instr) { |
1894 __ mov(ToRegister(instr->result()), Operand(instr->value())); | 1894 __ mov(ToRegister(instr->result()), Operand(instr->value())); |
1895 } | 1895 } |
1896 | 1896 |
1897 | 1897 |
1898 void LCodeGen::DoConstantS(LConstantS* instr) { | 1898 void LCodeGen::DoConstantS(LConstantS* instr) { |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1946 Register result = ToRegister(instr->result()); | 1946 Register result = ToRegister(instr->result()); |
1947 Register scratch = ToRegister(instr->temp()); | 1947 Register scratch = ToRegister(instr->temp()); |
1948 Smi* index = instr->index(); | 1948 Smi* index = instr->index(); |
1949 Label runtime, done; | 1949 Label runtime, done; |
1950 DCHECK(object.is(result)); | 1950 DCHECK(object.is(result)); |
1951 DCHECK(object.is(r0)); | 1951 DCHECK(object.is(r0)); |
1952 DCHECK(!scratch.is(scratch0())); | 1952 DCHECK(!scratch.is(scratch0())); |
1953 DCHECK(!scratch.is(object)); | 1953 DCHECK(!scratch.is(object)); |
1954 | 1954 |
1955 __ SmiTst(object); | 1955 __ SmiTst(object); |
1956 DeoptimizeIf(eq, instr, "Smi"); | 1956 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); |
1957 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); | 1957 __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE); |
1958 DeoptimizeIf(ne, instr, "not a date object"); | 1958 DeoptimizeIf(ne, instr, Deoptimizer::kNotADateObject); |
1959 | 1959 |
1960 if (index->value() == 0) { | 1960 if (index->value() == 0) { |
1961 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); | 1961 __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset)); |
1962 } else { | 1962 } else { |
1963 if (index->value() < JSDate::kFirstUncachedField) { | 1963 if (index->value() < JSDate::kFirstUncachedField) { |
1964 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); | 1964 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); |
1965 __ mov(scratch, Operand(stamp)); | 1965 __ mov(scratch, Operand(stamp)); |
1966 __ ldr(scratch, MemOperand(scratch)); | 1966 __ ldr(scratch, MemOperand(scratch)); |
1967 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); | 1967 __ ldr(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); |
1968 __ cmp(scratch, scratch0()); | 1968 __ cmp(scratch, scratch0()); |
(...skipping 96 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2065 | 2065 |
2066 if (right->IsStackSlot()) { | 2066 if (right->IsStackSlot()) { |
2067 Register right_reg = EmitLoadRegister(right, ip); | 2067 Register right_reg = EmitLoadRegister(right, ip); |
2068 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); | 2068 __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond); |
2069 } else { | 2069 } else { |
2070 DCHECK(right->IsRegister() || right->IsConstantOperand()); | 2070 DCHECK(right->IsRegister() || right->IsConstantOperand()); |
2071 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); | 2071 __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond); |
2072 } | 2072 } |
2073 | 2073 |
2074 if (can_overflow) { | 2074 if (can_overflow) { |
2075 DeoptimizeIf(vs, instr, "overflow"); | 2075 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
2076 } | 2076 } |
2077 } | 2077 } |
2078 | 2078 |
2079 | 2079 |
2080 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { | 2080 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { |
2081 LOperand* left = instr->left(); | 2081 LOperand* left = instr->left(); |
2082 LOperand* right = instr->right(); | 2082 LOperand* right = instr->right(); |
2083 HMathMinMax::Operation operation = instr->hydrogen()->operation(); | 2083 HMathMinMax::Operation operation = instr->hydrogen()->operation(); |
2084 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { | 2084 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { |
2085 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; | 2085 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; |
(...skipping 204 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2290 } | 2290 } |
2291 | 2291 |
2292 if (expected.Contains(ToBooleanStub::SMI)) { | 2292 if (expected.Contains(ToBooleanStub::SMI)) { |
2293 // Smis: 0 -> false, all other -> true. | 2293 // Smis: 0 -> false, all other -> true. |
2294 __ cmp(reg, Operand::Zero()); | 2294 __ cmp(reg, Operand::Zero()); |
2295 __ b(eq, instr->FalseLabel(chunk_)); | 2295 __ b(eq, instr->FalseLabel(chunk_)); |
2296 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); | 2296 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); |
2297 } else if (expected.NeedsMap()) { | 2297 } else if (expected.NeedsMap()) { |
2298 // If we need a map later and have a Smi -> deopt. | 2298 // If we need a map later and have a Smi -> deopt. |
2299 __ SmiTst(reg); | 2299 __ SmiTst(reg); |
2300 DeoptimizeIf(eq, instr, "Smi"); | 2300 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); |
2301 } | 2301 } |
2302 | 2302 |
2303 const Register map = scratch0(); | 2303 const Register map = scratch0(); |
2304 if (expected.NeedsMap()) { | 2304 if (expected.NeedsMap()) { |
2305 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset)); | 2305 __ ldr(map, FieldMemOperand(reg, HeapObject::kMapOffset)); |
2306 | 2306 |
2307 if (expected.CanBeUndetectable()) { | 2307 if (expected.CanBeUndetectable()) { |
2308 // Undetectable -> false. | 2308 // Undetectable -> false. |
2309 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); | 2309 __ ldrb(ip, FieldMemOperand(map, Map::kBitFieldOffset)); |
2310 __ tst(ip, Operand(1 << Map::kIsUndetectable)); | 2310 __ tst(ip, Operand(1 << Map::kIsUndetectable)); |
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2346 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); | 2346 __ VFPCompareAndSetFlags(dbl_scratch, 0.0); |
2347 __ cmp(r0, r0, vs); // NaN -> false. | 2347 __ cmp(r0, r0, vs); // NaN -> false. |
2348 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false. | 2348 __ b(eq, instr->FalseLabel(chunk_)); // +0, -0 -> false. |
2349 __ b(instr->TrueLabel(chunk_)); | 2349 __ b(instr->TrueLabel(chunk_)); |
2350 __ bind(¬_heap_number); | 2350 __ bind(¬_heap_number); |
2351 } | 2351 } |
2352 | 2352 |
2353 if (!expected.IsGeneric()) { | 2353 if (!expected.IsGeneric()) { |
2354 // We've seen something for the first time -> deopt. | 2354 // We've seen something for the first time -> deopt. |
2355 // This can only happen if we are not generic already. | 2355 // This can only happen if we are not generic already. |
2356 DeoptimizeIf(al, instr, "unexpected object"); | 2356 DeoptimizeIf(al, instr, Deoptimizer::kUnexpectedObject); |
2357 } | 2357 } |
2358 } | 2358 } |
2359 } | 2359 } |
2360 } | 2360 } |
2361 | 2361 |
2362 | 2362 |
2363 void LCodeGen::EmitGoto(int block) { | 2363 void LCodeGen::EmitGoto(int block) { |
2364 if (!IsNextEmittedBlock(block)) { | 2364 if (!IsNextEmittedBlock(block)) { |
2365 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); | 2365 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); |
2366 } | 2366 } |
(...skipping 625 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
2992 } | 2992 } |
2993 | 2993 |
2994 | 2994 |
2995 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { | 2995 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { |
2996 Register result = ToRegister(instr->result()); | 2996 Register result = ToRegister(instr->result()); |
2997 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); | 2997 __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); |
2998 __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); | 2998 __ ldr(result, FieldMemOperand(ip, Cell::kValueOffset)); |
2999 if (instr->hydrogen()->RequiresHoleCheck()) { | 2999 if (instr->hydrogen()->RequiresHoleCheck()) { |
3000 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3000 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
3001 __ cmp(result, ip); | 3001 __ cmp(result, ip); |
3002 DeoptimizeIf(eq, instr, "hole"); | 3002 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
3003 } | 3003 } |
3004 } | 3004 } |
3005 | 3005 |
3006 | 3006 |
3007 template <class T> | 3007 template <class T> |
3008 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { | 3008 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { |
3009 DCHECK(FLAG_vector_ics); | 3009 DCHECK(FLAG_vector_ics); |
3010 Register vector_register = ToRegister(instr->temp_vector()); | 3010 Register vector_register = ToRegister(instr->temp_vector()); |
3011 Register slot_register = VectorLoadICDescriptor::SlotRegister(); | 3011 Register slot_register = VectorLoadICDescriptor::SlotRegister(); |
3012 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); | 3012 DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister())); |
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3047 | 3047 |
3048 // If the cell we are storing to contains the hole it could have | 3048 // If the cell we are storing to contains the hole it could have |
3049 // been deleted from the property dictionary. In that case, we need | 3049 // been deleted from the property dictionary. In that case, we need |
3050 // to update the property details in the property dictionary to mark | 3050 // to update the property details in the property dictionary to mark |
3051 // it as no longer deleted. | 3051 // it as no longer deleted. |
3052 if (instr->hydrogen()->RequiresHoleCheck()) { | 3052 if (instr->hydrogen()->RequiresHoleCheck()) { |
3053 // We use a temp to check the payload (CompareRoot might clobber ip). | 3053 // We use a temp to check the payload (CompareRoot might clobber ip). |
3054 Register payload = ToRegister(instr->temp()); | 3054 Register payload = ToRegister(instr->temp()); |
3055 __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); | 3055 __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset)); |
3056 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); | 3056 __ CompareRoot(payload, Heap::kTheHoleValueRootIndex); |
3057 DeoptimizeIf(eq, instr, "hole"); | 3057 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
3058 } | 3058 } |
3059 | 3059 |
3060 // Store the value. | 3060 // Store the value. |
3061 __ str(value, FieldMemOperand(cell, Cell::kValueOffset)); | 3061 __ str(value, FieldMemOperand(cell, Cell::kValueOffset)); |
3062 // Cells are always rescanned, so no write barrier here. | 3062 // Cells are always rescanned, so no write barrier here. |
3063 } | 3063 } |
3064 | 3064 |
3065 | 3065 |
3066 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { | 3066 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { |
3067 Register context = ToRegister(instr->context()); | 3067 Register context = ToRegister(instr->context()); |
3068 Register result = ToRegister(instr->result()); | 3068 Register result = ToRegister(instr->result()); |
3069 __ ldr(result, ContextOperand(context, instr->slot_index())); | 3069 __ ldr(result, ContextOperand(context, instr->slot_index())); |
3070 if (instr->hydrogen()->RequiresHoleCheck()) { | 3070 if (instr->hydrogen()->RequiresHoleCheck()) { |
3071 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3071 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
3072 __ cmp(result, ip); | 3072 __ cmp(result, ip); |
3073 if (instr->hydrogen()->DeoptimizesOnHole()) { | 3073 if (instr->hydrogen()->DeoptimizesOnHole()) { |
3074 DeoptimizeIf(eq, instr, "hole"); | 3074 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
3075 } else { | 3075 } else { |
3076 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); | 3076 __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq); |
3077 } | 3077 } |
3078 } | 3078 } |
3079 } | 3079 } |
3080 | 3080 |
3081 | 3081 |
3082 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { | 3082 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { |
3083 Register context = ToRegister(instr->context()); | 3083 Register context = ToRegister(instr->context()); |
3084 Register value = ToRegister(instr->value()); | 3084 Register value = ToRegister(instr->value()); |
3085 Register scratch = scratch0(); | 3085 Register scratch = scratch0(); |
3086 MemOperand target = ContextOperand(context, instr->slot_index()); | 3086 MemOperand target = ContextOperand(context, instr->slot_index()); |
3087 | 3087 |
3088 Label skip_assignment; | 3088 Label skip_assignment; |
3089 | 3089 |
3090 if (instr->hydrogen()->RequiresHoleCheck()) { | 3090 if (instr->hydrogen()->RequiresHoleCheck()) { |
3091 __ ldr(scratch, target); | 3091 __ ldr(scratch, target); |
3092 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3092 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
3093 __ cmp(scratch, ip); | 3093 __ cmp(scratch, ip); |
3094 if (instr->hydrogen()->DeoptimizesOnHole()) { | 3094 if (instr->hydrogen()->DeoptimizesOnHole()) { |
3095 DeoptimizeIf(eq, instr, "hole"); | 3095 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
3096 } else { | 3096 } else { |
3097 __ b(ne, &skip_assignment); | 3097 __ b(ne, &skip_assignment); |
3098 } | 3098 } |
3099 } | 3099 } |
3100 | 3100 |
3101 __ str(value, target); | 3101 __ str(value, target); |
3102 if (instr->hydrogen()->NeedsWriteBarrier()) { | 3102 if (instr->hydrogen()->NeedsWriteBarrier()) { |
3103 SmiCheck check_needed = | 3103 SmiCheck check_needed = |
3104 instr->hydrogen()->value()->type().IsHeapObject() | 3104 instr->hydrogen()->value()->type().IsHeapObject() |
3105 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; | 3105 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; |
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3166 Register function = ToRegister(instr->function()); | 3166 Register function = ToRegister(instr->function()); |
3167 Register result = ToRegister(instr->result()); | 3167 Register result = ToRegister(instr->result()); |
3168 | 3168 |
3169 // Get the prototype or initial map from the function. | 3169 // Get the prototype or initial map from the function. |
3170 __ ldr(result, | 3170 __ ldr(result, |
3171 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 3171 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
3172 | 3172 |
3173 // Check that the function has a prototype or an initial map. | 3173 // Check that the function has a prototype or an initial map. |
3174 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 3174 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
3175 __ cmp(result, ip); | 3175 __ cmp(result, ip); |
3176 DeoptimizeIf(eq, instr, "hole"); | 3176 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
3177 | 3177 |
3178 // If the function does not have an initial map, we're done. | 3178 // If the function does not have an initial map, we're done. |
3179 Label done; | 3179 Label done; |
3180 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); | 3180 __ CompareObjectType(result, scratch, scratch, MAP_TYPE); |
3181 __ b(ne, &done); | 3181 __ b(ne, &done); |
3182 | 3182 |
3183 // Get the prototype from the initial map. | 3183 // Get the prototype from the initial map. |
3184 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); | 3184 __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset)); |
3185 | 3185 |
3186 // All done. | 3186 // All done. |
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3292 break; | 3292 break; |
3293 case EXTERNAL_INT32_ELEMENTS: | 3293 case EXTERNAL_INT32_ELEMENTS: |
3294 case INT32_ELEMENTS: | 3294 case INT32_ELEMENTS: |
3295 __ ldr(result, mem_operand); | 3295 __ ldr(result, mem_operand); |
3296 break; | 3296 break; |
3297 case EXTERNAL_UINT32_ELEMENTS: | 3297 case EXTERNAL_UINT32_ELEMENTS: |
3298 case UINT32_ELEMENTS: | 3298 case UINT32_ELEMENTS: |
3299 __ ldr(result, mem_operand); | 3299 __ ldr(result, mem_operand); |
3300 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { | 3300 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { |
3301 __ cmp(result, Operand(0x80000000)); | 3301 __ cmp(result, Operand(0x80000000)); |
3302 DeoptimizeIf(cs, instr, "negative value"); | 3302 DeoptimizeIf(cs, instr, Deoptimizer::kNegativeValue); |
3303 } | 3303 } |
3304 break; | 3304 break; |
3305 case FLOAT32_ELEMENTS: | 3305 case FLOAT32_ELEMENTS: |
3306 case FLOAT64_ELEMENTS: | 3306 case FLOAT64_ELEMENTS: |
3307 case EXTERNAL_FLOAT32_ELEMENTS: | 3307 case EXTERNAL_FLOAT32_ELEMENTS: |
3308 case EXTERNAL_FLOAT64_ELEMENTS: | 3308 case EXTERNAL_FLOAT64_ELEMENTS: |
3309 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3309 case FAST_HOLEY_DOUBLE_ELEMENTS: |
3310 case FAST_HOLEY_ELEMENTS: | 3310 case FAST_HOLEY_ELEMENTS: |
3311 case FAST_HOLEY_SMI_ELEMENTS: | 3311 case FAST_HOLEY_SMI_ELEMENTS: |
3312 case FAST_DOUBLE_ELEMENTS: | 3312 case FAST_DOUBLE_ELEMENTS: |
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3345 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) | 3345 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) |
3346 ? (element_size_shift - kSmiTagSize) : element_size_shift; | 3346 ? (element_size_shift - kSmiTagSize) : element_size_shift; |
3347 __ add(scratch, scratch, Operand(key, LSL, shift_size)); | 3347 __ add(scratch, scratch, Operand(key, LSL, shift_size)); |
3348 } | 3348 } |
3349 | 3349 |
3350 __ vldr(result, scratch, 0); | 3350 __ vldr(result, scratch, 0); |
3351 | 3351 |
3352 if (instr->hydrogen()->RequiresHoleCheck()) { | 3352 if (instr->hydrogen()->RequiresHoleCheck()) { |
3353 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); | 3353 __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32))); |
3354 __ cmp(scratch, Operand(kHoleNanUpper32)); | 3354 __ cmp(scratch, Operand(kHoleNanUpper32)); |
3355 DeoptimizeIf(eq, instr, "hole"); | 3355 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
3356 } | 3356 } |
3357 } | 3357 } |
3358 | 3358 |
3359 | 3359 |
3360 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { | 3360 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { |
3361 Register elements = ToRegister(instr->elements()); | 3361 Register elements = ToRegister(instr->elements()); |
3362 Register result = ToRegister(instr->result()); | 3362 Register result = ToRegister(instr->result()); |
3363 Register scratch = scratch0(); | 3363 Register scratch = scratch0(); |
3364 Register store_base = scratch; | 3364 Register store_base = scratch; |
3365 int offset = instr->base_offset(); | 3365 int offset = instr->base_offset(); |
(...skipping 13 matching lines...) Expand all Loading... |
3379 } else { | 3379 } else { |
3380 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); | 3380 __ add(scratch, elements, Operand(key, LSL, kPointerSizeLog2)); |
3381 } | 3381 } |
3382 } | 3382 } |
3383 __ ldr(result, MemOperand(store_base, offset)); | 3383 __ ldr(result, MemOperand(store_base, offset)); |
3384 | 3384 |
3385 // Check for the hole value. | 3385 // Check for the hole value. |
3386 if (instr->hydrogen()->RequiresHoleCheck()) { | 3386 if (instr->hydrogen()->RequiresHoleCheck()) { |
3387 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { | 3387 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { |
3388 __ SmiTst(result); | 3388 __ SmiTst(result); |
3389 DeoptimizeIf(ne, instr, "not a Smi"); | 3389 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi); |
3390 } else { | 3390 } else { |
3391 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); | 3391 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); |
3392 __ cmp(result, scratch); | 3392 __ cmp(result, scratch); |
3393 DeoptimizeIf(eq, instr, "hole"); | 3393 DeoptimizeIf(eq, instr, Deoptimizer::kHole); |
3394 } | 3394 } |
3395 } | 3395 } |
3396 } | 3396 } |
3397 | 3397 |
3398 | 3398 |
3399 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { | 3399 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { |
3400 if (instr->is_typed_elements()) { | 3400 if (instr->is_typed_elements()) { |
3401 DoLoadKeyedExternalArray(instr); | 3401 DoLoadKeyedExternalArray(instr); |
3402 } else if (instr->hydrogen()->representation().IsDouble()) { | 3402 } else if (instr->hydrogen()->representation().IsDouble()) { |
3403 DoLoadKeyedFixedDoubleArray(instr); | 3403 DoLoadKeyedFixedDoubleArray(instr); |
(...skipping 121 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3525 // Normal function. Replace undefined or null with global receiver. | 3525 // Normal function. Replace undefined or null with global receiver. |
3526 __ LoadRoot(scratch, Heap::kNullValueRootIndex); | 3526 __ LoadRoot(scratch, Heap::kNullValueRootIndex); |
3527 __ cmp(receiver, scratch); | 3527 __ cmp(receiver, scratch); |
3528 __ b(eq, &global_object); | 3528 __ b(eq, &global_object); |
3529 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 3529 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
3530 __ cmp(receiver, scratch); | 3530 __ cmp(receiver, scratch); |
3531 __ b(eq, &global_object); | 3531 __ b(eq, &global_object); |
3532 | 3532 |
3533 // Deoptimize if the receiver is not a JS object. | 3533 // Deoptimize if the receiver is not a JS object. |
3534 __ SmiTst(receiver); | 3534 __ SmiTst(receiver); |
3535 DeoptimizeIf(eq, instr, "Smi"); | 3535 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); |
3536 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); | 3536 __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE); |
3537 DeoptimizeIf(lt, instr, "not a JavaScript object"); | 3537 DeoptimizeIf(lt, instr, Deoptimizer::kNotAJavaScriptObject); |
3538 | 3538 |
3539 __ b(&result_in_receiver); | 3539 __ b(&result_in_receiver); |
3540 __ bind(&global_object); | 3540 __ bind(&global_object); |
3541 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); | 3541 __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset)); |
3542 __ ldr(result, | 3542 __ ldr(result, |
3543 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); | 3543 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); |
3544 __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); | 3544 __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); |
3545 | 3545 |
3546 if (result.is(receiver)) { | 3546 if (result.is(receiver)) { |
3547 __ bind(&result_in_receiver); | 3547 __ bind(&result_in_receiver); |
(...skipping 14 matching lines...) Expand all Loading... |
3562 Register elements = ToRegister(instr->elements()); | 3562 Register elements = ToRegister(instr->elements()); |
3563 Register scratch = scratch0(); | 3563 Register scratch = scratch0(); |
3564 DCHECK(receiver.is(r0)); // Used for parameter count. | 3564 DCHECK(receiver.is(r0)); // Used for parameter count. |
3565 DCHECK(function.is(r1)); // Required by InvokeFunction. | 3565 DCHECK(function.is(r1)); // Required by InvokeFunction. |
3566 DCHECK(ToRegister(instr->result()).is(r0)); | 3566 DCHECK(ToRegister(instr->result()).is(r0)); |
3567 | 3567 |
3568 // Copy the arguments to this function possibly from the | 3568 // Copy the arguments to this function possibly from the |
3569 // adaptor frame below it. | 3569 // adaptor frame below it. |
3570 const uint32_t kArgumentsLimit = 1 * KB; | 3570 const uint32_t kArgumentsLimit = 1 * KB; |
3571 __ cmp(length, Operand(kArgumentsLimit)); | 3571 __ cmp(length, Operand(kArgumentsLimit)); |
3572 DeoptimizeIf(hi, instr, "too many arguments"); | 3572 DeoptimizeIf(hi, instr, Deoptimizer::kTooManyArguments); |
3573 | 3573 |
3574 // Push the receiver and use the register to keep the original | 3574 // Push the receiver and use the register to keep the original |
3575 // number of arguments. | 3575 // number of arguments. |
3576 __ push(receiver); | 3576 __ push(receiver); |
3577 __ mov(receiver, length); | 3577 __ mov(receiver, length); |
3578 // The arguments are at a one pointer size offset from elements. | 3578 // The arguments are at a one pointer size offset from elements. |
3579 __ add(elements, elements, Operand(1 * kPointerSize)); | 3579 __ add(elements, elements, Operand(1 * kPointerSize)); |
3580 | 3580 |
3581 // Loop through the arguments pushing them onto the execution | 3581 // Loop through the arguments pushing them onto the execution |
3582 // stack. | 3582 // stack. |
(...skipping 105 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3688 DCHECK(instr->context() != NULL); | 3688 DCHECK(instr->context() != NULL); |
3689 DCHECK(ToRegister(instr->context()).is(cp)); | 3689 DCHECK(ToRegister(instr->context()).is(cp)); |
3690 Register input = ToRegister(instr->value()); | 3690 Register input = ToRegister(instr->value()); |
3691 Register result = ToRegister(instr->result()); | 3691 Register result = ToRegister(instr->result()); |
3692 Register scratch = scratch0(); | 3692 Register scratch = scratch0(); |
3693 | 3693 |
3694 // Deoptimize if not a heap number. | 3694 // Deoptimize if not a heap number. |
3695 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 3695 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
3696 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 3696 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
3697 __ cmp(scratch, Operand(ip)); | 3697 __ cmp(scratch, Operand(ip)); |
3698 DeoptimizeIf(ne, instr, "not a heap number"); | 3698 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); |
3699 | 3699 |
3700 Label done; | 3700 Label done; |
3701 Register exponent = scratch0(); | 3701 Register exponent = scratch0(); |
3702 scratch = no_reg; | 3702 scratch = no_reg; |
3703 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); | 3703 __ ldr(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); |
3704 // Check the sign of the argument. If the argument is positive, just | 3704 // Check the sign of the argument. If the argument is positive, just |
3705 // return it. | 3705 // return it. |
3706 __ tst(exponent, Operand(HeapNumber::kSignMask)); | 3706 __ tst(exponent, Operand(HeapNumber::kSignMask)); |
3707 // Move the input to the result if necessary. | 3707 // Move the input to the result if necessary. |
3708 __ Move(result, input); | 3708 __ Move(result, input); |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3756 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { | 3756 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { |
3757 Register input = ToRegister(instr->value()); | 3757 Register input = ToRegister(instr->value()); |
3758 Register result = ToRegister(instr->result()); | 3758 Register result = ToRegister(instr->result()); |
3759 __ cmp(input, Operand::Zero()); | 3759 __ cmp(input, Operand::Zero()); |
3760 __ Move(result, input, pl); | 3760 __ Move(result, input, pl); |
3761 // We can make rsb conditional because the previous cmp instruction | 3761 // We can make rsb conditional because the previous cmp instruction |
3762 // will clear the V (overflow) flag and rsb won't set this flag | 3762 // will clear the V (overflow) flag and rsb won't set this flag |
3763 // if input is positive. | 3763 // if input is positive. |
3764 __ rsb(result, input, Operand::Zero(), SetCC, mi); | 3764 __ rsb(result, input, Operand::Zero(), SetCC, mi); |
3765 // Deoptimize on overflow. | 3765 // Deoptimize on overflow. |
3766 DeoptimizeIf(vs, instr, "overflow"); | 3766 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
3767 } | 3767 } |
3768 | 3768 |
3769 | 3769 |
3770 void LCodeGen::DoMathAbs(LMathAbs* instr) { | 3770 void LCodeGen::DoMathAbs(LMathAbs* instr) { |
3771 // Class for deferred case. | 3771 // Class for deferred case. |
3772 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { | 3772 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { |
3773 public: | 3773 public: |
3774 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) | 3774 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) |
3775 : LDeferredCode(codegen), instr_(instr) { } | 3775 : LDeferredCode(codegen), instr_(instr) { } |
3776 void Generate() OVERRIDE { | 3776 void Generate() OVERRIDE { |
(...skipping 26 matching lines...) Expand all Loading... |
3803 } | 3803 } |
3804 | 3804 |
3805 | 3805 |
3806 void LCodeGen::DoMathFloor(LMathFloor* instr) { | 3806 void LCodeGen::DoMathFloor(LMathFloor* instr) { |
3807 DwVfpRegister input = ToDoubleRegister(instr->value()); | 3807 DwVfpRegister input = ToDoubleRegister(instr->value()); |
3808 Register result = ToRegister(instr->result()); | 3808 Register result = ToRegister(instr->result()); |
3809 Register input_high = scratch0(); | 3809 Register input_high = scratch0(); |
3810 Label done, exact; | 3810 Label done, exact; |
3811 | 3811 |
3812 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); | 3812 __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact); |
3813 DeoptimizeIf(al, instr, "lost precision or NaN"); | 3813 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN); |
3814 | 3814 |
3815 __ bind(&exact); | 3815 __ bind(&exact); |
3816 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3816 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3817 // Test for -0. | 3817 // Test for -0. |
3818 __ cmp(result, Operand::Zero()); | 3818 __ cmp(result, Operand::Zero()); |
3819 __ b(ne, &done); | 3819 __ b(ne, &done); |
3820 __ cmp(input_high, Operand::Zero()); | 3820 __ cmp(input_high, Operand::Zero()); |
3821 DeoptimizeIf(mi, instr, "minus zero"); | 3821 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); |
3822 } | 3822 } |
3823 __ bind(&done); | 3823 __ bind(&done); |
3824 } | 3824 } |
3825 | 3825 |
3826 | 3826 |
3827 void LCodeGen::DoMathRound(LMathRound* instr) { | 3827 void LCodeGen::DoMathRound(LMathRound* instr) { |
3828 DwVfpRegister input = ToDoubleRegister(instr->value()); | 3828 DwVfpRegister input = ToDoubleRegister(instr->value()); |
3829 Register result = ToRegister(instr->result()); | 3829 Register result = ToRegister(instr->result()); |
3830 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); | 3830 DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp()); |
3831 DwVfpRegister input_plus_dot_five = double_scratch1; | 3831 DwVfpRegister input_plus_dot_five = double_scratch1; |
3832 Register input_high = scratch0(); | 3832 Register input_high = scratch0(); |
3833 DwVfpRegister dot_five = double_scratch0(); | 3833 DwVfpRegister dot_five = double_scratch0(); |
3834 Label convert, done; | 3834 Label convert, done; |
3835 | 3835 |
3836 __ Vmov(dot_five, 0.5, scratch0()); | 3836 __ Vmov(dot_five, 0.5, scratch0()); |
3837 __ vabs(double_scratch1, input); | 3837 __ vabs(double_scratch1, input); |
3838 __ VFPCompareAndSetFlags(double_scratch1, dot_five); | 3838 __ VFPCompareAndSetFlags(double_scratch1, dot_five); |
3839 // If input is in [-0.5, -0], the result is -0. | 3839 // If input is in [-0.5, -0], the result is -0. |
3840 // If input is in [+0, +0.5[, the result is +0. | 3840 // If input is in [+0, +0.5[, the result is +0. |
3841 // If the input is +0.5, the result is 1. | 3841 // If the input is +0.5, the result is 1. |
3842 __ b(hi, &convert); // Out of [-0.5, +0.5]. | 3842 __ b(hi, &convert); // Out of [-0.5, +0.5]. |
3843 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 3843 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
3844 __ VmovHigh(input_high, input); | 3844 __ VmovHigh(input_high, input); |
3845 __ cmp(input_high, Operand::Zero()); | 3845 __ cmp(input_high, Operand::Zero()); |
3846 // [-0.5, -0]. | 3846 // [-0.5, -0]. |
3847 DeoptimizeIf(mi, instr, "minus zero"); | 3847 DeoptimizeIf(mi, instr, Deoptimizer::kMinusZero); |
3848 } | 3848 } |
3849 __ VFPCompareAndSetFlags(input, dot_five); | 3849 __ VFPCompareAndSetFlags(input, dot_five); |
3850 __ mov(result, Operand(1), LeaveCC, eq); // +0.5. | 3850 __ mov(result, Operand(1), LeaveCC, eq); // +0.5. |
3851 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on | 3851 // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on |
3852 // flag kBailoutOnMinusZero. | 3852 // flag kBailoutOnMinusZero. |
3853 __ mov(result, Operand::Zero(), LeaveCC, ne); | 3853 __ mov(result, Operand::Zero(), LeaveCC, ne); |
3854 __ b(&done); | 3854 __ b(&done); |
3855 | 3855 |
3856 __ bind(&convert); | 3856 __ bind(&convert); |
3857 __ vadd(input_plus_dot_five, input, dot_five); | 3857 __ vadd(input_plus_dot_five, input, dot_five); |
3858 // Reuse dot_five (double_scratch0) as we no longer need this value. | 3858 // Reuse dot_five (double_scratch0) as we no longer need this value. |
3859 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), | 3859 __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(), |
3860 &done, &done); | 3860 &done, &done); |
3861 DeoptimizeIf(al, instr, "lost precision or NaN"); | 3861 DeoptimizeIf(al, instr, Deoptimizer::kLostPrecisionOrNaN); |
3862 __ bind(&done); | 3862 __ bind(&done); |
3863 } | 3863 } |
3864 | 3864 |
3865 | 3865 |
3866 void LCodeGen::DoMathFround(LMathFround* instr) { | 3866 void LCodeGen::DoMathFround(LMathFround* instr) { |
3867 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); | 3867 DwVfpRegister input_reg = ToDoubleRegister(instr->value()); |
3868 DwVfpRegister output_reg = ToDoubleRegister(instr->result()); | 3868 DwVfpRegister output_reg = ToDoubleRegister(instr->result()); |
3869 LowDwVfpRegister scratch = double_scratch0(); | 3869 LowDwVfpRegister scratch = double_scratch0(); |
3870 __ vcvt_f32_f64(scratch.low(), input_reg); | 3870 __ vcvt_f32_f64(scratch.low(), input_reg); |
3871 __ vcvt_f64_f32(output_reg, scratch.low()); | 3871 __ vcvt_f64_f32(output_reg, scratch.low()); |
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3915 if (exponent_type.IsSmi()) { | 3915 if (exponent_type.IsSmi()) { |
3916 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3916 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
3917 __ CallStub(&stub); | 3917 __ CallStub(&stub); |
3918 } else if (exponent_type.IsTagged()) { | 3918 } else if (exponent_type.IsTagged()) { |
3919 Label no_deopt; | 3919 Label no_deopt; |
3920 __ JumpIfSmi(tagged_exponent, &no_deopt); | 3920 __ JumpIfSmi(tagged_exponent, &no_deopt); |
3921 DCHECK(!r6.is(tagged_exponent)); | 3921 DCHECK(!r6.is(tagged_exponent)); |
3922 __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); | 3922 __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); |
3923 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 3923 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
3924 __ cmp(r6, Operand(ip)); | 3924 __ cmp(r6, Operand(ip)); |
3925 DeoptimizeIf(ne, instr, "not a heap number"); | 3925 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); |
3926 __ bind(&no_deopt); | 3926 __ bind(&no_deopt); |
3927 MathPowStub stub(isolate(), MathPowStub::TAGGED); | 3927 MathPowStub stub(isolate(), MathPowStub::TAGGED); |
3928 __ CallStub(&stub); | 3928 __ CallStub(&stub); |
3929 } else if (exponent_type.IsInteger32()) { | 3929 } else if (exponent_type.IsInteger32()) { |
3930 MathPowStub stub(isolate(), MathPowStub::INTEGER); | 3930 MathPowStub stub(isolate(), MathPowStub::INTEGER); |
3931 __ CallStub(&stub); | 3931 __ CallStub(&stub); |
3932 } else { | 3932 } else { |
3933 DCHECK(exponent_type.IsDouble()); | 3933 DCHECK(exponent_type.IsDouble()); |
3934 MathPowStub stub(isolate(), MathPowStub::DOUBLE); | 3934 MathPowStub stub(isolate(), MathPowStub::DOUBLE); |
3935 __ CallStub(&stub); | 3935 __ CallStub(&stub); |
(...skipping 388 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4324 Register index = ToRegister(instr->index()); | 4324 Register index = ToRegister(instr->index()); |
4325 Operand length = ToOperand(instr->length()); | 4325 Operand length = ToOperand(instr->length()); |
4326 __ cmp(index, length); | 4326 __ cmp(index, length); |
4327 } | 4327 } |
4328 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { | 4328 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { |
4329 Label done; | 4329 Label done; |
4330 __ b(NegateCondition(cc), &done); | 4330 __ b(NegateCondition(cc), &done); |
4331 __ stop("eliminated bounds check failed"); | 4331 __ stop("eliminated bounds check failed"); |
4332 __ bind(&done); | 4332 __ bind(&done); |
4333 } else { | 4333 } else { |
4334 DeoptimizeIf(cc, instr, "out of bounds"); | 4334 DeoptimizeIf(cc, instr, Deoptimizer::kOutOfBounds); |
4335 } | 4335 } |
4336 } | 4336 } |
4337 | 4337 |
4338 | 4338 |
4339 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { | 4339 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { |
4340 Register external_pointer = ToRegister(instr->elements()); | 4340 Register external_pointer = ToRegister(instr->elements()); |
4341 Register key = no_reg; | 4341 Register key = no_reg; |
4342 ElementsKind elements_kind = instr->elements_kind(); | 4342 ElementsKind elements_kind = instr->elements_kind(); |
4343 bool key_is_constant = instr->key()->IsConstantOperand(); | 4343 bool key_is_constant = instr->key()->IsConstantOperand(); |
4344 int constant_key = 0; | 4344 int constant_key = 0; |
(...skipping 227 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4572 } | 4572 } |
4573 __ bind(¬_applicable); | 4573 __ bind(¬_applicable); |
4574 } | 4574 } |
4575 | 4575 |
4576 | 4576 |
4577 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { | 4577 void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) { |
4578 Register object = ToRegister(instr->object()); | 4578 Register object = ToRegister(instr->object()); |
4579 Register temp = ToRegister(instr->temp()); | 4579 Register temp = ToRegister(instr->temp()); |
4580 Label no_memento_found; | 4580 Label no_memento_found; |
4581 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); | 4581 __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found); |
4582 DeoptimizeIf(eq, instr, "memento found"); | 4582 DeoptimizeIf(eq, instr, Deoptimizer::kMementoFound); |
4583 __ bind(&no_memento_found); | 4583 __ bind(&no_memento_found); |
4584 } | 4584 } |
4585 | 4585 |
4586 | 4586 |
4587 void LCodeGen::DoStringAdd(LStringAdd* instr) { | 4587 void LCodeGen::DoStringAdd(LStringAdd* instr) { |
4588 DCHECK(ToRegister(instr->context()).is(cp)); | 4588 DCHECK(ToRegister(instr->context()).is(cp)); |
4589 DCHECK(ToRegister(instr->left()).is(r1)); | 4589 DCHECK(ToRegister(instr->left()).is(r1)); |
4590 DCHECK(ToRegister(instr->right()).is(r0)); | 4590 DCHECK(ToRegister(instr->right()).is(r0)); |
4591 StringAddStub stub(isolate(), | 4591 StringAddStub stub(isolate(), |
4592 instr->hydrogen()->flags(), | 4592 instr->hydrogen()->flags(), |
(...skipping 314 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
4907 } | 4907 } |
4908 | 4908 |
4909 | 4909 |
4910 void LCodeGen::DoSmiTag(LSmiTag* instr) { | 4910 void LCodeGen::DoSmiTag(LSmiTag* instr) { |
4911 HChange* hchange = instr->hydrogen(); | 4911 HChange* hchange = instr->hydrogen(); |
4912 Register input = ToRegister(instr->value()); | 4912 Register input = ToRegister(instr->value()); |
4913 Register output = ToRegister(instr->result()); | 4913 Register output = ToRegister(instr->result()); |
4914 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4914 if (hchange->CheckFlag(HValue::kCanOverflow) && |
4915 hchange->value()->CheckFlag(HValue::kUint32)) { | 4915 hchange->value()->CheckFlag(HValue::kUint32)) { |
4916 __ tst(input, Operand(0xc0000000)); | 4916 __ tst(input, Operand(0xc0000000)); |
4917 DeoptimizeIf(ne, instr, "overflow"); | 4917 DeoptimizeIf(ne, instr, Deoptimizer::kOverflow); |
4918 } | 4918 } |
4919 if (hchange->CheckFlag(HValue::kCanOverflow) && | 4919 if (hchange->CheckFlag(HValue::kCanOverflow) && |
4920 !hchange->value()->CheckFlag(HValue::kUint32)) { | 4920 !hchange->value()->CheckFlag(HValue::kUint32)) { |
4921 __ SmiTag(output, input, SetCC); | 4921 __ SmiTag(output, input, SetCC); |
4922 DeoptimizeIf(vs, instr, "overflow"); | 4922 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
4923 } else { | 4923 } else { |
4924 __ SmiTag(output, input); | 4924 __ SmiTag(output, input); |
4925 } | 4925 } |
4926 } | 4926 } |
4927 | 4927 |
4928 | 4928 |
4929 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { | 4929 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { |
4930 Register input = ToRegister(instr->value()); | 4930 Register input = ToRegister(instr->value()); |
4931 Register result = ToRegister(instr->result()); | 4931 Register result = ToRegister(instr->result()); |
4932 if (instr->needs_check()) { | 4932 if (instr->needs_check()) { |
4933 STATIC_ASSERT(kHeapObjectTag == 1); | 4933 STATIC_ASSERT(kHeapObjectTag == 1); |
4934 // If the input is a HeapObject, SmiUntag will set the carry flag. | 4934 // If the input is a HeapObject, SmiUntag will set the carry flag. |
4935 __ SmiUntag(result, input, SetCC); | 4935 __ SmiUntag(result, input, SetCC); |
4936 DeoptimizeIf(cs, instr, "not a Smi"); | 4936 DeoptimizeIf(cs, instr, Deoptimizer::kNotASmi); |
4937 } else { | 4937 } else { |
4938 __ SmiUntag(result, input); | 4938 __ SmiUntag(result, input); |
4939 } | 4939 } |
4940 } | 4940 } |
4941 | 4941 |
4942 | 4942 |
4943 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, | 4943 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, |
4944 DwVfpRegister result_reg, | 4944 DwVfpRegister result_reg, |
4945 NumberUntagDMode mode) { | 4945 NumberUntagDMode mode) { |
4946 bool can_convert_undefined_to_nan = | 4946 bool can_convert_undefined_to_nan = |
4947 instr->hydrogen()->can_convert_undefined_to_nan(); | 4947 instr->hydrogen()->can_convert_undefined_to_nan(); |
4948 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); | 4948 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); |
4949 | 4949 |
4950 Register scratch = scratch0(); | 4950 Register scratch = scratch0(); |
4951 SwVfpRegister flt_scratch = double_scratch0().low(); | 4951 SwVfpRegister flt_scratch = double_scratch0().low(); |
4952 DCHECK(!result_reg.is(double_scratch0())); | 4952 DCHECK(!result_reg.is(double_scratch0())); |
4953 Label convert, load_smi, done; | 4953 Label convert, load_smi, done; |
4954 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { | 4954 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { |
4955 // Smi check. | 4955 // Smi check. |
4956 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); | 4956 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); |
4957 // Heap number map check. | 4957 // Heap number map check. |
4958 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 4958 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
4959 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 4959 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
4960 __ cmp(scratch, Operand(ip)); | 4960 __ cmp(scratch, Operand(ip)); |
4961 if (can_convert_undefined_to_nan) { | 4961 if (can_convert_undefined_to_nan) { |
4962 __ b(ne, &convert); | 4962 __ b(ne, &convert); |
4963 } else { | 4963 } else { |
4964 DeoptimizeIf(ne, instr, "not a heap number"); | 4964 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); |
4965 } | 4965 } |
4966 // load heap number | 4966 // load heap number |
4967 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); | 4967 __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag); |
4968 if (deoptimize_on_minus_zero) { | 4968 if (deoptimize_on_minus_zero) { |
4969 __ VmovLow(scratch, result_reg); | 4969 __ VmovLow(scratch, result_reg); |
4970 __ cmp(scratch, Operand::Zero()); | 4970 __ cmp(scratch, Operand::Zero()); |
4971 __ b(ne, &done); | 4971 __ b(ne, &done); |
4972 __ VmovHigh(scratch, result_reg); | 4972 __ VmovHigh(scratch, result_reg); |
4973 __ cmp(scratch, Operand(HeapNumber::kSignMask)); | 4973 __ cmp(scratch, Operand(HeapNumber::kSignMask)); |
4974 DeoptimizeIf(eq, instr, "minus zero"); | 4974 DeoptimizeIf(eq, instr, Deoptimizer::kMinusZero); |
4975 } | 4975 } |
4976 __ jmp(&done); | 4976 __ jmp(&done); |
4977 if (can_convert_undefined_to_nan) { | 4977 if (can_convert_undefined_to_nan) { |
4978 __ bind(&convert); | 4978 __ bind(&convert); |
4979 // Convert undefined (and hole) to NaN. | 4979 // Convert undefined (and hole) to NaN. |
4980 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 4980 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
4981 __ cmp(input_reg, Operand(ip)); | 4981 __ cmp(input_reg, Operand(ip)); |
4982 DeoptimizeIf(ne, instr, "not a heap number/undefined"); | 4982 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); |
4983 __ LoadRoot(scratch, Heap::kNanValueRootIndex); | 4983 __ LoadRoot(scratch, Heap::kNanValueRootIndex); |
4984 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); | 4984 __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag); |
4985 __ jmp(&done); | 4985 __ jmp(&done); |
4986 } | 4986 } |
4987 } else { | 4987 } else { |
4988 __ SmiUntag(scratch, input_reg); | 4988 __ SmiUntag(scratch, input_reg); |
4989 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); | 4989 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); |
4990 } | 4990 } |
4991 // Smi to double register conversion | 4991 // Smi to double register conversion |
4992 __ bind(&load_smi); | 4992 __ bind(&load_smi); |
(...skipping 47 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5040 __ bind(&check_bools); | 5040 __ bind(&check_bools); |
5041 __ LoadRoot(ip, Heap::kTrueValueRootIndex); | 5041 __ LoadRoot(ip, Heap::kTrueValueRootIndex); |
5042 __ cmp(scratch2, Operand(ip)); | 5042 __ cmp(scratch2, Operand(ip)); |
5043 __ b(ne, &check_false); | 5043 __ b(ne, &check_false); |
5044 __ mov(input_reg, Operand(1)); | 5044 __ mov(input_reg, Operand(1)); |
5045 __ b(&done); | 5045 __ b(&done); |
5046 | 5046 |
5047 __ bind(&check_false); | 5047 __ bind(&check_false); |
5048 __ LoadRoot(ip, Heap::kFalseValueRootIndex); | 5048 __ LoadRoot(ip, Heap::kFalseValueRootIndex); |
5049 __ cmp(scratch2, Operand(ip)); | 5049 __ cmp(scratch2, Operand(ip)); |
5050 DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false"); | 5050 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefinedBoolean); |
5051 __ mov(input_reg, Operand::Zero()); | 5051 __ mov(input_reg, Operand::Zero()); |
5052 } else { | 5052 } else { |
5053 DeoptimizeIf(ne, instr, "not a heap number"); | 5053 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumber); |
5054 | 5054 |
5055 __ sub(ip, scratch2, Operand(kHeapObjectTag)); | 5055 __ sub(ip, scratch2, Operand(kHeapObjectTag)); |
5056 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); | 5056 __ vldr(double_scratch2, ip, HeapNumber::kValueOffset); |
5057 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); | 5057 __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch); |
5058 DeoptimizeIf(ne, instr, "lost precision or NaN"); | 5058 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); |
5059 | 5059 |
5060 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5060 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
5061 __ cmp(input_reg, Operand::Zero()); | 5061 __ cmp(input_reg, Operand::Zero()); |
5062 __ b(ne, &done); | 5062 __ b(ne, &done); |
5063 __ VmovHigh(scratch1, double_scratch2); | 5063 __ VmovHigh(scratch1, double_scratch2); |
5064 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 5064 __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
5065 DeoptimizeIf(ne, instr, "minus zero"); | 5065 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero); |
5066 } | 5066 } |
5067 } | 5067 } |
5068 __ bind(&done); | 5068 __ bind(&done); |
5069 } | 5069 } |
5070 | 5070 |
5071 | 5071 |
5072 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { | 5072 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { |
5073 class DeferredTaggedToI FINAL : public LDeferredCode { | 5073 class DeferredTaggedToI FINAL : public LDeferredCode { |
5074 public: | 5074 public: |
5075 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) | 5075 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) |
(...skipping 48 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5124 Register result_reg = ToRegister(instr->result()); | 5124 Register result_reg = ToRegister(instr->result()); |
5125 Register scratch1 = scratch0(); | 5125 Register scratch1 = scratch0(); |
5126 DwVfpRegister double_input = ToDoubleRegister(instr->value()); | 5126 DwVfpRegister double_input = ToDoubleRegister(instr->value()); |
5127 LowDwVfpRegister double_scratch = double_scratch0(); | 5127 LowDwVfpRegister double_scratch = double_scratch0(); |
5128 | 5128 |
5129 if (instr->truncating()) { | 5129 if (instr->truncating()) { |
5130 __ TruncateDoubleToI(result_reg, double_input); | 5130 __ TruncateDoubleToI(result_reg, double_input); |
5131 } else { | 5131 } else { |
5132 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); | 5132 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); |
5133 // Deoptimize if the input wasn't an int32 (inside a double). | 5133 // Deoptimize if the input wasn't an int32 (inside a double). |
5134 DeoptimizeIf(ne, instr, "lost precision or NaN"); | 5134 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); |
5135 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5135 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
5136 Label done; | 5136 Label done; |
5137 __ cmp(result_reg, Operand::Zero()); | 5137 __ cmp(result_reg, Operand::Zero()); |
5138 __ b(ne, &done); | 5138 __ b(ne, &done); |
5139 __ VmovHigh(scratch1, double_input); | 5139 __ VmovHigh(scratch1, double_input); |
5140 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 5140 __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
5141 DeoptimizeIf(ne, instr, "minus zero"); | 5141 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero); |
5142 __ bind(&done); | 5142 __ bind(&done); |
5143 } | 5143 } |
5144 } | 5144 } |
5145 } | 5145 } |
5146 | 5146 |
5147 | 5147 |
5148 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { | 5148 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { |
5149 Register result_reg = ToRegister(instr->result()); | 5149 Register result_reg = ToRegister(instr->result()); |
5150 Register scratch1 = scratch0(); | 5150 Register scratch1 = scratch0(); |
5151 DwVfpRegister double_input = ToDoubleRegister(instr->value()); | 5151 DwVfpRegister double_input = ToDoubleRegister(instr->value()); |
5152 LowDwVfpRegister double_scratch = double_scratch0(); | 5152 LowDwVfpRegister double_scratch = double_scratch0(); |
5153 | 5153 |
5154 if (instr->truncating()) { | 5154 if (instr->truncating()) { |
5155 __ TruncateDoubleToI(result_reg, double_input); | 5155 __ TruncateDoubleToI(result_reg, double_input); |
5156 } else { | 5156 } else { |
5157 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); | 5157 __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch); |
5158 // Deoptimize if the input wasn't an int32 (inside a double). | 5158 // Deoptimize if the input wasn't an int32 (inside a double). |
5159 DeoptimizeIf(ne, instr, "lost precision or NaN"); | 5159 DeoptimizeIf(ne, instr, Deoptimizer::kLostPrecisionOrNaN); |
5160 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { | 5160 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { |
5161 Label done; | 5161 Label done; |
5162 __ cmp(result_reg, Operand::Zero()); | 5162 __ cmp(result_reg, Operand::Zero()); |
5163 __ b(ne, &done); | 5163 __ b(ne, &done); |
5164 __ VmovHigh(scratch1, double_input); | 5164 __ VmovHigh(scratch1, double_input); |
5165 __ tst(scratch1, Operand(HeapNumber::kSignMask)); | 5165 __ tst(scratch1, Operand(HeapNumber::kSignMask)); |
5166 DeoptimizeIf(ne, instr, "minus zero"); | 5166 DeoptimizeIf(ne, instr, Deoptimizer::kMinusZero); |
5167 __ bind(&done); | 5167 __ bind(&done); |
5168 } | 5168 } |
5169 } | 5169 } |
5170 __ SmiTag(result_reg, SetCC); | 5170 __ SmiTag(result_reg, SetCC); |
5171 DeoptimizeIf(vs, instr, "overflow"); | 5171 DeoptimizeIf(vs, instr, Deoptimizer::kOverflow); |
5172 } | 5172 } |
5173 | 5173 |
5174 | 5174 |
5175 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { | 5175 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { |
5176 LOperand* input = instr->value(); | 5176 LOperand* input = instr->value(); |
5177 __ SmiTst(ToRegister(input)); | 5177 __ SmiTst(ToRegister(input)); |
5178 DeoptimizeIf(ne, instr, "not a Smi"); | 5178 DeoptimizeIf(ne, instr, Deoptimizer::kNotASmi); |
5179 } | 5179 } |
5180 | 5180 |
5181 | 5181 |
5182 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { | 5182 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { |
5183 if (!instr->hydrogen()->value()->type().IsHeapObject()) { | 5183 if (!instr->hydrogen()->value()->type().IsHeapObject()) { |
5184 LOperand* input = instr->value(); | 5184 LOperand* input = instr->value(); |
5185 __ SmiTst(ToRegister(input)); | 5185 __ SmiTst(ToRegister(input)); |
5186 DeoptimizeIf(eq, instr, "Smi"); | 5186 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); |
5187 } | 5187 } |
5188 } | 5188 } |
5189 | 5189 |
5190 | 5190 |
5191 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { | 5191 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { |
5192 Register input = ToRegister(instr->value()); | 5192 Register input = ToRegister(instr->value()); |
5193 Register scratch = scratch0(); | 5193 Register scratch = scratch0(); |
5194 | 5194 |
5195 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); | 5195 __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); |
5196 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 5196 __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
5197 | 5197 |
5198 if (instr->hydrogen()->is_interval_check()) { | 5198 if (instr->hydrogen()->is_interval_check()) { |
5199 InstanceType first; | 5199 InstanceType first; |
5200 InstanceType last; | 5200 InstanceType last; |
5201 instr->hydrogen()->GetCheckInterval(&first, &last); | 5201 instr->hydrogen()->GetCheckInterval(&first, &last); |
5202 | 5202 |
5203 __ cmp(scratch, Operand(first)); | 5203 __ cmp(scratch, Operand(first)); |
5204 | 5204 |
5205 // If there is only one type in the interval check for equality. | 5205 // If there is only one type in the interval check for equality. |
5206 if (first == last) { | 5206 if (first == last) { |
5207 DeoptimizeIf(ne, instr, "wrong instance type"); | 5207 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); |
5208 } else { | 5208 } else { |
5209 DeoptimizeIf(lo, instr, "wrong instance type"); | 5209 DeoptimizeIf(lo, instr, Deoptimizer::kWrongInstanceType); |
5210 // Omit check for the last type. | 5210 // Omit check for the last type. |
5211 if (last != LAST_TYPE) { | 5211 if (last != LAST_TYPE) { |
5212 __ cmp(scratch, Operand(last)); | 5212 __ cmp(scratch, Operand(last)); |
5213 DeoptimizeIf(hi, instr, "wrong instance type"); | 5213 DeoptimizeIf(hi, instr, Deoptimizer::kWrongInstanceType); |
5214 } | 5214 } |
5215 } | 5215 } |
5216 } else { | 5216 } else { |
5217 uint8_t mask; | 5217 uint8_t mask; |
5218 uint8_t tag; | 5218 uint8_t tag; |
5219 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); | 5219 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); |
5220 | 5220 |
5221 if (base::bits::IsPowerOfTwo32(mask)) { | 5221 if (base::bits::IsPowerOfTwo32(mask)) { |
5222 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); | 5222 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); |
5223 __ tst(scratch, Operand(mask)); | 5223 __ tst(scratch, Operand(mask)); |
5224 DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type"); | 5224 DeoptimizeIf(tag == 0 ? ne : eq, instr, Deoptimizer::kWrongInstanceType); |
5225 } else { | 5225 } else { |
5226 __ and_(scratch, scratch, Operand(mask)); | 5226 __ and_(scratch, scratch, Operand(mask)); |
5227 __ cmp(scratch, Operand(tag)); | 5227 __ cmp(scratch, Operand(tag)); |
5228 DeoptimizeIf(ne, instr, "wrong instance type"); | 5228 DeoptimizeIf(ne, instr, Deoptimizer::kWrongInstanceType); |
5229 } | 5229 } |
5230 } | 5230 } |
5231 } | 5231 } |
5232 | 5232 |
5233 | 5233 |
5234 void LCodeGen::DoCheckValue(LCheckValue* instr) { | 5234 void LCodeGen::DoCheckValue(LCheckValue* instr) { |
5235 Register reg = ToRegister(instr->value()); | 5235 Register reg = ToRegister(instr->value()); |
5236 Handle<HeapObject> object = instr->hydrogen()->object().handle(); | 5236 Handle<HeapObject> object = instr->hydrogen()->object().handle(); |
5237 AllowDeferredHandleDereference smi_check; | 5237 AllowDeferredHandleDereference smi_check; |
5238 if (isolate()->heap()->InNewSpace(*object)) { | 5238 if (isolate()->heap()->InNewSpace(*object)) { |
5239 Register reg = ToRegister(instr->value()); | 5239 Register reg = ToRegister(instr->value()); |
5240 Handle<Cell> cell = isolate()->factory()->NewCell(object); | 5240 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
5241 __ mov(ip, Operand(Handle<Object>(cell))); | 5241 __ mov(ip, Operand(Handle<Object>(cell))); |
5242 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); | 5242 __ ldr(ip, FieldMemOperand(ip, Cell::kValueOffset)); |
5243 __ cmp(reg, ip); | 5243 __ cmp(reg, ip); |
5244 } else { | 5244 } else { |
5245 __ cmp(reg, Operand(object)); | 5245 __ cmp(reg, Operand(object)); |
5246 } | 5246 } |
5247 DeoptimizeIf(ne, instr, "value mismatch"); | 5247 DeoptimizeIf(ne, instr, Deoptimizer::kValueMismatch); |
5248 } | 5248 } |
5249 | 5249 |
5250 | 5250 |
5251 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { | 5251 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { |
5252 { | 5252 { |
5253 PushSafepointRegistersScope scope(this); | 5253 PushSafepointRegistersScope scope(this); |
5254 __ push(object); | 5254 __ push(object); |
5255 __ mov(cp, Operand::Zero()); | 5255 __ mov(cp, Operand::Zero()); |
5256 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); | 5256 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); |
5257 RecordSafepointWithRegisters( | 5257 RecordSafepointWithRegisters( |
5258 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); | 5258 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); |
5259 __ StoreToSafepointRegisterSlot(r0, scratch0()); | 5259 __ StoreToSafepointRegisterSlot(r0, scratch0()); |
5260 } | 5260 } |
5261 __ tst(scratch0(), Operand(kSmiTagMask)); | 5261 __ tst(scratch0(), Operand(kSmiTagMask)); |
5262 DeoptimizeIf(eq, instr, "instance migration failed"); | 5262 DeoptimizeIf(eq, instr, Deoptimizer::kInstanceMigrationFailed); |
5263 } | 5263 } |
5264 | 5264 |
5265 | 5265 |
5266 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { | 5266 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { |
5267 class DeferredCheckMaps FINAL : public LDeferredCode { | 5267 class DeferredCheckMaps FINAL : public LDeferredCode { |
5268 public: | 5268 public: |
5269 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) | 5269 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) |
5270 : LDeferredCode(codegen), instr_(instr), object_(object) { | 5270 : LDeferredCode(codegen), instr_(instr), object_(object) { |
5271 SetExit(check_maps()); | 5271 SetExit(check_maps()); |
5272 } | 5272 } |
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5310 Handle<Map> map = maps->at(i).handle(); | 5310 Handle<Map> map = maps->at(i).handle(); |
5311 __ CompareMap(map_reg, map, &success); | 5311 __ CompareMap(map_reg, map, &success); |
5312 __ b(eq, &success); | 5312 __ b(eq, &success); |
5313 } | 5313 } |
5314 | 5314 |
5315 Handle<Map> map = maps->at(maps->size() - 1).handle(); | 5315 Handle<Map> map = maps->at(maps->size() - 1).handle(); |
5316 __ CompareMap(map_reg, map, &success); | 5316 __ CompareMap(map_reg, map, &success); |
5317 if (instr->hydrogen()->HasMigrationTarget()) { | 5317 if (instr->hydrogen()->HasMigrationTarget()) { |
5318 __ b(ne, deferred->entry()); | 5318 __ b(ne, deferred->entry()); |
5319 } else { | 5319 } else { |
5320 DeoptimizeIf(ne, instr, "wrong map"); | 5320 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); |
5321 } | 5321 } |
5322 | 5322 |
5323 __ bind(&success); | 5323 __ bind(&success); |
5324 } | 5324 } |
5325 | 5325 |
5326 | 5326 |
5327 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { | 5327 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { |
5328 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); | 5328 DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped()); |
5329 Register result_reg = ToRegister(instr->result()); | 5329 Register result_reg = ToRegister(instr->result()); |
5330 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); | 5330 __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0()); |
(...skipping 18 matching lines...) Expand all Loading... |
5349 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); | 5349 __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi); |
5350 | 5350 |
5351 // Check for heap number | 5351 // Check for heap number |
5352 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); | 5352 __ ldr(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); |
5353 __ cmp(scratch, Operand(factory()->heap_number_map())); | 5353 __ cmp(scratch, Operand(factory()->heap_number_map())); |
5354 __ b(eq, &heap_number); | 5354 __ b(eq, &heap_number); |
5355 | 5355 |
5356 // Check for undefined. Undefined is converted to zero for clamping | 5356 // Check for undefined. Undefined is converted to zero for clamping |
5357 // conversions. | 5357 // conversions. |
5358 __ cmp(input_reg, Operand(factory()->undefined_value())); | 5358 __ cmp(input_reg, Operand(factory()->undefined_value())); |
5359 DeoptimizeIf(ne, instr, "not a heap number/undefined"); | 5359 DeoptimizeIf(ne, instr, Deoptimizer::kNotAHeapNumberUndefined); |
5360 __ mov(result_reg, Operand::Zero()); | 5360 __ mov(result_reg, Operand::Zero()); |
5361 __ jmp(&done); | 5361 __ jmp(&done); |
5362 | 5362 |
5363 // Heap number | 5363 // Heap number |
5364 __ bind(&heap_number); | 5364 __ bind(&heap_number); |
5365 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); | 5365 __ vldr(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); |
5366 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); | 5366 __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0()); |
5367 __ jmp(&done); | 5367 __ jmp(&done); |
5368 | 5368 |
5369 // smi | 5369 // smi |
(...skipping 447 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5817 DCHECK(!environment->HasBeenRegistered()); | 5817 DCHECK(!environment->HasBeenRegistered()); |
5818 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); | 5818 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); |
5819 | 5819 |
5820 GenerateOsrPrologue(); | 5820 GenerateOsrPrologue(); |
5821 } | 5821 } |
5822 | 5822 |
5823 | 5823 |
5824 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { | 5824 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) { |
5825 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 5825 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
5826 __ cmp(r0, ip); | 5826 __ cmp(r0, ip); |
5827 DeoptimizeIf(eq, instr, "undefined"); | 5827 DeoptimizeIf(eq, instr, Deoptimizer::kUndefined); |
5828 | 5828 |
5829 Register null_value = r5; | 5829 Register null_value = r5; |
5830 __ LoadRoot(null_value, Heap::kNullValueRootIndex); | 5830 __ LoadRoot(null_value, Heap::kNullValueRootIndex); |
5831 __ cmp(r0, null_value); | 5831 __ cmp(r0, null_value); |
5832 DeoptimizeIf(eq, instr, "null"); | 5832 DeoptimizeIf(eq, instr, Deoptimizer::kNull); |
5833 | 5833 |
5834 __ SmiTst(r0); | 5834 __ SmiTst(r0); |
5835 DeoptimizeIf(eq, instr, "Smi"); | 5835 DeoptimizeIf(eq, instr, Deoptimizer::kSmi); |
5836 | 5836 |
5837 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); | 5837 STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE); |
5838 __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE); | 5838 __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE); |
5839 DeoptimizeIf(le, instr, "wrong instance type"); | 5839 DeoptimizeIf(le, instr, Deoptimizer::kWrongInstanceType); |
5840 | 5840 |
5841 Label use_cache, call_runtime; | 5841 Label use_cache, call_runtime; |
5842 __ CheckEnumCache(null_value, &call_runtime); | 5842 __ CheckEnumCache(null_value, &call_runtime); |
5843 | 5843 |
5844 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); | 5844 __ ldr(r0, FieldMemOperand(r0, HeapObject::kMapOffset)); |
5845 __ b(&use_cache); | 5845 __ b(&use_cache); |
5846 | 5846 |
5847 // Get the set of properties to enumerate. | 5847 // Get the set of properties to enumerate. |
5848 __ bind(&call_runtime); | 5848 __ bind(&call_runtime); |
5849 __ push(r0); | 5849 __ push(r0); |
5850 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); | 5850 CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr); |
5851 | 5851 |
5852 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); | 5852 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); |
5853 __ LoadRoot(ip, Heap::kMetaMapRootIndex); | 5853 __ LoadRoot(ip, Heap::kMetaMapRootIndex); |
5854 __ cmp(r1, ip); | 5854 __ cmp(r1, ip); |
5855 DeoptimizeIf(ne, instr, "wrong map"); | 5855 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); |
5856 __ bind(&use_cache); | 5856 __ bind(&use_cache); |
5857 } | 5857 } |
5858 | 5858 |
5859 | 5859 |
5860 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { | 5860 void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) { |
5861 Register map = ToRegister(instr->map()); | 5861 Register map = ToRegister(instr->map()); |
5862 Register result = ToRegister(instr->result()); | 5862 Register result = ToRegister(instr->result()); |
5863 Label load_cache, done; | 5863 Label load_cache, done; |
5864 __ EnumLength(result, map); | 5864 __ EnumLength(result, map); |
5865 __ cmp(result, Operand(Smi::FromInt(0))); | 5865 __ cmp(result, Operand(Smi::FromInt(0))); |
5866 __ b(ne, &load_cache); | 5866 __ b(ne, &load_cache); |
5867 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); | 5867 __ mov(result, Operand(isolate()->factory()->empty_fixed_array())); |
5868 __ jmp(&done); | 5868 __ jmp(&done); |
5869 | 5869 |
5870 __ bind(&load_cache); | 5870 __ bind(&load_cache); |
5871 __ LoadInstanceDescriptors(map, result); | 5871 __ LoadInstanceDescriptors(map, result); |
5872 __ ldr(result, | 5872 __ ldr(result, |
5873 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); | 5873 FieldMemOperand(result, DescriptorArray::kEnumCacheOffset)); |
5874 __ ldr(result, | 5874 __ ldr(result, |
5875 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); | 5875 FieldMemOperand(result, FixedArray::SizeFor(instr->idx()))); |
5876 __ cmp(result, Operand::Zero()); | 5876 __ cmp(result, Operand::Zero()); |
5877 DeoptimizeIf(eq, instr, "no cache"); | 5877 DeoptimizeIf(eq, instr, Deoptimizer::kNoCache); |
5878 | 5878 |
5879 __ bind(&done); | 5879 __ bind(&done); |
5880 } | 5880 } |
5881 | 5881 |
5882 | 5882 |
5883 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { | 5883 void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) { |
5884 Register object = ToRegister(instr->value()); | 5884 Register object = ToRegister(instr->value()); |
5885 Register map = ToRegister(instr->map()); | 5885 Register map = ToRegister(instr->map()); |
5886 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); | 5886 __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset)); |
5887 __ cmp(map, scratch0()); | 5887 __ cmp(map, scratch0()); |
5888 DeoptimizeIf(ne, instr, "wrong map"); | 5888 DeoptimizeIf(ne, instr, Deoptimizer::kWrongMap); |
5889 } | 5889 } |
5890 | 5890 |
5891 | 5891 |
5892 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, | 5892 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, |
5893 Register result, | 5893 Register result, |
5894 Register object, | 5894 Register object, |
5895 Register index) { | 5895 Register index) { |
5896 PushSafepointRegistersScope scope(this); | 5896 PushSafepointRegistersScope scope(this); |
5897 __ Push(object); | 5897 __ Push(object); |
5898 __ Push(index); | 5898 __ Push(index); |
(...skipping 77 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
5976 __ Push(scope_info); | 5976 __ Push(scope_info); |
5977 __ push(ToRegister(instr->function())); | 5977 __ push(ToRegister(instr->function())); |
5978 CallRuntime(Runtime::kPushBlockContext, 2, instr); | 5978 CallRuntime(Runtime::kPushBlockContext, 2, instr); |
5979 RecordSafepoint(Safepoint::kNoLazyDeopt); | 5979 RecordSafepoint(Safepoint::kNoLazyDeopt); |
5980 } | 5980 } |
5981 | 5981 |
5982 | 5982 |
5983 #undef __ | 5983 #undef __ |
5984 | 5984 |
5985 } } // namespace v8::internal | 5985 } } // namespace v8::internal |
OLD | NEW |