Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(675)

Side by Side Diff: src/mips/lithium-codegen-mips.cc

Issue 598953002: Make the detailed reason for deopts mandatory on all platforms. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: rebased Created 6 years, 2 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/mips/lithium-codegen-mips.h ('k') | src/mips64/lithium-codegen-mips64.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved.7 1 // Copyright 2012 the V8 project authors. All rights reserved.7
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 798 matching lines...) Expand 10 before | Expand all | Expand 10 after
809 environment->Register(deoptimization_index, 809 environment->Register(deoptimization_index,
810 translation.index(), 810 translation.index(),
811 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1); 811 (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
812 deoptimizations_.Add(environment, zone()); 812 deoptimizations_.Add(environment, zone());
813 } 813 }
814 } 814 }
815 815
816 816
817 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, 817 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
818 Deoptimizer::BailoutType bailout_type, 818 Deoptimizer::BailoutType bailout_type,
819 Register src1, const Operand& src2, 819 const char* detail, Register src1,
820 const char* detail) { 820 const Operand& src2) {
821 LEnvironment* environment = instr->environment(); 821 LEnvironment* environment = instr->environment();
822 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 822 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
823 DCHECK(environment->HasBeenRegistered()); 823 DCHECK(environment->HasBeenRegistered());
824 int id = environment->deoptimization_index(); 824 int id = environment->deoptimization_index();
825 DCHECK(info()->IsOptimizing() || info()->IsStub()); 825 DCHECK(info()->IsOptimizing() || info()->IsStub());
826 Address entry = 826 Address entry =
827 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type); 827 Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
828 if (entry == NULL) { 828 if (entry == NULL) {
829 Abort(kBailoutWasNotPrepared); 829 Abort(kBailoutWasNotPrepared);
830 return; 830 return;
(...skipping 44 matching lines...) Expand 10 before | Expand all | Expand 10 after
875 if (jump_table_.is_empty() || 875 if (jump_table_.is_empty() ||
876 !table_entry.IsEquivalentTo(jump_table_.last())) { 876 !table_entry.IsEquivalentTo(jump_table_.last())) {
877 jump_table_.Add(table_entry, zone()); 877 jump_table_.Add(table_entry, zone());
878 } 878 }
879 __ Branch(&jump_table_.last().label, condition, src1, src2); 879 __ Branch(&jump_table_.last().label, condition, src1, src2);
880 } 880 }
881 } 881 }
882 882
883 883
884 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr, 884 void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
885 Register src1, const Operand& src2, 885 const char* detail, Register src1,
886 const char* detail) { 886 const Operand& src2) {
887 Deoptimizer::BailoutType bailout_type = info()->IsStub() 887 Deoptimizer::BailoutType bailout_type = info()->IsStub()
888 ? Deoptimizer::LAZY 888 ? Deoptimizer::LAZY
889 : Deoptimizer::EAGER; 889 : Deoptimizer::EAGER;
890 DeoptimizeIf(condition, instr, bailout_type, src1, src2, detail); 890 DeoptimizeIf(condition, instr, bailout_type, detail, src1, src2);
891 } 891 }
892 892
893 893
894 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) { 894 void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
895 int length = deoptimizations_.length(); 895 int length = deoptimizations_.length();
896 if (length == 0) return; 896 if (length == 0) return;
897 Handle<DeoptimizationInputData> data = 897 Handle<DeoptimizationInputData> data =
898 DeoptimizationInputData::New(isolate(), length, TENURED); 898 DeoptimizationInputData::New(isolate(), length, TENURED);
899 899
900 Handle<ByteArray> translations = 900 Handle<ByteArray> translations =
(...skipping 209 matching lines...) Expand 10 before | Expand all | Expand 10 after
1110 HMod* hmod = instr->hydrogen(); 1110 HMod* hmod = instr->hydrogen();
1111 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 1111 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1112 Label dividend_is_not_negative, done; 1112 Label dividend_is_not_negative, done;
1113 1113
1114 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) { 1114 if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
1115 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg)); 1115 __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
1116 // Note: The code below even works when right contains kMinInt. 1116 // Note: The code below even works when right contains kMinInt.
1117 __ subu(dividend, zero_reg, dividend); 1117 __ subu(dividend, zero_reg, dividend);
1118 __ And(dividend, dividend, Operand(mask)); 1118 __ And(dividend, dividend, Operand(mask));
1119 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1119 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1120 DeoptimizeIf(eq, instr, dividend, Operand(zero_reg)); 1120 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
1121 } 1121 }
1122 __ Branch(USE_DELAY_SLOT, &done); 1122 __ Branch(USE_DELAY_SLOT, &done);
1123 __ subu(dividend, zero_reg, dividend); 1123 __ subu(dividend, zero_reg, dividend);
1124 } 1124 }
1125 1125
1126 __ bind(&dividend_is_not_negative); 1126 __ bind(&dividend_is_not_negative);
1127 __ And(dividend, dividend, Operand(mask)); 1127 __ And(dividend, dividend, Operand(mask));
1128 __ bind(&done); 1128 __ bind(&done);
1129 } 1129 }
1130 1130
(...skipping 11 matching lines...) Expand all
1142 1142
1143 __ TruncatingDiv(result, dividend, Abs(divisor)); 1143 __ TruncatingDiv(result, dividend, Abs(divisor));
1144 __ Mul(result, result, Operand(Abs(divisor))); 1144 __ Mul(result, result, Operand(Abs(divisor)));
1145 __ Subu(result, dividend, Operand(result)); 1145 __ Subu(result, dividend, Operand(result));
1146 1146
1147 // Check for negative zero. 1147 // Check for negative zero.
1148 HMod* hmod = instr->hydrogen(); 1148 HMod* hmod = instr->hydrogen();
1149 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) { 1149 if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
1150 Label remainder_not_zero; 1150 Label remainder_not_zero;
1151 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg)); 1151 __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
1152 DeoptimizeIf(lt, instr, dividend, Operand(zero_reg)); 1152 DeoptimizeIf(lt, instr, "minus zero", dividend, Operand(zero_reg));
1153 __ bind(&remainder_not_zero); 1153 __ bind(&remainder_not_zero);
1154 } 1154 }
1155 } 1155 }
1156 1156
1157 1157
// Emits code for a general (register % register) modulus. The MIPS Mod macro
// is issued first so the divide unit works while the special-case checks run;
// branch delay slots are used deliberately (USE_DELAY_SLOT), so statement
// order here is load-bearing.
void LCodeGen::DoModI(LModI* instr) {
  HMod* hmod = instr->hydrogen();
  const Register left_reg = ToRegister(instr->left());
  const Register right_reg = ToRegister(instr->right());
  const Register result_reg = ToRegister(instr->result());

  // div runs in the background while we check for special cases.
  __ Mod(result_reg, left_reg, right_reg);

  Label done;
  // Check for x % 0, we have to deopt in this case because we can't return a
  // NaN.
  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, "division by zero", right_reg, Operand(zero_reg));
  }

  // Check for kMinInt % -1, div will return kMinInt, which is not what we
  // want. We have to deopt if we care about -0, because we can't return that.
  if (hmod->CheckFlag(HValue::kCanOverflow)) {
    Label no_overflow_possible;
    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
      // kMinInt % -1 would have to be -0: bail out.
      DeoptimizeIf(eq, instr, "minus zero", right_reg, Operand(-1));
    } else {
      // -0 is not observed: the mathematically correct result is 0.
      // The mov executes in the branch's delay slot.
      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
      __ Branch(USE_DELAY_SLOT, &done);
      __ mov(result_reg, zero_reg);
    }
    __ bind(&no_overflow_possible);
  }

  // If we care about -0, test if the dividend is <0 and the result is 0.
  __ Branch(&done, ge, left_reg, Operand(zero_reg));
  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
    DeoptimizeIf(eq, instr, "minus zero", result_reg, Operand(zero_reg));
  }
  __ bind(&done);
}
1196 1196
1197 1197
1198 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) { 1198 void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
1199 Register dividend = ToRegister(instr->dividend()); 1199 Register dividend = ToRegister(instr->dividend());
1200 int32_t divisor = instr->divisor(); 1200 int32_t divisor = instr->divisor();
1201 Register result = ToRegister(instr->result()); 1201 Register result = ToRegister(instr->result());
1202 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor))); 1202 DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
1203 DCHECK(!result.is(dividend)); 1203 DCHECK(!result.is(dividend));
1204 1204
1205 // Check for (0 / -x) that will produce negative zero. 1205 // Check for (0 / -x) that will produce negative zero.
1206 HDiv* hdiv = instr->hydrogen(); 1206 HDiv* hdiv = instr->hydrogen();
1207 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1207 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1208 DeoptimizeIf(eq, instr, dividend, Operand(zero_reg)); 1208 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
1209 } 1209 }
1210 // Check for (kMinInt / -1). 1210 // Check for (kMinInt / -1).
1211 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) { 1211 if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
1212 DeoptimizeIf(eq, instr, dividend, Operand(kMinInt)); 1212 DeoptimizeIf(eq, instr, "overflow", dividend, Operand(kMinInt));
1213 } 1213 }
1214 // Deoptimize if remainder will not be 0. 1214 // Deoptimize if remainder will not be 0.
1215 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && 1215 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
1216 divisor != 1 && divisor != -1) { 1216 divisor != 1 && divisor != -1) {
1217 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1); 1217 int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
1218 __ And(at, dividend, Operand(mask)); 1218 __ And(at, dividend, Operand(mask));
1219 DeoptimizeIf(ne, instr, at, Operand(zero_reg)); 1219 DeoptimizeIf(ne, instr, "lost precision", at, Operand(zero_reg));
1220 } 1220 }
1221 1221
1222 if (divisor == -1) { // Nice shortcut, not needed for correctness. 1222 if (divisor == -1) { // Nice shortcut, not needed for correctness.
1223 __ Subu(result, zero_reg, dividend); 1223 __ Subu(result, zero_reg, dividend);
1224 return; 1224 return;
1225 } 1225 }
1226 uint16_t shift = WhichPowerOf2Abs(divisor); 1226 uint16_t shift = WhichPowerOf2Abs(divisor);
1227 if (shift == 0) { 1227 if (shift == 0) {
1228 __ Move(result, dividend); 1228 __ Move(result, dividend);
1229 } else if (shift == 1) { 1229 } else if (shift == 1) {
(...skipping 16 matching lines...) Expand all
1246 DCHECK(!dividend.is(result)); 1246 DCHECK(!dividend.is(result));
1247 1247
1248 if (divisor == 0) { 1248 if (divisor == 0) {
1249 DeoptimizeIf(al, instr); 1249 DeoptimizeIf(al, instr);
1250 return; 1250 return;
1251 } 1251 }
1252 1252
1253 // Check for (0 / -x) that will produce negative zero. 1253 // Check for (0 / -x) that will produce negative zero.
1254 HDiv* hdiv = instr->hydrogen(); 1254 HDiv* hdiv = instr->hydrogen();
1255 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1255 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1256 DeoptimizeIf(eq, instr, dividend, Operand(zero_reg)); 1256 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
1257 } 1257 }
1258 1258
1259 __ TruncatingDiv(result, dividend, Abs(divisor)); 1259 __ TruncatingDiv(result, dividend, Abs(divisor));
1260 if (divisor < 0) __ Subu(result, zero_reg, result); 1260 if (divisor < 0) __ Subu(result, zero_reg, result);
1261 1261
1262 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) { 1262 if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
1263 __ Mul(scratch0(), result, Operand(divisor)); 1263 __ Mul(scratch0(), result, Operand(divisor));
1264 __ Subu(scratch0(), scratch0(), dividend); 1264 __ Subu(scratch0(), scratch0(), dividend);
1265 DeoptimizeIf(ne, instr, scratch0(), Operand(zero_reg)); 1265 DeoptimizeIf(ne, instr, "lost precision", scratch0(), Operand(zero_reg));
1266 } 1266 }
1267 } 1267 }
1268 1268
1269 1269
// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
// Emits code for a general (register / register) truncating division. The
// divide is started first so it overlaps with the special-case checks below.
void LCodeGen::DoDivI(LDivI* instr) {
  HBinaryOperation* hdiv = instr->hydrogen();
  Register dividend = ToRegister(instr->dividend());
  Register divisor = ToRegister(instr->divisor());
  const Register result = ToRegister(instr->result());
  Register remainder = ToRegister(instr->temp());

  // On MIPS div is asynchronous - it will run in the background while we
  // check for special cases.
  __ Div(remainder, result, dividend, divisor);

  // Check for x / 0.
  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
    DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
  }

  // Check for (0 / -x) that will produce negative zero.
  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
    Label left_not_zero;
    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
    DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
    __ bind(&left_not_zero);
  }

  // Check for (kMinInt / -1): the hardware result would wrap to kMinInt.
  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    Label left_not_min_int;
    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
    DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
    __ bind(&left_not_min_int);
  }

  // Unless every use truncates to int32, a nonzero remainder means the
  // division was inexact and we must deoptimize.
  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
    DeoptimizeIf(ne, instr, "lost precision", remainder, Operand(zero_reg));
  }
}
1308 1308
1309 1309
1310 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) { 1310 void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
1311 DoubleRegister addend = ToDoubleRegister(instr->addend()); 1311 DoubleRegister addend = ToDoubleRegister(instr->addend());
1312 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier()); 1312 DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
1313 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand()); 1313 DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
1314 1314
1315 // This is computed in-place. 1315 // This is computed in-place.
(...skipping 25 matching lines...) Expand all
1341 } 1341 }
1342 1342
1343 // If the divisor is negative, we have to negate and handle edge cases. 1343 // If the divisor is negative, we have to negate and handle edge cases.
1344 1344
1345 // dividend can be the same register as result so save the value of it 1345 // dividend can be the same register as result so save the value of it
1346 // for checking overflow. 1346 // for checking overflow.
1347 __ Move(scratch, dividend); 1347 __ Move(scratch, dividend);
1348 1348
1349 __ Subu(result, zero_reg, dividend); 1349 __ Subu(result, zero_reg, dividend);
1350 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 1350 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
1351 DeoptimizeIf(eq, instr, result, Operand(zero_reg)); 1351 DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
1352 } 1352 }
1353 1353
1354 // Dividing by -1 is basically negation, unless we overflow. 1354 // Dividing by -1 is basically negation, unless we overflow.
1355 __ Xor(scratch, scratch, result); 1355 __ Xor(scratch, scratch, result);
1356 if (divisor == -1) { 1356 if (divisor == -1) {
1357 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1357 if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1358 DeoptimizeIf(ge, instr, scratch, Operand(zero_reg)); 1358 DeoptimizeIf(ge, instr, "overflow", scratch, Operand(zero_reg));
1359 } 1359 }
1360 return; 1360 return;
1361 } 1361 }
1362 1362
1363 // If the negation could not overflow, simply shifting is OK. 1363 // If the negation could not overflow, simply shifting is OK.
1364 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) { 1364 if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
1365 __ sra(result, result, shift); 1365 __ sra(result, result, shift);
1366 return; 1366 return;
1367 } 1367 }
1368 1368
(...skipping 14 matching lines...) Expand all
1383 DCHECK(!dividend.is(result)); 1383 DCHECK(!dividend.is(result));
1384 1384
1385 if (divisor == 0) { 1385 if (divisor == 0) {
1386 DeoptimizeIf(al, instr); 1386 DeoptimizeIf(al, instr);
1387 return; 1387 return;
1388 } 1388 }
1389 1389
1390 // Check for (0 / -x) that will produce negative zero. 1390 // Check for (0 / -x) that will produce negative zero.
1391 HMathFloorOfDiv* hdiv = instr->hydrogen(); 1391 HMathFloorOfDiv* hdiv = instr->hydrogen();
1392 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) { 1392 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
1393 DeoptimizeIf(eq, instr, dividend, Operand(zero_reg)); 1393 DeoptimizeIf(eq, instr, "minus zero", dividend, Operand(zero_reg));
1394 } 1394 }
1395 1395
1396 // Easy case: We need no dynamic check for the dividend and the flooring 1396 // Easy case: We need no dynamic check for the dividend and the flooring
1397 // division is the same as the truncating division. 1397 // division is the same as the truncating division.
1398 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) || 1398 if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
1399 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) { 1399 (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
1400 __ TruncatingDiv(result, dividend, Abs(divisor)); 1400 __ TruncatingDiv(result, dividend, Abs(divisor));
1401 if (divisor < 0) __ Subu(result, zero_reg, result); 1401 if (divisor < 0) __ Subu(result, zero_reg, result);
1402 return; 1402 return;
1403 } 1403 }
(...skipping 23 matching lines...) Expand all
1427 Register dividend = ToRegister(instr->dividend()); 1427 Register dividend = ToRegister(instr->dividend());
1428 Register divisor = ToRegister(instr->divisor()); 1428 Register divisor = ToRegister(instr->divisor());
1429 const Register result = ToRegister(instr->result()); 1429 const Register result = ToRegister(instr->result());
1430 Register remainder = scratch0(); 1430 Register remainder = scratch0();
1431 // On MIPS div is asynchronous - it will run in the background while we 1431 // On MIPS div is asynchronous - it will run in the background while we
1432 // check for special cases. 1432 // check for special cases.
1433 __ Div(remainder, result, dividend, divisor); 1433 __ Div(remainder, result, dividend, divisor);
1434 1434
1435 // Check for x / 0. 1435 // Check for x / 0.
1436 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) { 1436 if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
1437 DeoptimizeIf(eq, instr, divisor, Operand(zero_reg)); 1437 DeoptimizeIf(eq, instr, "division by zero", divisor, Operand(zero_reg));
1438 } 1438 }
1439 1439
1440 // Check for (0 / -x) that will produce negative zero. 1440 // Check for (0 / -x) that will produce negative zero.
1441 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) { 1441 if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
1442 Label left_not_zero; 1442 Label left_not_zero;
1443 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg)); 1443 __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
1444 DeoptimizeIf(lt, instr, divisor, Operand(zero_reg)); 1444 DeoptimizeIf(lt, instr, "minus zero", divisor, Operand(zero_reg));
1445 __ bind(&left_not_zero); 1445 __ bind(&left_not_zero);
1446 } 1446 }
1447 1447
1448 // Check for (kMinInt / -1). 1448 // Check for (kMinInt / -1).
1449 if (hdiv->CheckFlag(HValue::kCanOverflow) && 1449 if (hdiv->CheckFlag(HValue::kCanOverflow) &&
1450 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) { 1450 !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
1451 Label left_not_min_int; 1451 Label left_not_min_int;
1452 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt)); 1452 __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
1453 DeoptimizeIf(eq, instr, divisor, Operand(-1)); 1453 DeoptimizeIf(eq, instr, "overflow", divisor, Operand(-1));
1454 __ bind(&left_not_min_int); 1454 __ bind(&left_not_min_int);
1455 } 1455 }
1456 1456
1457 // We performed a truncating division. Correct the result if necessary. 1457 // We performed a truncating division. Correct the result if necessary.
1458 Label done; 1458 Label done;
1459 __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT); 1459 __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
1460 __ Xor(remainder, remainder, Operand(divisor)); 1460 __ Xor(remainder, remainder, Operand(divisor));
1461 __ Branch(&done, ge, remainder, Operand(zero_reg)); 1461 __ Branch(&done, ge, remainder, Operand(zero_reg));
1462 __ Subu(result, result, Operand(1)); 1462 __ Subu(result, result, Operand(1));
1463 __ bind(&done); 1463 __ bind(&done);
(...skipping 10 matching lines...) Expand all
1474 bool bailout_on_minus_zero = 1474 bool bailout_on_minus_zero =
1475 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero); 1475 instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
1476 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow); 1476 bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
1477 1477
1478 if (right_op->IsConstantOperand()) { 1478 if (right_op->IsConstantOperand()) {
1479 int32_t constant = ToInteger32(LConstantOperand::cast(right_op)); 1479 int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
1480 1480
1481 if (bailout_on_minus_zero && (constant < 0)) { 1481 if (bailout_on_minus_zero && (constant < 0)) {
1482 // The case of a null constant will be handled separately. 1482 // The case of a null constant will be handled separately.
1483 // If constant is negative and left is null, the result should be -0. 1483 // If constant is negative and left is null, the result should be -0.
1484 DeoptimizeIf(eq, instr, left, Operand(zero_reg)); 1484 DeoptimizeIf(eq, instr, "minus zero", left, Operand(zero_reg));
1485 } 1485 }
1486 1486
1487 switch (constant) { 1487 switch (constant) {
1488 case -1: 1488 case -1:
1489 if (overflow) { 1489 if (overflow) {
1490 __ SubuAndCheckForOverflow(result, zero_reg, left, scratch); 1490 __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
1491 DeoptimizeIf(lt, instr, scratch, Operand(zero_reg)); 1491 DeoptimizeIf(lt, instr, "overflow", scratch, Operand(zero_reg));
1492 } else { 1492 } else {
1493 __ Subu(result, zero_reg, left); 1493 __ Subu(result, zero_reg, left);
1494 } 1494 }
1495 break; 1495 break;
1496 case 0: 1496 case 0:
1497 if (bailout_on_minus_zero) { 1497 if (bailout_on_minus_zero) {
1498 // If left is strictly negative and the constant is null, the 1498 // If left is strictly negative and the constant is null, the
1499 // result is -0. Deoptimize if required, otherwise return 0. 1499 // result is -0. Deoptimize if required, otherwise return 0.
1500 DeoptimizeIf(lt, instr, left, Operand(zero_reg)); 1500 DeoptimizeIf(lt, instr, "minus zero", left, Operand(zero_reg));
1501 } 1501 }
1502 __ mov(result, zero_reg); 1502 __ mov(result, zero_reg);
1503 break; 1503 break;
1504 case 1: 1504 case 1:
1505 // Nothing to do. 1505 // Nothing to do.
1506 __ Move(result, left); 1506 __ Move(result, left);
1507 break; 1507 break;
1508 default: 1508 default:
1509 // Multiplying by powers of two and powers of two plus or minus 1509 // Multiplying by powers of two and powers of two plus or minus
1510 // one can be done faster with shifted operands. 1510 // one can be done faster with shifted operands.
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
1542 1542
1543 if (overflow) { 1543 if (overflow) {
1544 // hi:lo = left * right. 1544 // hi:lo = left * right.
1545 if (instr->hydrogen()->representation().IsSmi()) { 1545 if (instr->hydrogen()->representation().IsSmi()) {
1546 __ SmiUntag(result, left); 1546 __ SmiUntag(result, left);
1547 __ Mul(scratch, result, result, right); 1547 __ Mul(scratch, result, result, right);
1548 } else { 1548 } else {
1549 __ Mul(scratch, result, left, right); 1549 __ Mul(scratch, result, left, right);
1550 } 1550 }
1551 __ sra(at, result, 31); 1551 __ sra(at, result, 31);
1552 DeoptimizeIf(ne, instr, scratch, Operand(at)); 1552 DeoptimizeIf(ne, instr, "overflow", scratch, Operand(at));
1553 } else { 1553 } else {
1554 if (instr->hydrogen()->representation().IsSmi()) { 1554 if (instr->hydrogen()->representation().IsSmi()) {
1555 __ SmiUntag(result, left); 1555 __ SmiUntag(result, left);
1556 __ Mul(result, result, right); 1556 __ Mul(result, result, right);
1557 } else { 1557 } else {
1558 __ Mul(result, left, right); 1558 __ Mul(result, left, right);
1559 } 1559 }
1560 } 1560 }
1561 1561
1562 if (bailout_on_minus_zero) { 1562 if (bailout_on_minus_zero) {
1563 Label done; 1563 Label done;
1564 __ Xor(at, left, right); 1564 __ Xor(at, left, right);
1565 __ Branch(&done, ge, at, Operand(zero_reg)); 1565 __ Branch(&done, ge, at, Operand(zero_reg));
1566 // Bail out if the result is minus zero. 1566 // Bail out if the result is minus zero.
1567 DeoptimizeIf(eq, instr, result, Operand(zero_reg)); 1567 DeoptimizeIf(eq, instr, "minus zero", result, Operand(zero_reg));
1568 __ bind(&done); 1568 __ bind(&done);
1569 } 1569 }
1570 } 1570 }
1571 } 1571 }
1572 1572
1573 1573
1574 void LCodeGen::DoBitI(LBitI* instr) { 1574 void LCodeGen::DoBitI(LBitI* instr) {
1575 LOperand* left_op = instr->left(); 1575 LOperand* left_op = instr->left();
1576 LOperand* right_op = instr->right(); 1576 LOperand* right_op = instr->right();
1577 DCHECK(left_op->IsRegister()); 1577 DCHECK(left_op->IsRegister());
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
1621 switch (instr->op()) { 1621 switch (instr->op()) {
1622 case Token::ROR: 1622 case Token::ROR:
1623 __ Ror(result, left, Operand(ToRegister(right_op))); 1623 __ Ror(result, left, Operand(ToRegister(right_op)));
1624 break; 1624 break;
1625 case Token::SAR: 1625 case Token::SAR:
1626 __ srav(result, left, ToRegister(right_op)); 1626 __ srav(result, left, ToRegister(right_op));
1627 break; 1627 break;
1628 case Token::SHR: 1628 case Token::SHR:
1629 __ srlv(result, left, ToRegister(right_op)); 1629 __ srlv(result, left, ToRegister(right_op));
1630 if (instr->can_deopt()) { 1630 if (instr->can_deopt()) {
1631 DeoptimizeIf(lt, instr, result, Operand(zero_reg)); 1631 DeoptimizeIf(lt, instr, "negative value", result, Operand(zero_reg));
1632 } 1632 }
1633 break; 1633 break;
1634 case Token::SHL: 1634 case Token::SHL:
1635 __ sllv(result, left, ToRegister(right_op)); 1635 __ sllv(result, left, ToRegister(right_op));
1636 break; 1636 break;
1637 default: 1637 default:
1638 UNREACHABLE(); 1638 UNREACHABLE();
1639 break; 1639 break;
1640 } 1640 }
1641 } else { 1641 } else {
(...skipping 14 matching lines...) Expand all
1656 } else { 1656 } else {
1657 __ Move(result, left); 1657 __ Move(result, left);
1658 } 1658 }
1659 break; 1659 break;
1660 case Token::SHR: 1660 case Token::SHR:
1661 if (shift_count != 0) { 1661 if (shift_count != 0) {
1662 __ srl(result, left, shift_count); 1662 __ srl(result, left, shift_count);
1663 } else { 1663 } else {
1664 if (instr->can_deopt()) { 1664 if (instr->can_deopt()) {
1665 __ And(at, left, Operand(0x80000000)); 1665 __ And(at, left, Operand(0x80000000));
1666 DeoptimizeIf(ne, instr, at, Operand(zero_reg)); 1666 DeoptimizeIf(ne, instr, "negative value", at, Operand(zero_reg));
1667 } 1667 }
1668 __ Move(result, left); 1668 __ Move(result, left);
1669 } 1669 }
1670 break; 1670 break;
1671 case Token::SHL: 1671 case Token::SHL:
1672 if (shift_count != 0) { 1672 if (shift_count != 0) {
1673 if (instr->hydrogen_value()->representation().IsSmi() && 1673 if (instr->hydrogen_value()->representation().IsSmi() &&
1674 instr->can_deopt()) { 1674 instr->can_deopt()) {
1675 if (shift_count != 1) { 1675 if (shift_count != 1) {
1676 __ sll(result, left, shift_count - 1); 1676 __ sll(result, left, shift_count - 1);
1677 __ SmiTagCheckOverflow(result, result, scratch); 1677 __ SmiTagCheckOverflow(result, result, scratch);
1678 } else { 1678 } else {
1679 __ SmiTagCheckOverflow(result, left, scratch); 1679 __ SmiTagCheckOverflow(result, left, scratch);
1680 } 1680 }
1681 DeoptimizeIf(lt, instr, scratch, Operand(zero_reg)); 1681 DeoptimizeIf(lt, instr, "overflow", scratch, Operand(zero_reg));
1682 } else { 1682 } else {
1683 __ sll(result, left, shift_count); 1683 __ sll(result, left, shift_count);
1684 } 1684 }
1685 } else { 1685 } else {
1686 __ Move(result, left); 1686 __ Move(result, left);
1687 } 1687 }
1688 break; 1688 break;
1689 default: 1689 default:
1690 UNREACHABLE(); 1690 UNREACHABLE();
1691 break; 1691 break;
(...skipping 27 matching lines...) Expand all
1719 overflow); // Reg at also used as scratch. 1719 overflow); // Reg at also used as scratch.
1720 } else { 1720 } else {
1721 DCHECK(right->IsRegister()); 1721 DCHECK(right->IsRegister());
1722 // Due to overflow check macros not supporting constant operands, 1722 // Due to overflow check macros not supporting constant operands,
1723 // handling the IsConstantOperand case was moved to prev if clause. 1723 // handling the IsConstantOperand case was moved to prev if clause.
1724 __ SubuAndCheckForOverflow(ToRegister(result), 1724 __ SubuAndCheckForOverflow(ToRegister(result),
1725 ToRegister(left), 1725 ToRegister(left),
1726 ToRegister(right), 1726 ToRegister(right),
1727 overflow); // Reg at also used as scratch. 1727 overflow); // Reg at also used as scratch.
1728 } 1728 }
1729 DeoptimizeIf(lt, instr, overflow, Operand(zero_reg)); 1729 DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
1730 } 1730 }
1731 } 1731 }
1732 1732
1733 1733
1734 void LCodeGen::DoConstantI(LConstantI* instr) { 1734 void LCodeGen::DoConstantI(LConstantI* instr) {
1735 __ li(ToRegister(instr->result()), Operand(instr->value())); 1735 __ li(ToRegister(instr->result()), Operand(instr->value()));
1736 } 1736 }
1737 1737
1738 1738
1739 void LCodeGen::DoConstantS(LConstantS* instr) { 1739 void LCodeGen::DoConstantS(LConstantS* instr) {
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
1773 Register result = ToRegister(instr->result()); 1773 Register result = ToRegister(instr->result());
1774 Register scratch = ToRegister(instr->temp()); 1774 Register scratch = ToRegister(instr->temp());
1775 Smi* index = instr->index(); 1775 Smi* index = instr->index();
1776 Label runtime, done; 1776 Label runtime, done;
1777 DCHECK(object.is(a0)); 1777 DCHECK(object.is(a0));
1778 DCHECK(result.is(v0)); 1778 DCHECK(result.is(v0));
1779 DCHECK(!scratch.is(scratch0())); 1779 DCHECK(!scratch.is(scratch0()));
1780 DCHECK(!scratch.is(object)); 1780 DCHECK(!scratch.is(object));
1781 1781
1782 __ SmiTst(object, at); 1782 __ SmiTst(object, at);
1783 DeoptimizeIf(eq, instr, at, Operand(zero_reg)); 1783 DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
1784 __ GetObjectType(object, scratch, scratch); 1784 __ GetObjectType(object, scratch, scratch);
1785 DeoptimizeIf(ne, instr, scratch, Operand(JS_DATE_TYPE)); 1785 DeoptimizeIf(ne, instr, "not a date object", scratch, Operand(JS_DATE_TYPE));
1786 1786
1787 if (index->value() == 0) { 1787 if (index->value() == 0) {
1788 __ lw(result, FieldMemOperand(object, JSDate::kValueOffset)); 1788 __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
1789 } else { 1789 } else {
1790 if (index->value() < JSDate::kFirstUncachedField) { 1790 if (index->value() < JSDate::kFirstUncachedField) {
1791 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate()); 1791 ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
1792 __ li(scratch, Operand(stamp)); 1792 __ li(scratch, Operand(stamp));
1793 __ lw(scratch, MemOperand(scratch)); 1793 __ lw(scratch, MemOperand(scratch));
1794 __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset)); 1794 __ lw(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
1795 __ Branch(&runtime, ne, scratch, Operand(scratch0())); 1795 __ Branch(&runtime, ne, scratch, Operand(scratch0()));
(...skipping 114 matching lines...) Expand 10 before | Expand all | Expand 10 after
1910 overflow); // Reg at also used as scratch. 1910 overflow); // Reg at also used as scratch.
1911 } else { 1911 } else {
1912 DCHECK(right->IsRegister()); 1912 DCHECK(right->IsRegister());
1913 // Due to overflow check macros not supporting constant operands, 1913 // Due to overflow check macros not supporting constant operands,
1914 // handling the IsConstantOperand case was moved to prev if clause. 1914 // handling the IsConstantOperand case was moved to prev if clause.
1915 __ AdduAndCheckForOverflow(ToRegister(result), 1915 __ AdduAndCheckForOverflow(ToRegister(result),
1916 ToRegister(left), 1916 ToRegister(left),
1917 ToRegister(right), 1917 ToRegister(right),
1918 overflow); // Reg at also used as scratch. 1918 overflow); // Reg at also used as scratch.
1919 } 1919 }
1920 DeoptimizeIf(lt, instr, overflow, Operand(zero_reg)); 1920 DeoptimizeIf(lt, instr, "overflow", overflow, Operand(zero_reg));
1921 } 1921 }
1922 } 1922 }
1923 1923
1924 1924
1925 void LCodeGen::DoMathMinMax(LMathMinMax* instr) { 1925 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
1926 LOperand* left = instr->left(); 1926 LOperand* left = instr->left();
1927 LOperand* right = instr->right(); 1927 LOperand* right = instr->right();
1928 HMathMinMax::Operation operation = instr->hydrogen()->operation(); 1928 HMathMinMax::Operation operation = instr->hydrogen()->operation();
1929 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge; 1929 Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
1930 if (instr->hydrogen()->representation().IsSmiOrInteger32()) { 1930 if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
(...skipping 240 matching lines...) Expand 10 before | Expand all | Expand 10 after
2171 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at)); 2171 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
2172 } 2172 }
2173 2173
2174 if (expected.Contains(ToBooleanStub::SMI)) { 2174 if (expected.Contains(ToBooleanStub::SMI)) {
2175 // Smis: 0 -> false, all other -> true. 2175 // Smis: 0 -> false, all other -> true.
2176 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg)); 2176 __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
2177 __ JumpIfSmi(reg, instr->TrueLabel(chunk_)); 2177 __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
2178 } else if (expected.NeedsMap()) { 2178 } else if (expected.NeedsMap()) {
2179 // If we need a map later and have a Smi -> deopt. 2179 // If we need a map later and have a Smi -> deopt.
2180 __ SmiTst(reg, at); 2180 __ SmiTst(reg, at);
2181 DeoptimizeIf(eq, instr, at, Operand(zero_reg)); 2181 DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
2182 } 2182 }
2183 2183
2184 const Register map = scratch0(); 2184 const Register map = scratch0();
2185 if (expected.NeedsMap()) { 2185 if (expected.NeedsMap()) {
2186 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset)); 2186 __ lw(map, FieldMemOperand(reg, HeapObject::kMapOffset));
2187 if (expected.CanBeUndetectable()) { 2187 if (expected.CanBeUndetectable()) {
2188 // Undetectable -> false. 2188 // Undetectable -> false.
2189 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset)); 2189 __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
2190 __ And(at, at, Operand(1 << Map::kIsUndetectable)); 2190 __ And(at, at, Operand(1 << Map::kIsUndetectable));
2191 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg)); 2191 __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
(...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after
2227 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), 2227 __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
2228 ne, dbl_scratch, kDoubleRegZero); 2228 ne, dbl_scratch, kDoubleRegZero);
2229 // Falls through if dbl_scratch == 0. 2229 // Falls through if dbl_scratch == 0.
2230 __ Branch(instr->FalseLabel(chunk_)); 2230 __ Branch(instr->FalseLabel(chunk_));
2231 __ bind(&not_heap_number); 2231 __ bind(&not_heap_number);
2232 } 2232 }
2233 2233
2234 if (!expected.IsGeneric()) { 2234 if (!expected.IsGeneric()) {
2235 // We've seen something for the first time -> deopt. 2235 // We've seen something for the first time -> deopt.
2236 // This can only happen if we are not generic already. 2236 // This can only happen if we are not generic already.
2237 DeoptimizeIf(al, instr, zero_reg, Operand(zero_reg)); 2237 DeoptimizeIf(al, instr, "unexpected object", zero_reg,
2238 Operand(zero_reg));
2238 } 2239 }
2239 } 2240 }
2240 } 2241 }
2241 } 2242 }
2242 2243
2243 2244
2244 void LCodeGen::EmitGoto(int block) { 2245 void LCodeGen::EmitGoto(int block) {
2245 if (!IsNextEmittedBlock(block)) { 2246 if (!IsNextEmittedBlock(block)) {
2246 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block))); 2247 __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
2247 } 2248 }
(...skipping 623 matching lines...) Expand 10 before | Expand all | Expand 10 after
2871 } 2872 }
2872 } 2873 }
2873 2874
2874 2875
2875 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) { 2876 void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
2876 Register result = ToRegister(instr->result()); 2877 Register result = ToRegister(instr->result());
2877 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle()))); 2878 __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
2878 __ lw(result, FieldMemOperand(at, Cell::kValueOffset)); 2879 __ lw(result, FieldMemOperand(at, Cell::kValueOffset));
2879 if (instr->hydrogen()->RequiresHoleCheck()) { 2880 if (instr->hydrogen()->RequiresHoleCheck()) {
2880 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 2881 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2881 DeoptimizeIf(eq, instr, result, Operand(at)); 2882 DeoptimizeIf(eq, instr, "hole", result, Operand(at));
2882 } 2883 }
2883 } 2884 }
2884 2885
2885 2886
2886 template <class T> 2887 template <class T>
2887 void LCodeGen::EmitVectorLoadICRegisters(T* instr) { 2888 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
2888 DCHECK(FLAG_vector_ics); 2889 DCHECK(FLAG_vector_ics);
2889 Register vector = ToRegister(instr->temp_vector()); 2890 Register vector = ToRegister(instr->temp_vector());
2890 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister())); 2891 DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
2891 __ li(vector, instr->hydrogen()->feedback_vector()); 2892 __ li(vector, instr->hydrogen()->feedback_vector());
(...skipping 29 matching lines...) Expand all
2921 2922
2922 // If the cell we are storing to contains the hole it could have 2923 // If the cell we are storing to contains the hole it could have
2923 // been deleted from the property dictionary. In that case, we need 2924 // been deleted from the property dictionary. In that case, we need
2924 // to update the property details in the property dictionary to mark 2925 // to update the property details in the property dictionary to mark
2925 // it as no longer deleted. 2926 // it as no longer deleted.
2926 if (instr->hydrogen()->RequiresHoleCheck()) { 2927 if (instr->hydrogen()->RequiresHoleCheck()) {
2927 // We use a temp to check the payload. 2928 // We use a temp to check the payload.
2928 Register payload = ToRegister(instr->temp()); 2929 Register payload = ToRegister(instr->temp());
2929 __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset)); 2930 __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
2930 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 2931 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2931 DeoptimizeIf(eq, instr, payload, Operand(at)); 2932 DeoptimizeIf(eq, instr, "hole", payload, Operand(at));
2932 } 2933 }
2933 2934
2934 // Store the value. 2935 // Store the value.
2935 __ sw(value, FieldMemOperand(cell, Cell::kValueOffset)); 2936 __ sw(value, FieldMemOperand(cell, Cell::kValueOffset));
2936 // Cells are always rescanned, so no write barrier here. 2937 // Cells are always rescanned, so no write barrier here.
2937 } 2938 }
2938 2939
2939 2940
2940 2941
2941 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) { 2942 void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
2942 Register context = ToRegister(instr->context()); 2943 Register context = ToRegister(instr->context());
2943 Register result = ToRegister(instr->result()); 2944 Register result = ToRegister(instr->result());
2944 2945
2945 __ lw(result, ContextOperand(context, instr->slot_index())); 2946 __ lw(result, ContextOperand(context, instr->slot_index()));
2946 if (instr->hydrogen()->RequiresHoleCheck()) { 2947 if (instr->hydrogen()->RequiresHoleCheck()) {
2947 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 2948 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2948 2949
2949 if (instr->hydrogen()->DeoptimizesOnHole()) { 2950 if (instr->hydrogen()->DeoptimizesOnHole()) {
2950 DeoptimizeIf(eq, instr, result, Operand(at)); 2951 DeoptimizeIf(eq, instr, "hole", result, Operand(at));
2951 } else { 2952 } else {
2952 Label is_not_hole; 2953 Label is_not_hole;
2953 __ Branch(&is_not_hole, ne, result, Operand(at)); 2954 __ Branch(&is_not_hole, ne, result, Operand(at));
2954 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); 2955 __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
2955 __ bind(&is_not_hole); 2956 __ bind(&is_not_hole);
2956 } 2957 }
2957 } 2958 }
2958 } 2959 }
2959 2960
2960 2961
2961 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) { 2962 void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
2962 Register context = ToRegister(instr->context()); 2963 Register context = ToRegister(instr->context());
2963 Register value = ToRegister(instr->value()); 2964 Register value = ToRegister(instr->value());
2964 Register scratch = scratch0(); 2965 Register scratch = scratch0();
2965 MemOperand target = ContextOperand(context, instr->slot_index()); 2966 MemOperand target = ContextOperand(context, instr->slot_index());
2966 2967
2967 Label skip_assignment; 2968 Label skip_assignment;
2968 2969
2969 if (instr->hydrogen()->RequiresHoleCheck()) { 2970 if (instr->hydrogen()->RequiresHoleCheck()) {
2970 __ lw(scratch, target); 2971 __ lw(scratch, target);
2971 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 2972 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
2972 2973
2973 if (instr->hydrogen()->DeoptimizesOnHole()) { 2974 if (instr->hydrogen()->DeoptimizesOnHole()) {
2974 DeoptimizeIf(eq, instr, scratch, Operand(at)); 2975 DeoptimizeIf(eq, instr, "hole", scratch, Operand(at));
2975 } else { 2976 } else {
2976 __ Branch(&skip_assignment, ne, scratch, Operand(at)); 2977 __ Branch(&skip_assignment, ne, scratch, Operand(at));
2977 } 2978 }
2978 } 2979 }
2979 2980
2980 __ sw(value, target); 2981 __ sw(value, target);
2981 if (instr->hydrogen()->NeedsWriteBarrier()) { 2982 if (instr->hydrogen()->NeedsWriteBarrier()) {
2982 SmiCheck check_needed = 2983 SmiCheck check_needed =
2983 instr->hydrogen()->value()->type().IsHeapObject() 2984 instr->hydrogen()->value()->type().IsHeapObject()
2984 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK; 2985 ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
3043 Register scratch = scratch0(); 3044 Register scratch = scratch0();
3044 Register function = ToRegister(instr->function()); 3045 Register function = ToRegister(instr->function());
3045 Register result = ToRegister(instr->result()); 3046 Register result = ToRegister(instr->result());
3046 3047
3047 // Get the prototype or initial map from the function. 3048 // Get the prototype or initial map from the function.
3048 __ lw(result, 3049 __ lw(result,
3049 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 3050 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3050 3051
3051 // Check that the function has a prototype or an initial map. 3052 // Check that the function has a prototype or an initial map.
3052 __ LoadRoot(at, Heap::kTheHoleValueRootIndex); 3053 __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
3053 DeoptimizeIf(eq, instr, result, Operand(at)); 3054 DeoptimizeIf(eq, instr, "hole", result, Operand(at));
3054 3055
3055 // If the function does not have an initial map, we're done. 3056 // If the function does not have an initial map, we're done.
3056 Label done; 3057 Label done;
3057 __ GetObjectType(result, scratch, scratch); 3058 __ GetObjectType(result, scratch, scratch);
3058 __ Branch(&done, ne, scratch, Operand(MAP_TYPE)); 3059 __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
3059 3060
3060 // Get the prototype from the initial map. 3061 // Get the prototype from the initial map.
3061 __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset)); 3062 __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
3062 3063
3063 // All done. 3064 // All done.
(...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after
3179 __ lhu(result, mem_operand); 3180 __ lhu(result, mem_operand);
3180 break; 3181 break;
3181 case EXTERNAL_INT32_ELEMENTS: 3182 case EXTERNAL_INT32_ELEMENTS:
3182 case INT32_ELEMENTS: 3183 case INT32_ELEMENTS:
3183 __ lw(result, mem_operand); 3184 __ lw(result, mem_operand);
3184 break; 3185 break;
3185 case EXTERNAL_UINT32_ELEMENTS: 3186 case EXTERNAL_UINT32_ELEMENTS:
3186 case UINT32_ELEMENTS: 3187 case UINT32_ELEMENTS:
3187 __ lw(result, mem_operand); 3188 __ lw(result, mem_operand);
3188 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) { 3189 if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
3189 DeoptimizeIf(Ugreater_equal, instr, result, Operand(0x80000000)); 3190 DeoptimizeIf(Ugreater_equal, instr, "negative value", result,
3191 Operand(0x80000000));
3190 } 3192 }
3191 break; 3193 break;
3192 case FLOAT32_ELEMENTS: 3194 case FLOAT32_ELEMENTS:
3193 case FLOAT64_ELEMENTS: 3195 case FLOAT64_ELEMENTS:
3194 case EXTERNAL_FLOAT32_ELEMENTS: 3196 case EXTERNAL_FLOAT32_ELEMENTS:
3195 case EXTERNAL_FLOAT64_ELEMENTS: 3197 case EXTERNAL_FLOAT64_ELEMENTS:
3196 case FAST_DOUBLE_ELEMENTS: 3198 case FAST_DOUBLE_ELEMENTS:
3197 case FAST_ELEMENTS: 3199 case FAST_ELEMENTS:
3198 case FAST_SMI_ELEMENTS: 3200 case FAST_SMI_ELEMENTS:
3199 case FAST_HOLEY_DOUBLE_ELEMENTS: 3201 case FAST_HOLEY_DOUBLE_ELEMENTS:
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
3232 int shift_size = (instr->hydrogen()->key()->representation().IsSmi()) 3234 int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
3233 ? (element_size_shift - kSmiTagSize) : element_size_shift; 3235 ? (element_size_shift - kSmiTagSize) : element_size_shift;
3234 __ sll(at, key, shift_size); 3236 __ sll(at, key, shift_size);
3235 __ Addu(scratch, scratch, at); 3237 __ Addu(scratch, scratch, at);
3236 } 3238 }
3237 3239
3238 __ ldc1(result, MemOperand(scratch)); 3240 __ ldc1(result, MemOperand(scratch));
3239 3241
3240 if (instr->hydrogen()->RequiresHoleCheck()) { 3242 if (instr->hydrogen()->RequiresHoleCheck()) {
3241 __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset)); 3243 __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
3242 DeoptimizeIf(eq, instr, scratch, Operand(kHoleNanUpper32)); 3244 DeoptimizeIf(eq, instr, "hole", scratch, Operand(kHoleNanUpper32));
3243 } 3245 }
3244 } 3246 }
3245 3247
3246 3248
3247 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) { 3249 void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
3248 Register elements = ToRegister(instr->elements()); 3250 Register elements = ToRegister(instr->elements());
3249 Register result = ToRegister(instr->result()); 3251 Register result = ToRegister(instr->result());
3250 Register scratch = scratch0(); 3252 Register scratch = scratch0();
3251 Register store_base = scratch; 3253 Register store_base = scratch;
3252 int offset = instr->base_offset(); 3254 int offset = instr->base_offset();
(...skipping 15 matching lines...) Expand all
3268 __ sll(scratch, key, kPointerSizeLog2); 3270 __ sll(scratch, key, kPointerSizeLog2);
3269 __ addu(scratch, elements, scratch); 3271 __ addu(scratch, elements, scratch);
3270 } 3272 }
3271 } 3273 }
3272 __ lw(result, MemOperand(store_base, offset)); 3274 __ lw(result, MemOperand(store_base, offset));
3273 3275
3274 // Check for the hole value. 3276 // Check for the hole value.
3275 if (instr->hydrogen()->RequiresHoleCheck()) { 3277 if (instr->hydrogen()->RequiresHoleCheck()) {
3276 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) { 3278 if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
3277 __ SmiTst(result, scratch); 3279 __ SmiTst(result, scratch);
3278 DeoptimizeIf(ne, instr, scratch, Operand(zero_reg)); 3280 DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
3279 } else { 3281 } else {
3280 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex); 3282 __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
3281 DeoptimizeIf(eq, instr, result, Operand(scratch)); 3283 DeoptimizeIf(eq, instr, "hole", result, Operand(scratch));
3282 } 3284 }
3283 } 3285 }
3284 } 3286 }
3285 3287
3286 3288
3287 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) { 3289 void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
3288 if (instr->is_typed_elements()) { 3290 if (instr->is_typed_elements()) {
3289 DoLoadKeyedExternalArray(instr); 3291 DoLoadKeyedExternalArray(instr);
3290 } else if (instr->hydrogen()->representation().IsDouble()) { 3292 } else if (instr->hydrogen()->representation().IsDouble()) {
3291 DoLoadKeyedFixedDoubleArray(instr); 3293 DoLoadKeyedFixedDoubleArray(instr);
(...skipping 125 matching lines...) Expand 10 before | Expand all | Expand 10 after
3417 } 3419 }
3418 3420
3419 // Normal function. Replace undefined or null with global receiver. 3421 // Normal function. Replace undefined or null with global receiver.
3420 __ LoadRoot(scratch, Heap::kNullValueRootIndex); 3422 __ LoadRoot(scratch, Heap::kNullValueRootIndex);
3421 __ Branch(&global_object, eq, receiver, Operand(scratch)); 3423 __ Branch(&global_object, eq, receiver, Operand(scratch));
3422 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); 3424 __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
3423 __ Branch(&global_object, eq, receiver, Operand(scratch)); 3425 __ Branch(&global_object, eq, receiver, Operand(scratch));
3424 3426
3425 // Deoptimize if the receiver is not a JS object. 3427 // Deoptimize if the receiver is not a JS object.
3426 __ SmiTst(receiver, scratch); 3428 __ SmiTst(receiver, scratch);
3427 DeoptimizeIf(eq, instr, scratch, Operand(zero_reg)); 3429 DeoptimizeIf(eq, instr, "Smi", scratch, Operand(zero_reg));
3428 3430
3429 __ GetObjectType(receiver, scratch, scratch); 3431 __ GetObjectType(receiver, scratch, scratch);
3430 DeoptimizeIf(lt, instr, scratch, Operand(FIRST_SPEC_OBJECT_TYPE)); 3432 DeoptimizeIf(lt, instr, "not a JavaScript object", scratch,
3433 Operand(FIRST_SPEC_OBJECT_TYPE));
3431 3434
3432 __ Branch(&result_in_receiver); 3435 __ Branch(&result_in_receiver);
3433 __ bind(&global_object); 3436 __ bind(&global_object);
3434 __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset)); 3437 __ lw(result, FieldMemOperand(function, JSFunction::kContextOffset));
3435 __ lw(result, 3438 __ lw(result,
3436 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX)); 3439 ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
3437 __ lw(result, 3440 __ lw(result,
3438 FieldMemOperand(result, GlobalObject::kGlobalProxyOffset)); 3441 FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
3439 3442
3440 if (result.is(receiver)) { 3443 if (result.is(receiver)) {
(...skipping 14 matching lines...) Expand all
3455 Register length = ToRegister(instr->length()); 3458 Register length = ToRegister(instr->length());
3456 Register elements = ToRegister(instr->elements()); 3459 Register elements = ToRegister(instr->elements());
3457 Register scratch = scratch0(); 3460 Register scratch = scratch0();
3458 DCHECK(receiver.is(a0)); // Used for parameter count. 3461 DCHECK(receiver.is(a0)); // Used for parameter count.
3459 DCHECK(function.is(a1)); // Required by InvokeFunction. 3462 DCHECK(function.is(a1)); // Required by InvokeFunction.
3460 DCHECK(ToRegister(instr->result()).is(v0)); 3463 DCHECK(ToRegister(instr->result()).is(v0));
3461 3464
3462 // Copy the arguments to this function possibly from the 3465 // Copy the arguments to this function possibly from the
3463 // adaptor frame below it. 3466 // adaptor frame below it.
3464 const uint32_t kArgumentsLimit = 1 * KB; 3467 const uint32_t kArgumentsLimit = 1 * KB;
3465 DeoptimizeIf(hi, instr, length, Operand(kArgumentsLimit)); 3468 DeoptimizeIf(hi, instr, "too many arguments", length,
3469 Operand(kArgumentsLimit));
3466 3470
3467 // Push the receiver and use the register to keep the original 3471 // Push the receiver and use the register to keep the original
3468 // number of arguments. 3472 // number of arguments.
3469 __ push(receiver); 3473 __ push(receiver);
3470 __ Move(receiver, length); 3474 __ Move(receiver, length);
3471 // The arguments are at a one pointer size offset from elements. 3475 // The arguments are at a one pointer size offset from elements.
3472 __ Addu(elements, elements, Operand(1 * kPointerSize)); 3476 __ Addu(elements, elements, Operand(1 * kPointerSize));
3473 3477
3474 // Loop through the arguments pushing them onto the execution 3478 // Loop through the arguments pushing them onto the execution
3475 // stack. 3479 // stack.
(...skipping 109 matching lines...) Expand 10 before | Expand all | Expand 10 after
3585 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) { 3589 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
3586 DCHECK(instr->context() != NULL); 3590 DCHECK(instr->context() != NULL);
3587 DCHECK(ToRegister(instr->context()).is(cp)); 3591 DCHECK(ToRegister(instr->context()).is(cp));
3588 Register input = ToRegister(instr->value()); 3592 Register input = ToRegister(instr->value());
3589 Register result = ToRegister(instr->result()); 3593 Register result = ToRegister(instr->result());
3590 Register scratch = scratch0(); 3594 Register scratch = scratch0();
3591 3595
3592 // Deoptimize if not a heap number. 3596 // Deoptimize if not a heap number.
3593 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset)); 3597 __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
3594 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); 3598 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3595 DeoptimizeIf(ne, instr, scratch, Operand(at)); 3599 DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
3596 3600
3597 Label done; 3601 Label done;
3598 Register exponent = scratch0(); 3602 Register exponent = scratch0();
3599 scratch = no_reg; 3603 scratch = no_reg;
3600 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset)); 3604 __ lw(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
3601 // Check the sign of the argument. If the argument is positive, just 3605 // Check the sign of the argument. If the argument is positive, just
3602 // return it. 3606 // return it.
3603 __ Move(result, input); 3607 __ Move(result, input);
3604 __ And(at, exponent, Operand(HeapNumber::kSignMask)); 3608 __ And(at, exponent, Operand(HeapNumber::kSignMask));
3605 __ Branch(&done, eq, at, Operand(zero_reg)); 3609 __ Branch(&done, eq, at, Operand(zero_reg));
(...skipping 46 matching lines...) Expand 10 before | Expand all | Expand 10 after
3652 3656
3653 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) { 3657 void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
3654 Register input = ToRegister(instr->value()); 3658 Register input = ToRegister(instr->value());
3655 Register result = ToRegister(instr->result()); 3659 Register result = ToRegister(instr->result());
3656 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_); 3660 Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
3657 Label done; 3661 Label done;
3658 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg)); 3662 __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
3659 __ mov(result, input); 3663 __ mov(result, input);
3660 __ subu(result, zero_reg, input); 3664 __ subu(result, zero_reg, input);
3661 // Overflow if result is still negative, i.e. 0x80000000. 3665 // Overflow if result is still negative, i.e. 0x80000000.
3662 DeoptimizeIf(lt, instr, result, Operand(zero_reg)); 3666 DeoptimizeIf(lt, instr, "overflow", result, Operand(zero_reg));
3663 __ bind(&done); 3667 __ bind(&done);
3664 } 3668 }
3665 3669
3666 3670
3667 void LCodeGen::DoMathAbs(LMathAbs* instr) { 3671 void LCodeGen::DoMathAbs(LMathAbs* instr) {
3668 // Class for deferred case. 3672 // Class for deferred case.
3669 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode { 3673 class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
3670 public: 3674 public:
3671 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr) 3675 DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
3672 : LDeferredCode(codegen), instr_(instr) { } 3676 : LDeferredCode(codegen), instr_(instr) { }
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
3706 Register except_flag = ToRegister(instr->temp()); 3710 Register except_flag = ToRegister(instr->temp());
3707 3711
3708 __ EmitFPUTruncate(kRoundToMinusInf, 3712 __ EmitFPUTruncate(kRoundToMinusInf,
3709 result, 3713 result,
3710 input, 3714 input,
3711 scratch1, 3715 scratch1,
3712 double_scratch0(), 3716 double_scratch0(),
3713 except_flag); 3717 except_flag);
3714 3718
3715 // Deopt if the operation did not succeed. 3719 // Deopt if the operation did not succeed.
3716 DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg)); 3720 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
3721 Operand(zero_reg));
3717 3722
3718 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3723 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3719 // Test for -0. 3724 // Test for -0.
3720 Label done; 3725 Label done;
3721 __ Branch(&done, ne, result, Operand(zero_reg)); 3726 __ Branch(&done, ne, result, Operand(zero_reg));
3722 __ Mfhc1(scratch1, input); 3727 __ Mfhc1(scratch1, input);
3723 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); 3728 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
3724 DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg)); 3729 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
3725 __ bind(&done); 3730 __ bind(&done);
3726 } 3731 }
3727 } 3732 }
3728 3733
3729 3734
3730 void LCodeGen::DoMathRound(LMathRound* instr) { 3735 void LCodeGen::DoMathRound(LMathRound* instr) {
3731 DoubleRegister input = ToDoubleRegister(instr->value()); 3736 DoubleRegister input = ToDoubleRegister(instr->value());
3732 Register result = ToRegister(instr->result()); 3737 Register result = ToRegister(instr->result());
3733 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp()); 3738 DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
3734 Register scratch = scratch0(); 3739 Register scratch = scratch0();
(...skipping 12 matching lines...) Expand all
3747 __ mov(result, zero_reg); 3752 __ mov(result, zero_reg);
3748 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3753 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3749 __ Branch(&check_sign_on_zero); 3754 __ Branch(&check_sign_on_zero);
3750 } else { 3755 } else {
3751 __ Branch(&done); 3756 __ Branch(&done);
3752 } 3757 }
3753 __ bind(&skip1); 3758 __ bind(&skip1);
3754 3759
3755 // The following conversion will not work with numbers 3760 // The following conversion will not work with numbers
3756 // outside of ]-2^32, 2^32[. 3761 // outside of ]-2^32, 2^32[.
3757 DeoptimizeIf(ge, instr, scratch, Operand(HeapNumber::kExponentBias + 32)); 3762 DeoptimizeIf(ge, instr, "overflow", scratch,
3763 Operand(HeapNumber::kExponentBias + 32));
3758 3764
3759 // Save the original sign for later comparison. 3765 // Save the original sign for later comparison.
3760 __ And(scratch, result, Operand(HeapNumber::kSignMask)); 3766 __ And(scratch, result, Operand(HeapNumber::kSignMask));
3761 3767
3762 __ Move(double_scratch0(), 0.5); 3768 __ Move(double_scratch0(), 0.5);
3763 __ add_d(double_scratch0(), input, double_scratch0()); 3769 __ add_d(double_scratch0(), input, double_scratch0());
3764 3770
3765 // Check sign of the result: if the sign changed, the input 3771 // Check sign of the result: if the sign changed, the input
3766 // value was in ]0.5, 0[ and the result should be -0. 3772 // value was in ]0.5, 0[ and the result should be -0.
3767 __ Mfhc1(result, double_scratch0()); 3773 __ Mfhc1(result, double_scratch0());
3768 __ Xor(result, result, Operand(scratch)); 3774 __ Xor(result, result, Operand(scratch));
3769 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3775 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3770 // ARM uses 'mi' here, which is 'lt' 3776 // ARM uses 'mi' here, which is 'lt'
3771 DeoptimizeIf(lt, instr, result, Operand(zero_reg)); 3777 DeoptimizeIf(lt, instr, "minus zero", result, Operand(zero_reg));
3772 } else { 3778 } else {
3773 Label skip2; 3779 Label skip2;
3774 // ARM uses 'mi' here, which is 'lt' 3780 // ARM uses 'mi' here, which is 'lt'
3775 // Negating it results in 'ge' 3781 // Negating it results in 'ge'
3776 __ Branch(&skip2, ge, result, Operand(zero_reg)); 3782 __ Branch(&skip2, ge, result, Operand(zero_reg));
3777 __ mov(result, zero_reg); 3783 __ mov(result, zero_reg);
3778 __ Branch(&done); 3784 __ Branch(&done);
3779 __ bind(&skip2); 3785 __ bind(&skip2);
3780 } 3786 }
3781 3787
3782 Register except_flag = scratch; 3788 Register except_flag = scratch;
3783 __ EmitFPUTruncate(kRoundToMinusInf, 3789 __ EmitFPUTruncate(kRoundToMinusInf,
3784 result, 3790 result,
3785 double_scratch0(), 3791 double_scratch0(),
3786 at, 3792 at,
3787 double_scratch1, 3793 double_scratch1,
3788 except_flag); 3794 except_flag);
3789 3795
3790 DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg)); 3796 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
3797 Operand(zero_reg));
3791 3798
3792 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 3799 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
3793 // Test for -0. 3800 // Test for -0.
3794 __ Branch(&done, ne, result, Operand(zero_reg)); 3801 __ Branch(&done, ne, result, Operand(zero_reg));
3795 __ bind(&check_sign_on_zero); 3802 __ bind(&check_sign_on_zero);
3796 __ Mfhc1(scratch, input); 3803 __ Mfhc1(scratch, input);
3797 __ And(scratch, scratch, Operand(HeapNumber::kSignMask)); 3804 __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
3798 DeoptimizeIf(ne, instr, scratch, Operand(zero_reg)); 3805 DeoptimizeIf(ne, instr, "minus zero", scratch, Operand(zero_reg));
3799 } 3806 }
3800 __ bind(&done); 3807 __ bind(&done);
3801 } 3808 }
3802 3809
3803 3810
3804 void LCodeGen::DoMathFround(LMathFround* instr) { 3811 void LCodeGen::DoMathFround(LMathFround* instr) {
3805 DoubleRegister input = ToDoubleRegister(instr->value()); 3812 DoubleRegister input = ToDoubleRegister(instr->value());
3806 DoubleRegister result = ToDoubleRegister(instr->result()); 3813 DoubleRegister result = ToDoubleRegister(instr->result());
3807 __ cvt_s_d(result.low(), input); 3814 __ cvt_s_d(result.low(), input);
3808 __ cvt_d_s(result, result.low()); 3815 __ cvt_d_s(result, result.low());
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
3854 3861
3855 if (exponent_type.IsSmi()) { 3862 if (exponent_type.IsSmi()) {
3856 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3863 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3857 __ CallStub(&stub); 3864 __ CallStub(&stub);
3858 } else if (exponent_type.IsTagged()) { 3865 } else if (exponent_type.IsTagged()) {
3859 Label no_deopt; 3866 Label no_deopt;
3860 __ JumpIfSmi(tagged_exponent, &no_deopt); 3867 __ JumpIfSmi(tagged_exponent, &no_deopt);
3861 DCHECK(!t3.is(tagged_exponent)); 3868 DCHECK(!t3.is(tagged_exponent));
3862 __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset)); 3869 __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
3863 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); 3870 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
3864 DeoptimizeIf(ne, instr, t3, Operand(at)); 3871 DeoptimizeIf(ne, instr, "not a heap number", t3, Operand(at));
3865 __ bind(&no_deopt); 3872 __ bind(&no_deopt);
3866 MathPowStub stub(isolate(), MathPowStub::TAGGED); 3873 MathPowStub stub(isolate(), MathPowStub::TAGGED);
3867 __ CallStub(&stub); 3874 __ CallStub(&stub);
3868 } else if (exponent_type.IsInteger32()) { 3875 } else if (exponent_type.IsInteger32()) {
3869 MathPowStub stub(isolate(), MathPowStub::INTEGER); 3876 MathPowStub stub(isolate(), MathPowStub::INTEGER);
3870 __ CallStub(&stub); 3877 __ CallStub(&stub);
3871 } else { 3878 } else {
3872 DCHECK(exponent_type.IsDouble()); 3879 DCHECK(exponent_type.IsDouble());
3873 MathPowStub stub(isolate(), MathPowStub::DOUBLE); 3880 MathPowStub stub(isolate(), MathPowStub::DOUBLE);
3874 __ CallStub(&stub); 3881 __ CallStub(&stub);
(...skipping 322 matching lines...) Expand 10 before | Expand all | Expand 10 after
4197 } else { 4204 } else {
4198 reg = ToRegister(instr->index()); 4205 reg = ToRegister(instr->index());
4199 operand = ToOperand(instr->length()); 4206 operand = ToOperand(instr->length());
4200 } 4207 }
4201 if (FLAG_debug_code && instr->hydrogen()->skip_check()) { 4208 if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
4202 Label done; 4209 Label done;
4203 __ Branch(&done, NegateCondition(cc), reg, operand); 4210 __ Branch(&done, NegateCondition(cc), reg, operand);
4204 __ stop("eliminated bounds check failed"); 4211 __ stop("eliminated bounds check failed");
4205 __ bind(&done); 4212 __ bind(&done);
4206 } else { 4213 } else {
4207 DeoptimizeIf(cc, instr, reg, operand); 4214 DeoptimizeIf(cc, instr, "out of bounds", reg, operand);
4208 } 4215 }
4209 } 4216 }
4210 4217
4211 4218
4212 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) { 4219 void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
4213 Register external_pointer = ToRegister(instr->elements()); 4220 Register external_pointer = ToRegister(instr->elements());
4214 Register key = no_reg; 4221 Register key = no_reg;
4215 ElementsKind elements_kind = instr->elements_kind(); 4222 ElementsKind elements_kind = instr->elements_kind();
4216 bool key_is_constant = instr->key()->IsConstantOperand(); 4223 bool key_is_constant = instr->key()->IsConstantOperand();
4217 int constant_key = 0; 4224 int constant_key = 0;
(...skipping 572 matching lines...) Expand 10 before | Expand all | Expand 10 after
4790 } 4797 }
4791 4798
4792 4799
4793 void LCodeGen::DoSmiTag(LSmiTag* instr) { 4800 void LCodeGen::DoSmiTag(LSmiTag* instr) {
4794 HChange* hchange = instr->hydrogen(); 4801 HChange* hchange = instr->hydrogen();
4795 Register input = ToRegister(instr->value()); 4802 Register input = ToRegister(instr->value());
4796 Register output = ToRegister(instr->result()); 4803 Register output = ToRegister(instr->result());
4797 if (hchange->CheckFlag(HValue::kCanOverflow) && 4804 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4798 hchange->value()->CheckFlag(HValue::kUint32)) { 4805 hchange->value()->CheckFlag(HValue::kUint32)) {
4799 __ And(at, input, Operand(0xc0000000)); 4806 __ And(at, input, Operand(0xc0000000));
4800 DeoptimizeIf(ne, instr, at, Operand(zero_reg)); 4807 DeoptimizeIf(ne, instr, "overflow", at, Operand(zero_reg));
4801 } 4808 }
4802 if (hchange->CheckFlag(HValue::kCanOverflow) && 4809 if (hchange->CheckFlag(HValue::kCanOverflow) &&
4803 !hchange->value()->CheckFlag(HValue::kUint32)) { 4810 !hchange->value()->CheckFlag(HValue::kUint32)) {
4804 __ SmiTagCheckOverflow(output, input, at); 4811 __ SmiTagCheckOverflow(output, input, at);
4805 DeoptimizeIf(lt, instr, at, Operand(zero_reg)); 4812 DeoptimizeIf(lt, instr, "overflow", at, Operand(zero_reg));
4806 } else { 4813 } else {
4807 __ SmiTag(output, input); 4814 __ SmiTag(output, input);
4808 } 4815 }
4809 } 4816 }
4810 4817
4811 4818
4812 void LCodeGen::DoSmiUntag(LSmiUntag* instr) { 4819 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
4813 Register scratch = scratch0(); 4820 Register scratch = scratch0();
4814 Register input = ToRegister(instr->value()); 4821 Register input = ToRegister(instr->value());
4815 Register result = ToRegister(instr->result()); 4822 Register result = ToRegister(instr->result());
4816 if (instr->needs_check()) { 4823 if (instr->needs_check()) {
4817 STATIC_ASSERT(kHeapObjectTag == 1); 4824 STATIC_ASSERT(kHeapObjectTag == 1);
4818 // If the input is a HeapObject, value of scratch won't be zero. 4825 // If the input is a HeapObject, value of scratch won't be zero.
4819 __ And(scratch, input, Operand(kHeapObjectTag)); 4826 __ And(scratch, input, Operand(kHeapObjectTag));
4820 __ SmiUntag(result, input); 4827 __ SmiUntag(result, input);
4821 DeoptimizeIf(ne, instr, scratch, Operand(zero_reg)); 4828 DeoptimizeIf(ne, instr, "not a Smi", scratch, Operand(zero_reg));
4822 } else { 4829 } else {
4823 __ SmiUntag(result, input); 4830 __ SmiUntag(result, input);
4824 } 4831 }
4825 } 4832 }
4826 4833
4827 4834
4828 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg, 4835 void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
4829 DoubleRegister result_reg, 4836 DoubleRegister result_reg,
4830 NumberUntagDMode mode) { 4837 NumberUntagDMode mode) {
4831 bool can_convert_undefined_to_nan = 4838 bool can_convert_undefined_to_nan =
4832 instr->hydrogen()->can_convert_undefined_to_nan(); 4839 instr->hydrogen()->can_convert_undefined_to_nan();
4833 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero(); 4840 bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
4834 4841
4835 Register scratch = scratch0(); 4842 Register scratch = scratch0();
4836 Label convert, load_smi, done; 4843 Label convert, load_smi, done;
4837 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) { 4844 if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
4838 // Smi check. 4845 // Smi check.
4839 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi); 4846 __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
4840 // Heap number map check. 4847 // Heap number map check.
4841 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 4848 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
4842 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); 4849 __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
4843 if (can_convert_undefined_to_nan) { 4850 if (can_convert_undefined_to_nan) {
4844 __ Branch(&convert, ne, scratch, Operand(at)); 4851 __ Branch(&convert, ne, scratch, Operand(at));
4845 } else { 4852 } else {
4846 DeoptimizeIf(ne, instr, scratch, Operand(at)); 4853 DeoptimizeIf(ne, instr, "not a heap number", scratch, Operand(at));
4847 } 4854 }
4848 // Load heap number. 4855 // Load heap number.
4849 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset)); 4856 __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4850 if (deoptimize_on_minus_zero) { 4857 if (deoptimize_on_minus_zero) {
4851 __ mfc1(at, result_reg.low()); 4858 __ mfc1(at, result_reg.low());
4852 __ Branch(&done, ne, at, Operand(zero_reg)); 4859 __ Branch(&done, ne, at, Operand(zero_reg));
4853 __ Mfhc1(scratch, result_reg); 4860 __ Mfhc1(scratch, result_reg);
4854 DeoptimizeIf(eq, instr, scratch, Operand(HeapNumber::kSignMask)); 4861 DeoptimizeIf(eq, instr, "minus zero", scratch,
4862 Operand(HeapNumber::kSignMask));
4855 } 4863 }
4856 __ Branch(&done); 4864 __ Branch(&done);
4857 if (can_convert_undefined_to_nan) { 4865 if (can_convert_undefined_to_nan) {
4858 __ bind(&convert); 4866 __ bind(&convert);
4859 // Convert undefined (and hole) to NaN. 4867 // Convert undefined (and hole) to NaN.
4860 __ LoadRoot(at, Heap::kUndefinedValueRootIndex); 4868 __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
4861 DeoptimizeIf(ne, instr, input_reg, Operand(at)); 4869 DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
4870 Operand(at));
4862 __ LoadRoot(scratch, Heap::kNanValueRootIndex); 4871 __ LoadRoot(scratch, Heap::kNanValueRootIndex);
4863 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset)); 4872 __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
4864 __ Branch(&done); 4873 __ Branch(&done);
4865 } 4874 }
4866 } else { 4875 } else {
4867 __ SmiUntag(scratch, input_reg); 4876 __ SmiUntag(scratch, input_reg);
4868 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI); 4877 DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
4869 } 4878 }
4870 // Smi to double register conversion 4879 // Smi to double register conversion
4871 __ bind(&load_smi); 4880 __ bind(&load_smi);
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
4915 __ mov(input_reg, zero_reg); // In delay slot. 4924 __ mov(input_reg, zero_reg); // In delay slot.
4916 4925
4917 __ bind(&check_bools); 4926 __ bind(&check_bools);
4918 __ LoadRoot(at, Heap::kTrueValueRootIndex); 4927 __ LoadRoot(at, Heap::kTrueValueRootIndex);
4919 __ Branch(&check_false, ne, scratch2, Operand(at)); 4928 __ Branch(&check_false, ne, scratch2, Operand(at));
4920 __ Branch(USE_DELAY_SLOT, &done); 4929 __ Branch(USE_DELAY_SLOT, &done);
4921 __ li(input_reg, Operand(1)); // In delay slot. 4930 __ li(input_reg, Operand(1)); // In delay slot.
4922 4931
4923 __ bind(&check_false); 4932 __ bind(&check_false);
4924 __ LoadRoot(at, Heap::kFalseValueRootIndex); 4933 __ LoadRoot(at, Heap::kFalseValueRootIndex);
4925 DeoptimizeIf(ne, instr, scratch2, Operand(at), "cannot truncate"); 4934 DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", scratch2,
4935 Operand(at));
4926 __ Branch(USE_DELAY_SLOT, &done); 4936 __ Branch(USE_DELAY_SLOT, &done);
4927 __ mov(input_reg, zero_reg); // In delay slot. 4937 __ mov(input_reg, zero_reg); // In delay slot.
4928 } else { 4938 } else {
4929 DeoptimizeIf(ne, instr, scratch1, Operand(at), "not a heap number"); 4939 DeoptimizeIf(ne, instr, "not a heap number", scratch1, Operand(at));
4930 4940
4931 // Load the double value. 4941 // Load the double value.
4932 __ ldc1(double_scratch, 4942 __ ldc1(double_scratch,
4933 FieldMemOperand(input_reg, HeapNumber::kValueOffset)); 4943 FieldMemOperand(input_reg, HeapNumber::kValueOffset));
4934 4944
4935 Register except_flag = scratch2; 4945 Register except_flag = scratch2;
4936 __ EmitFPUTruncate(kRoundToZero, 4946 __ EmitFPUTruncate(kRoundToZero,
4937 input_reg, 4947 input_reg,
4938 double_scratch, 4948 double_scratch,
4939 scratch1, 4949 scratch1,
4940 double_scratch2, 4950 double_scratch2,
4941 except_flag, 4951 except_flag,
4942 kCheckForInexactConversion); 4952 kCheckForInexactConversion);
4943 4953
4944 DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg), 4954 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
4945 "lost precision or NaN"); 4955 Operand(zero_reg));
4946 4956
4947 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 4957 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
4948 __ Branch(&done, ne, input_reg, Operand(zero_reg)); 4958 __ Branch(&done, ne, input_reg, Operand(zero_reg));
4949 4959
4950 __ Mfhc1(scratch1, double_scratch); 4960 __ Mfhc1(scratch1, double_scratch);
4951 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); 4961 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
4952 DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg), "minus zero"); 4962 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
4953 } 4963 }
4954 } 4964 }
4955 __ bind(&done); 4965 __ bind(&done);
4956 } 4966 }
4957 4967
4958 4968
4959 void LCodeGen::DoTaggedToI(LTaggedToI* instr) { 4969 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
4960 class DeferredTaggedToI FINAL : public LDeferredCode { 4970 class DeferredTaggedToI FINAL : public LDeferredCode {
4961 public: 4971 public:
4962 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr) 4972 DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after
5019 5029
5020 __ EmitFPUTruncate(kRoundToMinusInf, 5030 __ EmitFPUTruncate(kRoundToMinusInf,
5021 result_reg, 5031 result_reg,
5022 double_input, 5032 double_input,
5023 scratch1, 5033 scratch1,
5024 double_scratch0(), 5034 double_scratch0(),
5025 except_flag, 5035 except_flag,
5026 kCheckForInexactConversion); 5036 kCheckForInexactConversion);
5027 5037
5028 // Deopt if the operation did not succeed (except_flag != 0). 5038 // Deopt if the operation did not succeed (except_flag != 0).
5029 DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg)); 5039 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
5040 Operand(zero_reg));
5030 5041
5031 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5042 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5032 Label done; 5043 Label done;
5033 __ Branch(&done, ne, result_reg, Operand(zero_reg)); 5044 __ Branch(&done, ne, result_reg, Operand(zero_reg));
5034 __ Mfhc1(scratch1, double_input); 5045 __ Mfhc1(scratch1, double_input);
5035 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); 5046 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
5036 DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg)); 5047 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
5037 __ bind(&done); 5048 __ bind(&done);
5038 } 5049 }
5039 } 5050 }
5040 } 5051 }
5041 5052
5042 5053
5043 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) { 5054 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
5044 Register result_reg = ToRegister(instr->result()); 5055 Register result_reg = ToRegister(instr->result());
5045 Register scratch1 = LCodeGen::scratch0(); 5056 Register scratch1 = LCodeGen::scratch0();
5046 DoubleRegister double_input = ToDoubleRegister(instr->value()); 5057 DoubleRegister double_input = ToDoubleRegister(instr->value());
5047 5058
5048 if (instr->truncating()) { 5059 if (instr->truncating()) {
5049 __ TruncateDoubleToI(result_reg, double_input); 5060 __ TruncateDoubleToI(result_reg, double_input);
5050 } else { 5061 } else {
5051 Register except_flag = LCodeGen::scratch1(); 5062 Register except_flag = LCodeGen::scratch1();
5052 5063
5053 __ EmitFPUTruncate(kRoundToMinusInf, 5064 __ EmitFPUTruncate(kRoundToMinusInf,
5054 result_reg, 5065 result_reg,
5055 double_input, 5066 double_input,
5056 scratch1, 5067 scratch1,
5057 double_scratch0(), 5068 double_scratch0(),
5058 except_flag, 5069 except_flag,
5059 kCheckForInexactConversion); 5070 kCheckForInexactConversion);
5060 5071
5061 // Deopt if the operation did not succeed (except_flag != 0). 5072 // Deopt if the operation did not succeed (except_flag != 0).
5062 DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg)); 5073 DeoptimizeIf(ne, instr, "lost precision or NaN", except_flag,
5074 Operand(zero_reg));
5063 5075
5064 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) { 5076 if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
5065 Label done; 5077 Label done;
5066 __ Branch(&done, ne, result_reg, Operand(zero_reg)); 5078 __ Branch(&done, ne, result_reg, Operand(zero_reg));
5067 __ Mfhc1(scratch1, double_input); 5079 __ Mfhc1(scratch1, double_input);
5068 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask)); 5080 __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
5069 DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg)); 5081 DeoptimizeIf(ne, instr, "minus zero", scratch1, Operand(zero_reg));
5070 __ bind(&done); 5082 __ bind(&done);
5071 } 5083 }
5072 } 5084 }
5073 __ SmiTagCheckOverflow(result_reg, result_reg, scratch1); 5085 __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
5074 DeoptimizeIf(lt, instr, scratch1, Operand(zero_reg)); 5086 DeoptimizeIf(lt, instr, "overflow", scratch1, Operand(zero_reg));
5075 } 5087 }
5076 5088
5077 5089
5078 void LCodeGen::DoCheckSmi(LCheckSmi* instr) { 5090 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
5079 LOperand* input = instr->value(); 5091 LOperand* input = instr->value();
5080 __ SmiTst(ToRegister(input), at); 5092 __ SmiTst(ToRegister(input), at);
5081 DeoptimizeIf(ne, instr, at, Operand(zero_reg)); 5093 DeoptimizeIf(ne, instr, "not a Smi", at, Operand(zero_reg));
5082 } 5094 }
5083 5095
5084 5096
5085 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) { 5097 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
5086 if (!instr->hydrogen()->value()->type().IsHeapObject()) { 5098 if (!instr->hydrogen()->value()->type().IsHeapObject()) {
5087 LOperand* input = instr->value(); 5099 LOperand* input = instr->value();
5088 __ SmiTst(ToRegister(input), at); 5100 __ SmiTst(ToRegister(input), at);
5089 DeoptimizeIf(eq, instr, at, Operand(zero_reg)); 5101 DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));
5090 } 5102 }
5091 } 5103 }
5092 5104
5093 5105
5094 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) { 5106 void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
5095 Register input = ToRegister(instr->value()); 5107 Register input = ToRegister(instr->value());
5096 Register scratch = scratch0(); 5108 Register scratch = scratch0();
5097 5109
5098 __ GetObjectType(input, scratch, scratch); 5110 __ GetObjectType(input, scratch, scratch);
5099 5111
5100 if (instr->hydrogen()->is_interval_check()) { 5112 if (instr->hydrogen()->is_interval_check()) {
5101 InstanceType first; 5113 InstanceType first;
5102 InstanceType last; 5114 InstanceType last;
5103 instr->hydrogen()->GetCheckInterval(&first, &last); 5115 instr->hydrogen()->GetCheckInterval(&first, &last);
5104 5116
5105 // If there is only one type in the interval check for equality. 5117 // If there is only one type in the interval check for equality.
5106 if (first == last) { 5118 if (first == last) {
5107 DeoptimizeIf(ne, instr, scratch, Operand(first)); 5119 DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(first));
5108 } else { 5120 } else {
5109 DeoptimizeIf(lo, instr, scratch, Operand(first)); 5121 DeoptimizeIf(lo, instr, "wrong instance type", scratch, Operand(first));
5110 // Omit check for the last type. 5122 // Omit check for the last type.
5111 if (last != LAST_TYPE) { 5123 if (last != LAST_TYPE) {
5112 DeoptimizeIf(hi, instr, scratch, Operand(last)); 5124 DeoptimizeIf(hi, instr, "wrong instance type", scratch, Operand(last));
5113 } 5125 }
5114 } 5126 }
5115 } else { 5127 } else {
5116 uint8_t mask; 5128 uint8_t mask;
5117 uint8_t tag; 5129 uint8_t tag;
5118 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag); 5130 instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
5119 5131
5120 if (base::bits::IsPowerOfTwo32(mask)) { 5132 if (base::bits::IsPowerOfTwo32(mask)) {
5121 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag)); 5133 DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
5122 __ And(at, scratch, mask); 5134 __ And(at, scratch, mask);
5123 DeoptimizeIf(tag == 0 ? ne : eq, instr, at, Operand(zero_reg)); 5135 DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", at,
5136 Operand(zero_reg));
5124 } else { 5137 } else {
5125 __ And(scratch, scratch, Operand(mask)); 5138 __ And(scratch, scratch, Operand(mask));
5126 DeoptimizeIf(ne, instr, scratch, Operand(tag)); 5139 DeoptimizeIf(ne, instr, "wrong instance type", scratch, Operand(tag));
5127 } 5140 }
5128 } 5141 }
5129 } 5142 }
5130 5143
5131 5144
5132 void LCodeGen::DoCheckValue(LCheckValue* instr) { 5145 void LCodeGen::DoCheckValue(LCheckValue* instr) {
5133 Register reg = ToRegister(instr->value()); 5146 Register reg = ToRegister(instr->value());
5134 Handle<HeapObject> object = instr->hydrogen()->object().handle(); 5147 Handle<HeapObject> object = instr->hydrogen()->object().handle();
5135 AllowDeferredHandleDereference smi_check; 5148 AllowDeferredHandleDereference smi_check;
5136 if (isolate()->heap()->InNewSpace(*object)) { 5149 if (isolate()->heap()->InNewSpace(*object)) {
5137 Register reg = ToRegister(instr->value()); 5150 Register reg = ToRegister(instr->value());
5138 Handle<Cell> cell = isolate()->factory()->NewCell(object); 5151 Handle<Cell> cell = isolate()->factory()->NewCell(object);
5139 __ li(at, Operand(Handle<Object>(cell))); 5152 __ li(at, Operand(Handle<Object>(cell)));
5140 __ lw(at, FieldMemOperand(at, Cell::kValueOffset)); 5153 __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
5141 DeoptimizeIf(ne, instr, reg, Operand(at)); 5154 DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(at));
5142 } else { 5155 } else {
5143 DeoptimizeIf(ne, instr, reg, Operand(object)); 5156 DeoptimizeIf(ne, instr, "value mismatch", reg, Operand(object));
5144 } 5157 }
5145 } 5158 }
5146 5159
5147 5160
5148 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) { 5161 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
5149 { 5162 {
5150 PushSafepointRegistersScope scope(this); 5163 PushSafepointRegistersScope scope(this);
5151 __ push(object); 5164 __ push(object);
5152 __ mov(cp, zero_reg); 5165 __ mov(cp, zero_reg);
5153 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance); 5166 __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
5154 RecordSafepointWithRegisters( 5167 RecordSafepointWithRegisters(
5155 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt); 5168 instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
5156 __ StoreToSafepointRegisterSlot(v0, scratch0()); 5169 __ StoreToSafepointRegisterSlot(v0, scratch0());
5157 } 5170 }
5158 __ SmiTst(scratch0(), at); 5171 __ SmiTst(scratch0(), at);
5159 DeoptimizeIf(eq, instr, at, Operand(zero_reg)); 5172 DeoptimizeIf(eq, instr, "instance migration failed", at, Operand(zero_reg));
5160 } 5173 }
5161 5174
5162 5175
5163 void LCodeGen::DoCheckMaps(LCheckMaps* instr) { 5176 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
5164 class DeferredCheckMaps FINAL : public LDeferredCode { 5177 class DeferredCheckMaps FINAL : public LDeferredCode {
5165 public: 5178 public:
5166 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object) 5179 DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
5167 : LDeferredCode(codegen), instr_(instr), object_(object) { 5180 : LDeferredCode(codegen), instr_(instr), object_(object) {
5168 SetExit(check_maps()); 5181 SetExit(check_maps());
5169 } 5182 }
(...skipping 32 matching lines...) Expand 10 before | Expand all | Expand 10 after
5202 Label success; 5215 Label success;
5203 for (int i = 0; i < maps->size() - 1; i++) { 5216 for (int i = 0; i < maps->size() - 1; i++) {
5204 Handle<Map> map = maps->at(i).handle(); 5217 Handle<Map> map = maps->at(i).handle();
5205 __ CompareMapAndBranch(map_reg, map, &success, eq, &success); 5218 __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
5206 } 5219 }
5207 Handle<Map> map = maps->at(maps->size() - 1).handle(); 5220 Handle<Map> map = maps->at(maps->size() - 1).handle();
5208 // Do the CompareMap() directly within the Branch() and DeoptimizeIf(). 5221 // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
5209 if (instr->hydrogen()->HasMigrationTarget()) { 5222 if (instr->hydrogen()->HasMigrationTarget()) {
5210 __ Branch(deferred->entry(), ne, map_reg, Operand(map)); 5223 __ Branch(deferred->entry(), ne, map_reg, Operand(map));
5211 } else { 5224 } else {
5212 DeoptimizeIf(ne, instr, map_reg, Operand(map)); 5225 DeoptimizeIf(ne, instr, "wrong map", map_reg, Operand(map));
5213 } 5226 }
5214 5227
5215 __ bind(&success); 5228 __ bind(&success);
5216 } 5229 }
5217 5230
5218 5231
5219 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) { 5232 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
5220 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped()); 5233 DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
5221 Register result_reg = ToRegister(instr->result()); 5234 Register result_reg = ToRegister(instr->result());
5222 DoubleRegister temp_reg = ToDoubleRegister(instr->temp()); 5235 DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
(...skipping 17 matching lines...) Expand all
5240 5253
5241 // Both smi and heap number cases are handled. 5254 // Both smi and heap number cases are handled.
5242 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi); 5255 __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
5243 5256
5244 // Check for heap number 5257 // Check for heap number
5245 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset)); 5258 __ lw(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
5246 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map())); 5259 __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
5247 5260
5248 // Check for undefined. Undefined is converted to zero for clamping 5261 // Check for undefined. Undefined is converted to zero for clamping
5249 // conversions. 5262 // conversions.
5250 DeoptimizeIf(ne, instr, input_reg, Operand(factory()->undefined_value())); 5263 DeoptimizeIf(ne, instr, "not a heap number/undefined", input_reg,
5264 Operand(factory()->undefined_value()));
5251 __ mov(result_reg, zero_reg); 5265 __ mov(result_reg, zero_reg);
5252 __ jmp(&done); 5266 __ jmp(&done);
5253 5267
5254 // Heap number 5268 // Heap number
5255 __ bind(&heap_number); 5269 __ bind(&heap_number);
5256 __ ldc1(double_scratch0(), FieldMemOperand(input_reg, 5270 __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
5257 HeapNumber::kValueOffset)); 5271 HeapNumber::kValueOffset));
5258 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg); 5272 __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
5259 __ jmp(&done); 5273 __ jmp(&done);
5260 5274
(...skipping 402 matching lines...) Expand 10 before | Expand all | Expand 10 after
// Emits an unconditional deoptimization for an explicit LDeoptimize
// instruction, forwarding the hydrogen-provided reason string so the deopt
// trace reports why the bailout was generated.
void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
  Deoptimizer::BailoutType type = instr->hydrogen()->type();
  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
  // needed return address), even though the implementation of LAZY and EAGER is
  // now identical. When LAZY is eventually completely folded into EAGER, remove
  // the special case below.
  if (info()->IsStub() && type == Deoptimizer::EAGER) {
    type = Deoptimizer::LAZY;
  }

  // "al" (always) with zero_reg compared against zero makes the bailout
  // unconditional; the register/operand pair is only there to satisfy the
  // DeoptimizeIf signature.
  DeoptimizeIf(al, instr, type, instr->hydrogen()->reason(), zero_reg,
               Operand(zero_reg));
}
5676 5690
5677 5691
// LDummy intentionally emits no code.
void LCodeGen::DoDummy(LDummy* instr) {
  // Nothing to see here, move on!
}
5681 5695
5682 5696
5683 void LCodeGen::DoDummyUse(LDummyUse* instr) { 5697 void LCodeGen::DoDummyUse(LDummyUse* instr) {
5684 // Nothing to see here, move on! 5698 // Nothing to see here, move on!
(...skipping 70 matching lines...) Expand 10 before | Expand all | Expand 10 after
5755 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt); 5769 RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
5756 5770
5757 GenerateOsrPrologue(); 5771 GenerateOsrPrologue();
5758 } 5772 }
5759 5773
5760 5774
// Prepares the map/cache used by for-in iteration. Deopts (with a distinct
// reason string for each case) when the enumerated value is undefined, null,
// a Smi, or a JSProxy, since those cannot use the fast enum-cache path.
void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
  Register result = ToRegister(instr->result());
  Register object = ToRegister(instr->object());
  // Bail out on undefined and null: for-in over these has no enumerable
  // properties on the fast path.
  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
  DeoptimizeIf(eq, instr, "undefined", object, Operand(at));

  Register null_value = t1;
  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
  DeoptimizeIf(eq, instr, "null", object, Operand(null_value));

  // Smis have no map to enumerate from.
  __ And(at, object, kSmiTagMask);
  DeoptimizeIf(eq, instr, "Smi", at, Operand(zero_reg));

  // Reject proxies (and anything below the first spec-object type).
  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  __ GetObjectType(object, a1, a1);
  DeoptimizeIf(le, instr, "not a JavaScript object", a1,
               Operand(LAST_JS_PROXY_TYPE));

  Label use_cache, call_runtime;
  DCHECK(object.is(a0));
  // Fast path: if the enum cache is valid, the object's map itself is the
  // result; otherwise fall through to the runtime call.
  __ CheckEnumCache(null_value, &call_runtime);

  __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
  __ Branch(&use_cache);

  // Get the set of properties to enumerate.
  __ bind(&call_runtime);
  __ push(object);
  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);

  // The runtime result is expected to be a Map (its own map is the meta map);
  // anything else means the slow path produced a property-name array and we
  // must deopt out of the cached iteration.
  __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
  DCHECK(result.is(v0));
  __ LoadRoot(at, Heap::kMetaMapRootIndex);
  DeoptimizeIf(ne, instr, "wrong map", a1, Operand(at));
  __ bind(&use_cache);
}
5796 5811
5797 5812
// Loads the enum cache (the array of enumerable property names) for the given
// map. An EnumLength of zero yields the canonical empty fixed array; a missing
// cache entry deopts with reason "no cache".
void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
  Register map = ToRegister(instr->map());
  Register result = ToRegister(instr->result());
  Label load_cache, done;
  __ EnumLength(result, map);
  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
  // No enumerable properties: hand back the shared empty fixed array.
  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
  __ jmp(&done);

  __ bind(&load_cache);
  // Walk map -> instance descriptors -> enum cache -> cache slot at idx().
  __ LoadInstanceDescriptors(map, result);
  __ lw(result,
        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
  __ lw(result,
        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
  // A null/zero slot means the cache was not populated for this map.
  DeoptimizeIf(eq, instr, "no cache", result, Operand(zero_reg));

  __ bind(&done);
}
5817 5832
5818 5833
// Deopts (reason "wrong map") if the object's current map differs from the
// expected map value; used to guard that the object layout has not changed
// mid-iteration.
void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
  Register object = ToRegister(instr->value());
  Register map = ToRegister(instr->map());
  __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
  DeoptimizeIf(ne, instr, "wrong map", map, Operand(scratch0()));
}
5825 5840
5826 5841
5827 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, 5842 void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
5828 Register result, 5843 Register result,
5829 Register object, 5844 Register object,
5830 Register index) { 5845 Register index) {
5831 PushSafepointRegistersScope scope(this); 5846 PushSafepointRegistersScope scope(this);
5832 __ Push(object, index); 5847 __ Push(object, index);
5833 __ mov(cp, zero_reg); 5848 __ mov(cp, zero_reg);
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after
5909 __ li(at, scope_info); 5924 __ li(at, scope_info);
5910 __ Push(at, ToRegister(instr->function())); 5925 __ Push(at, ToRegister(instr->function()));
5911 CallRuntime(Runtime::kPushBlockContext, 2, instr); 5926 CallRuntime(Runtime::kPushBlockContext, 2, instr);
5912 RecordSafepoint(Safepoint::kNoLazyDeopt); 5927 RecordSafepoint(Safepoint::kNoLazyDeopt);
5913 } 5928 }
5914 5929
5915 5930
5916 #undef __ 5931 #undef __
5917 5932
5918 } } // namespace v8::internal 5933 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/mips/lithium-codegen-mips.h ('k') | src/mips64/lithium-codegen-mips64.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698